From a8150021e01d34ecbd6198fe03a57eacf47a16f2 Mon Sep 17 00:00:00 2001 From: Fangjun Kuang Date: Mon, 7 Feb 2022 18:37:36 +0800 Subject: [PATCH 01/25] Use modified transducer loss in training. (#179) * Use modified transducer loss in training. * Minor fix. * Add modified beam search. * Add modified beam search. * Minor fixes. * Fix typo. * Update RESULTS. * Fix a typo. * Minor fixes. --- .../run-pretrained-transducer-stateless.yml | 71 +++++++-- README.md | 6 +- egs/librispeech/ASR/RESULTS.md | 53 ++++--- .../ASR/transducer_stateless/beam_search.py | 140 ++++++++++++++++-- .../ASR/transducer_stateless/decode.py | 21 ++- .../ASR/transducer_stateless/decoder.py | 2 +- .../ASR/transducer_stateless/model.py | 16 ++ .../ASR/transducer_stateless/pretrained.py | 22 ++- .../ASR/transducer_stateless/train.py | 18 ++- 9 files changed, 288 insertions(+), 61 deletions(-) diff --git a/.github/workflows/run-pretrained-transducer-stateless.yml b/.github/workflows/run-pretrained-transducer-stateless.yml index 5f4a425d9..de66b90c5 100644 --- a/.github/workflows/run-pretrained-transducer-stateless.yml +++ b/.github/workflows/run-pretrained-transducer-stateless.yml @@ -74,24 +74,53 @@ jobs: mkdir tmp cd tmp git lfs install - git clone https://huggingface.co/csukuangfj/icefall-asr-librispeech-transducer-stateless-bpe-500-2022-01-10 + git clone https://huggingface.co/csukuangfj/icefall-asr-librispeech-transducer-stateless-bpe-500-2022-02-07 cd .. tree tmp - soxi tmp/icefall-asr-librispeech-transducer-stateless-bpe-500-2022-01-10/test_wavs/*.wav - ls -lh tmp/icefall-asr-librispeech-transducer-stateless-bpe-500-2022-01-10/test_wavs/*.wav + soxi tmp/icefall-asr-librispeech-transducer-stateless-bpe-500-2022-02-07/test_wavs/*.wav + ls -lh tmp/icefall-asr-librispeech-transducer-stateless-bpe-500-2022-02-07/test_wavs/*.wav - - name: Run greedy search decoding + - name: Run greedy search decoding (max-sym-per-frame 1) shell: bash run: | export PYTHONPATH=$PWD:PYTHONPATH cd egs/librispeech/ASR ./transducer_stateless/pretrained.py \ --method greedy_search \ - --checkpoint ./tmp/icefall-asr-librispeech-transducer-stateless-bpe-500-2022-01-10/exp/pretrained.pt \ - --bpe-model ./tmp/icefall-asr-librispeech-transducer-stateless-bpe-500-2022-01-10/data/lang_bpe_500/bpe.model \ - ./tmp/icefall-asr-librispeech-transducer-stateless-bpe-500-2022-01-10/test_wavs/1089-134686-0001.wav \ - ./tmp/icefall-asr-librispeech-transducer-stateless-bpe-500-2022-01-10/test_wavs/1221-135766-0001.wav \ - ./tmp/icefall-asr-librispeech-transducer-stateless-bpe-500-2022-01-10/test_wavs/1221-135766-0002.wav + --max-sym-per-frame 1 \ + --checkpoint ./tmp/icefall-asr-librispeech-transducer-stateless-bpe-500-2022-02-07/exp/pretrained.pt \ + --bpe-model ./tmp/icefall-asr-librispeech-transducer-stateless-bpe-500-2022-02-07/data/lang_bpe_500/bpe.model \ + ./tmp/icefall-asr-librispeech-transducer-stateless-bpe-500-2022-02-07/test_wavs/1089-134686-0001.wav \ + ./tmp/icefall-asr-librispeech-transducer-stateless-bpe-500-2022-02-07/test_wavs/1221-135766-0001.wav \ + ./tmp/icefall-asr-librispeech-transducer-stateless-bpe-500-2022-02-07/test_wavs/1221-135766-0002.wav + + - name: Run greedy search decoding (max-sym-per-frame 2) + shell: bash + run: | + export PYTHONPATH=$PWD:PYTHONPATH + cd egs/librispeech/ASR + ./transducer_stateless/pretrained.py \ + --method greedy_search \ + --max-sym-per-frame 2 \ + --checkpoint ./tmp/icefall-asr-librispeech-transducer-stateless-bpe-500-2022-02-07/exp/pretrained.pt \ + --bpe-model 
./tmp/icefall-asr-librispeech-transducer-stateless-bpe-500-2022-02-07/data/lang_bpe_500/bpe.model \ + ./tmp/icefall-asr-librispeech-transducer-stateless-bpe-500-2022-02-07/test_wavs/1089-134686-0001.wav \ + ./tmp/icefall-asr-librispeech-transducer-stateless-bpe-500-2022-02-07/test_wavs/1221-135766-0001.wav \ + ./tmp/icefall-asr-librispeech-transducer-stateless-bpe-500-2022-02-07/test_wavs/1221-135766-0002.wav + + - name: Run greedy search decoding (max-sym-per-frame 3) + shell: bash + run: | + export PYTHONPATH=$PWD:PYTHONPATH + cd egs/librispeech/ASR + ./transducer_stateless/pretrained.py \ + --method greedy_search \ + --max-sym-per-frame 3 \ + --checkpoint ./tmp/icefall-asr-librispeech-transducer-stateless-bpe-500-2022-02-07/exp/pretrained.pt \ + --bpe-model ./tmp/icefall-asr-librispeech-transducer-stateless-bpe-500-2022-02-07/data/lang_bpe_500/bpe.model \ + ./tmp/icefall-asr-librispeech-transducer-stateless-bpe-500-2022-02-07/test_wavs/1089-134686-0001.wav \ + ./tmp/icefall-asr-librispeech-transducer-stateless-bpe-500-2022-02-07/test_wavs/1221-135766-0001.wav \ + ./tmp/icefall-asr-librispeech-transducer-stateless-bpe-500-2022-02-07/test_wavs/1221-135766-0002.wav - name: Run beam search decoding shell: bash @@ -101,8 +130,22 @@ jobs: ./transducer_stateless/pretrained.py \ --method beam_search \ --beam-size 4 \ - --checkpoint ./tmp/icefall-asr-librispeech-transducer-stateless-bpe-500-2022-01-10/exp/pretrained.pt \ - --bpe-model ./tmp/icefall-asr-librispeech-transducer-stateless-bpe-500-2022-01-10/data/lang_bpe_500/bpe.model \ - ./tmp/icefall-asr-librispeech-transducer-stateless-bpe-500-2022-01-10/test_wavs/1089-134686-0001.wav \ - ./tmp/icefall-asr-librispeech-transducer-stateless-bpe-500-2022-01-10/test_wavs/1221-135766-0001.wav \ - ./tmp/icefall-asr-librispeech-transducer-stateless-bpe-500-2022-01-10/test_wavs/1221-135766-0002.wav + --checkpoint ./tmp/icefall-asr-librispeech-transducer-stateless-bpe-500-2022-02-07/exp/pretrained.pt \ + --bpe-model ./tmp/icefall-asr-librispeech-transducer-stateless-bpe-500-2022-02-07/data/lang_bpe_500/bpe.model \ + ./tmp/icefall-asr-librispeech-transducer-stateless-bpe-500-2022-02-07/test_wavs/1089-134686-0001.wav \ + ./tmp/icefall-asr-librispeech-transducer-stateless-bpe-500-2022-02-07/test_wavs/1221-135766-0001.wav \ + ./tmp/icefall-asr-librispeech-transducer-stateless-bpe-500-2022-02-07/test_wavs/1221-135766-0002.wav + + - name: Run modified beam search decoding + shell: bash + run: | + export PYTHONPATH=$PWD:$PYTHONPATH + cd egs/librispeech/ASR + ./transducer_stateless/pretrained.py \ + --method modified_beam_search \ + --beam-size 4 \ + --checkpoint ./tmp/icefall-asr-librispeech-transducer-stateless-bpe-500-2022-02-07/exp/pretrained.pt \ + --bpe-model ./tmp/icefall-asr-librispeech-transducer-stateless-bpe-500-2022-02-07/data/lang_bpe_500/bpe.model \ + ./tmp/icefall-asr-librispeech-transducer-stateless-bpe-500-2022-02-07/test_wavs/1089-134686-0001.wav \ + ./tmp/icefall-asr-librispeech-transducer-stateless-bpe-500-2022-02-07/test_wavs/1221-135766-0001.wav \ + ./tmp/icefall-asr-librispeech-transducer-stateless-bpe-500-2022-02-07/test_wavs/1221-135766-0002.wav diff --git a/README.md b/README.md index 38c25900f..28c9b6ce4 100644 --- a/README.md +++ b/README.md @@ -80,16 +80,16 @@ We provide a Colab notebook to run a pre-trained RNN-T conformer model: [![Open Using Conformer as encoder. The decoder consists of 1 embedding layer and 1 convolutional layer. 
-The best WER using beam search with beam size 4 is: +The best WER using modified beam search with beam size 4 is: | | test-clean | test-other | |-----|------------|------------| -| WER | 2.68 | 6.72 | +| WER | 2.67 | 6.64 | Note: No auxiliary losses are used in the training and no LMs are used in the decoding. -We provide a Colab notebook to run a pre-trained transducer conformer + stateless decoder model: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1Rc4Is-3Yp9LbcEz_Iy8hfyenyHsyjvqE?usp=sharing) +We provide a Colab notebook to run a pre-trained transducer conformer + stateless decoder model: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1CO1bXJ-2khDckZIW8zjOPHGSKLHpTDlp?usp=sharing) ### Aishell diff --git a/egs/librispeech/ASR/RESULTS.md b/egs/librispeech/ASR/RESULTS.md index ffeaaae68..17679ba3d 100644 --- a/egs/librispeech/ASR/RESULTS.md +++ b/egs/librispeech/ASR/RESULTS.md @@ -4,62 +4,73 @@ #### Conformer encoder + embedding decoder -Using commit `4c1b3665ee6efb935f4dd93a80ff0e154b13efb6`. +Using commit `TODO`. -Conformer encoder + non-current decoder. The decoder +Conformer encoder + non-recurrent decoder. The decoder contains only an embedding layer and a Conv1d (with kernel size 2). The WERs are -| | test-clean | test-other | comment | -|---------------------------|------------|------------|------------------------------------------| -| greedy search | 2.69 | 6.81 | --epoch 71, --avg 15, --max-duration 100 | -| beam search (beam size 4) | 2.68 | 6.72 | --epoch 71, --avg 15, --max-duration 100 | +| | test-clean | test-other | comment | +|-------------------------------------|------------|------------|------------------------------------------| +| greedy search (max sym per frame 1) | 2.68 | 6.71 | --epoch 61, --avg 18, --max-duration 100 | +| greedy search (max sym per frame 2) | 2.69 | 6.71 | --epoch 61, --avg 18, --max-duration 100 | +| greedy search (max sym per frame 3) | 2.69 | 6.71 | --epoch 61, --avg 18, --max-duration 100 | +| modified beam search (beam size 4) | 2.67 | 6.64 | --epoch 61, --avg 18, --max-duration 100 | + The training command for reproducing is given below: ``` +cd egs/librispeech/ASR/ +./prepare.sh export CUDA_VISIBLE_DEVICES="0,1,2,3" - ./transducer_stateless/train.py \ --world-size 4 \ --num-epochs 76 \ --start-epoch 0 \ --exp-dir transducer_stateless/exp-full \ --full-libri 1 \ - --max-duration 250 \ - --lr-factor 3 + --max-duration 300 \ + --lr-factor 5 \ + --bpe-model data/lang_bpe_500/bpe.model \ + --modified-transducer-prob 0.25 ``` The tensorboard training log can be found at - + The decoding command is: ``` -epoch=71 -avg=15 +epoch=61 +avg=18 ## greedy search -./transducer_stateless/decode.py \ - --epoch $epoch \ - --avg $avg \ - --exp-dir transducer_stateless/exp-full \ - --bpe-model ./data/lang_bpe_500/bpe.model \ - --max-duration 100 +for sym in 1 2 3; do + ./transducer_stateless/decode.py \ + --epoch $epoch \ + --avg $avg \ + --exp-dir transducer_stateless/exp-full \ + --bpe-model ./data/lang_bpe_500/bpe.model \ + --max-duration 100 \ + --max-sym-per-frame $sym +done + +## modified beam search -## beam search ./transducer_stateless/decode.py \ --epoch $epoch \ --avg $avg \ --exp-dir transducer_stateless/exp-full \ --bpe-model ./data/lang_bpe_500/bpe.model \ --max-duration 100 \ - --decoding-method beam_search \ + --context-size 2 \ + --decoding-method modified_beam_search \ --beam-size 4 ``` You can find 
a pretrained model by visiting - + #### Conformer encoder + LSTM decoder diff --git a/egs/librispeech/ASR/transducer_stateless/beam_search.py b/egs/librispeech/ASR/transducer_stateless/beam_search.py index 1cce48235..c5efb733d 100644 --- a/egs/librispeech/ASR/transducer_stateless/beam_search.py +++ b/egs/librispeech/ASR/transducer_stateless/beam_search.py @@ -17,7 +17,6 @@ from dataclasses import dataclass from typing import Dict, List, Optional -import numpy as np import torch from model import Transducer @@ -108,8 +107,9 @@ class Hypothesis: # Newly predicted tokens are appended to `ys`. ys: List[int] - # The log prob of ys - log_prob: float + # The log prob of ys. + # It contains only one entry. + log_prob: torch.Tensor @property def key(self) -> str: @@ -145,8 +145,10 @@ class HypothesisList(object): """ key = hyp.key if key in self: - old_hyp = self._data[key] - old_hyp.log_prob = np.logaddexp(old_hyp.log_prob, hyp.log_prob) + old_hyp = self._data[key] # shallow copy + torch.logaddexp( + old_hyp.log_prob, hyp.log_prob, out=old_hyp.log_prob + ) else: self._data[key] = hyp @@ -184,7 +186,7 @@ class HypothesisList(object): assert key in self, f"{key} does not exist" del self._data[key] - def filter(self, threshold: float) -> "HypothesisList": + def filter(self, threshold: torch.Tensor) -> "HypothesisList": """Remove all Hypotheses whose log_prob is less than threshold. Caution: @@ -312,6 +314,113 @@ def run_joiner( return log_prob +def modified_beam_search( + model: Transducer, + encoder_out: torch.Tensor, + beam: int = 4, +) -> List[int]: + """It limits the maximum number of symbols per frame to 1. + + Args: + model: + An instance of `Transducer`. + encoder_out: + A tensor of shape (N, T, C) from the encoder. Support only N==1 for now. + beam: + Beam size. + Returns: + Return the decoded result. 
+ """ + + assert encoder_out.ndim == 3 + + # support only batch_size == 1 for now + assert encoder_out.size(0) == 1, encoder_out.size(0) + blank_id = model.decoder.blank_id + context_size = model.decoder.context_size + + device = model.device + + decoder_input = torch.tensor( + [blank_id] * context_size, device=device + ).reshape(1, context_size) + + decoder_out = model.decoder(decoder_input, need_pad=False) + + T = encoder_out.size(1) + + B = HypothesisList() + B.add( + Hypothesis( + ys=[blank_id] * context_size, + log_prob=torch.zeros(1, dtype=torch.float32, device=device), + ) + ) + + encoder_out_len = torch.tensor([1]) + decoder_out_len = torch.tensor([1]) + + for t in range(T): + # fmt: off + current_encoder_out = encoder_out[:, t:t+1, :] + # current_encoder_out is of shape (1, 1, encoder_out_dim) + # fmt: on + A = list(B) + B = HypothesisList() + + ys_log_probs = torch.cat([hyp.log_prob.reshape(1, 1) for hyp in A]) + # ys_log_probs is of shape (num_hyps, 1) + + decoder_input = torch.tensor( + [hyp.ys[-context_size:] for hyp in A], + device=device, + ) + # decoder_input is of shape (num_hyps, context_size) + + decoder_out = model.decoder(decoder_input, need_pad=False) + # decoder_output is of shape (num_hyps, 1, decoder_output_dim) + + current_encoder_out = current_encoder_out.expand( + decoder_out.size(0), 1, -1 + ) + + logits = model.joiner( + current_encoder_out, + decoder_out, + encoder_out_len.expand(decoder_out.size(0)), + decoder_out_len.expand(decoder_out.size(0)), + ) + # logits is of shape (num_hyps, vocab_size) + log_probs = logits.log_softmax(dim=-1) + + log_probs.add_(ys_log_probs) + + log_probs = log_probs.reshape(-1) + topk_log_probs, topk_indexes = log_probs.topk(beam) + + # topk_hyp_indexes are indexes into `A` + topk_hyp_indexes = topk_indexes // logits.size(-1) + topk_token_indexes = topk_indexes % logits.size(-1) + + topk_hyp_indexes = topk_hyp_indexes.tolist() + topk_token_indexes = topk_token_indexes.tolist() + + for i in range(len(topk_hyp_indexes)): + hyp = A[topk_hyp_indexes[i]] + new_ys = hyp.ys[:] + new_token = topk_token_indexes[i] + if new_token != blank_id: + new_ys.append(new_token) + new_log_prob = topk_log_probs[i] + new_hyp = Hypothesis(ys=new_ys, log_prob=new_log_prob) + B.add(new_hyp) + + best_hyp = B.get_most_probable(length_norm=True) + ys = best_hyp.ys[context_size:] # [context_size:] to remove blanks + + return ys + + def beam_search( model: Transducer, encoder_out: torch.Tensor, @@ -351,7 +460,12 @@ def beam_search( t = 0 B = HypothesisList() - B.add(Hypothesis(ys=[blank_id] * context_size, log_prob=0.0)) + B.add( + Hypothesis( + ys=[blank_id] * context_size, + log_prob=torch.zeros(1, dtype=torch.float32, device=device), + ) + ) max_sym_per_utt = 20000 @@ -371,9 +485,6 @@ def beam_search( joint_cache: Dict[str, torch.Tensor] = {} - # TODO(fangjun): Implement prefix search to update the `log_prob` - # of hypotheses in A - while True: y_star = A.get_most_probable() A.remove(y_star) @@ -396,18 +507,21 @@ def beam_search( # First, process the blank symbol skip_log_prob = log_prob[blank_id] - new_y_star_log_prob = y_star.log_prob + skip_log_prob.item() + new_y_star_log_prob = y_star.log_prob + skip_log_prob # ys[:] returns a copy of ys B.add(Hypothesis(ys=y_star.ys[:], log_prob=new_y_star_log_prob)) # Second, process other non-blank labels values, indices = log_prob.topk(beam + 1) - for i, v in zip(indices.tolist(), values.tolist()): + for idx in range(values.size(0)): + i = indices[idx].item() if i == blank_id: continue + new_ys = y_star.ys + [i] - 
new_log_prob = y_star.log_prob + v + + new_log_prob = y_star.log_prob + values[idx] A.add(Hypothesis(ys=new_ys, log_prob=new_log_prob)) # Check whether B contains more than "beam" elements more probable diff --git a/egs/librispeech/ASR/transducer_stateless/decode.py b/egs/librispeech/ASR/transducer_stateless/decode.py index e5987b75e..c101d9397 100755 --- a/egs/librispeech/ASR/transducer_stateless/decode.py +++ b/egs/librispeech/ASR/transducer_stateless/decode.py @@ -46,7 +46,7 @@ import sentencepiece as spm import torch import torch.nn as nn from asr_datamodule import LibriSpeechAsrDataModule -from beam_search import beam_search, greedy_search +from beam_search import beam_search, greedy_search, modified_beam_search from conformer import Conformer from decoder import Decoder from joiner import Joiner @@ -104,6 +104,7 @@ def get_parser(): help="""Possible values are: - greedy_search - beam_search + - modified_beam_search """, ) @@ -111,7 +112,8 @@ def get_parser(): "--beam-size", type=int, default=4, - help="Used only when --decoding-method is beam_search", + help="""Used only when --decoding-method is + beam_search or modified_beam_search""", ) parser.add_argument( @@ -125,7 +127,8 @@ def get_parser(): "--max-sym-per-frame", type=int, default=3, - help="Maximum number of symbols per frame", + help="""Maximum number of symbols per frame. + Used only when --decoding_method is greedy_search""", ) return parser @@ -256,6 +259,10 @@ def decode_one_batch( hyp = beam_search( model=model, encoder_out=encoder_out_i, beam=params.beam_size ) + elif params.decoding_method == "modified_beam_search": + hyp = modified_beam_search( + model=model, encoder_out=encoder_out_i, beam=params.beam_size + ) else: raise ValueError( f"Unsupported decoding method: {params.decoding_method}" @@ -389,11 +396,15 @@ def main(): params = get_params() params.update(vars(args)) - assert params.decoding_method in ("greedy_search", "beam_search") + assert params.decoding_method in ( + "greedy_search", + "beam_search", + "modified_beam_search", + ) params.res_dir = params.exp_dir / params.decoding_method params.suffix = f"epoch-{params.epoch}-avg-{params.avg}" - if params.decoding_method == "beam_search": + if "beam_search" in params.decoding_method: params.suffix += f"-beam-{params.beam_size}" else: params.suffix += f"-context-{params.context_size}" diff --git a/egs/librispeech/ASR/transducer_stateless/decoder.py b/egs/librispeech/ASR/transducer_stateless/decoder.py index c2c6552a9..b82fed37b 100644 --- a/egs/librispeech/ASR/transducer_stateless/decoder.py +++ b/egs/librispeech/ASR/transducer_stateless/decoder.py @@ -75,7 +75,7 @@ class Decoder(nn.Module): """ Args: y: - A 2-D tensor of shape (N, U) with blank prepended. + A 2-D tensor of shape (N, U). need_pad: True to left pad the input. Should be True during training. False to not pad the input. Should be False during inference. diff --git a/egs/librispeech/ASR/transducer_stateless/model.py b/egs/librispeech/ASR/transducer_stateless/model.py index 17b5f63e5..8281e1fb5 100644 --- a/egs/librispeech/ASR/transducer_stateless/model.py +++ b/egs/librispeech/ASR/transducer_stateless/model.py @@ -14,6 +14,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import random + import k2 import torch import torch.nn as nn @@ -62,6 +64,7 @@ class Transducer(nn.Module): x: torch.Tensor, x_lens: torch.Tensor, y: k2.RaggedTensor, + modified_transducer_prob: float = 0.0, ) -> torch.Tensor: """ Args: @@ -73,6 +76,8 @@ class Transducer(nn.Module): y: A ragged tensor with 2 axes [utt][label]. It contains labels of each utterance. + modified_transducer_prob: + The probability to use modified transducer loss. Returns: Return the transducer loss. """ @@ -114,6 +119,16 @@ class Transducer(nn.Module): # reference stage import optimized_transducer + assert 0 <= modified_transducer_prob <= 1 + + if modified_transducer_prob == 0: + one_sym_per_frame = False + elif random.random() < modified_transducer_prob: + # random.random() returns a float in the range [0, 1) + one_sym_per_frame = True + else: + one_sym_per_frame = False + loss = optimized_transducer.transducer_loss( logits=logits, targets=y_padded, @@ -121,6 +136,7 @@ class Transducer(nn.Module): target_lengths=y_lens, blank=blank_id, reduction="sum", + one_sym_per_frame=one_sym_per_frame, from_log_softmax=False, ) diff --git a/egs/librispeech/ASR/transducer_stateless/pretrained.py b/egs/librispeech/ASR/transducer_stateless/pretrained.py index c248de777..ad8d89918 100755 --- a/egs/librispeech/ASR/transducer_stateless/pretrained.py +++ b/egs/librispeech/ASR/transducer_stateless/pretrained.py @@ -22,10 +22,11 @@ Usage: --checkpoint ./transducer_stateless/exp/pretrained.pt \ --bpe-model ./data/lang_bpe_500/bpe.model \ --method greedy_search \ + --max-sym-per-frame 1 \ /path/to/foo.wav \ /path/to/bar.wav \ -(1) beam search +(2) beam search ./transducer_stateless/pretrained.py \ --checkpoint ./transducer_stateless/exp/pretrained.pt \ --bpe-model ./data/lang_bpe_500/bpe.model \ @@ -34,6 +35,15 @@ Usage: /path/to/foo.wav \ /path/to/bar.wav \ +(3) modified beam search +./transducer_stateless/pretrained.py \ + --checkpoint ./transducer_stateless/exp/pretrained.pt \ + --bpe-model ./data/lang_bpe_500/bpe.model \ + --method modified_beam_search \ + --beam-size 4 \ + /path/to/foo.wav \ + /path/to/bar.wav \ + You can also use `./transducer_stateless/exp/epoch-xx.pt`. 
Note: ./transducer_stateless/exp/pretrained.pt is generated by @@ -51,7 +61,7 @@ import sentencepiece as spm import torch import torch.nn as nn import torchaudio -from beam_search import beam_search, greedy_search +from beam_search import beam_search, greedy_search, modified_beam_search from conformer import Conformer from decoder import Decoder from joiner import Joiner @@ -91,6 +101,7 @@ def get_parser(): help="""Possible values are: - greedy_search - beam_search + - modified_beam_search """, ) @@ -108,7 +119,7 @@ def get_parser(): "--beam-size", type=int, default=4, - help="Used only when --method is beam_search", + help="Used only when --method is beam_search and modified_beam_search ", ) parser.add_argument( @@ -218,6 +229,7 @@ def read_sound_files( return ans +@torch.no_grad() def main(): parser = get_parser() args = parser.parse_args() @@ -301,6 +313,10 @@ def main(): hyp = beam_search( model=model, encoder_out=encoder_out_i, beam=params.beam_size ) + elif params.method == "modified_beam_search": + hyp = modified_beam_search( + model=model, encoder_out=encoder_out_i, beam=params.beam_size + ) else: raise ValueError(f"Unsupported method: {params.method}") diff --git a/egs/librispeech/ASR/transducer_stateless/train.py b/egs/librispeech/ASR/transducer_stateless/train.py index 950a88a35..544f6e9b1 100755 --- a/egs/librispeech/ASR/transducer_stateless/train.py +++ b/egs/librispeech/ASR/transducer_stateless/train.py @@ -138,6 +138,17 @@ def get_parser(): "2 means tri-gram", ) + parser.add_argument( + "--modified-transducer-prob", + type=float, + default=0.25, + help="""The probability to use modified transducer loss. + In modified transduer, it limits the maximum number of symbols + per frame to 1. See also the option --max-sym-per-frame in + transducer_stateless/decode.py + """, + ) + return parser @@ -383,7 +394,12 @@ def compute_loss( y = k2.RaggedTensor(y).to(device) with torch.set_grad_enabled(is_training): - loss = model(x=feature, x_lens=feature_lens, y=y) + loss = model( + x=feature, + x_lens=feature_lens, + y=y, + modified_transducer_prob=params.modified_transducer_prob, + ) assert loss.requires_grad == is_training From 27fa5f05d36323088d2cdd743cefcd9f05d266e1 Mon Sep 17 00:00:00 2001 From: Fangjun Kuang Date: Mon, 7 Feb 2022 18:45:45 +0800 Subject: [PATCH 02/25] Update git SHA-1 in RESULTS.md for transducer_stateless. (#202) --- egs/librispeech/ASR/RESULTS.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/egs/librispeech/ASR/RESULTS.md b/egs/librispeech/ASR/RESULTS.md index 17679ba3d..d78447593 100644 --- a/egs/librispeech/ASR/RESULTS.md +++ b/egs/librispeech/ASR/RESULTS.md @@ -4,7 +4,7 @@ #### Conformer encoder + embedding decoder -Using commit `TODO`. +Using commit `a8150021e01d34ecbd6198fe03a57eacf47a16f2`. Conformer encoder + non-recurrent decoder. The decoder contains only an embedding layer and a Conv1d (with kernel size 2). From be1c86b06cbaa3b3d63f0b129ee54293176d95b5 Mon Sep 17 00:00:00 2001 From: "Wang, Guanbo" Date: Tue, 8 Feb 2022 01:56:58 -0500 Subject: [PATCH 03/25] print num_frame as %.2f (#204) --- icefall/utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/icefall/utils.py b/icefall/utils.py index 7237c8d62..6d4578de6 100644 --- a/icefall/utils.py +++ b/icefall/utils.py @@ -521,8 +521,8 @@ class MetricsTracker(collections.defaultdict): for k, v in self.norm_items(): norm_value = "%.4g" % v ans += str(k) + "=" + str(norm_value) + ", " - frames = str(self["frames"]) - ans += "over " + frames + " frames." 
+ frames = "%.2f" % self["frames"] + ans += "over " + str(frames) + " frames." return ans def norm_items(self) -> List[Tuple[str, float]]: From 70a3c56a18d726d0a04c2774247e132e4464f54f Mon Sep 17 00:00:00 2001 From: "Wang, Guanbo" Date: Wed, 9 Feb 2022 03:42:28 -0500 Subject: [PATCH 04/25] Fix librispeech train.py (#211) * fix librispeech train.py * remove note --- egs/librispeech/ASR/conformer_ctc/train.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/egs/librispeech/ASR/conformer_ctc/train.py b/egs/librispeech/ASR/conformer_ctc/train.py index cb0bd5c2d..058efd061 100755 --- a/egs/librispeech/ASR/conformer_ctc/train.py +++ b/egs/librispeech/ASR/conformer_ctc/train.py @@ -601,14 +601,14 @@ def run(rank, world_size, args): if torch.cuda.is_available(): device = torch.device("cuda", rank) - if "lang_bpe" in params.lang_dir: + if "lang_bpe" in str(params.lang_dir): graph_compiler = BpeCtcTrainingGraphCompiler( params.lang_dir, device=device, sos_token="", eos_token="", ) - elif "lang_phone" in params.lang_dir: + elif "lang_phone" in str(params.lang_dir): assert params.att_rate == 0, ( "Attention decoder training does not support phone lang dirs " "at this time due to a missing symbol. Set --att-rate=0 " @@ -650,9 +650,7 @@ def run(rank, world_size, args): model.to(device) if world_size > 1: - # Note: find_unused_parameters=True is needed in case we - # want to set params.att_rate = 0 (i.e. att decoder is not trained) - model = DDP(model, device_ids=[rank], find_unused_parameters=True) + model = DDP(model, device_ids=[rank]) optimizer = Noam( model.parameters(), From e8eb408760bcb92fb80d4280b103decaeb9c8398 Mon Sep 17 00:00:00 2001 From: "Wang, Guanbo" Date: Wed, 16 Feb 2022 03:59:27 -0500 Subject: [PATCH 05/25] Incremental pruning threshold (#214) * Incremental pruning threshold * flake8 * black * minor fix --- icefall/decode.py | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/icefall/decode.py b/icefall/decode.py index 4c2a8e01b..d3e420eec 100644 --- a/icefall/decode.py +++ b/icefall/decode.py @@ -716,10 +716,13 @@ def rescore_with_whole_lattice( b_to_a_map = torch.zeros(num_seqs, device=device, dtype=torch.int32) + # NOTE: The choice of the threshold list is arbitrary here to avoid OOM. + # You may need to fine tune it. + prune_th_list = [1e-10, 1e-9, 1e-8, 1e-7, 1e-6] + prune_th_list += [1e-5, 1e-4, 1e-3, 1e-2, 1e-1] max_loop_count = 10 loop_count = 0 while loop_count <= max_loop_count: - loop_count += 1 try: rescoring_lattice = k2.intersect_device( G_with_epsilon_loops, @@ -731,6 +734,11 @@ def rescore_with_whole_lattice( break except RuntimeError as e: logging.info(f"Caught exception:\n{e}\n") + if loop_count >= max_loop_count: + logging.info( + "Return None as the resulting lattice is too large." + ) + return None logging.info( f"num_arcs before pruning: {inv_lattice.arcs.num_elements()}" ) @@ -740,16 +748,15 @@ def rescore_with_whole_lattice( "is too large, or the input sound file is difficult to " "decode, you will meet this exception." ) - - # NOTE(fangjun): The choice of the threshold 1e-9 is arbitrary here - # to avoid OOM. You may need to fine tune it. 
- inv_lattice = k2.prune_on_arc_post(inv_lattice, 1e-9, True) + inv_lattice = k2.prune_on_arc_post( + inv_lattice, + prune_th_list[loop_count], + True, + ) logging.info( f"num_arcs after pruning: {inv_lattice.arcs.num_elements()}" ) - if loop_count > max_loop_count: - logging.info("Return None as the resulting lattice is too large") - return None + loop_count += 1 # lat has token IDs as labels # and word IDs as aux_labels. From b702281e9031309cdc862ce61ea057f9ee7e7972 Mon Sep 17 00:00:00 2001 From: Wei Kang Date: Thu, 17 Feb 2022 13:33:54 +0800 Subject: [PATCH 06/25] Use k2 pruned transducer loss to train conformer-transducer model (#194) * Using k2 pruned version transducer loss to train model * Fix style * Minor fixes --- .flake8 | 2 + egs/librispeech/ASR/RESULTS.md | 48 + .../pruned_transducer_stateless/__init__.py | 0 .../asr_datamodule.py | 1 + .../beam_search.py | 340 +++++++ .../pruned_transducer_stateless/conformer.py | 1 + .../ASR/pruned_transducer_stateless/decode.py | 476 ++++++++++ .../pruned_transducer_stateless/decoder.py | 100 +++ .../encoder_interface.py | 1 + .../ASR/pruned_transducer_stateless/export.py | 252 ++++++ .../ASR/pruned_transducer_stateless/joiner.py | 50 ++ .../ASR/pruned_transducer_stateless/model.py | 169 ++++ .../pruned_transducer_stateless/pretrained.py | 326 +++++++ .../subsampling.py | 1 + .../test_decoder.py | 58 ++ .../ASR/pruned_transducer_stateless/train.py | 830 ++++++++++++++++++ .../transformer.py | 1 + icefall/utils.py | 95 +- 18 files changed, 2750 insertions(+), 1 deletion(-) create mode 100644 egs/librispeech/ASR/pruned_transducer_stateless/__init__.py create mode 120000 egs/librispeech/ASR/pruned_transducer_stateless/asr_datamodule.py create mode 100644 egs/librispeech/ASR/pruned_transducer_stateless/beam_search.py create mode 120000 egs/librispeech/ASR/pruned_transducer_stateless/conformer.py create mode 100755 egs/librispeech/ASR/pruned_transducer_stateless/decode.py create mode 100644 egs/librispeech/ASR/pruned_transducer_stateless/decoder.py create mode 120000 egs/librispeech/ASR/pruned_transducer_stateless/encoder_interface.py create mode 100755 egs/librispeech/ASR/pruned_transducer_stateless/export.py create mode 100644 egs/librispeech/ASR/pruned_transducer_stateless/joiner.py create mode 100644 egs/librispeech/ASR/pruned_transducer_stateless/model.py create mode 100755 egs/librispeech/ASR/pruned_transducer_stateless/pretrained.py create mode 120000 egs/librispeech/ASR/pruned_transducer_stateless/subsampling.py create mode 100755 egs/librispeech/ASR/pruned_transducer_stateless/test_decoder.py create mode 100755 egs/librispeech/ASR/pruned_transducer_stateless/train.py create mode 120000 egs/librispeech/ASR/pruned_transducer_stateless/transformer.py diff --git a/.flake8 b/.flake8 index 19c3a9bd6..c56cd6fba 100644 --- a/.flake8 +++ b/.flake8 @@ -6,6 +6,8 @@ per-file-ignores = # line too long egs/librispeech/ASR/*/conformer.py: E501, egs/aishell/ASR/*/conformer.py: E501, + # invalid escape sequence (cause by tex formular), W605 + icefall/utils.py: E501, W605 exclude = .git, diff --git a/egs/librispeech/ASR/RESULTS.md b/egs/librispeech/ASR/RESULTS.md index d78447593..a62434184 100644 --- a/egs/librispeech/ASR/RESULTS.md +++ b/egs/librispeech/ASR/RESULTS.md @@ -1,5 +1,53 @@ ## Results +### LibriSpeech BPE training results (Pruned Transducer) + +#### Conformer encoder + embedding decoder + +Conformer encoder + non-current decoder. 
The decoder +contains only an embedding layer, a Conv1d (with kernel size 2) and a linear +layer (to transform tensor dim). + +The WERs are + +| | test-clean | test-other | comment | +|---------------------------|------------|------------|------------------------------------------| +| greedy search | 2.85 | 6.98 | --epoch 28, --avg 15, --max-duration 100 | + +The training command for reproducing is given below: + +``` +export CUDA_VISIBLE_DEVICES="0,1,2,3" + +./pruned_transducer_stateless/train.py \ + --world-size 4 \ + --num-epochs 30 \ + --start-epoch 0 \ + --exp-dir pruned_transducer_stateless/exp \ + --full-libri 1 \ + --max-duration 300 \ + --prune-range 5 \ + --lr-factor 5 \ + --lm-scale 0.25 \ +``` + +The tensorboard training log can be found at + + +The decoding command is: +``` +epoch=28 +avg=15 + +## greedy search +./pruned_transducer_stateless/decode.py \ + --epoch $epoch \ + --avg $avg \ + --exp-dir pruned_transducer_stateless/exp \ + --max-duration 100 +``` + + ### LibriSpeech BPE training results (Transducer) #### Conformer encoder + embedding decoder diff --git a/egs/librispeech/ASR/pruned_transducer_stateless/__init__.py b/egs/librispeech/ASR/pruned_transducer_stateless/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/egs/librispeech/ASR/pruned_transducer_stateless/asr_datamodule.py b/egs/librispeech/ASR/pruned_transducer_stateless/asr_datamodule.py new file mode 120000 index 000000000..07f39b451 --- /dev/null +++ b/egs/librispeech/ASR/pruned_transducer_stateless/asr_datamodule.py @@ -0,0 +1 @@ +../transducer/asr_datamodule.py \ No newline at end of file diff --git a/egs/librispeech/ASR/pruned_transducer_stateless/beam_search.py b/egs/librispeech/ASR/pruned_transducer_stateless/beam_search.py new file mode 100644 index 000000000..3d4818509 --- /dev/null +++ b/egs/librispeech/ASR/pruned_transducer_stateless/beam_search.py @@ -0,0 +1,340 @@ +# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass +from typing import Dict, List, Optional + +import numpy as np +import torch +from model import Transducer + + +def greedy_search( + model: Transducer, encoder_out: torch.Tensor, max_sym_per_frame: int +) -> List[int]: + """ + Args: + model: + An instance of `Transducer`. + encoder_out: + A tensor of shape (N, T, C) from the encoder. Support only N==1 for now. + max_sym_per_frame: + Maximum number of symbols per frame. If it is set to 0, the WER + would be 100%. + Returns: + Return the decoded result. 
+ """ + assert encoder_out.ndim == 3 + + # support only batch_size == 1 for now + assert encoder_out.size(0) == 1, encoder_out.size(0) + + blank_id = model.decoder.blank_id + context_size = model.decoder.context_size + + device = model.device + + decoder_input = torch.tensor( + [blank_id] * context_size, device=device + ).reshape(1, context_size) + + decoder_out = model.decoder(decoder_input, need_pad=False) + + T = encoder_out.size(1) + t = 0 + hyp = [blank_id] * context_size + + # Maximum symbols per utterance. + max_sym_per_utt = 1000 + + # symbols per frame + sym_per_frame = 0 + + # symbols per utterance decoded so far + sym_per_utt = 0 + + while t < T and sym_per_utt < max_sym_per_utt: + if sym_per_frame >= max_sym_per_frame: + sym_per_frame = 0 + t += 1 + continue + + # fmt: off + current_encoder_out = encoder_out[:, t:t+1, :].unsqueeze(2) + # fmt: on + logits = model.joiner(current_encoder_out, decoder_out.unsqueeze(1)) + # logits is (1, 1, 1, vocab_size) + + y = logits.argmax().item() + if y != blank_id: + hyp.append(y) + decoder_input = torch.tensor( + [hyp[-context_size:]], device=device + ).reshape(1, context_size) + + decoder_out = model.decoder(decoder_input, need_pad=False) + + sym_per_utt += 1 + sym_per_frame += 1 + else: + sym_per_frame = 0 + t += 1 + hyp = hyp[context_size:] # remove blanks + + return hyp + + +@dataclass +class Hypothesis: + # The predicted tokens so far. + # Newly predicted tokens are appended to `ys`. + ys: List[int] + + # The log prob of ys + log_prob: float + + @property + def key(self) -> str: + """Return a string representation of self.ys""" + return "_".join(map(str, self.ys)) + + +class HypothesisList(object): + def __init__(self, data: Optional[Dict[str, Hypothesis]] = None): + """ + Args: + data: + A dict of Hypotheses. Its key is its `value.key`. + """ + if data is None: + self._data = {} + else: + self._data = data + + @property + def data(self): + return self._data + + def add(self, hyp: Hypothesis): + """Add a Hypothesis to `self`. + + If `hyp` already exists in `self`, its probability is updated using + `log-sum-exp` with the existed one. + + Args: + hyp: + The hypothesis to be added. + """ + key = hyp.key + if key in self: + old_hyp = self._data[key] + old_hyp.log_prob = np.logaddexp(old_hyp.log_prob, hyp.log_prob) + else: + self._data[key] = hyp + + def get_most_probable(self, length_norm: bool = False) -> Hypothesis: + """Get the most probable hypothesis, i.e., the one with + the largest `log_prob`. + + Args: + length_norm: + If True, the `log_prob` of a hypothesis is normalized by the + number of tokens in it. + + """ + if length_norm: + return max( + self._data.values(), key=lambda hyp: hyp.log_prob / len(hyp.ys) + ) + else: + return max(self._data.values(), key=lambda hyp: hyp.log_prob) + + def remove(self, hyp: Hypothesis) -> None: + """Remove a given hypothesis. + + Args: + hyp: + The hypothesis to be removed from `self`. + Note: It must be contained in `self`. Otherwise, + an exception is raised. + """ + key = hyp.key + assert key in self, f"{key} does not exist" + del self._data[key] + + def filter(self, threshold: float) -> "HypothesisList": + """Remove all Hypotheses whose log_prob is less than threshold. + + Caution: + `self` is not modified. Instead, a new HypothesisList is returned. + + Returns: + Return a new HypothesisList containing all hypotheses from `self` + that have `log_prob` being greater than the given `threshold`. 
+ """ + ans = HypothesisList() + for key, hyp in self._data.items(): + if hyp.log_prob > threshold: + ans.add(hyp) # shallow copy + return ans + + def topk(self, k: int) -> "HypothesisList": + """Return the top-k hypothesis.""" + hyps = list(self._data.items()) + + hyps = sorted(hyps, key=lambda h: h[1].log_prob, reverse=True)[:k] + + ans = HypothesisList(dict(hyps)) + return ans + + def __contains__(self, key: str): + return key in self._data + + def __iter__(self): + return iter(self._data.values()) + + def __len__(self) -> int: + return len(self._data) + + def __str__(self) -> str: + s = [] + for key in self: + s.append(key) + return ", ".join(s) + + +def beam_search( + model: Transducer, + encoder_out: torch.Tensor, + beam: int = 4, +) -> List[int]: + """ + It implements Algorithm 1 in https://arxiv.org/pdf/1211.3711.pdf + + espnet/nets/beam_search_transducer.py#L247 is used as a reference. + + Args: + model: + An instance of `Transducer`. + encoder_out: + A tensor of shape (N, T, C) from the encoder. Support only N==1 for now. + beam: + Beam size. + Returns: + Return the decoded result. + """ + assert encoder_out.ndim == 3 + + # support only batch_size == 1 for now + assert encoder_out.size(0) == 1, encoder_out.size(0) + blank_id = model.decoder.blank_id + context_size = model.decoder.context_size + + device = model.device + + decoder_input = torch.tensor( + [blank_id] * context_size, device=device + ).reshape(1, context_size) + + decoder_out = model.decoder(decoder_input, need_pad=False) + + T = encoder_out.size(1) + t = 0 + + B = HypothesisList() + B.add(Hypothesis(ys=[blank_id] * context_size, log_prob=0.0)) + + max_sym_per_utt = 20000 + + sym_per_utt = 0 + + decoder_cache: Dict[str, torch.Tensor] = {} + + while t < T and sym_per_utt < max_sym_per_utt: + # fmt: off + current_encoder_out = encoder_out[:, t:t+1, :].unsqueeze(2) + # fmt: on + A = B + B = HypothesisList() + + joint_cache: Dict[str, torch.Tensor] = {} + + # TODO(fangjun): Implement prefix search to update the `log_prob` + # of hypotheses in A + + while True: + y_star = A.get_most_probable() + A.remove(y_star) + + cached_key = y_star.key + + if cached_key not in decoder_cache: + decoder_input = torch.tensor( + [y_star.ys[-context_size:]], device=device + ).reshape(1, context_size) + + decoder_out = model.decoder(decoder_input, need_pad=False) + decoder_cache[cached_key] = decoder_out + else: + decoder_out = decoder_cache[cached_key] + + cached_key += f"-t-{t}" + if cached_key not in joint_cache: + logits = model.joiner( + current_encoder_out, decoder_out.unsqueeze(1) + ) + + # TODO(fangjun): Cache the blank posterior + + log_prob = logits.log_softmax(dim=-1) + # log_prob is (1, 1, 1, vocab_size) + log_prob = log_prob.squeeze() + # Now log_prob is (vocab_size,) + joint_cache[cached_key] = log_prob + else: + log_prob = joint_cache[cached_key] + + # First, process the blank symbol + skip_log_prob = log_prob[blank_id] + new_y_star_log_prob = y_star.log_prob + skip_log_prob.item() + + # ys[:] returns a copy of ys + B.add(Hypothesis(ys=y_star.ys[:], log_prob=new_y_star_log_prob)) + + # Second, process other non-blank labels + values, indices = log_prob.topk(beam + 1) + for i, v in zip(indices.tolist(), values.tolist()): + if i == blank_id: + continue + new_ys = y_star.ys + [i] + new_log_prob = y_star.log_prob + v + A.add(Hypothesis(ys=new_ys, log_prob=new_log_prob)) + + # Check whether B contains more than "beam" elements more probable + # than the most probable in A + A_most_probable = A.get_most_probable() + + kept_B = 
B.filter(A_most_probable.log_prob) + + if len(kept_B) >= beam: + B = kept_B.topk(beam) + break + + t += 1 + + best_hyp = B.get_most_probable(length_norm=True) + ys = best_hyp.ys[context_size:] # [context_size:] to remove blanks + return ys diff --git a/egs/librispeech/ASR/pruned_transducer_stateless/conformer.py b/egs/librispeech/ASR/pruned_transducer_stateless/conformer.py new file mode 120000 index 000000000..70a7ddf11 --- /dev/null +++ b/egs/librispeech/ASR/pruned_transducer_stateless/conformer.py @@ -0,0 +1 @@ +../transducer_stateless/conformer.py \ No newline at end of file diff --git a/egs/librispeech/ASR/pruned_transducer_stateless/decode.py b/egs/librispeech/ASR/pruned_transducer_stateless/decode.py new file mode 100755 index 000000000..9479d57a8 --- /dev/null +++ b/egs/librispeech/ASR/pruned_transducer_stateless/decode.py @@ -0,0 +1,476 @@ +#!/usr/bin/env python3 +# +# Copyright 2021 Xiaomi Corporation (Author: Fangjun Kuang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Usage: +(1) greedy search +./pruned_transducer_stateless/decode.py \ + --epoch 28 \ + --avg 15 \ + --exp-dir ./pruned_transducer_stateless/exp \ + --max-duration 100 \ + --decoding-method greedy_search + +(2) beam search +./pruned_transducer_stateless/decode.py \ + --epoch 28 \ + --avg 15 \ + --exp-dir ./pruned_transducer_stateless/exp \ + --max-duration 100 \ + --decoding-method beam_search \ + --beam-size 4 +""" + + +import argparse +import logging +from collections import defaultdict +from pathlib import Path +from typing import Dict, List, Tuple + +import sentencepiece as spm +import torch +import torch.nn as nn +from asr_datamodule import LibriSpeechAsrDataModule +from beam_search import beam_search, greedy_search +from conformer import Conformer +from decoder import Decoder +from joiner import Joiner +from model import Transducer + +from icefall.checkpoint import average_checkpoints, load_checkpoint +from icefall.env import get_env_info +from icefall.utils import ( + AttributeDict, + setup_logger, + store_transcripts, + write_error_stats, +) + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--epoch", + type=int, + default=28, + help="It specifies the checkpoint to use for decoding." + "Note: Epoch counts from 0.", + ) + parser.add_argument( + "--avg", + type=int, + default=15, + help="Number of checkpoints to average. Automatically select " + "consecutive checkpoints before the checkpoint specified by " + "'--epoch'. 
", + ) + + parser.add_argument( + "--exp-dir", + type=str, + default="pruned_transducer_stateless/exp", + help="The experiment dir", + ) + + parser.add_argument( + "--bpe-model", + type=str, + default="data/lang_bpe_500/bpe.model", + help="Path to the BPE model", + ) + + parser.add_argument( + "--decoding-method", + type=str, + default="greedy_search", + help="""Possible values are: + - greedy_search + - beam_search + """, + ) + + parser.add_argument( + "--beam-size", + type=int, + default=4, + help="Used only when --decoding-method is beam_search", + ) + + parser.add_argument( + "--context-size", + type=int, + default=2, + help="The context size in the decoder. 1 means bigram; " + "2 means tri-gram", + ) + parser.add_argument( + "--max-sym-per-frame", + type=int, + default=3, + help="Maximum number of symbols per frame", + ) + + return parser + + +def get_params() -> AttributeDict: + params = AttributeDict( + { + # parameters for conformer + "feature_dim": 80, + "subsampling_factor": 4, + "attention_dim": 512, + "nhead": 8, + "dim_feedforward": 2048, + "num_encoder_layers": 12, + "vgg_frontend": False, + # parameters for decoder + "embedding_dim": 512, + "env_info": get_env_info(), + } + ) + return params + + +def get_encoder_model(params: AttributeDict) -> nn.Module: + # TODO: We can add an option to switch between Conformer and Transformer + encoder = Conformer( + num_features=params.feature_dim, + output_dim=params.vocab_size, + subsampling_factor=params.subsampling_factor, + d_model=params.attention_dim, + nhead=params.nhead, + dim_feedforward=params.dim_feedforward, + num_encoder_layers=params.num_encoder_layers, + vgg_frontend=params.vgg_frontend, + ) + return encoder + + +def get_decoder_model(params: AttributeDict) -> nn.Module: + decoder = Decoder( + vocab_size=params.vocab_size, + embedding_dim=params.embedding_dim, + blank_id=params.blank_id, + context_size=params.context_size, + ) + return decoder + + +def get_joiner_model(params: AttributeDict) -> nn.Module: + joiner = Joiner( + input_dim=params.vocab_size, + inner_dim=params.embedding_dim, + output_dim=params.vocab_size, + ) + return joiner + + +def get_transducer_model(params: AttributeDict) -> nn.Module: + encoder = get_encoder_model(params) + decoder = get_decoder_model(params) + joiner = get_joiner_model(params) + + model = Transducer( + encoder=encoder, + decoder=decoder, + joiner=joiner, + ) + return model + + +def decode_one_batch( + params: AttributeDict, + model: nn.Module, + sp: spm.SentencePieceProcessor, + batch: dict, +) -> Dict[str, List[List[str]]]: + """Decode one batch and return the result in a dict. The dict has the + following format: + + - key: It indicates the setting used for decoding. For example, + if greedy_search is used, it would be "greedy_search" + If beam search with a beam size of 7 is used, it would be + "beam_7" + - value: It contains the decoding result. `len(value)` equals to + batch size. `value[i]` is the decoding result for the i-th + utterance in the given batch. + Args: + params: + It's the return value of :func:`get_params`. + model: + The neural model. + sp: + The BPE model. + batch: + It is the return value from iterating + `lhotse.dataset.K2SpeechRecognitionDataset`. See its documentation + for the format of the `batch`. + Returns: + Return the decoding result. See above description for the format of + the returned dict. 
+ """ + device = model.device + feature = batch["inputs"] + assert feature.ndim == 3 + + feature = feature.to(device) + # at entry, feature is (N, T, C) + + supervisions = batch["supervisions"] + feature_lens = supervisions["num_frames"].to(device) + + encoder_out, encoder_out_lens = model.encoder( + x=feature, x_lens=feature_lens + ) + hyps = [] + batch_size = encoder_out.size(0) + + for i in range(batch_size): + # fmt: off + encoder_out_i = encoder_out[i:i+1, :encoder_out_lens[i]] + # fmt: on + if params.decoding_method == "greedy_search": + hyp = greedy_search( + model=model, + encoder_out=encoder_out_i, + max_sym_per_frame=params.max_sym_per_frame, + ) + elif params.decoding_method == "beam_search": + hyp = beam_search( + model=model, encoder_out=encoder_out_i, beam=params.beam_size + ) + else: + raise ValueError( + f"Unsupported decoding method: {params.decoding_method}" + ) + hyps.append(sp.decode(hyp).split()) + + if params.decoding_method == "greedy_search": + return {"greedy_search": hyps} + else: + return {f"beam_{params.beam_size}": hyps} + + +def decode_dataset( + dl: torch.utils.data.DataLoader, + params: AttributeDict, + model: nn.Module, + sp: spm.SentencePieceProcessor, +) -> Dict[str, List[Tuple[List[str], List[str]]]]: + """Decode dataset. + + Args: + dl: + PyTorch's dataloader containing the dataset to decode. + params: + It is returned by :func:`get_params`. + model: + The neural model. + sp: + The BPE model. + Returns: + Return a dict, whose key may be "greedy_search" if greedy search + is used, or it may be "beam_7" if beam size of 7 is used. + Its value is a list of tuples. Each tuple contains two elements: + The first is the reference transcript, and the second is the + predicted result. + """ + num_cuts = 0 + + try: + num_batches = len(dl) + except TypeError: + num_batches = "?" + + if params.decoding_method == "greedy_search": + log_interval = 100 + else: + log_interval = 2 + + results = defaultdict(list) + for batch_idx, batch in enumerate(dl): + texts = batch["supervisions"]["text"] + + hyps_dict = decode_one_batch( + params=params, + model=model, + sp=sp, + batch=batch, + ) + + for name, hyps in hyps_dict.items(): + this_batch = [] + assert len(hyps) == len(texts) + for hyp_words, ref_text in zip(hyps, texts): + ref_words = ref_text.split() + this_batch.append((ref_words, hyp_words)) + + results[name].extend(this_batch) + + num_cuts += len(texts) + + if batch_idx % log_interval == 0: + batch_str = f"{batch_idx}/{num_batches}" + + logging.info( + f"batch {batch_str}, cuts processed until now is {num_cuts}" + ) + return results + + +def save_results( + params: AttributeDict, + test_set_name: str, + results_dict: Dict[str, List[Tuple[List[int], List[int]]]], +): + test_set_wers = dict() + for key, results in results_dict.items(): + recog_path = ( + params.res_dir / f"recogs-{test_set_name}-{key}-{params.suffix}.txt" + ) + store_transcripts(filename=recog_path, texts=results) + logging.info(f"The transcripts are stored in {recog_path}") + + # The following prints out WERs, per-word error statistics and aligned + # ref/hyp pairs. 
+ errs_filename = ( + params.res_dir / f"errs-{test_set_name}-{key}-{params.suffix}.txt" + ) + with open(errs_filename, "w") as f: + wer = write_error_stats( + f, f"{test_set_name}-{key}", results, enable_log=True + ) + test_set_wers[key] = wer + + logging.info("Wrote detailed error stats to {}".format(errs_filename)) + + test_set_wers = sorted(test_set_wers.items(), key=lambda x: x[1]) + errs_info = ( + params.res_dir + / f"wer-summary-{test_set_name}-{key}-{params.suffix}.txt" + ) + with open(errs_info, "w") as f: + print("settings\tWER", file=f) + for key, val in test_set_wers: + print("{}\t{}".format(key, val), file=f) + + s = "\nFor {}, WER of different settings are:\n".format(test_set_name) + note = "\tbest for {}".format(test_set_name) + for key, val in test_set_wers: + s += "{}\t{}{}\n".format(key, val, note) + note = "" + logging.info(s) + + +@torch.no_grad() +def main(): + parser = get_parser() + LibriSpeechAsrDataModule.add_arguments(parser) + args = parser.parse_args() + args.exp_dir = Path(args.exp_dir) + + params = get_params() + params.update(vars(args)) + + assert params.decoding_method in ("greedy_search", "beam_search") + params.res_dir = params.exp_dir / params.decoding_method + + params.suffix = f"epoch-{params.epoch}-avg-{params.avg}" + if params.decoding_method == "beam_search": + params.suffix += f"-beam-{params.beam_size}" + else: + params.suffix += f"-context-{params.context_size}" + params.suffix += f"-max-sym-per-frame-{params.max_sym_per_frame}" + + setup_logger(f"{params.res_dir}/log-decode-{params.suffix}") + logging.info("Decoding started") + + device = torch.device("cpu") + if torch.cuda.is_available(): + device = torch.device("cuda", 0) + + logging.info(f"Device: {device}") + + sp = spm.SentencePieceProcessor() + sp.load(params.bpe_model) + + # is defined in local/train_bpe_model.py + params.blank_id = sp.piece_to_id("") + params.vocab_size = sp.get_piece_size() + + logging.info(params) + + logging.info("About to create model") + model = get_transducer_model(params) + + if params.avg == 1: + load_checkpoint(f"{params.exp_dir}/epoch-{params.epoch}.pt", model) + else: + start = params.epoch - params.avg + 1 + filenames = [] + for i in range(start, params.epoch + 1): + if start >= 0: + filenames.append(f"{params.exp_dir}/epoch-{i}.pt") + logging.info(f"averaging {filenames}") + model.to(device) + model.load_state_dict(average_checkpoints(filenames, device=device)) + + model.to(device) + model.eval() + model.device = device + + num_param = sum([p.numel() for p in model.parameters()]) + logging.info(f"Number of model parameters: {num_param}") + + librispeech = LibriSpeechAsrDataModule(args) + + test_clean_cuts = librispeech.test_clean_cuts() + test_other_cuts = librispeech.test_other_cuts() + + test_clean_dl = librispeech.test_dataloaders(test_clean_cuts) + test_other_dl = librispeech.test_dataloaders(test_other_cuts) + + test_sets = ["test-clean", "test-other"] + test_dl = [test_clean_dl, test_other_dl] + + for test_set, test_dl in zip(test_sets, test_dl): + results_dict = decode_dataset( + dl=test_dl, + params=params, + model=model, + sp=sp, + ) + + save_results( + params=params, + test_set_name=test_set, + results_dict=results_dict, + ) + + logging.info("Done!") + + +torch.set_num_threads(1) +torch.set_num_interop_threads(1) + +if __name__ == "__main__": + main() diff --git a/egs/librispeech/ASR/pruned_transducer_stateless/decoder.py b/egs/librispeech/ASR/pruned_transducer_stateless/decoder.py new file mode 100644 index 000000000..3d4e69a4b --- /dev/null +++ 
b/egs/librispeech/ASR/pruned_transducer_stateless/decoder.py @@ -0,0 +1,100 @@ +# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class Decoder(nn.Module): + """This class modifies the stateless decoder from the following paper: + + RNN-transducer with stateless prediction network + https://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=9054419 + + It removes the recurrent connection from the decoder, i.e., the prediction + network. Different from the above paper, it adds an extra Conv1d + right after the embedding layer. + + TODO: Implement https://arxiv.org/pdf/2109.07513.pdf + """ + + def __init__( + self, + vocab_size: int, + embedding_dim: int, + blank_id: int, + context_size: int, + ): + """ + Args: + vocab_size: + Number of tokens of the modeling unit including blank. + embedding_dim: + Dimension of the input embedding. + blank_id: + The ID of the blank symbol. + context_size: + Number of previous words to use to predict the next word. + 1 means bigram; 2 means trigram. n means (n+1)-gram. + """ + super().__init__() + self.embedding = nn.Embedding( + num_embeddings=vocab_size, + embedding_dim=embedding_dim, + padding_idx=blank_id, + ) + self.blank_id = blank_id + + assert context_size >= 1, context_size + self.context_size = context_size + if context_size > 1: + self.conv = nn.Conv1d( + in_channels=embedding_dim, + out_channels=embedding_dim, + kernel_size=context_size, + padding=0, + groups=embedding_dim, + bias=False, + ) + self.output_linear = nn.Linear(embedding_dim, vocab_size) + + def forward(self, y: torch.Tensor, need_pad: bool = True) -> torch.Tensor: + """ + Args: + y: + A 2-D tensor of shape (N, U) with blank prepended. + need_pad: + True to left pad the input. Should be True during training. + False to not pad the input. Should be False during inference. + Returns: + Return a tensor of shape (N, U, embedding_dim). 
+ """ + embedding_out = self.embedding(y) + if self.context_size > 1: + embedding_out = embedding_out.permute(0, 2, 1) + if need_pad is True: + embedding_out = F.pad( + embedding_out, pad=(self.context_size - 1, 0) + ) + else: + # During inference time, there is no need to do extra padding + # as we only need one output + assert embedding_out.size(-1) == self.context_size + embedding_out = self.conv(embedding_out) + embedding_out = embedding_out.permute(0, 2, 1) + embedding_out = self.output_linear(F.relu(embedding_out)) + return embedding_out diff --git a/egs/librispeech/ASR/pruned_transducer_stateless/encoder_interface.py b/egs/librispeech/ASR/pruned_transducer_stateless/encoder_interface.py new file mode 120000 index 000000000..aa5d0217a --- /dev/null +++ b/egs/librispeech/ASR/pruned_transducer_stateless/encoder_interface.py @@ -0,0 +1 @@ +../transducer_stateless/encoder_interface.py \ No newline at end of file diff --git a/egs/librispeech/ASR/pruned_transducer_stateless/export.py b/egs/librispeech/ASR/pruned_transducer_stateless/export.py new file mode 100755 index 000000000..94987c39a --- /dev/null +++ b/egs/librispeech/ASR/pruned_transducer_stateless/export.py @@ -0,0 +1,252 @@ +#!/usr/bin/env python3 +# +# Copyright 2021 Xiaomi Corporation (Author: Fangjun Kuang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script converts several saved checkpoints +# to a single one using model averaging. +""" +Usage: +./pruned_transducer_stateless/export.py \ + --exp-dir ./pruned_transducer_stateless/exp \ + --bpe-model data/lang_bpe_500/bpe.model \ + --epoch 20 \ + --avg 10 + +It will generate a file exp_dir/pretrained.pt + +To use the generated file with `pruned_transducer_stateless/decode.py`, +you can do: + + cd /path/to/exp_dir + ln -s pretrained.pt epoch-9999.pt + + cd /path/to/egs/librispeech/ASR + ./pruned_transducer_stateless/decode.py \ + --exp-dir ./pruned_transducer_stateless/exp \ + --epoch 9999 \ + --avg 1 \ + --max-duration 1 \ + --bpe-model data/lang_bpe_500/bpe.model +""" + +import argparse +import logging +from pathlib import Path + +import sentencepiece as spm +import torch +import torch.nn as nn +from conformer import Conformer +from decoder import Decoder +from joiner import Joiner +from model import Transducer + +from icefall.checkpoint import average_checkpoints, load_checkpoint +from icefall.env import get_env_info +from icefall.utils import AttributeDict, str2bool + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--epoch", + type=int, + default=28, + help="It specifies the checkpoint to use for decoding." + "Note: Epoch counts from 0.", + ) + + parser.add_argument( + "--avg", + type=int, + default=15, + help="Number of checkpoints to average. Automatically select " + "consecutive checkpoints before the checkpoint specified by " + "'--epoch'. 
", + ) + + parser.add_argument( + "--exp-dir", + type=str, + default="pruned_transducer_stateless/exp", + help="""It specifies the directory where all training related + files, e.g., checkpoints, log, etc, are saved + """, + ) + + parser.add_argument( + "--bpe-model", + type=str, + default="data/lang_bpe_500/bpe.model", + help="Path to the BPE model", + ) + + parser.add_argument( + "--jit", + type=str2bool, + default=False, + help="""True to save a model after applying torch.jit.script. + """, + ) + + parser.add_argument( + "--context-size", + type=int, + default=2, + help="The context size in the decoder. 1 means bigram; " + "2 means tri-gram", + ) + + return parser + + +def get_params() -> AttributeDict: + params = AttributeDict( + { + # parameters for conformer + "feature_dim": 80, + "subsampling_factor": 4, + "attention_dim": 512, + "nhead": 8, + "dim_feedforward": 2048, + "num_encoder_layers": 12, + "vgg_frontend": False, + # parameters for decoder + "embedding_dim": 512, + "env_info": get_env_info(), + } + ) + return params + + +def get_encoder_model(params: AttributeDict) -> nn.Module: + encoder = Conformer( + num_features=params.feature_dim, + output_dim=params.vocab_size, + subsampling_factor=params.subsampling_factor, + d_model=params.attention_dim, + nhead=params.nhead, + dim_feedforward=params.dim_feedforward, + num_encoder_layers=params.num_encoder_layers, + vgg_frontend=params.vgg_frontend, + ) + return encoder + + +def get_decoder_model(params: AttributeDict) -> nn.Module: + decoder = Decoder( + vocab_size=params.vocab_size, + embedding_dim=params.embedding_dim, + blank_id=params.blank_id, + context_size=params.context_size, + ) + return decoder + + +def get_joiner_model(params: AttributeDict) -> nn.Module: + joiner = Joiner( + input_dim=params.vocab_size, + inner_dim=params.embedding_dim, + output_dim=params.vocab_size, + ) + return joiner + + +def get_transducer_model(params: AttributeDict) -> nn.Module: + encoder = get_encoder_model(params) + decoder = get_decoder_model(params) + joiner = get_joiner_model(params) + + model = Transducer( + encoder=encoder, + decoder=decoder, + joiner=joiner, + ) + return model + + +def main(): + args = get_parser().parse_args() + args.exp_dir = Path(args.exp_dir) + + assert args.jit is False, "Support torchscript will be added later" + + params = get_params() + params.update(vars(args)) + + device = torch.device("cpu") + if torch.cuda.is_available(): + device = torch.device("cuda", 0) + + logging.info(f"device: {device}") + + sp = spm.SentencePieceProcessor() + sp.load(params.bpe_model) + + # is defined in local/train_bpe_model.py + params.blank_id = sp.piece_to_id("") + params.vocab_size = sp.get_piece_size() + + logging.info(params) + + logging.info("About to create model") + model = get_transducer_model(params) + + model.to(device) + + if params.avg == 1: + load_checkpoint(f"{params.exp_dir}/epoch-{params.epoch}.pt", model) + else: + start = params.epoch - params.avg + 1 + filenames = [] + for i in range(start, params.epoch + 1): + if start >= 0: + filenames.append(f"{params.exp_dir}/epoch-{i}.pt") + logging.info(f"averaging {filenames}") + model.to(device) + model.load_state_dict(average_checkpoints(filenames, device=device)) + + model.eval() + + model.to("cpu") + model.eval() + + if params.jit: + logging.info("Using torch.jit.script") + model = torch.jit.script(model) + filename = params.exp_dir / "cpu_jit.pt" + model.save(str(filename)) + logging.info(f"Saved to {filename}") + else: + logging.info("Not using torch.jit.script") + # 
Save it using a format so that it can be loaded + # by :func:`load_checkpoint` + filename = params.exp_dir / "pretrained.pt" + torch.save({"model": model.state_dict()}, str(filename)) + logging.info(f"Saved to {filename}") + + +if __name__ == "__main__": + formatter = ( + "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s" + ) + + logging.basicConfig(format=formatter, level=logging.INFO) + main() diff --git a/egs/librispeech/ASR/pruned_transducer_stateless/joiner.py b/egs/librispeech/ASR/pruned_transducer_stateless/joiner.py new file mode 100644 index 000000000..7c5a93a86 --- /dev/null +++ b/egs/librispeech/ASR/pruned_transducer_stateless/joiner.py @@ -0,0 +1,50 @@ +# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class Joiner(nn.Module): + def __init__(self, input_dim: int, inner_dim: int, output_dim: int): + super().__init__() + + self.inner_linear = nn.Linear(input_dim, inner_dim) + self.output_linear = nn.Linear(inner_dim, output_dim) + + def forward( + self, encoder_out: torch.Tensor, decoder_out: torch.Tensor + ) -> torch.Tensor: + """ + Args: + encoder_out: + Output from the encoder. Its shape is (N, T, s_range, C). + decoder_out: + Output from the decoder. Its shape is (N, T, s_range, C). + Returns: + Return a tensor of shape (N, T, s_range, C). + """ + assert encoder_out.ndim == decoder_out.ndim == 4 + assert encoder_out.shape == decoder_out.shape + + logit = encoder_out + decoder_out + + logit = self.inner_linear(torch.tanh(logit)) + + output = self.output_linear(F.relu(logit)) + + return output diff --git a/egs/librispeech/ASR/pruned_transducer_stateless/model.py b/egs/librispeech/ASR/pruned_transducer_stateless/model.py new file mode 100644 index 000000000..2f019bcdb --- /dev/null +++ b/egs/librispeech/ASR/pruned_transducer_stateless/model.py @@ -0,0 +1,169 @@ +# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang, Wei Kang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
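The Joiner above operates on 4-D pruned tensors of shape (N, T, s_range, C), as its docstring states. The following is a minimal shape-check sketch, not part of the patch; it assumes it is run from egs/librispeech/ASR/pruned_transducer_stateless/ and uses arbitrary toy sizes, with C playing the role of the vocabulary size.

```python
# Illustration only: verify the Joiner's expected pruned shapes.
import torch

from joiner import Joiner  # the module added by this patch

N, T, s_range, C = 2, 10, 5, 500
joiner = Joiner(input_dim=C, inner_dim=512, output_dim=C)

am_pruned = torch.randn(N, T, s_range, C)  # pruned encoder output
lm_pruned = torch.randn(N, T, s_range, C)  # pruned decoder output

logits = joiner(am_pruned, lm_pruned)
assert logits.shape == (N, T, s_range, C)
```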
+ + +import k2 +import torch +import torch.nn as nn +from encoder_interface import EncoderInterface + +from icefall.utils import add_sos + + +class Transducer(nn.Module): + """It implements https://arxiv.org/pdf/1211.3711.pdf + "Sequence Transduction with Recurrent Neural Networks" + """ + + def __init__( + self, + encoder: EncoderInterface, + decoder: nn.Module, + joiner: nn.Module, + ): + """ + Args: + encoder: + It is the transcription network in the paper. Its accepts + two inputs: `x` of (N, T, C) and `x_lens` of shape (N,). + It returns two tensors: `logits` of shape (N, T, C) and + `logit_lens` of shape (N,). + decoder: + It is the prediction network in the paper. Its input shape + is (N, U) and its output shape is (N, U, C). It should contain + one attribute: `blank_id`. + joiner: + It has two inputs with shapes: (N, T, C) and (N, U, C). Its + output shape is (N, T, U, C). Note that its output contains + unnormalized probs, i.e., not processed by log-softmax. + """ + super().__init__() + assert isinstance(encoder, EncoderInterface), type(encoder) + assert hasattr(decoder, "blank_id") + + self.encoder = encoder + self.decoder = decoder + self.joiner = joiner + + def forward( + self, + x: torch.Tensor, + x_lens: torch.Tensor, + y: k2.RaggedTensor, + prune_range: int = 5, + am_scale: float = 0.0, + lm_scale: float = 0.0, + ) -> torch.Tensor: + """ + Args: + x: + A 3-D tensor of shape (N, T, C). + x_lens: + A 1-D tensor of shape (N,). It contains the number of frames in `x` + before padding. + y: + A ragged tensor with 2 axes [utt][label]. It contains labels of each + utterance. + prune_range: + The prune range for rnnt loss, it means how many symbols(context) + we are considering for each frame to compute the loss. + am_scale: + The scale to smooth the loss with am (output of encoder network) + part + lm_scale: + The scale to smooth the loss with lm (output of predictor network) + part + Returns: + Return the transducer loss. + + Note: + Regarding am_scale & lm_scale, it will make the loss-function one of + the form: + lm_scale * lm_probs + am_scale * am_probs + + (1-lm_scale-am_scale) * combined_probs + """ + assert x.ndim == 3, x.shape + assert x_lens.ndim == 1, x_lens.shape + assert y.num_axes == 2, y.num_axes + + assert x.size(0) == x_lens.size(0) == y.dim0 + + encoder_out, x_lens = self.encoder(x, x_lens) + assert torch.all(x_lens > 0) + + # Now for the decoder, i.e., the prediction network + row_splits = y.shape.row_splits(1) + y_lens = row_splits[1:] - row_splits[:-1] + + blank_id = self.decoder.blank_id + sos_y = add_sos(y, sos_id=blank_id) + + # sos_y_padded: [B, S + 1], start with SOS. 
+ sos_y_padded = sos_y.pad(mode="constant", padding_value=blank_id) + + # decoder_out: [B, S + 1, C] + decoder_out = self.decoder(sos_y_padded) + + # Note: y does not start with SOS + # y_padded : [B, S] + y_padded = y.pad(mode="constant", padding_value=0) + + y_padded = y_padded.to(torch.int64) + boundary = torch.zeros( + (x.size(0), 4), dtype=torch.int64, device=x.device + ) + boundary[:, 2] = y_lens + boundary[:, 3] = x_lens + + simple_loss, (px_grad, py_grad) = k2.rnnt_loss_smoothed( + lm=decoder_out, + am=encoder_out, + symbols=y_padded, + termination_symbol=blank_id, + lm_only_scale=lm_scale, + am_only_scale=am_scale, + boundary=boundary, + reduction="sum", + return_grad=True, + ) + + # ranges : [B, T, prune_range] + ranges = k2.get_rnnt_prune_ranges( + px_grad=px_grad, + py_grad=py_grad, + boundary=boundary, + s_range=prune_range, + ) + + # am_pruned : [B, T, prune_range, C] + # lm_pruned : [B, T, prune_range, C] + am_pruned, lm_pruned = k2.do_rnnt_pruning( + am=encoder_out, lm=decoder_out, ranges=ranges + ) + + # logits : [B, T, prune_range, C] + logits = self.joiner(am_pruned, lm_pruned) + + pruned_loss = k2.rnnt_loss_pruned( + logits=logits, + symbols=y_padded, + ranges=ranges, + termination_symbol=blank_id, + boundary=boundary, + reduction="sum", + ) + + return (simple_loss, pruned_loss) diff --git a/egs/librispeech/ASR/pruned_transducer_stateless/pretrained.py b/egs/librispeech/ASR/pruned_transducer_stateless/pretrained.py new file mode 100755 index 000000000..73c5aee5c --- /dev/null +++ b/egs/librispeech/ASR/pruned_transducer_stateless/pretrained.py @@ -0,0 +1,326 @@ +#!/usr/bin/env python3 +# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Usage: + +(1) greedy search +./pruned_transducer_stateless/pretrained.py \ + --checkpoint ./pruned_transducer_stateless/exp/pretrained.pt \ + --bpe-model ./data/lang_bpe_500/bpe.model \ + --method greedy_search \ + /path/to/foo.wav \ + /path/to/bar.wav \ + +(1) beam search +./pruned_transducer_stateless/pretrained.py \ + --checkpoint ./pruned_transducer_stateless/exp/pretrained.pt \ + --bpe-model ./data/lang_bpe_500/bpe.model \ + --method beam_search \ + --beam-size 4 \ + /path/to/foo.wav \ + /path/to/bar.wav \ + +You can also use `./pruned_transducer_stateless/exp/epoch-xx.pt`. 
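The forward() of model.py above chains four k2 primitives: rnnt_loss_smoothed (which also returns gradients), get_rnnt_prune_ranges, do_rnnt_pruning, and rnnt_loss_pruned. Purely as an illustration, the same pipeline can be exercised on random toy tensors; the sizes below are made up, the Joiner is replaced by a plain addition for brevity, and it assumes a k2 build that provides these pruned RNN-T functions, as this recipe requires.

```python
# Illustration only: toy run of the pruned RNN-T loss pipeline.
import k2
import torch

N, T, S, C = 2, 20, 8, 100          # batch, frames, symbols, vocab size
blank_id, prune_range = 0, 5

am = torch.randn(N, T, C)           # stands in for encoder_out
lm = torch.randn(N, S + 1, C)       # stands in for decoder_out (SOS prepended)
symbols = torch.randint(1, C, (N, S), dtype=torch.int64)

boundary = torch.zeros(N, 4, dtype=torch.int64)
boundary[:, 2] = S                  # y_lens
boundary[:, 3] = T                  # x_lens

simple_loss, (px_grad, py_grad) = k2.rnnt_loss_smoothed(
    lm=lm,
    am=am,
    symbols=symbols,
    termination_symbol=blank_id,
    lm_only_scale=0.25,
    am_only_scale=0.0,
    boundary=boundary,
    reduction="sum",
    return_grad=True,
)

ranges = k2.get_rnnt_prune_ranges(
    px_grad=px_grad,
    py_grad=py_grad,
    boundary=boundary,
    s_range=prune_range,
)

am_pruned, lm_pruned = k2.do_rnnt_pruning(am=am, lm=lm, ranges=ranges)

logits = am_pruned + lm_pruned      # the real recipe applies the Joiner here

pruned_loss = k2.rnnt_loss_pruned(
    logits=logits,
    symbols=symbols,
    ranges=ranges,
    termination_symbol=blank_id,
    boundary=boundary,
    reduction="sum",
)

print(simple_loss.item(), pruned_loss.item())
```

The simple loss thus serves two purposes: it acts as a regularizing training term, and its gradients define the pruning bounds used for the pruned loss.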
+ +Note: ./pruned_transducer_stateless/exp/pretrained.pt is generated by +./pruned_transducer_stateless/export.py +""" + + +import argparse +import logging +import math +from typing import List + +import kaldifeat +import sentencepiece as spm +import torch +import torch.nn as nn +import torchaudio +from beam_search import beam_search, greedy_search +from conformer import Conformer +from decoder import Decoder +from joiner import Joiner +from model import Transducer +from torch.nn.utils.rnn import pad_sequence + +from icefall.env import get_env_info +from icefall.utils import AttributeDict + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--checkpoint", + type=str, + required=True, + help="Path to the checkpoint. " + "The checkpoint is assumed to be saved by " + "icefall.checkpoint.save_checkpoint().", + ) + + parser.add_argument( + "--bpe-model", + type=str, + help="""Path to bpe.model. + Used only when method is ctc-decoding. + """, + ) + + parser.add_argument( + "--method", + type=str, + default="greedy_search", + help="""Possible values are: + - greedy_search + - beam_search + """, + ) + + parser.add_argument( + "sound_files", + type=str, + nargs="+", + help="The input sound file(s) to transcribe. " + "Supported formats are those supported by torchaudio.load(). " + "For example, wav and flac are supported. " + "The sample rate has to be 16kHz.", + ) + + parser.add_argument( + "--beam-size", + type=int, + default=4, + help="Used only when --method is beam_search", + ) + + parser.add_argument( + "--context-size", + type=int, + default=2, + help="The context size in the decoder. 1 means bigram; " + "2 means tri-gram", + ) + parser.add_argument( + "--max-sym-per-frame", + type=int, + default=3, + help="""Maximum number of symbols per frame. Used only when + --method is greedy_search. 
+ """, + ) + + return parser + + +def get_params() -> AttributeDict: + params = AttributeDict( + { + "sample_rate": 16000, + # parameters for conformer + "feature_dim": 80, + "subsampling_factor": 4, + "attention_dim": 512, + "nhead": 8, + "dim_feedforward": 2048, + "num_encoder_layers": 12, + "vgg_frontend": False, + # parameters for decoder + "embedding_dim": 512, + "env_info": get_env_info(), + } + ) + return params + + +def get_encoder_model(params: AttributeDict) -> nn.Module: + encoder = Conformer( + num_features=params.feature_dim, + output_dim=params.vocab_size, + subsampling_factor=params.subsampling_factor, + d_model=params.attention_dim, + nhead=params.nhead, + dim_feedforward=params.dim_feedforward, + num_encoder_layers=params.num_encoder_layers, + vgg_frontend=params.vgg_frontend, + ) + return encoder + + +def get_decoder_model(params: AttributeDict) -> nn.Module: + decoder = Decoder( + vocab_size=params.vocab_size, + embedding_dim=params.embedding_dim, + blank_id=params.blank_id, + context_size=params.context_size, + ) + return decoder + + +def get_joiner_model(params: AttributeDict) -> nn.Module: + joiner = Joiner( + input_dim=params.vocab_size, + inner_dim=params.embedding_dim, + output_dim=params.vocab_size, + ) + return joiner + + +def get_transducer_model(params: AttributeDict) -> nn.Module: + encoder = get_encoder_model(params) + decoder = get_decoder_model(params) + joiner = get_joiner_model(params) + + model = Transducer( + encoder=encoder, + decoder=decoder, + joiner=joiner, + ) + return model + + +def read_sound_files( + filenames: List[str], expected_sample_rate: float +) -> List[torch.Tensor]: + """Read a list of sound files into a list 1-D float32 torch tensors. + Args: + filenames: + A list of sound filenames. + expected_sample_rate: + The expected sample rate of the sound files. + Returns: + Return a list of 1-D float32 torch tensors. + """ + ans = [] + for f in filenames: + wave, sample_rate = torchaudio.load(f) + assert sample_rate == expected_sample_rate, ( + f"expected sample rate: {expected_sample_rate}. 
" + f"Given: {sample_rate}" + ) + # We use only the first channel + ans.append(wave[0]) + return ans + + +def main(): + parser = get_parser() + args = parser.parse_args() + + params = get_params() + + params.update(vars(args)) + + sp = spm.SentencePieceProcessor() + sp.load(params.bpe_model) + + # is defined in local/train_bpe_model.py + params.blank_id = sp.piece_to_id("") + params.vocab_size = sp.get_piece_size() + + logging.info(f"{params}") + + device = torch.device("cpu") + if torch.cuda.is_available(): + device = torch.device("cuda", 0) + + logging.info(f"device: {device}") + + logging.info("Creating model") + model = get_transducer_model(params) + + checkpoint = torch.load(args.checkpoint, map_location="cpu") + model.load_state_dict(checkpoint["model"], strict=False) + model.to(device) + model.eval() + model.device = device + + logging.info("Constructing Fbank computer") + opts = kaldifeat.FbankOptions() + opts.device = device + opts.frame_opts.dither = 0 + opts.frame_opts.snip_edges = False + opts.frame_opts.samp_freq = params.sample_rate + opts.mel_opts.num_bins = params.feature_dim + + fbank = kaldifeat.Fbank(opts) + + logging.info(f"Reading sound files: {params.sound_files}") + waves = read_sound_files( + filenames=params.sound_files, expected_sample_rate=params.sample_rate + ) + waves = [w.to(device) for w in waves] + + logging.info("Decoding started") + features = fbank(waves) + feature_lengths = [f.size(0) for f in features] + + features = pad_sequence( + features, batch_first=True, padding_value=math.log(1e-10) + ) + + feature_lengths = torch.tensor(feature_lengths, device=device) + + with torch.no_grad(): + encoder_out, encoder_out_lens = model.encoder( + x=features, x_lens=feature_lengths + ) + + num_waves = encoder_out.size(0) + hyps = [] + msg = f"Using {params.method}" + if params.method == "beam_search": + msg += f" with beam size {params.beam_size}" + logging.info(msg) + for i in range(num_waves): + # fmt: off + encoder_out_i = encoder_out[i:i+1, :encoder_out_lens[i]] + # fmt: on + if params.method == "greedy_search": + hyp = greedy_search( + model=model, + encoder_out=encoder_out_i, + max_sym_per_frame=params.max_sym_per_frame, + ) + elif params.method == "beam_search": + hyp = beam_search( + model=model, encoder_out=encoder_out_i, beam=params.beam_size + ) + else: + raise ValueError(f"Unsupported method: {params.method}") + + hyps.append(sp.decode(hyp).split()) + + s = "\n" + for filename, hyp in zip(params.sound_files, hyps): + words = " ".join(hyp) + s += f"{filename}:\n{words}\n\n" + logging.info(s) + + logging.info("Decoding Done") + + +if __name__ == "__main__": + formatter = ( + "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s" + ) + + logging.basicConfig(format=formatter, level=logging.INFO) + main() diff --git a/egs/librispeech/ASR/pruned_transducer_stateless/subsampling.py b/egs/librispeech/ASR/pruned_transducer_stateless/subsampling.py new file mode 120000 index 000000000..73068da26 --- /dev/null +++ b/egs/librispeech/ASR/pruned_transducer_stateless/subsampling.py @@ -0,0 +1 @@ +../transducer/subsampling.py \ No newline at end of file diff --git a/egs/librispeech/ASR/pruned_transducer_stateless/test_decoder.py b/egs/librispeech/ASR/pruned_transducer_stateless/test_decoder.py new file mode 100755 index 000000000..937d55c2a --- /dev/null +++ b/egs/librispeech/ASR/pruned_transducer_stateless/test_decoder.py @@ -0,0 +1,58 @@ +#!/usr/bin/env python3 +# Copyright 2021 Xiaomi Corp. 
(authors: Fangjun Kuang, Wei Kang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +To run this file, do: + + cd icefall/egs/librispeech/ASR + python ./pruned_transducer_stateless/test_decoder.py +""" + +import torch +from decoder import Decoder + + +def test_decoder(): + vocab_size = 3 + blank_id = 0 + embedding_dim = 128 + context_size = 4 + + decoder = Decoder( + vocab_size=vocab_size, + embedding_dim=embedding_dim, + blank_id=blank_id, + context_size=context_size, + ) + N = 100 + U = 20 + x = torch.randint(low=0, high=vocab_size, size=(N, U)) + y = decoder(x) + assert y.shape == (N, U, vocab_size) + + # for inference + x = torch.randint(low=0, high=vocab_size, size=(N, context_size)) + y = decoder(x, need_pad=False) + assert y.shape == (N, 1, vocab_size) + + +def main(): + test_decoder() + + +if __name__ == "__main__": + main() diff --git a/egs/librispeech/ASR/pruned_transducer_stateless/train.py b/egs/librispeech/ASR/pruned_transducer_stateless/train.py new file mode 100755 index 000000000..e19473788 --- /dev/null +++ b/egs/librispeech/ASR/pruned_transducer_stateless/train.py @@ -0,0 +1,830 @@ +#!/usr/bin/env python3 +# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang, +# Wei Kang +# Mingshuang Luo) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +Usage: + +export CUDA_VISIBLE_DEVICES="0,1,2,3" + +./pruned_transducer_stateless/train.py \ + --world-size 4 \ + --num-epochs 30 \ + --start-epoch 0 \ + --exp-dir pruned_transducer_stateless/exp \ + --full-libri 1 \ + --max-duration 300 +""" + + +import argparse +import logging +from pathlib import Path +from shutil import copyfile +from typing import Optional, Tuple + +import k2 +import sentencepiece as spm +import torch +import torch.multiprocessing as mp +import torch.nn as nn +from asr_datamodule import LibriSpeechAsrDataModule +from conformer import Conformer +from decoder import Decoder +from joiner import Joiner +from lhotse.cut import Cut +from lhotse.utils import fix_random_seed +from model import Transducer +from torch import Tensor +from torch.nn.parallel import DistributedDataParallel as DDP +from torch.nn.utils import clip_grad_norm_ +from torch.utils.tensorboard import SummaryWriter +from transformer import Noam + +from icefall.checkpoint import load_checkpoint +from icefall.checkpoint import save_checkpoint as save_checkpoint_impl +from icefall.dist import cleanup_dist, setup_dist +from icefall.env import get_env_info +from icefall.utils import ( + AttributeDict, + MetricsTracker, + measure_gradient_norms, + measure_weight_norms, + optim_step_and_measure_param_change, + setup_logger, + str2bool, +) + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--world-size", + type=int, + default=1, + help="Number of GPUs for DDP training.", + ) + + parser.add_argument( + "--master-port", + type=int, + default=12354, + help="Master port to use for DDP training.", + ) + + parser.add_argument( + "--tensorboard", + type=str2bool, + default=True, + help="Should various information be logged in tensorboard.", + ) + + parser.add_argument( + "--num-epochs", + type=int, + default=30, + help="Number of epochs to train.", + ) + + parser.add_argument( + "--start-epoch", + type=int, + default=0, + help="""Resume training from from this epoch. + If it is positive, it will load checkpoint from + transducer_stateless/exp/epoch-{start_epoch-1}.pt + """, + ) + + parser.add_argument( + "--exp-dir", + type=str, + default="pruned_transducer_stateless/exp", + help="""The experiment dir. + It specifies the directory where all training related + files, e.g., checkpoints, log, etc, are saved + """, + ) + + parser.add_argument( + "--bpe-model", + type=str, + default="data/lang_bpe_500/bpe.model", + help="Path to the BPE model", + ) + + parser.add_argument( + "--lr-factor", + type=float, + default=5.0, + help="The lr_factor for Noam optimizer", + ) + + parser.add_argument( + "--context-size", + type=int, + default=2, + help="The context size in the decoder. 
1 means bigram; " + "2 means tri-gram", + ) + + parser.add_argument( + "--prune-range", + type=int, + default=5, + help="The prune range for rnnt loss, it means how many symbols(context)" + "we are using to compute the loss", + ) + + parser.add_argument( + "--lm-scale", + type=float, + default=0.25, + help="The scale to smooth the loss with lm " + "(output of prediction network) part.", + ) + + parser.add_argument( + "--am-scale", + type=float, + default=0.0, + help="The scale to smooth the loss with am (output of encoder network)" + "part.", + ) + + parser.add_argument( + "--simple-loss-scale", + type=float, + default=0.5, + help="To get pruning ranges, we will calculate a simple version" + "loss(joiner is just addition), this simple loss also uses for" + "training (as a regularization item). We will scale the simple loss" + "with this parameter before adding to the final loss.", + ) + + return parser + + +def get_params() -> AttributeDict: + """Return a dict containing training parameters. + + All training related parameters that are not passed from the commandline + are saved in the variable `params`. + + Commandline options are merged into `params` after they are parsed, so + you can also access them via `params`. + + Explanation of options saved in `params`: + + - best_train_loss: Best training loss so far. It is used to select + the model that has the lowest training loss. It is + updated during the training. + + - best_valid_loss: Best validation loss so far. It is used to select + the model that has the lowest validation loss. It is + updated during the training. + + - best_train_epoch: It is the epoch that has the best training loss. + + - best_valid_epoch: It is the epoch that has the best validation loss. + + - batch_idx_train: Used to writing statistics to tensorboard. It + contains number of batches trained so far across + epochs. + + - log_interval: Print training loss if batch_idx % log_interval` is 0 + + - reset_interval: Reset statistics if batch_idx % reset_interval is 0 + + - valid_interval: Run validation if batch_idx % valid_interval is 0 + + - feature_dim: The model input dim. It has to match the one used + in computing features. + + - subsampling_factor: The subsampling factor for the model. + + - attention_dim: Hidden dim for multi-head attention model. + + - num_decoder_layers: Number of decoder layer of transformer decoder. + + - warm_step: The warm_step for Noam optimizer. 
+ """ + params = AttributeDict( + { + "best_train_loss": float("inf"), + "best_valid_loss": float("inf"), + "best_train_epoch": -1, + "best_valid_epoch": -1, + "batch_idx_train": 0, + "log_interval": 50, + "reset_interval": 200, + "valid_interval": 3000, # For the 100h subset, use 800 + "log_diagnostics": False, + # parameters for conformer + "feature_dim": 80, + "subsampling_factor": 4, + "attention_dim": 512, + "nhead": 8, + "dim_feedforward": 2048, + "num_encoder_layers": 12, + "vgg_frontend": False, + # parameters for decoder + "embedding_dim": 512, + # parameters for Noam + "warm_step": 80000, # For the 100h subset, use 30000 + "env_info": get_env_info(), + } + ) + + return params + + +def get_encoder_model(params: AttributeDict) -> nn.Module: + # TODO: We can add an option to switch between Conformer and Transformer + encoder = Conformer( + num_features=params.feature_dim, + output_dim=params.vocab_size, + subsampling_factor=params.subsampling_factor, + d_model=params.attention_dim, + nhead=params.nhead, + dim_feedforward=params.dim_feedforward, + num_encoder_layers=params.num_encoder_layers, + vgg_frontend=params.vgg_frontend, + ) + return encoder + + +def get_decoder_model(params: AttributeDict) -> nn.Module: + decoder = Decoder( + vocab_size=params.vocab_size, + embedding_dim=params.embedding_dim, + blank_id=params.blank_id, + context_size=params.context_size, + ) + return decoder + + +def get_joiner_model(params: AttributeDict) -> nn.Module: + joiner = Joiner( + input_dim=params.vocab_size, + inner_dim=params.embedding_dim, + output_dim=params.vocab_size, + ) + return joiner + + +def get_transducer_model(params: AttributeDict) -> nn.Module: + encoder = get_encoder_model(params) + decoder = get_decoder_model(params) + joiner = get_joiner_model(params) + + model = Transducer( + encoder=encoder, + decoder=decoder, + joiner=joiner, + ) + return model + + +def load_checkpoint_if_available( + params: AttributeDict, + model: nn.Module, + optimizer: Optional[torch.optim.Optimizer] = None, + scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None, +) -> None: + """Load checkpoint from file. + + If params.start_epoch is positive, it will load the checkpoint from + `params.start_epoch - 1`. Otherwise, this function does nothing. + + Apart from loading state dict for `model`, `optimizer` and `scheduler`, + it also updates `best_train_epoch`, `best_train_loss`, `best_valid_epoch`, + and `best_valid_loss` in `params`. + + Args: + params: + The return value of :func:`get_params`. + model: + The training model. + optimizer: + The optimizer that we are using. + scheduler: + The learning rate scheduler we are using. + Returns: + Return None. + """ + if params.start_epoch <= 0: + return + + filename = params.exp_dir / f"epoch-{params.start_epoch-1}.pt" + saved_params = load_checkpoint( + filename, + model=model, + optimizer=optimizer, + scheduler=scheduler, + ) + + keys = [ + "best_train_epoch", + "best_valid_epoch", + "batch_idx_train", + "best_train_loss", + "best_valid_loss", + ] + for k in keys: + params[k] = saved_params[k] + + return saved_params + + +def save_checkpoint( + params: AttributeDict, + model: nn.Module, + optimizer: Optional[torch.optim.Optimizer] = None, + scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None, + rank: int = 0, +) -> None: + """Save model, optimizer, scheduler and training stats to file. + + Args: + params: + It is returned by :func:`get_params`. + model: + The training model. 
+ """ + if rank != 0: + return + filename = params.exp_dir / f"epoch-{params.cur_epoch}.pt" + save_checkpoint_impl( + filename=filename, + model=model, + params=params, + optimizer=optimizer, + scheduler=scheduler, + rank=rank, + ) + + if params.best_train_epoch == params.cur_epoch: + best_train_filename = params.exp_dir / "best-train-loss.pt" + copyfile(src=filename, dst=best_train_filename) + + if params.best_valid_epoch == params.cur_epoch: + best_valid_filename = params.exp_dir / "best-valid-loss.pt" + copyfile(src=filename, dst=best_valid_filename) + + +def compute_loss( + params: AttributeDict, + model: nn.Module, + sp: spm.SentencePieceProcessor, + batch: dict, + is_training: bool, +) -> Tuple[Tensor, MetricsTracker]: + """ + Compute CTC loss given the model and its inputs. + + Args: + params: + Parameters for training. See :func:`get_params`. + model: + The model for training. It is an instance of Conformer in our case. + batch: + A batch of data. See `lhotse.dataset.K2SpeechRecognitionDataset()` + for the content in it. + is_training: + True for training. False for validation. When it is True, this + function enables autograd during computation; when it is False, it + disables autograd. + """ + device = model.device + feature = batch["inputs"] + # at entry, feature is (N, T, C) + assert feature.ndim == 3 + feature = feature.to(device) + + supervisions = batch["supervisions"] + feature_lens = supervisions["num_frames"].to(device) + + texts = batch["supervisions"]["text"] + y = sp.encode(texts, out_type=int) + y = k2.RaggedTensor(y).to(device) + + with torch.set_grad_enabled(is_training): + simple_loss, pruned_loss = model( + x=feature, + x_lens=feature_lens, + y=y, + prune_range=params.prune_range, + am_scale=params.am_scale, + lm_scale=params.lm_scale, + ) + loss = params.simple_loss_scale * simple_loss + pruned_loss + + assert loss.requires_grad == is_training + + info = MetricsTracker() + info["frames"] = (feature_lens // params.subsampling_factor).sum().item() + + # Note: We use reduction=sum while computing the loss. + info["loss"] = loss.detach().cpu().item() + info["simple_loss"] = simple_loss.detach().cpu().item() + info["pruned_loss"] = pruned_loss.detach().cpu().item() + + return loss, info + + +def compute_validation_loss( + params: AttributeDict, + model: nn.Module, + sp: spm.SentencePieceProcessor, + valid_dl: torch.utils.data.DataLoader, + world_size: int = 1, +) -> MetricsTracker: + """Run the validation process.""" + model.eval() + + tot_loss = MetricsTracker() + + for batch_idx, batch in enumerate(valid_dl): + loss, loss_info = compute_loss( + params=params, + model=model, + sp=sp, + batch=batch, + is_training=False, + ) + assert loss.requires_grad is False + tot_loss = tot_loss + loss_info + + if world_size > 1: + tot_loss.reduce(loss.device) + + loss_value = tot_loss["loss"] / tot_loss["frames"] + if loss_value < params.best_valid_loss: + params.best_valid_epoch = params.cur_epoch + params.best_valid_loss = loss_value + + return tot_loss + + +def train_one_epoch( + params: AttributeDict, + model: nn.Module, + optimizer: torch.optim.Optimizer, + sp: spm.SentencePieceProcessor, + train_dl: torch.utils.data.DataLoader, + valid_dl: torch.utils.data.DataLoader, + tb_writer: Optional[SummaryWriter] = None, + world_size: int = 1, +) -> None: + """Train the model for one epoch. + + The training loss from the mean of all frames is saved in + `params.train_loss`. It runs the validation process every + `params.valid_interval` batches. 
+ + Args: + params: + It is returned by :func:`get_params`. + model: + The model for training. + optimizer: + The optimizer we are using. + train_dl: + Dataloader for the training dataset. + valid_dl: + Dataloader for the validation dataset. + tb_writer: + Writer to write log messages to tensorboard. + world_size: + Number of nodes in DDP training. If it is 1, DDP is disabled. + """ + model.train() + + tot_loss = MetricsTracker() + + def maybe_log_gradients(tag: str): + if ( + params.log_diagnostics + and tb_writer is not None + and params.batch_idx_train % (params.log_interval * 5) == 0 + ): + tb_writer.add_scalars( + tag, + measure_gradient_norms(model, norm="l2"), + global_step=params.batch_idx_train, + ) + + def maybe_log_weights(tag: str): + if ( + params.log_diagnostics + and tb_writer is not None + and params.batch_idx_train % (params.log_interval * 5) == 0 + ): + tb_writer.add_scalars( + tag, + measure_weight_norms(model, norm="l2"), + global_step=params.batch_idx_train, + ) + + def maybe_log_param_relative_changes(): + if ( + params.log_diagnostics + and tb_writer is not None + and params.batch_idx_train % (params.log_interval * 5) == 0 + ): + deltas = optim_step_and_measure_param_change(model, optimizer) + tb_writer.add_scalars( + "train/relative_param_change_per_minibatch", + deltas, + global_step=params.batch_idx_train, + ) + else: + optimizer.step() + + for batch_idx, batch in enumerate(train_dl): + params.batch_idx_train += 1 + batch_size = len(batch["supervisions"]["text"]) + + loss, loss_info = compute_loss( + params=params, + model=model, + sp=sp, + batch=batch, + is_training=True, + ) + # summary stats + tot_loss = (tot_loss * (1 - 1 / params.reset_interval)) + loss_info + + # NOTE: We use reduction==sum and loss is computed over utterances + # in the batch and there is no normalization to it so far. + + loss.backward() + + maybe_log_weights("train/param_norms") + maybe_log_gradients("train/grad_norms") + maybe_log_param_relative_changes() + + optimizer.zero_grad() + + if batch_idx % params.log_interval == 0: + logging.info( + f"Epoch {params.cur_epoch}, " + f"batch {batch_idx}, loss[{loss_info}], " + f"tot_loss[{tot_loss}], batch size: {batch_size}" + ) + + if batch_idx % params.log_interval == 0: + + if tb_writer is not None: + loss_info.write_summary( + tb_writer, "train/current_", params.batch_idx_train + ) + tot_loss.write_summary( + tb_writer, "train/tot_", params.batch_idx_train + ) + + if batch_idx > 0 and batch_idx % params.valid_interval == 0: + logging.info("Computing validation loss") + valid_info = compute_validation_loss( + params=params, + model=model, + sp=sp, + valid_dl=valid_dl, + world_size=world_size, + ) + model.train() + logging.info(f"Epoch {params.cur_epoch}, validation: {valid_info}") + if tb_writer is not None: + valid_info.write_summary( + tb_writer, "train/valid_", params.batch_idx_train + ) + + loss_value = tot_loss["loss"] / tot_loss["frames"] + params.train_loss = loss_value + if params.train_loss < params.best_train_loss: + params.best_train_epoch = params.cur_epoch + params.best_train_loss = params.train_loss + + +def run(rank, world_size, args): + """ + Args: + rank: + It is a value between 0 and `world_size-1`, which is + passed automatically by `mp.spawn()` in :func:`main`. + The node with rank 0 is responsible for saving checkpoint. + world_size: + Number of GPUs for DDP training. 
+ args: + The return value of get_parser().parse_args() + """ + params = get_params() + params.update(vars(args)) + if params.full_libri is False: + params.valid_interval = 800 + params.warm_step = 30000 + + fix_random_seed(42) + if world_size > 1: + setup_dist(rank, world_size, params.master_port) + + setup_logger(f"{params.exp_dir}/log/log-train") + logging.info("Training started") + + if args.tensorboard and rank == 0: + tb_writer = SummaryWriter(log_dir=f"{params.exp_dir}/tensorboard") + else: + tb_writer = None + + device = torch.device("cpu") + if torch.cuda.is_available(): + device = torch.device("cuda", rank) + logging.info(f"Device: {device}") + + sp = spm.SentencePieceProcessor() + sp.load(params.bpe_model) + + # is defined in local/train_bpe_model.py + params.blank_id = sp.piece_to_id("") + params.vocab_size = sp.get_piece_size() + + logging.info(params) + + logging.info("About to create model") + model = get_transducer_model(params) + + num_param = sum([p.numel() for p in model.parameters()]) + logging.info(f"Number of model parameters: {num_param}") + + checkpoints = load_checkpoint_if_available(params=params, model=model) + + model.to(device) + if world_size > 1: + logging.info("Using DDP") + model = DDP(model, device_ids=[rank]) + model.device = device + + optimizer = Noam( + model.parameters(), + model_size=params.attention_dim, + factor=params.lr_factor, + warm_step=params.warm_step, + ) + + if checkpoints and "optimizer" in checkpoints: + logging.info("Loading optimizer state dict") + optimizer.load_state_dict(checkpoints["optimizer"]) + + librispeech = LibriSpeechAsrDataModule(args) + + train_cuts = librispeech.train_clean_100_cuts() + if params.full_libri: + train_cuts += librispeech.train_clean_360_cuts() + train_cuts += librispeech.train_other_500_cuts() + + def remove_short_and_long_utt(c: Cut): + # Keep only utterances with duration between 1 second and 20 seconds + return 1.0 <= c.duration <= 20.0 + + num_in_total = len(train_cuts) + + train_cuts = train_cuts.filter(remove_short_and_long_utt) + + num_left = len(train_cuts) + num_removed = num_in_total - num_left + removed_percent = num_removed / num_in_total * 100 + + logging.info(f"Before removing short and long utterances: {num_in_total}") + logging.info(f"After removing short and long utterances: {num_left}") + logging.info(f"Removed {num_removed} utterances ({removed_percent:.5f}%)") + + train_dl = librispeech.train_dataloaders(train_cuts) + + valid_cuts = librispeech.dev_clean_cuts() + valid_cuts += librispeech.dev_other_cuts() + valid_dl = librispeech.valid_dataloaders(valid_cuts) + + scan_pessimistic_batches_for_oom( + model=model, + train_dl=train_dl, + optimizer=optimizer, + sp=sp, + params=params, + ) + + for epoch in range(params.start_epoch, params.num_epochs): + train_dl.sampler.set_epoch(epoch) + + cur_lr = optimizer._rate + if tb_writer is not None: + tb_writer.add_scalar( + "train/learning_rate", cur_lr, params.batch_idx_train + ) + tb_writer.add_scalar("train/epoch", epoch, params.batch_idx_train) + + if rank == 0: + logging.info("epoch {}, learning rate {}".format(epoch, cur_lr)) + + params.cur_epoch = epoch + + train_one_epoch( + params=params, + model=model, + optimizer=optimizer, + sp=sp, + train_dl=train_dl, + valid_dl=valid_dl, + tb_writer=tb_writer, + world_size=world_size, + ) + + save_checkpoint( + params=params, + model=model, + optimizer=optimizer, + rank=rank, + ) + + logging.info("Done!") + + if world_size > 1: + torch.distributed.barrier() + cleanup_dist() + + +def 
scan_pessimistic_batches_for_oom( + model: nn.Module, + train_dl: torch.utils.data.DataLoader, + optimizer: torch.optim.Optimizer, + sp: spm.SentencePieceProcessor, + params: AttributeDict, +): + from lhotse.dataset import find_pessimistic_batches + + logging.info( + "Sanity check -- see if any of the batches in epoch 0 would cause OOM." + ) + batches, crit_values = find_pessimistic_batches(train_dl.sampler) + for criterion, cuts in batches.items(): + batch = train_dl.dataset[cuts] + try: + optimizer.zero_grad() + loss, _ = compute_loss( + params=params, + model=model, + sp=sp, + batch=batch, + is_training=True, + ) + loss.backward() + clip_grad_norm_(model.parameters(), 5.0, 2.0) + optimizer.step() + except RuntimeError as e: + if "CUDA out of memory" in str(e): + logging.error( + "Your GPU ran out of memory with the current " + "max_duration setting. We recommend decreasing " + "max_duration and trying again.\n" + f"Failing criterion: {criterion} " + f"(={crit_values[criterion]}) ..." + ) + raise + + +def main(): + parser = get_parser() + LibriSpeechAsrDataModule.add_arguments(parser) + args = parser.parse_args() + args.exp_dir = Path(args.exp_dir) + + world_size = args.world_size + assert world_size >= 1 + if world_size > 1: + mp.spawn(run, args=(world_size, args), nprocs=world_size, join=True) + else: + run(rank=0, world_size=1, args=args) + + +torch.set_num_threads(1) +torch.set_num_interop_threads(1) + +if __name__ == "__main__": + main() diff --git a/egs/librispeech/ASR/pruned_transducer_stateless/transformer.py b/egs/librispeech/ASR/pruned_transducer_stateless/transformer.py new file mode 120000 index 000000000..e43f520f9 --- /dev/null +++ b/egs/librispeech/ASR/pruned_transducer_stateless/transformer.py @@ -0,0 +1 @@ +../transducer_stateless/transformer.py \ No newline at end of file diff --git a/icefall/utils.py b/icefall/utils.py index 6d4578de6..c231dbbe4 100644 --- a/icefall/utils.py +++ b/icefall/utils.py @@ -25,13 +25,15 @@ from collections import defaultdict from contextlib import contextmanager from datetime import datetime from pathlib import Path -from typing import Dict, Iterable, List, TextIO, Tuple, Union +from typing import Dict, Iterable, List, TextIO, Optional, Tuple, Union import k2 import k2.version import kaldialign import torch +import torch.nn as nn import torch.distributed as dist +from torch.cuda.amp import GradScaler from torch.utils.tensorboard import SummaryWriter Pathlike = Union[str, Path] @@ -690,3 +692,94 @@ def make_pad_mask(lengths: torch.Tensor) -> torch.Tensor: expaned_lengths = torch.arange(max_len).expand(n, max_len).to(lengths) return expaned_lengths >= lengths.unsqueeze(1) + + +def l1_norm(x): + return torch.sum(torch.abs(x)) + + +def l2_norm(x): + return torch.sum(torch.pow(x, 2)) + + +def linf_norm(x): + return torch.max(torch.abs(x)) + + +def measure_weight_norms( + model: nn.Module, norm: str = "l2" +) -> Dict[str, float]: + """ + Compute the norms of the model's parameters. + + :param model: a torch.nn.Module instance + :param norm: how to compute the norm. Available values: 'l1', 'l2', 'linf' + :return: a dict mapping from parameter's name to its norm. 
+ """ + with torch.no_grad(): + norms = {} + for name, param in model.named_parameters(): + if norm == "l1": + val = l1_norm(param) + elif norm == "l2": + val = l2_norm(param) + elif norm == "linf": + val = linf_norm(param) + else: + raise ValueError(f"Unknown norm type: {norm}") + norms[name] = val.item() + return norms + + +def measure_gradient_norms( + model: nn.Module, norm: str = "l1" +) -> Dict[str, float]: + """ + Compute the norms of the gradients for each of model's parameters. + + :param model: a torch.nn.Module instance + :param norm: how to compute the norm. Available values: 'l1', 'l2', 'linf' + :return: a dict mapping from parameter's name to its gradient's norm. + """ + with torch.no_grad(): + norms = {} + for name, param in model.named_parameters(): + if norm == "l1": + val = l1_norm(param.grad) + elif norm == "l2": + val = l2_norm(param.grad) + elif norm == "linf": + val = linf_norm(param.grad) + else: + raise ValueError(f"Unknown norm type: {norm}") + norms[name] = val.item() + return norms + + +def optim_step_and_measure_param_change( + model: nn.Module, + optimizer: torch.optim.Optimizer, + scaler: Optional[GradScaler] = None, +) -> Dict[str, float]: + """ + Perform model weight update and measure the "relative change in parameters per minibatch." + It is understood as a ratio between the L2 norm of the difference between original and updates parameters, + and the L2 norm of the original parameter. It is given by the formula: + + .. math:: + \begin{aligned} + \delta = \frac{\Vert\theta - \theta_{new}\Vert^2}{\Vert\theta\Vert^2} + \end{aligned} + """ + param_copy = {n: p.detach().clone() for n, p in model.named_parameters()} + if scaler: + scaler.step(optimizer) + else: + optimizer.step() + relative_change = {} + with torch.no_grad(): + for n, p_new in model.named_parameters(): + p_orig = param_copy[n] + delta = l2_norm(p_orig - p_new) / l2_norm(p_orig) + relative_change[n] = delta.item() + return relative_change From 827b9df51ac81732d6d84075801c77ac07ffba5a Mon Sep 17 00:00:00 2001 From: Duo Ma <39255927+shanguanma@users.noreply.github.com> Date: Sat, 19 Feb 2022 15:56:04 +0800 Subject: [PATCH 07/25] Updated Aishell-1 transducer-stateless result (#217) * Update RESULTS.md * Update RESULTS.md --- egs/aishell/ASR/RESULTS.md | 44 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) diff --git a/egs/aishell/ASR/RESULTS.md b/egs/aishell/ASR/RESULTS.md index dd27e1f35..61f7e500e 100644 --- a/egs/aishell/ASR/RESULTS.md +++ b/egs/aishell/ASR/RESULTS.md @@ -1,5 +1,49 @@ ## Results +### Aishell training result(Transducer-stateless) +#### 2022-2-19 +(Duo Ma): The tensorboard log for training is available at https://tensorboard.dev/experiment/25PmX3MxSVGTdvIdhOwllw/#scalars +You can find a pretrained model by visiting https://huggingface.co/shuanguanma/icefall_aishell_transducer_stateless_context_size2_epoch60_2022_2_19 +| | test |comment | +|---------------------------|------|-----------------------------------------| +| greedy search | 5.4 |--epoch 59, --avg 10, --max-duration 100 | +| beam search | 5.05|--epoch 59, --avg 10, --max-duration 100 | +You can use the following commands to reproduce our results: +``` + export CUDA_VISIBLE_DEVICES="0,1,2,3" + python3 ./transducer_stateless/train.py \ + --world-size 4 \ + --num-epochs 60 \ + --start-epoch 0 \ + --exp-dir exp/transducer_stateless_context_size2 \ + --max-duration 100 \ + --lr-factor 2.5\ + --context-size 2 + + lang_dir=data/lang_char + dir=exp/transducer_stateless_context_size2 + python3 
./transducer_stateless/decode.py\ + --epoch 59\ + --avg 10\ + --exp-dir $dir \ + --lang-dir $lang_dir\ + --decoding-method greedy_search\ + --context-size 2\ + --max-sym-per-frame 3 + lang_dir=data/lang_char + dir=exp/transducer_stateless_context_size2 + python3 ./transducer_stateless/decode.py\ + --epoch 59\ + --avg 10\ + --exp-dir $dir \ + --lang-dir $lang_dir\ + --decoding-method beam_search\ + --context-size 2\ + --max-sym-per-frame 3 + ``` + + + ### Aishell training results (Transducer-stateless) #### 2021-12-29 (Pingfeng Luo) : The tensorboard log for training is available at From 277cc3f9bf07d58a3f0b833ed74bca3fde49bf1a Mon Sep 17 00:00:00 2001 From: PF Luo Date: Sat, 19 Feb 2022 15:56:39 +0800 Subject: [PATCH 08/25] update aishell-1 recipe with k2.rnnt_loss (#215) * update aishell-1 recipe with k2.rnnt_loss * fix flak8 style * typo * add pretrained model link to result.md --- README.md | 2 +- egs/aishell/ASR/RESULTS.md | 7 ++++--- egs/aishell/ASR/transducer_stateless/decode.py | 8 ++------ egs/aishell/ASR/transducer_stateless/model.py | 18 +++++++++--------- egs/aishell/ASR/transducer_stateless/train.py | 3 ++- 5 files changed, 18 insertions(+), 20 deletions(-) diff --git a/README.md b/README.md index 28c9b6ce4..214e85ad0 100644 --- a/README.md +++ b/README.md @@ -113,7 +113,7 @@ The best CER we currently have is: | | test | |-----|------| -| CER | 5.7 | +| CER | 5.4 | We provide a Colab notebook to run a pre-trained TransducerStateless model: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/14XaT2MhnBkK-3_RqqWq3K90Xlbin-GZC?usp=sharing) diff --git a/egs/aishell/ASR/RESULTS.md b/egs/aishell/ASR/RESULTS.md index 61f7e500e..688e0f60c 100644 --- a/egs/aishell/ASR/RESULTS.md +++ b/egs/aishell/ASR/RESULTS.md @@ -45,12 +45,13 @@ You can use the following commands to reproduce our results: ### Aishell training results (Transducer-stateless) -#### 2021-12-29 -(Pingfeng Luo) : The tensorboard log for training is available at +#### 2022-02-18 +(Pingfeng Luo) : The tensorboard log for training is available at +And pretrained model is available at ||test| |--|--| -|CER| 5.7% | +|CER| 5.4% | You can use the following commands to reproduce our results: diff --git a/egs/aishell/ASR/transducer_stateless/decode.py b/egs/aishell/ASR/transducer_stateless/decode.py index f27e4cdcf..a7b030fa5 100755 --- a/egs/aishell/ASR/transducer_stateless/decode.py +++ b/egs/aishell/ASR/transducer_stateless/decode.py @@ -31,7 +31,6 @@ from decoder import Decoder from joiner import Joiner from model import Transducer -from icefall.char_graph_compiler import CharCtcTrainingGraphCompiler from icefall.checkpoint import average_checkpoints, load_checkpoint from icefall.env import get_env_info from icefall.lexicon import Lexicon @@ -403,12 +402,9 @@ def main(): logging.info(f"Device: {device}") lexicon = Lexicon(params.lang_dir) - graph_compiler = CharCtcTrainingGraphCompiler( - lexicon=lexicon, - device=device, - ) - params.blank_id = graph_compiler.texts_to_ids("")[0][0] + # params.blank_id = graph_compiler.texts_to_ids("")[0][0] + params.blank_id = 0 params.vocab_size = max(lexicon.tokens) + 1 logging.info(params) diff --git a/egs/aishell/ASR/transducer_stateless/model.py b/egs/aishell/ASR/transducer_stateless/model.py index 2f0f9a183..0322edeed 100644 --- a/egs/aishell/ASR/transducer_stateless/model.py +++ b/egs/aishell/ASR/transducer_stateless/model.py @@ -108,18 +108,18 @@ class Transducer(nn.Module): # Note: y does not start with SOS y_padded = 
y.pad(mode="constant", padding_value=0) + y_padded = y_padded.to(torch.int64) + boundary = torch.zeros( + (x.size(0), 4), dtype=torch.int64, device=x.device + ) + boundary[:, 2] = y_lens + boundary[:, 3] = x_lens + assert hasattr(torchaudio.functional, "rnnt_loss"), ( f"Current torchaudio version: {torchaudio.__version__}\n" "Please install a version >= 0.10.0" ) - loss = torchaudio.functional.rnnt_loss( - logits=logits, - targets=y_padded, - logit_lengths=x_lens, - target_lengths=y_lens, - blank=blank_id, - reduction="sum", - ) + loss = k2.rnnt_loss(logits, y_padded, blank_id, boundary) - return loss + return torch.sum(loss) diff --git a/egs/aishell/ASR/transducer_stateless/train.py b/egs/aishell/ASR/transducer_stateless/train.py index 0c180b260..b562f9dd4 100755 --- a/egs/aishell/ASR/transducer_stateless/train.py +++ b/egs/aishell/ASR/transducer_stateless/train.py @@ -558,7 +558,8 @@ def run(rank, world_size, args): oov="", ) - params.blank_id = graph_compiler.texts_to_ids("")[0][0] + # params.blank_id = graph_compiler.texts_to_ids("")[0][0] + params.blank_id = 0 params.vocab_size = max(lexicon.tokens) + 1 logging.info(params) From cbf8c18ebd274dfeea9b8aa224ff5faad713c28c Mon Sep 17 00:00:00 2001 From: Fangjun Kuang Date: Sat, 19 Feb 2022 22:28:19 +0800 Subject: [PATCH 09/25] Minor fixes for aishell (#218) * Minor fixes to aishell. * Minor fixes. --- egs/aishell/ASR/RESULTS.md | 74 +++++------ .../ASR/local/display_manifest_statistics.py | 118 ++++++++++++++++++ egs/aishell/ASR/prepare.sh | 36 ++++-- egs/aishell/ASR/transducer_stateless/train.py | 1 - icefall/char_graph_compiler.py | 2 +- 5 files changed, 181 insertions(+), 50 deletions(-) create mode 100755 egs/aishell/ASR/local/display_manifest_statistics.py diff --git a/egs/aishell/ASR/RESULTS.md b/egs/aishell/ASR/RESULTS.md index 688e0f60c..ceb63b4cf 100644 --- a/egs/aishell/ASR/RESULTS.md +++ b/egs/aishell/ASR/RESULTS.md @@ -1,49 +1,49 @@ ## Results -### Aishell training result(Transducer-stateless) +### Aishell training result(Transducer-stateless) #### 2022-2-19 -(Duo Ma): The tensorboard log for training is available at https://tensorboard.dev/experiment/25PmX3MxSVGTdvIdhOwllw/#scalars +(Duo Ma): The tensorboard log for training is available at https://tensorboard.dev/experiment/25PmX3MxSVGTdvIdhOwllw/#scalars You can find a pretrained model by visiting https://huggingface.co/shuanguanma/icefall_aishell_transducer_stateless_context_size2_epoch60_2022_2_19 | | test |comment | |---------------------------|------|-----------------------------------------| | greedy search | 5.4 |--epoch 59, --avg 10, --max-duration 100 | -| beam search | 5.05|--epoch 59, --avg 10, --max-duration 100 | +| beam search | 5.05|--epoch 59, --avg 10, --max-duration 100 | You can use the following commands to reproduce our results: + +```bash +export CUDA_VISIBLE_DEVICES="0,1,2,3" +python3 ./transducer_stateless/train.py \ + --world-size 4 \ + --num-epochs 60 \ + --start-epoch 0 \ + --exp-dir exp/transducer_stateless_context_size2 \ + --max-duration 100 \ + --lr-factor 2.5 \ + --context-size 2 + +lang_dir=data/lang_char +dir=exp/transducer_stateless_context_size2 +python3 ./transducer_stateless/decode.py\ + --epoch 59 \ + --avg 10 \ + --exp-dir $dir \ + --lang-dir $lang_dir \ + --decoding-method greedy_search \ + --context-size 2 \ + --max-sym-per-frame 3 + +lang_dir=data/lang_char +dir=exp/transducer_stateless_context_size2 +python3 ./transducer_stateless/decode.py \ + --epoch 59\ + --avg 10\ + --exp-dir $dir \ + --lang-dir $lang_dir \ + --decoding-method 
beam_search \ + --context-size 2 \ + --max-sym-per-frame 3 ``` - export CUDA_VISIBLE_DEVICES="0,1,2,3" - python3 ./transducer_stateless/train.py \ - --world-size 4 \ - --num-epochs 60 \ - --start-epoch 0 \ - --exp-dir exp/transducer_stateless_context_size2 \ - --max-duration 100 \ - --lr-factor 2.5\ - --context-size 2 - - lang_dir=data/lang_char - dir=exp/transducer_stateless_context_size2 - python3 ./transducer_stateless/decode.py\ - --epoch 59\ - --avg 10\ - --exp-dir $dir \ - --lang-dir $lang_dir\ - --decoding-method greedy_search\ - --context-size 2\ - --max-sym-per-frame 3 - lang_dir=data/lang_char - dir=exp/transducer_stateless_context_size2 - python3 ./transducer_stateless/decode.py\ - --epoch 59\ - --avg 10\ - --exp-dir $dir \ - --lang-dir $lang_dir\ - --decoding-method beam_search\ - --context-size 2\ - --max-sym-per-frame 3 - ``` - - - + ### Aishell training results (Transducer-stateless) #### 2022-02-18 (Pingfeng Luo) : The tensorboard log for training is available at diff --git a/egs/aishell/ASR/local/display_manifest_statistics.py b/egs/aishell/ASR/local/display_manifest_statistics.py new file mode 100755 index 000000000..5e8b5cd3a --- /dev/null +++ b/egs/aishell/ASR/local/display_manifest_statistics.py @@ -0,0 +1,118 @@ +#!/usr/bin/env python3 +# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +This file displays duration statistics of utterances in a manifest. +You can use the displayed value to choose minimum/maximum duration +to remove short and long utterances during the training. + +See the function `remove_short_and_long_utt()` in transducer_stateless/train.py +for usage. 
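+
+A minimal sketch of such a filter (the thresholds below are placeholders
+chosen for illustration, not the values used in train.py):
+
+    def remove_short_and_long_utt(c):
+        # Keep cuts whose duration lies in an assumed [1.0 s, 20.0 s] window.
+        return 1.0 <= c.duration <= 20.0
+
+    train_cuts = train_cuts.filter(remove_short_and_long_utt)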
+""" + + +from lhotse import load_manifest + + +def main(): + # path = "./data/fbank/cuts_train.json.gz" + # path = "./data/fbank/cuts_test.json.gz" + path = "./data/fbank/cuts_dev.json.gz" + + cuts = load_manifest(path) + cuts.describe() + + +if __name__ == "__main__": + main() + +""" +## train (after speed perturb) +Cuts count: 360294 +Total duration (hours): 455.6 +Speech duration (hours): 455.6 (100.0%) +*** +Duration statistics (seconds): +mean 4.6 +std 1.4 +min 1.1 +0.1% 1.8 +0.5% 2.2 +1% 2.3 +5% 2.7 +10% 3.0 +10% 3.0 +25% 3.5 +50% 4.3 +75% 5.4 +90% 6.5 +95% 7.2 +99% 8.8 +99.5% 9.4 +99.9% 10.9 +max 16.1 + +## test +Cuts count: 7176 +Total duration (hours): 10.0 +Speech duration (hours): 10.0 (100.0%) +*** +Duration statistics (seconds): +mean 5.0 +std 1.6 +min 1.9 +0.1% 2.2 +0.5% 2.4 +1% 2.6 +5% 3.0 +10% 3.2 +10% 3.2 +25% 3.8 +50% 4.7 +75% 5.9 +90% 7.3 +95% 8.2 +99% 9.9 +99.5% 10.7 +99.9% 11.9 +max 14.7 + +## dev +Cuts count: 14326 +Total duration (hours): 18.1 +Speech duration (hours): 18.1 (100.0%) +*** +Duration statistics (seconds): +mean 4.5 +std 1.3 +min 1.6 +0.1% 2.1 +0.5% 2.3 +1% 2.4 +5% 2.9 +10% 3.1 +10% 3.1 +25% 3.5 +50% 4.3 +75% 5.4 +90% 6.4 +95% 7.0 +99% 8.4 +99.5% 8.9 +99.9% 10.3 +max 12.5 +""" diff --git a/egs/aishell/ASR/prepare.sh b/egs/aishell/ASR/prepare.sh index a99558395..68f5c54d3 100755 --- a/egs/aishell/ASR/prepare.sh +++ b/egs/aishell/ASR/prepare.sh @@ -48,8 +48,9 @@ if [ $stage -le -1 ] && [ $stop_stage -ge -1 ]; then log "stage -1: Download LM" # We assume that you have installed the git-lfs, if not, you could install it # using: `sudo apt-get install git-lfs && git-lfs install` - [ ! -e $dl_dir/lm ] && mkdir -p $dl_dir/lm - git clone https://huggingface.co/pkufool/aishell_lm $dl_dir/lm + if [ ! -f $dl_dir/lm/3-gram.unpruned.arpa ]; then + git clone https://huggingface.co/pkufool/aishell_lm $dl_dir/lm + fi fi if [ $stage -le 0 ] && [ $stop_stage -ge 0 ]; then @@ -87,28 +88,41 @@ if [ $stage -le 1 ] && [ $stop_stage -ge 1 ]; then log "Stage 1: Prepare aishell manifest" # We assume that you have downloaded the aishell corpus # to $dl_dir/aishell - mkdir -p data/manifests - lhotse prepare aishell -j $nj $dl_dir/aishell data/manifests + if [ ! -f data/manifests/.aishell_manifests.done ]; then + mkdir -p data/manifests + lhotse prepare aishell $dl_dir/aishell data/manifests + touch data/manifests/.aishell_manifests.done + fi fi if [ $stage -le 2 ] && [ $stop_stage -ge 2 ]; then log "Stage 2: Prepare musan manifest" # We assume that you have downloaded the musan corpus # to data/musan - mkdir -p data/manifests - lhotse prepare musan $dl_dir/musan data/manifests + if [ ! -f data/manifests/.musan_manifests.done ]; then + log "It may take 6 minutes" + mkdir -p data/manifests + lhotse prepare musan $dl_dir/musan data/manifests + touch data/manifests/.musan_manifests.done + fi fi if [ $stage -le 3 ] && [ $stop_stage -ge 3 ]; then log "Stage 3: Compute fbank for aishell" - mkdir -p data/fbank - ./local/compute_fbank_aishell.py + if [ ! -f data/fbank/.aishell.done ]; then + mkdir -p data/fbank + ./local/compute_fbank_aishell.py + touch data/fbank/.aishell.done + fi fi if [ $stage -le 4 ] && [ $stop_stage -ge 4 ]; then log "Stage 4: Compute fbank for musan" - mkdir -p data/fbank - ./local/compute_fbank_musan.py + if [ ! 
-f data/fbank/.msuan.done ]; then + mkdir -p data/fbank + ./local/compute_fbank_musan.py + touch data/fbank/.msuan.done + fi fi lang_phone_dir=data/lang_phone @@ -134,7 +148,7 @@ if [ $stage -le 5 ] && [ $stop_stage -ge 5 ]; then aishell_train_uid=$dl_dir/aishell/data_aishell/transcript/aishell_train_uid find $dl_dir/aishell/data_aishell/wav/train -name "*.wav" | sed 's/\.wav//g' | awk -F '/' '{print $NF}' > $aishell_train_uid awk 'NR==FNR{uid[$1]=$1} NR!=FNR{if($1 in uid) print $0}' $aishell_train_uid $aishell_text | - cut -d " " -f 2- > $lang_phone_dir/transcript_words.txt + cut -d " " -f 2- > $lang_phone_dir/transcript_words.txt fi if [ ! -f $lang_phone_dir/transcript_tokens.txt ]; then diff --git a/egs/aishell/ASR/transducer_stateless/train.py b/egs/aishell/ASR/transducer_stateless/train.py index b562f9dd4..cd37810dd 100755 --- a/egs/aishell/ASR/transducer_stateless/train.py +++ b/egs/aishell/ASR/transducer_stateless/train.py @@ -558,7 +558,6 @@ def run(rank, world_size, args): oov="", ) - # params.blank_id = graph_compiler.texts_to_ids("")[0][0] params.blank_id = 0 params.vocab_size = max(lexicon.tokens) + 1 diff --git a/icefall/char_graph_compiler.py b/icefall/char_graph_compiler.py index 4a79a300a..a50b57d40 100644 --- a/icefall/char_graph_compiler.py +++ b/icefall/char_graph_compiler.py @@ -36,7 +36,7 @@ class CharCtcTrainingGraphCompiler(object): """ Args: lexicon: - It is built from `data/lang/lexicon.txt`. + It is built from `data/lang_char/lexicon.txt`. device: The device to use for operations compiling transcripts to FSAs. oov: From 1c35ae1dba719c03aef3be9198840ca621c131bb Mon Sep 17 00:00:00 2001 From: Fangjun Kuang Date: Mon, 21 Feb 2022 15:16:39 +0800 Subject: [PATCH 10/25] Reset seed at the beginning of each epoch. (#221) * Reset seed at the beginning of each epoch. * Use a different seed for each epoch. 
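The same pattern is applied to every recipe touched below. Here is a minimal,
self-contained sketch of the idea (the helper body and the loop are schematic
stand-ins, not the recipes' actual code, which uses icefall's own
`fix_random_seed` and its real training loop):

```python
import random

import torch


def fix_random_seed(seed: int) -> None:
    # Stand-in for the helper used by the recipes (the real one also seeds
    # other generators); included only so this sketch runs on its own.
    random.seed(seed)
    torch.manual_seed(seed)


def run(seed: int = 42, start_epoch: int = 0, num_epochs: int = 3) -> None:
    # Seed once before building the model and dataloaders ...
    fix_random_seed(seed)

    for epoch in range(start_epoch, num_epochs):
        # ... and reseed at the start of every epoch with a distinct but
        # reproducible value, so data augmentation (e.g. SpecAugment) does
        # not replay the same random stream in each epoch.
        fix_random_seed(seed + epoch)
        # train_dl.sampler.set_epoch(epoch)
        # train_one_epoch(...)


if __name__ == "__main__":
    run()
```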
--- egs/aishell/ASR/conformer_ctc/train.py | 10 +++++++++- egs/aishell/ASR/conformer_mmi/train.py | 10 +++++++++- egs/aishell/ASR/tdnn_lstm_ctc/train.py | 10 +++++++++- egs/aishell/ASR/transducer_stateless/train.py | 10 +++++++++- egs/librispeech/ASR/conformer_ctc/train.py | 10 +++++++++- egs/librispeech/ASR/conformer_mmi/train.py | 10 +++++++++- .../ASR/pruned_transducer_stateless/train.py | 10 +++++++++- egs/librispeech/ASR/streaming_conformer_ctc/train.py | 10 +++++++++- egs/librispeech/ASR/tdnn_lstm_ctc/train.py | 10 +++++++++- egs/librispeech/ASR/transducer/train.py | 10 +++++++++- egs/librispeech/ASR/transducer_lstm/train.py | 10 +++++++++- egs/librispeech/ASR/transducer_stateless/train.py | 10 +++++++++- egs/timit/ASR/tdnn_ligru_ctc/train.py | 10 +++++++++- egs/timit/ASR/tdnn_lstm_ctc/train.py | 10 +++++++++- egs/yesno/ASR/tdnn/train.py | 10 +++++++++- egs/yesno/ASR/transducer/train.py | 10 +++++++++- 16 files changed, 144 insertions(+), 16 deletions(-) diff --git a/egs/aishell/ASR/conformer_ctc/train.py b/egs/aishell/ASR/conformer_ctc/train.py index a4bc8e3bb..369ad310f 100755 --- a/egs/aishell/ASR/conformer_ctc/train.py +++ b/egs/aishell/ASR/conformer_ctc/train.py @@ -121,6 +121,13 @@ def get_parser(): """, ) + parser.add_argument( + "--seed", + type=int, + default=42, + help="The seed for random generators intended for reproducibility", + ) + return parser @@ -555,7 +562,7 @@ def run(rank, world_size, args): params = get_params() params.update(vars(args)) - fix_random_seed(42) + fix_random_seed(params.seed) if world_size > 1: setup_dist(rank, world_size, params.master_port) @@ -618,6 +625,7 @@ def run(rank, world_size, args): valid_dl = aishell.valid_dataloaders(aishell.valid_cuts()) for epoch in range(params.start_epoch, params.num_epochs): + fix_random_seed(params.seed + epoch) train_dl.sampler.set_epoch(epoch) cur_lr = optimizer._rate diff --git a/egs/aishell/ASR/conformer_mmi/train.py b/egs/aishell/ASR/conformer_mmi/train.py index 79c16d1cc..685831d09 100755 --- a/egs/aishell/ASR/conformer_mmi/train.py +++ b/egs/aishell/ASR/conformer_mmi/train.py @@ -124,6 +124,13 @@ def get_parser(): """, ) + parser.add_argument( + "--seed", + type=int, + default=42, + help="The seed for random generators intended for reproducibility", + ) + return parser @@ -546,7 +553,7 @@ def run(rank, world_size, args): params = get_params() params.update(vars(args)) - fix_random_seed(42) + fix_random_seed(params.seed) if world_size > 1: setup_dist(rank, world_size, params.master_port) @@ -613,6 +620,7 @@ def run(rank, world_size, args): valid_dl = aishell.valid_dataloaders(aishell.valid_cuts()) for epoch in range(params.start_epoch, params.num_epochs): + fix_random_seed(params.seed + epoch) train_dl.sampler.set_epoch(epoch) cur_lr = optimizer._rate diff --git a/egs/aishell/ASR/tdnn_lstm_ctc/train.py b/egs/aishell/ASR/tdnn_lstm_ctc/train.py index a0045115d..3327cdb79 100755 --- a/egs/aishell/ASR/tdnn_lstm_ctc/train.py +++ b/egs/aishell/ASR/tdnn_lstm_ctc/train.py @@ -92,6 +92,13 @@ def get_parser(): """, ) + parser.add_argument( + "--seed", + type=int, + default=42, + help="The seed for random generators intended for reproducibility", + ) + return parser @@ -507,7 +514,7 @@ def run(rank, world_size, args): params = get_params() params.update(vars(args)) - fix_random_seed(42) + fix_random_seed(params.seed) if world_size > 1: setup_dist(rank, world_size, params.master_port) @@ -557,6 +564,7 @@ def run(rank, world_size, args): valid_dl = aishell.valid_dataloaders(aishell.valid_cuts()) for epoch in 
range(params.start_epoch, params.num_epochs): + fix_random_seed(params.seed + epoch) train_dl.sampler.set_epoch(epoch) if epoch > params.start_epoch: diff --git a/egs/aishell/ASR/transducer_stateless/train.py b/egs/aishell/ASR/transducer_stateless/train.py index cd37810dd..f615c78f4 100755 --- a/egs/aishell/ASR/transducer_stateless/train.py +++ b/egs/aishell/ASR/transducer_stateless/train.py @@ -129,6 +129,13 @@ def get_parser(): "2 means tri-gram", ) + parser.add_argument( + "--seed", + type=int, + default=42, + help="The seed for random generators intended for reproducibility", + ) + return parser @@ -534,7 +541,7 @@ def run(rank, world_size, args): params = get_params() params.update(vars(args)) - fix_random_seed(42) + fix_random_seed(params.seed) if world_size > 1: setup_dist(rank, world_size, params.master_port) @@ -611,6 +618,7 @@ def run(rank, world_size, args): valid_dl = aishell.valid_dataloaders(aishell.valid_cuts()) for epoch in range(params.start_epoch, params.num_epochs): + fix_random_seed(params.seed + epoch) train_dl.sampler.set_epoch(epoch) cur_lr = optimizer._rate diff --git a/egs/librispeech/ASR/conformer_ctc/train.py b/egs/librispeech/ASR/conformer_ctc/train.py index 058efd061..b81bd6330 100755 --- a/egs/librispeech/ASR/conformer_ctc/train.py +++ b/egs/librispeech/ASR/conformer_ctc/train.py @@ -140,6 +140,13 @@ def get_parser(): help="The lr_factor for Noam optimizer", ) + parser.add_argument( + "--seed", + type=int, + default=42, + help="The seed for random generators intended for reproducibility", + ) + return parser @@ -580,7 +587,7 @@ def run(rank, world_size, args): params = get_params() params.update(vars(args)) - fix_random_seed(42) + fix_random_seed(params.seed) if world_size > 1: setup_dist(rank, world_size, params.master_port) @@ -684,6 +691,7 @@ def run(rank, world_size, args): ) for epoch in range(params.start_epoch, params.num_epochs): + fix_random_seed(params.seed + epoch) train_dl.sampler.set_epoch(epoch) cur_lr = optimizer._rate diff --git a/egs/librispeech/ASR/conformer_mmi/train.py b/egs/librispeech/ASR/conformer_mmi/train.py index c36677762..9a5bdcce2 100755 --- a/egs/librispeech/ASR/conformer_mmi/train.py +++ b/egs/librispeech/ASR/conformer_mmi/train.py @@ -109,6 +109,13 @@ def get_parser(): """, ) + parser.add_argument( + "--seed", + type=int, + default=42, + help="The seed for random generators intended for reproducibility", + ) + return parser @@ -673,7 +680,7 @@ def run(rank, world_size, args): params = get_params() params.update(vars(args)) - fix_random_seed(42) + fix_random_seed(params.seed) if world_size > 1: setup_dist(rank, world_size, params.master_port) @@ -761,6 +768,7 @@ def run(rank, world_size, args): valid_dl = librispeech.valid_dataloaders() for epoch in range(params.start_epoch, params.num_epochs): + fix_random_seed(params.seed + epoch) train_dl.sampler.set_epoch(epoch) if ( params.batch_idx_train >= params.use_ali_until diff --git a/egs/librispeech/ASR/pruned_transducer_stateless/train.py b/egs/librispeech/ASR/pruned_transducer_stateless/train.py index e19473788..f0ea2ccaa 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless/train.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless/train.py @@ -179,6 +179,13 @@ def get_parser(): "with this parameter before adding to the final loss.", ) + parser.add_argument( + "--seed", + type=int, + default=42, + help="The seed for random generators intended for reproducibility", + ) + return parser @@ -642,7 +649,7 @@ def run(rank, world_size, args): params.valid_interval = 800 
params.warm_step = 30000 - fix_random_seed(42) + fix_random_seed(params.seed) if world_size > 1: setup_dist(rank, world_size, params.master_port) @@ -731,6 +738,7 @@ def run(rank, world_size, args): ) for epoch in range(params.start_epoch, params.num_epochs): + fix_random_seed(params.seed + epoch) train_dl.sampler.set_epoch(epoch) cur_lr = optimizer._rate diff --git a/egs/librispeech/ASR/streaming_conformer_ctc/train.py b/egs/librispeech/ASR/streaming_conformer_ctc/train.py index 8b4d6701e..9beb185a2 100755 --- a/egs/librispeech/ASR/streaming_conformer_ctc/train.py +++ b/egs/librispeech/ASR/streaming_conformer_ctc/train.py @@ -138,6 +138,13 @@ def get_parser(): help="Proportion of samples trained with short right context", ) + parser.add_argument( + "--seed", + type=int, + default=42, + help="The seed for random generators intended for reproducibility", + ) + return parser @@ -575,7 +582,7 @@ def run(rank, world_size, args): params = get_params() params.update(vars(args)) - fix_random_seed(42) + fix_random_seed(params.seed) if world_size > 1: setup_dist(rank, world_size, params.master_port) @@ -645,6 +652,7 @@ def run(rank, world_size, args): ) for epoch in range(params.start_epoch, params.num_epochs): + fix_random_seed(params.seed + epoch) train_dl.sampler.set_epoch(epoch) cur_lr = optimizer._rate diff --git a/egs/librispeech/ASR/tdnn_lstm_ctc/train.py b/egs/librispeech/ASR/tdnn_lstm_ctc/train.py index 7439e157a..8597525ba 100755 --- a/egs/librispeech/ASR/tdnn_lstm_ctc/train.py +++ b/egs/librispeech/ASR/tdnn_lstm_ctc/train.py @@ -95,6 +95,13 @@ def get_parser(): """, ) + parser.add_argument( + "--seed", + type=int, + default=42, + help="The seed for random generators intended for reproducibility", + ) + return parser @@ -486,7 +493,7 @@ def run(rank, world_size, args): params = get_params() params.update(vars(args)) - fix_random_seed(42) + fix_random_seed(params.seed) if world_size > 1: setup_dist(rank, world_size, params.master_port) @@ -544,6 +551,7 @@ def run(rank, world_size, args): valid_dl = librispeech.valid_dataloaders(valid_cuts) for epoch in range(params.start_epoch, params.num_epochs): + fix_random_seed(params.seed + epoch) train_dl.sampler.set_epoch(epoch) if epoch > params.start_epoch: diff --git a/egs/librispeech/ASR/transducer/train.py b/egs/librispeech/ASR/transducer/train.py index 903ba8491..a6ce79520 100755 --- a/egs/librispeech/ASR/transducer/train.py +++ b/egs/librispeech/ASR/transducer/train.py @@ -130,6 +130,13 @@ def get_parser(): help="The lr_factor for Noam optimizer", ) + parser.add_argument( + "--seed", + type=int, + default=42, + help="The seed for random generators intended for reproducibility", + ) + return parser @@ -544,7 +551,7 @@ def run(rank, world_size, args): params.valid_interval = 800 params.warm_step = 8000 - fix_random_seed(42) + fix_random_seed(params.seed) if world_size > 1: setup_dist(rank, world_size, params.master_port) @@ -633,6 +640,7 @@ def run(rank, world_size, args): ) for epoch in range(params.start_epoch, params.num_epochs): + fix_random_seed(params.seed + epoch) train_dl.sampler.set_epoch(epoch) cur_lr = optimizer._rate diff --git a/egs/librispeech/ASR/transducer_lstm/train.py b/egs/librispeech/ASR/transducer_lstm/train.py index 62e9b5b12..9f06ed512 100755 --- a/egs/librispeech/ASR/transducer_lstm/train.py +++ b/egs/librispeech/ASR/transducer_lstm/train.py @@ -131,6 +131,13 @@ def get_parser(): help="The lr_factor for Noam optimizer", ) + parser.add_argument( + "--seed", + type=int, + default=42, + help="The seed for random generators 
intended for reproducibility", + ) + return parser @@ -548,7 +555,7 @@ def run(rank, world_size, args): params.valid_interval = 800 params.warm_step = 8000 - fix_random_seed(42) + fix_random_seed(params.seed) if world_size > 1: setup_dist(rank, world_size, params.master_port) @@ -639,6 +646,7 @@ def run(rank, world_size, args): ) for epoch in range(params.start_epoch, params.num_epochs): + fix_random_seed(params.seed + epoch) train_dl.sampler.set_epoch(epoch) cur_lr = optimizer._rate diff --git a/egs/librispeech/ASR/transducer_stateless/train.py b/egs/librispeech/ASR/transducer_stateless/train.py index 544f6e9b1..4f5379e53 100755 --- a/egs/librispeech/ASR/transducer_stateless/train.py +++ b/egs/librispeech/ASR/transducer_stateless/train.py @@ -149,6 +149,13 @@ def get_parser(): """, ) + parser.add_argument( + "--seed", + type=int, + default=42, + help="The seed for random generators intended for reproducibility", + ) + return parser @@ -562,7 +569,7 @@ def run(rank, world_size, args): params.valid_interval = 800 params.warm_step = 8000 - fix_random_seed(42) + fix_random_seed(params.seed) if world_size > 1: setup_dist(rank, world_size, params.master_port) @@ -651,6 +658,7 @@ def run(rank, world_size, args): ) for epoch in range(params.start_epoch, params.num_epochs): + fix_random_seed(params.seed + epoch) train_dl.sampler.set_epoch(epoch) cur_lr = optimizer._rate diff --git a/egs/timit/ASR/tdnn_ligru_ctc/train.py b/egs/timit/ASR/tdnn_ligru_ctc/train.py index 9ac4743b4..452c2a7cb 100644 --- a/egs/timit/ASR/tdnn_ligru_ctc/train.py +++ b/egs/timit/ASR/tdnn_ligru_ctc/train.py @@ -95,6 +95,13 @@ def get_parser(): """, ) + parser.add_argument( + "--seed", + type=int, + default=42, + help="The seed for random generators intended for reproducibility", + ) + return parser @@ -486,7 +493,7 @@ def run(rank, world_size, args): params = get_params() params.update(vars(args)) - fix_random_seed(42) + fix_random_seed(params.seed) if world_size > 1: setup_dist(rank, world_size, params.master_port) @@ -536,6 +543,7 @@ def run(rank, world_size, args): valid_dl = timit.valid_dataloaders() for epoch in range(params.start_epoch, params.num_epochs): + fix_random_seed(params.seed + epoch) train_dl.sampler.set_epoch(epoch) if epoch > params.start_epoch: diff --git a/egs/timit/ASR/tdnn_lstm_ctc/train.py b/egs/timit/ASR/tdnn_lstm_ctc/train.py index 2a6ff4787..849256b98 100644 --- a/egs/timit/ASR/tdnn_lstm_ctc/train.py +++ b/egs/timit/ASR/tdnn_lstm_ctc/train.py @@ -95,6 +95,13 @@ def get_parser(): """, ) + parser.add_argument( + "--seed", + type=int, + default=42, + help="The seed for random generators intended for reproducibility", + ) + return parser @@ -486,7 +493,7 @@ def run(rank, world_size, args): params = get_params() params.update(vars(args)) - fix_random_seed(42) + fix_random_seed(params.seed) if world_size > 1: setup_dist(rank, world_size, params.master_port) @@ -536,6 +543,7 @@ def run(rank, world_size, args): valid_dl = timit.valid_dataloaders() for epoch in range(params.start_epoch, params.num_epochs): + fix_random_seed(params.seed + epoch) train_dl.sampler.set_epoch(epoch) if epoch > params.start_epoch: diff --git a/egs/yesno/ASR/tdnn/train.py b/egs/yesno/ASR/tdnn/train.py index d8454b7c5..f32a27f35 100755 --- a/egs/yesno/ASR/tdnn/train.py +++ b/egs/yesno/ASR/tdnn/train.py @@ -71,6 +71,13 @@ def get_parser(): """, ) + parser.add_argument( + "--seed", + type=int, + default=42, + help="The seed for random generators intended for reproducibility", + ) + return parser @@ -468,7 +475,7 @@ def run(rank, 
world_size, args): params.update(vars(args)) params["env_info"] = get_env_info() - fix_random_seed(42) + fix_random_seed(params.seed) if world_size > 1: setup_dist(rank, world_size, params.master_port) @@ -520,6 +527,7 @@ def run(rank, world_size, args): valid_dl = yes_no.test_dataloaders() for epoch in range(params.start_epoch, params.num_epochs): + fix_random_seed(params.seed + epoch) train_dl.sampler.set_epoch(epoch) if tb_writer is not None: diff --git a/egs/yesno/ASR/transducer/train.py b/egs/yesno/ASR/transducer/train.py index 7d2d1edeb..deb92107d 100755 --- a/egs/yesno/ASR/transducer/train.py +++ b/egs/yesno/ASR/transducer/train.py @@ -114,6 +114,13 @@ def get_parser(): help="Directory to save results", ) + parser.add_argument( + "--seed", + type=int, + default=42, + help="The seed for random generators intended for reproducibility", + ) + return parser @@ -487,7 +494,7 @@ def run(rank, world_size, args): params.update(vars(args)) params["env_info"] = get_env_info() - fix_random_seed(42) + fix_random_seed(params.seed) if world_size > 1: setup_dist(rank, world_size, params.master_port) @@ -532,6 +539,7 @@ def run(rank, world_size, args): valid_dl = yes_no.test_dataloaders() for epoch in range(params.start_epoch, params.num_epochs): + fix_random_seed(params.seed + epoch) train_dl.sampler.set_epoch(epoch) if tb_writer is not None: From 2332ba312d7ce72f08c7bac1e3312f7e3dd722dc Mon Sep 17 00:00:00 2001 From: Fangjun Kuang Date: Mon, 21 Feb 2022 15:27:27 +0800 Subject: [PATCH 11/25] Begin to use multiple datasets in training (#213) * Begin to use multiple datasets. * Finish preparing training datasets. * Minor fixes * Copy files. * Finish training code. * Display losses for gigaspeech and librispeech separately. * Fix decode.py * Make the probability to select a batch from GigaSpeech configurable. * Update results. * Minor fixes. 
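As a rough sketch of the batch-selection idea (names and structure here are
illustrative only; the actual recipe keeps separate LibriSpeech and GigaSpeech
dataloaders and tags each cut's origin so the two losses can be logged
separately):

```python
import random
from typing import Iterable, Iterator


def interleave_batches(
    libri_dl: Iterable,
    giga_dl: Iterable,
    giga_prob: float,
    seed: int = 0,
) -> Iterator:
    """Yield batches, drawing the next one from GigaSpeech with probability
    ``giga_prob`` and from LibriSpeech otherwise; stop when either source
    is exhausted."""
    rng = random.Random(seed)
    libri_iter, giga_iter = iter(libri_dl), iter(giga_dl)
    while True:
        use_giga = rng.random() < giga_prob
        try:
            yield next(giga_iter if use_giga else libri_iter)
        except StopIteration:
            return


# Example: on average 20% of batches come from the GigaSpeech source.
for batch in interleave_batches(["L1", "L2", "L3"], ["G1", "G2"], giga_prob=0.2):
    print(batch)
```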
--- ...-transducer-stateless-librispeech-100h.yml | 152 +++ egs/librispeech/ASR/README.md | 11 +- egs/librispeech/ASR/RESULTS-100hours.md | 75 ++ .../ASR/local/compute_fbank_librispeech.py | 4 +- .../ASR/local/compute_fbank_musan.py | 4 +- .../ASR/local/preprocess_gigaspeech.py | 123 +++ egs/librispeech/ASR/prepare_giga_speech.sh | 109 +++ .../README.md | 27 + .../__init__.py | 0 .../asr_datamodule.py | 304 ++++++ .../beam_search.py | 541 ++++++++++ .../conformer.py | 920 ++++++++++++++++++ .../decode.py | 490 ++++++++++ .../decoder.py | 98 ++ .../encoder_interface.py | 43 + .../export.py | 252 +++++ .../gigaspeech.py | 75 ++ .../joiner.py | 72 ++ .../librispeech.py | 74 ++ .../model.py | 168 ++++ .../pretrained.py | 340 +++++++ .../subsampling.py | 1 + .../test_asr_datamodule.py | 102 ++ .../test_decoder.py | 58 ++ .../train.py | 890 +++++++++++++++++ .../transformer.py | 418 ++++++++ 26 files changed, 5342 insertions(+), 9 deletions(-) create mode 100644 .github/workflows/run-pretrained-transducer-stateless-librispeech-100h.yml create mode 100644 egs/librispeech/ASR/RESULTS-100hours.md create mode 100644 egs/librispeech/ASR/local/preprocess_gigaspeech.py create mode 100755 egs/librispeech/ASR/prepare_giga_speech.sh create mode 100644 egs/librispeech/ASR/transducer_stateless_multi_datasets/README.md create mode 100644 egs/librispeech/ASR/transducer_stateless_multi_datasets/__init__.py create mode 100644 egs/librispeech/ASR/transducer_stateless_multi_datasets/asr_datamodule.py create mode 100644 egs/librispeech/ASR/transducer_stateless_multi_datasets/beam_search.py create mode 100644 egs/librispeech/ASR/transducer_stateless_multi_datasets/conformer.py create mode 100755 egs/librispeech/ASR/transducer_stateless_multi_datasets/decode.py create mode 100644 egs/librispeech/ASR/transducer_stateless_multi_datasets/decoder.py create mode 100644 egs/librispeech/ASR/transducer_stateless_multi_datasets/encoder_interface.py create mode 100755 egs/librispeech/ASR/transducer_stateless_multi_datasets/export.py create mode 100644 egs/librispeech/ASR/transducer_stateless_multi_datasets/gigaspeech.py create mode 100644 egs/librispeech/ASR/transducer_stateless_multi_datasets/joiner.py create mode 100644 egs/librispeech/ASR/transducer_stateless_multi_datasets/librispeech.py create mode 100644 egs/librispeech/ASR/transducer_stateless_multi_datasets/model.py create mode 100755 egs/librispeech/ASR/transducer_stateless_multi_datasets/pretrained.py create mode 120000 egs/librispeech/ASR/transducer_stateless_multi_datasets/subsampling.py create mode 100755 egs/librispeech/ASR/transducer_stateless_multi_datasets/test_asr_datamodule.py create mode 100755 egs/librispeech/ASR/transducer_stateless_multi_datasets/test_decoder.py create mode 100755 egs/librispeech/ASR/transducer_stateless_multi_datasets/train.py create mode 100644 egs/librispeech/ASR/transducer_stateless_multi_datasets/transformer.py diff --git a/.github/workflows/run-pretrained-transducer-stateless-librispeech-100h.yml b/.github/workflows/run-pretrained-transducer-stateless-librispeech-100h.yml new file mode 100644 index 000000000..efea5366b --- /dev/null +++ b/.github/workflows/run-pretrained-transducer-stateless-librispeech-100h.yml @@ -0,0 +1,152 @@ +# Copyright 2021 Fangjun Kuang (csukuangfj@gmail.com) + +# See ../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +name: run-pre-trained-trandsucer-stateless-multi-datasets-librispeech-100h + +on: + push: + branches: + - master + pull_request: + types: [labeled] + +jobs: + run_pre_trained_transducer_stateless_multi_datasets_librispeech_100h: + if: github.event.label.name == 'ready' || github.event_name == 'push' + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-18.04] + python-version: [3.7, 3.8, 3.9] + torch: ["1.10.0"] + torchaudio: ["0.10.0"] + k2-version: ["1.9.dev20211101"] + + fail-fast: false + + steps: + - uses: actions/checkout@v2 + with: + fetch-depth: 0 + + - name: Setup Python ${{ matrix.python-version }} + uses: actions/setup-python@v1 + with: + python-version: ${{ matrix.python-version }} + + - name: Install Python dependencies + run: | + python3 -m pip install --upgrade pip pytest + # numpy 1.20.x does not support python 3.6 + pip install numpy==1.19 + pip install torch==${{ matrix.torch }}+cpu torchaudio==${{ matrix.torchaudio }}+cpu -f https://download.pytorch.org/whl/cpu/torch_stable.html + pip install k2==${{ matrix.k2-version }}+cpu.torch${{ matrix.torch }} -f https://k2-fsa.org/nightly/ + + python3 -m pip install git+https://github.com/lhotse-speech/lhotse + python3 -m pip install kaldifeat + # We are in ./icefall and there is a file: requirements.txt in it + pip install -r requirements.txt + + - name: Install graphviz + shell: bash + run: | + python3 -m pip install -qq graphviz + sudo apt-get -qq install graphviz + + - name: Download pre-trained model + shell: bash + run: | + sudo apt-get -qq install git-lfs tree sox + cd egs/librispeech/ASR + mkdir tmp + cd tmp + git lfs install + git clone https://huggingface.co/csukuangfj/icefall-asr-librispeech-100h-transducer-stateless-multi-datasets-bpe-500-2022-02-21 + + cd .. 
+ tree tmp + soxi tmp/icefall-asr-librispeech-100h-transducer-stateless-multi-datasets-bpe-500-2022-02-21/test_wavs/*.wav + ls -lh tmp/icefall-asr-librispeech-100h-transducer-stateless-multi-datasets-bpe-500-2022-02-21/test_wavs/*.wav + + - name: Run greedy search decoding (max-sym-per-frame 1) + shell: bash + run: | + export PYTHONPATH=$PWD:PYTHONPATH + cd egs/librispeech/ASR + ./transducer_stateless_multi_datasets/pretrained.py \ + --method greedy_search \ + --max-sym-per-frame 1 \ + --checkpoint ./tmp/icefall-asr-librispeech-100h-transducer-stateless-multi-datasets-bpe-500-2022-02-21/exp/pretrained.pt \ + --bpe-model ./tmp/icefall-asr-librispeech-100h-transducer-stateless-multi-datasets-bpe-500-2022-02-21/data/lang_bpe_500/bpe.model \ + ./tmp/icefall-asr-librispeech-100h-transducer-stateless-multi-datasets-bpe-500-2022-02-21/test_wavs/1089-134686-0001.wav \ + ./tmp/icefall-asr-librispeech-100h-transducer-stateless-multi-datasets-bpe-500-2022-02-21/test_wavs/1221-135766-0001.wav \ + ./tmp/icefall-asr-librispeech-100h-transducer-stateless-multi-datasets-bpe-500-2022-02-21/test_wavs/1221-135766-0002.wav + + - name: Run greedy search decoding (max-sym-per-frame 2) + shell: bash + run: | + export PYTHONPATH=$PWD:PYTHONPATH + cd egs/librispeech/ASR + ./transducer_stateless_multi_datasets/pretrained.py \ + --method greedy_search \ + --max-sym-per-frame 2 \ + --checkpoint ./tmp/icefall-asr-librispeech-100h-transducer-stateless-multi-datasets-bpe-500-2022-02-21/exp/pretrained.pt \ + --bpe-model ./tmp/icefall-asr-librispeech-100h-transducer-stateless-multi-datasets-bpe-500-2022-02-21/data/lang_bpe_500/bpe.model \ + ./tmp/icefall-asr-librispeech-100h-transducer-stateless-multi-datasets-bpe-500-2022-02-21/test_wavs/1089-134686-0001.wav \ + ./tmp/icefall-asr-librispeech-100h-transducer-stateless-multi-datasets-bpe-500-2022-02-21/test_wavs/1221-135766-0001.wav \ + ./tmp/icefall-asr-librispeech-100h-transducer-stateless-multi-datasets-bpe-500-2022-02-21/test_wavs/1221-135766-0002.wav + + - name: Run greedy search decoding (max-sym-per-frame 3) + shell: bash + run: | + export PYTHONPATH=$PWD:PYTHONPATH + cd egs/librispeech/ASR + ./transducer_stateless_multi_datasets/pretrained.py \ + --method greedy_search \ + --max-sym-per-frame 3 \ + --checkpoint ./tmp/icefall-asr-librispeech-100h-transducer-stateless-multi-datasets-bpe-500-2022-02-21/exp/pretrained.pt \ + --bpe-model ./tmp/icefall-asr-librispeech-100h-transducer-stateless-multi-datasets-bpe-500-2022-02-21/data/lang_bpe_500/bpe.model \ + ./tmp/icefall-asr-librispeech-100h-transducer-stateless-multi-datasets-bpe-500-2022-02-21/test_wavs/1089-134686-0001.wav \ + ./tmp/icefall-asr-librispeech-100h-transducer-stateless-multi-datasets-bpe-500-2022-02-21/test_wavs/1221-135766-0001.wav \ + ./tmp/icefall-asr-librispeech-100h-transducer-stateless-multi-datasets-bpe-500-2022-02-21/test_wavs/1221-135766-0002.wav + + - name: Run beam search decoding + shell: bash + run: | + export PYTHONPATH=$PWD:$PYTHONPATH + cd egs/librispeech/ASR + ./transducer_stateless_multi_datasets/pretrained.py \ + --method beam_search \ + --beam-size 4 \ + --checkpoint ./tmp/icefall-asr-librispeech-100h-transducer-stateless-multi-datasets-bpe-500-2022-02-21/exp/pretrained.pt \ + --bpe-model ./tmp/icefall-asr-librispeech-100h-transducer-stateless-multi-datasets-bpe-500-2022-02-21/data/lang_bpe_500/bpe.model \ + ./tmp/icefall-asr-librispeech-100h-transducer-stateless-multi-datasets-bpe-500-2022-02-21/test_wavs/1089-134686-0001.wav \ + 
./tmp/icefall-asr-librispeech-100h-transducer-stateless-multi-datasets-bpe-500-2022-02-21/test_wavs/1221-135766-0001.wav \ + ./tmp/icefall-asr-librispeech-100h-transducer-stateless-multi-datasets-bpe-500-2022-02-21/test_wavs/1221-135766-0002.wav + + - name: Run modified beam search decoding + shell: bash + run: | + export PYTHONPATH=$PWD:$PYTHONPATH + cd egs/librispeech/ASR + ./transducer_stateless_multi_datasets/pretrained.py \ + --method modified_beam_search \ + --beam-size 4 \ + --checkpoint ./tmp/icefall-asr-librispeech-100h-transducer-stateless-multi-datasets-bpe-500-2022-02-21/exp/pretrained.pt \ + --bpe-model ./tmp/icefall-asr-librispeech-100h-transducer-stateless-multi-datasets-bpe-500-2022-02-21/data/lang_bpe_500/bpe.model \ + ./tmp/icefall-asr-librispeech-100h-transducer-stateless-multi-datasets-bpe-500-2022-02-21/test_wavs/1089-134686-0001.wav \ + ./tmp/icefall-asr-librispeech-100h-transducer-stateless-multi-datasets-bpe-500-2022-02-21/test_wavs/1221-135766-0001.wav \ + ./tmp/icefall-asr-librispeech-100h-transducer-stateless-multi-datasets-bpe-500-2022-02-21/test_wavs/1221-135766-0002.wav diff --git a/egs/librispeech/ASR/README.md b/egs/librispeech/ASR/README.md index c8ee98d7d..211a7d120 100644 --- a/egs/librispeech/ASR/README.md +++ b/egs/librispeech/ASR/README.md @@ -9,11 +9,12 @@ for how to run models in this recipe. There are various folders containing the name `transducer` in this folder. The following table lists the differences among them. -| | Encoder | Decoder | -|------------------------|-----------|--------------------| -| `transducer` | Conformer | LSTM | -| `transducer_stateless` | Conformer | Embedding + Conv1d | -| `transducer_lstm ` | LSTM | LSTM | +| | Encoder | Decoder | Comment | +|---------------------------------------|-----------|--------------------|---------------------------------------------------| +| `transducer` | Conformer | LSTM | | +| `transducer_stateless` | Conformer | Embedding + Conv1d | | +| `transducer_lstm` | LSTM | LSTM | | +| `transducer_stateless_multi_datasets` | Conformer | Embedding + Conv1d | Using data from GigaSpeech as extra training data | The decoder in `transducer_stateless` is modified from the paper [Rnn-Transducer with Stateless Prediction Network](https://ieeexplore.ieee.org/document/9054419/). diff --git a/egs/librispeech/ASR/RESULTS-100hours.md b/egs/librispeech/ASR/RESULTS-100hours.md new file mode 100644 index 000000000..40245c917 --- /dev/null +++ b/egs/librispeech/ASR/RESULTS-100hours.md @@ -0,0 +1,75 @@ +# Results for train-clean-100 + +This page shows the WERs for test-clean/test-other using only +train-clean-100 subset as training data. 
+ +## Conformer encoder + embedding decoder + +### 2022-02-21 + +| | test-clean | test-other | comment | +|-------------------------------------|------------|------------|------------------------------------------| +| greedy search (max sym per frame 1) | 6.34 | 16.7 | --epoch 57, --avg 17, --max-duration 100 | +| greedy search (max sym per frame 2) | 6.34 | 16.7 | --epoch 57, --avg 17, --max-duration 100 | +| greedy search (max sym per frame 3) | 6.34 | 16.7 | --epoch 57, --avg 17, --max-duration 100 | +| modified beam search (beam size 4) | 6.31 | 16.3 | --epoch 57, --avg 17, --max-duration 100 | + + +The training command for reproducing is given below: + +```bash +cd egs/librispeech/ASR/ +./prepare.sh +./prepare_giga_speech.sh + +export CUDA_VISIBLE_DEVICES="0,1" + +./transducer_stateless_multi_datasets/train.py \ + --world-size 2 \ + --num-epochs 60 \ + --start-epoch 0 \ + --exp-dir transducer_stateless_multi_datasets/exp-100-2 \ + --full-libri 0 \ + --max-duration 300 \ + --lr-factor 1 \ + --bpe-model data/lang_bpe_500/bpe.model \ + --modified-transducer-prob 0.25 + --giga-prob 0.2 +``` + +The decoding command is given below: + +```bash +for epoch in 57; do + for avg in 17; do + for sym in 1 2 3; do + ./transducer_stateless_multi_datasets/decode.py \ + --epoch $epoch \ + --avg $avg \ + --exp-dir transducer_stateless_multi_datasets/exp-100-2 \ + --bpe-model ./data/lang_bpe_500/bpe.model \ + --max-duration 100 \ + --context-size 2 \ + --max-sym-per-frame $sym + done + done +done + +epoch=57 +avg=17 +./transducer_stateless_multi_datasets/decode.py \ + --epoch $epoch \ + --avg $avg \ + --exp-dir transducer_stateless_multi_datasets/exp-100-2 \ + --bpe-model ./data/lang_bpe_500/bpe.model \ + --max-duration 100 \ + --context-size 2 \ + --decoding-method modified_beam_search \ + --beam-size 4 +``` + +The tensorboard log is available at + + +A pre-trained model and decoding logs can be found at + diff --git a/egs/librispeech/ASR/local/compute_fbank_librispeech.py b/egs/librispeech/ASR/local/compute_fbank_librispeech.py index b26034eb2..5c33ff8be 100755 --- a/egs/librispeech/ASR/local/compute_fbank_librispeech.py +++ b/egs/librispeech/ASR/local/compute_fbank_librispeech.py @@ -28,7 +28,7 @@ import os from pathlib import Path import torch -from lhotse import CutSet, Fbank, FbankConfig, LilcomHdf5Writer +from lhotse import ChunkedLilcomHdf5Writer, CutSet, Fbank, FbankConfig from lhotse.recipes.utils import read_manifests_if_cached from icefall.utils import get_executor @@ -85,7 +85,7 @@ def compute_fbank_librispeech(): # when an executor is specified, make more partitions num_jobs=num_jobs if ex is None else 80, executor=ex, - storage_type=LilcomHdf5Writer, + storage_type=ChunkedLilcomHdf5Writer, ) cut_set.to_json(output_dir / f"cuts_{partition}.json.gz") diff --git a/egs/librispeech/ASR/local/compute_fbank_musan.py b/egs/librispeech/ASR/local/compute_fbank_musan.py index d44524e70..f5911746b 100755 --- a/egs/librispeech/ASR/local/compute_fbank_musan.py +++ b/egs/librispeech/ASR/local/compute_fbank_musan.py @@ -28,7 +28,7 @@ import os from pathlib import Path import torch -from lhotse import CutSet, Fbank, FbankConfig, LilcomHdf5Writer, combine +from lhotse import ChunkedLilcomHdf5Writer, CutSet, Fbank, FbankConfig, combine from lhotse.recipes.utils import read_manifests_if_cached from icefall.utils import get_executor @@ -82,7 +82,7 @@ def compute_fbank_musan(): storage_path=f"{output_dir}/feats_musan", num_jobs=num_jobs if ex is None else 80, executor=ex, - storage_type=LilcomHdf5Writer, + 
storage_type=ChunkedLilcomHdf5Writer, ) ) musan_cuts.to_json(musan_cuts_path) diff --git a/egs/librispeech/ASR/local/preprocess_gigaspeech.py b/egs/librispeech/ASR/local/preprocess_gigaspeech.py new file mode 100644 index 000000000..4168a7185 --- /dev/null +++ b/egs/librispeech/ASR/local/preprocess_gigaspeech.py @@ -0,0 +1,123 @@ +#!/usr/bin/env python3 +# Copyright 2021 Johns Hopkins University (Piotr Żelasko) +# Copyright 2021 Xiaomi Corp. (Fangjun Kuang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +import re +from pathlib import Path + +from lhotse import CutSet, SupervisionSegment +from lhotse.recipes.utils import read_manifests_if_cached + +# Similar text filtering and normalization procedure as in: +# https://github.com/SpeechColab/GigaSpeech/blob/main/toolkits/kaldi/gigaspeech_data_prep.sh + + +def normalize_text( + utt: str, + punct_pattern=re.compile(r"<(COMMA|PERIOD|QUESTIONMARK|EXCLAMATIONPOINT)>"), + whitespace_pattern=re.compile(r"\s\s+"), +) -> str: + return whitespace_pattern.sub(" ", punct_pattern.sub("", utt)) + + +def has_no_oov( + sup: SupervisionSegment, + oov_pattern=re.compile(r"<(SIL|MUSIC|NOISE|OTHER)>"), +) -> bool: + return oov_pattern.search(sup.text) is None + + +def preprocess_giga_speech(): + src_dir = Path("data/manifests") + output_dir = Path("data/fbank") + output_dir.mkdir(exist_ok=True) + + dataset_parts = ( + "DEV", + "TEST", + "XS", + "S", + "M", + "L", + "XL", + ) + + logging.info("Loading manifest (may take 4 minutes)") + manifests = read_manifests_if_cached( + dataset_parts=dataset_parts, + output_dir=src_dir, + prefix="gigaspeech", + suffix="jsonl.gz", + ) + assert manifests is not None + + for partition, m in manifests.items(): + logging.info(f"Processing {partition}") + raw_cuts_path = output_dir / f"cuts_{partition}_raw.jsonl.gz" + if raw_cuts_path.is_file(): + logging.info(f"{partition} already exists - skipping") + continue + + # Note this step makes the recipe different than LibriSpeech: + # We must filter out some utterances and remove punctuation + # to be consistent with Kaldi. + logging.info("Filtering OOV utterances from supervisions") + m["supervisions"] = m["supervisions"].filter(has_no_oov) + logging.info(f"Normalizing text in {partition}") + for sup in m["supervisions"]: + sup.text = normalize_text(sup.text) + sup.custom = {"origin": "giga"} + + # Create long-recording cut manifests. + logging.info(f"Processing {partition}") + cut_set = CutSet.from_manifests( + recordings=m["recordings"], + supervisions=m["supervisions"], + ) + # Run data augmentation that needs to be done in the + # time domain. 
+ if partition not in ["DEV", "TEST"]: + logging.info( + f"Speed perturb for {partition} with factors 0.9 and 1.1 " + "(Perturbing may take 8 minutes and saving may take 20 minutes)" + ) + cut_set = ( + cut_set + + cut_set.perturb_speed(0.9) + + cut_set.perturb_speed(1.1) + ) + + logging.info("About to split cuts into smaller chunks.") + cut_set = cut_set.trim_to_supervisions( + keep_overlapping=False, min_duration=None + ) + logging.info(f"Saving to {raw_cuts_path}") + cut_set.to_file(raw_cuts_path) + + +def main(): + formatter = ( + "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s" + ) + logging.basicConfig(format=formatter, level=logging.INFO) + + preprocess_giga_speech() + + +if __name__ == "__main__": + main() diff --git a/egs/librispeech/ASR/prepare_giga_speech.sh b/egs/librispeech/ASR/prepare_giga_speech.sh new file mode 100755 index 000000000..49124c4d7 --- /dev/null +++ b/egs/librispeech/ASR/prepare_giga_speech.sh @@ -0,0 +1,109 @@ +#!/usr/bin/env bash + +set -eou pipefail + +nj=15 +stage=-1 +stop_stage=100 + +# We assume dl_dir (download dir) contains the following +# directories and files. If not, they will be downloaded +# by this script automatically. +# +# - $dl_dir/GigaSpeech +# You can find audio, dict, GigaSpeech.json inside it. +# You can apply for the download credentials by following +# https://github.com/SpeechColab/GigaSpeech#download + +# Number of hours for GigaSpeech subsets +# XL 10k hours +# L 2.5k hours +# M 1k hours +# S 250 hours +# XS 10 hours +# DEV 12 hours +# Test 40 hours + +dl_dir=$PWD/download + +. shared/parse_options.sh || exit 1 + +# All files generated by this script are saved in "data". +# You can safely remove "data" and rerun this script to regenerate it. +mkdir -p data + +log() { + # This function is from espnet + local fname=${BASH_SOURCE[1]##*/} + echo -e "$(date '+%Y-%m-%d %H:%M:%S') (${fname}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*" +} + +log "dl_dir: $dl_dir" + +if [ $stage -le 0 ] && [ $stop_stage -ge 0 ]; then + log "Stage 0: Download data" + + [ ! -e $dl_dir/GigaSpeech ] && mkdir -p $dl_dir/GigaSpeech + + # If you have pre-downloaded it to /path/to/GigaSpeech, + # you can create a symlink + # + # ln -sfv /path/to/GigaSpeech $dl_dir/GigaSpeech + # + if [ ! -d $dl_dir/GigaSpeech/audio ] && [ ! -f $dl_dir/GigaSpeech.json ]; then + # Check credentials. + if [ ! -f $dl_dir/password ]; then + echo -n "$0: Please apply for the download credentials by following" + echo -n "https://github.com/SpeechColab/GigaSpeech#dataset-download" + echo " and save it to $dl_dir/password." + exit 1; + fi + PASSWORD=`cat $dl_dir/password 2>/dev/null` + if [ -z "$PASSWORD" ]; then + echo "$0: Error, $dl_dir/password is empty." + exit 1; + fi + PASSWORD_MD5=`echo $PASSWORD | md5sum | cut -d ' ' -f 1` + if [[ $PASSWORD_MD5 != "dfbf0cde1a3ce23749d8d81e492741b8" ]]; then + echo "$0: Error, invalid $dl_dir/password." + exit 1; + fi + # Download XL, DEV and TEST sets by default. 
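+    # Note: the command below actually fetches all subsets
+    # (XS, S, M, L, XL) plus DEV and TEST.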
+ lhotse download gigaspeech \ + --subset XL \ + --subset L \ + --subset M \ + --subset S \ + --subset XS \ + --subset DEV \ + --subset TEST \ + --host tsinghua \ + $dl_dir/password $dl_dir/GigaSpeech + fi +fi + +if [ $stage -le 1 ] && [ $stop_stage -ge 1 ]; then + log "Stage 1: Prepare GigaSpeech manifest (may take 30 minutes)" + # We assume that you have downloaded the GigaSpeech corpus + # to $dl_dir/GigaSpeech + mkdir -p data/manifests + lhotse prepare gigaspeech \ + --subset XL \ + --subset L \ + --subset M \ + --subset S \ + --subset XS \ + --subset DEV \ + --subset TEST \ + -j $nj \ + $dl_dir/GigaSpeech data/manifests +fi + +if [ $stage -le 2 ] && [ $stop_stage -ge 2 ]; then + log "Stage 2: Preprocess GigaSpeech manifest" + if [ ! -f data/fbank/.preprocess_complete ]; then + log "It may take 2 hours for this stage" + python3 ./local/preprocess_gigaspeech.py + touch data/fbank/.preprocess_complete + fi +fi diff --git a/egs/librispeech/ASR/transducer_stateless_multi_datasets/README.md b/egs/librispeech/ASR/transducer_stateless_multi_datasets/README.md new file mode 100644 index 000000000..574fbf78e --- /dev/null +++ b/egs/librispeech/ASR/transducer_stateless_multi_datasets/README.md @@ -0,0 +1,27 @@ +## Introduction + +The decoder, i.e., the prediction network, is from +https://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=9054419 +(Rnn-Transducer with Stateless Prediction Network) + +You can use the following command to start the training: + +```bash +cd egs/librispeech/ASR +./prepare.sh +./prepare_giga_speech.sh + +export CUDA_VISIBLE_DEVICES="0,1" + +./transducer_stateless_multi_datasets/train.py \ + --world-size 2 \ + --num-epochs 60 \ + --start-epoch 0 \ + --exp-dir transducer_stateless_multi_datasets/exp-100 \ + --full-libri 0 \ + --max-duration 300 \ + --lr-factor 1 \ + --bpe-model data/lang_bpe_500/bpe.model \ + --modified-transducer-prob 0.25 + --giga-prob 0.2 +``` diff --git a/egs/librispeech/ASR/transducer_stateless_multi_datasets/__init__.py b/egs/librispeech/ASR/transducer_stateless_multi_datasets/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/egs/librispeech/ASR/transducer_stateless_multi_datasets/asr_datamodule.py b/egs/librispeech/ASR/transducer_stateless_multi_datasets/asr_datamodule.py new file mode 100644 index 000000000..fe0d0a872 --- /dev/null +++ b/egs/librispeech/ASR/transducer_stateless_multi_datasets/asr_datamodule.py @@ -0,0 +1,304 @@ +# Copyright 2021 Piotr Żelasko +# 2022 Xiaomi Corp. (authors: Fangjun Kuang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import argparse +import logging +from pathlib import Path +from typing import Optional + +from lhotse import CutSet, Fbank, FbankConfig +from lhotse.dataset import ( + BucketingSampler, + CutMix, + DynamicBucketingSampler, + K2SpeechRecognitionDataset, + SpecAugment, +) +from lhotse.dataset.input_strategies import ( + OnTheFlyFeatures, + PrecomputedFeatures, +) +from torch.utils.data import DataLoader + +from icefall.utils import str2bool + + +class AsrDataModule: + def __init__(self, args: argparse.Namespace): + self.args = args + + @classmethod + def add_arguments(cls, parser: argparse.ArgumentParser): + group = parser.add_argument_group( + title="ASR data related options", + description="These options are used for the preparation of " + "PyTorch DataLoaders from Lhotse CutSet's -- they control the " + "effective batch sizes, sampling strategies, applied data " + "augmentations, etc.", + ) + + group.add_argument( + "--max-duration", + type=int, + default=200.0, + help="Maximum pooled recordings duration (seconds) in a " + "single batch. You can reduce it if it causes CUDA OOM.", + ) + + group.add_argument( + "--bucketing-sampler", + type=str2bool, + default=True, + help="When enabled, the batches will come from buckets of " + "similar duration (saves padding frames).", + ) + + group.add_argument( + "--num-buckets", + type=int, + default=30, + help="The number of buckets for the BucketingSampler " + "and DynamicBucketingSampler." + "(you might want to increase it for larger datasets).", + ) + + group.add_argument( + "--shuffle", + type=str2bool, + default=True, + help="When enabled (=default), the examples will be " + "shuffled for each epoch.", + ) + + group.add_argument( + "--return-cuts", + type=str2bool, + default=True, + help="When enabled, each batch will have the " + "field: batch['supervisions']['cut'] with the cuts that " + "were used to construct it.", + ) + + group.add_argument( + "--num-workers", + type=int, + default=2, + help="The number of training dataloader workers that " + "collect the batches.", + ) + + group.add_argument( + "--enable-spec-aug", + type=str2bool, + default=True, + help="When enabled, use SpecAugment for training dataset.", + ) + + group.add_argument( + "--spec-aug-time-warp-factor", + type=int, + default=80, + help="Used only when --enable-spec-aug is True. " + "It specifies the factor for time warping in SpecAugment. " + "Larger values mean more warping. " + "A value less than 1 means to disable time warp.", + ) + + group.add_argument( + "--enable-musan", + type=str2bool, + default=True, + help="When enabled, select noise from MUSAN and mix it" + "with training dataset. ", + ) + + group.add_argument( + "--manifest-dir", + type=Path, + default=Path("data/fbank"), + help="Path to directory with train/valid/test cuts.", + ) + + group.add_argument( + "--on-the-fly-feats", + type=str2bool, + default=False, + help="When enabled, use on-the-fly cut mixing and feature " + "extraction. Will drop existing precomputed feature manifests " + "if available. Used only in dev/test CutSet", + ) + + def train_dataloaders( + self, + cuts_train: CutSet, + dynamic_bucketing: bool, + on_the_fly_feats: bool, + cuts_musan: Optional[CutSet] = None, + ) -> DataLoader: + """ + Args: + cuts_train: + Cuts for training. + cuts_musan: + If not None, it is the cuts for mixing. + dynamic_bucketing: + True to use DynamicBucketingSampler; + False to use BucketingSampler. + on_the_fly_feats: + True to use OnTheFlyFeatures; + False to use PrecomputedFeatures. 
+ """ + transforms = [] + if cuts_musan is not None: + logging.info("Enable MUSAN") + transforms.append( + CutMix( + cuts=cuts_musan, prob=0.5, snr=(10, 20), preserve_id=True + ) + ) + else: + logging.info("Disable MUSAN") + + input_transforms = [] + + if self.args.enable_spec_aug: + logging.info("Enable SpecAugment") + logging.info( + f"Time warp factor: {self.args.spec_aug_time_warp_factor}" + ) + input_transforms.append( + SpecAugment( + time_warp_factor=self.args.spec_aug_time_warp_factor, + num_frame_masks=2, + features_mask_size=27, + num_feature_masks=2, + frames_mask_size=100, + ) + ) + else: + logging.info("Disable SpecAugment") + + logging.info("About to create train dataset") + train = K2SpeechRecognitionDataset( + cut_transforms=transforms, + input_transforms=input_transforms, + return_cuts=self.args.return_cuts, + ) + + # NOTE: the PerturbSpeed transform should be added only if we + # remove it from data prep stage. + # Add on-the-fly speed perturbation; since originally it would + # have increased epoch size by 3, we will apply prob 2/3 and use + # 3x more epochs. + # Speed perturbation probably should come first before + # concatenation, but in principle the transforms order doesn't have + # to be strict (e.g. could be randomized) + # transforms = [PerturbSpeed(factors=[0.9, 1.1], p=2/3)] + transforms # noqa + # Drop feats to be on the safe side. + train = K2SpeechRecognitionDataset( + cut_transforms=transforms, + input_strategy=( + OnTheFlyFeatures(Fbank(FbankConfig(num_mel_bins=80))) + if on_the_fly_feats + else PrecomputedFeatures() + ), + input_transforms=input_transforms, + return_cuts=self.args.return_cuts, + ) + + if dynamic_bucketing: + logging.info("Using DynamicBucketingSampler.") + train_sampler = DynamicBucketingSampler( + cuts_train, + max_duration=self.args.max_duration, + shuffle=self.args.shuffle, + num_buckets=self.args.num_buckets, + drop_last=True, + ) + else: + logging.info("Using BucketingSampler.") + train_sampler = BucketingSampler( + cuts_train, + max_duration=self.args.max_duration, + shuffle=self.args.shuffle, + num_buckets=self.args.num_buckets, + bucket_method="equal_duration", + drop_last=True, + ) + + logging.info("About to create train dataloader") + train_dl = DataLoader( + train, + sampler=train_sampler, + batch_size=None, + num_workers=self.args.num_workers, + persistent_workers=False, + ) + return train_dl + + def valid_dataloaders(self, cuts_valid: CutSet) -> DataLoader: + transforms = [] + + logging.info("About to create dev dataset") + if self.args.on_the_fly_feats: + validate = K2SpeechRecognitionDataset( + cut_transforms=transforms, + input_strategy=OnTheFlyFeatures( + Fbank(FbankConfig(num_mel_bins=80)) + ), + return_cuts=self.args.return_cuts, + ) + else: + validate = K2SpeechRecognitionDataset( + cut_transforms=transforms, + return_cuts=self.args.return_cuts, + ) + valid_sampler = BucketingSampler( + cuts_valid, + max_duration=self.args.max_duration, + shuffle=False, + ) + logging.info("About to create dev dataloader") + valid_dl = DataLoader( + validate, + sampler=valid_sampler, + batch_size=None, + num_workers=2, + persistent_workers=False, + ) + + return valid_dl + + def test_dataloaders(self, cuts: CutSet) -> DataLoader: + logging.debug("About to create test dataset") + test = K2SpeechRecognitionDataset( + input_strategy=OnTheFlyFeatures(Fbank(FbankConfig(num_mel_bins=80))) + if self.args.on_the_fly_feats + else PrecomputedFeatures(), + return_cuts=self.args.return_cuts, + ) + sampler = BucketingSampler( + cuts, 
max_duration=self.args.max_duration, shuffle=False + ) + logging.debug("About to create test dataloader") + test_dl = DataLoader( + test, + batch_size=None, + sampler=sampler, + num_workers=self.args.num_workers, + ) + return test_dl diff --git a/egs/librispeech/ASR/transducer_stateless_multi_datasets/beam_search.py b/egs/librispeech/ASR/transducer_stateless_multi_datasets/beam_search.py new file mode 100644 index 000000000..c5efb733d --- /dev/null +++ b/egs/librispeech/ASR/transducer_stateless_multi_datasets/beam_search.py @@ -0,0 +1,541 @@ +# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dataclasses import dataclass +from typing import Dict, List, Optional + +import torch +from model import Transducer + + +def greedy_search( + model: Transducer, encoder_out: torch.Tensor, max_sym_per_frame: int +) -> List[int]: + """ + Args: + model: + An instance of `Transducer`. + encoder_out: + A tensor of shape (N, T, C) from the encoder. Support only N==1 for now. + max_sym_per_frame: + Maximum number of symbols per frame. If it is set to 0, the WER + would be 100%. + Returns: + Return the decoded result. + """ + assert encoder_out.ndim == 3 + + # support only batch_size == 1 for now + assert encoder_out.size(0) == 1, encoder_out.size(0) + + blank_id = model.decoder.blank_id + context_size = model.decoder.context_size + + device = model.device + + decoder_input = torch.tensor( + [blank_id] * context_size, device=device, dtype=torch.int64 + ).reshape(1, context_size) + + decoder_out = model.decoder(decoder_input, need_pad=False) + + T = encoder_out.size(1) + t = 0 + hyp = [blank_id] * context_size + + # Maximum symbols per utterance. + max_sym_per_utt = 1000 + + # symbols per frame + sym_per_frame = 0 + + # symbols per utterance decoded so far + sym_per_utt = 0 + + encoder_out_len = torch.tensor([1]) + decoder_out_len = torch.tensor([1]) + + while t < T and sym_per_utt < max_sym_per_utt: + if sym_per_frame >= max_sym_per_frame: + sym_per_frame = 0 + t += 1 + continue + + # fmt: off + current_encoder_out = encoder_out[:, t:t+1, :] + # fmt: on + logits = model.joiner( + current_encoder_out, decoder_out, encoder_out_len, decoder_out_len + ) + # logits is (1, 1, 1, vocab_size) + + y = logits.argmax().item() + if y != blank_id: + hyp.append(y) + decoder_input = torch.tensor( + [hyp[-context_size:]], device=device + ).reshape(1, context_size) + + decoder_out = model.decoder(decoder_input, need_pad=False) + + sym_per_utt += 1 + sym_per_frame += 1 + else: + sym_per_frame = 0 + t += 1 + hyp = hyp[context_size:] # remove blanks + + return hyp + + +@dataclass +class Hypothesis: + # The predicted tokens so far. + # Newly predicted tokens are appended to `ys`. + ys: List[int] + + # The log prob of ys. + # It contains only one entry. 
+ log_prob: torch.Tensor + + @property + def key(self) -> str: + """Return a string representation of self.ys""" + return "_".join(map(str, self.ys)) + + +class HypothesisList(object): + def __init__(self, data: Optional[Dict[str, Hypothesis]] = None) -> None: + """ + Args: + data: + A dict of Hypotheses. Its key is its `value.key`. + """ + if data is None: + self._data = {} + else: + self._data = data + + @property + def data(self) -> Dict[str, Hypothesis]: + return self._data + + def add(self, hyp: Hypothesis) -> None: + """Add a Hypothesis to `self`. + + If `hyp` already exists in `self`, its probability is updated using + `log-sum-exp` with the existed one. + + Args: + hyp: + The hypothesis to be added. + """ + key = hyp.key + if key in self: + old_hyp = self._data[key] # shallow copy + torch.logaddexp( + old_hyp.log_prob, hyp.log_prob, out=old_hyp.log_prob + ) + else: + self._data[key] = hyp + + def get_most_probable(self, length_norm: bool = False) -> Hypothesis: + """Get the most probable hypothesis, i.e., the one with + the largest `log_prob`. + + Args: + length_norm: + If True, the `log_prob` of a hypothesis is normalized by the + number of tokens in it. + Returns: + Return the hypothesis that has the largest `log_prob`. + """ + if length_norm: + return max( + self._data.values(), key=lambda hyp: hyp.log_prob / len(hyp.ys) + ) + else: + return max(self._data.values(), key=lambda hyp: hyp.log_prob) + + def remove(self, hyp: Hypothesis) -> None: + """Remove a given hypothesis. + + Caution: + `self` is modified **in-place**. + + Args: + hyp: + The hypothesis to be removed from `self`. + Note: It must be contained in `self`. Otherwise, + an exception is raised. + """ + key = hyp.key + assert key in self, f"{key} does not exist" + del self._data[key] + + def filter(self, threshold: torch.Tensor) -> "HypothesisList": + """Remove all Hypotheses whose log_prob is less than threshold. + + Caution: + `self` is not modified. Instead, a new HypothesisList is returned. + + Returns: + Return a new HypothesisList containing all hypotheses from `self` + with `log_prob` being greater than the given `threshold`. + """ + ans = HypothesisList() + for _, hyp in self._data.items(): + if hyp.log_prob > threshold: + ans.add(hyp) # shallow copy + return ans + + def topk(self, k: int) -> "HypothesisList": + """Return the top-k hypothesis.""" + hyps = list(self._data.items()) + + hyps = sorted(hyps, key=lambda h: h[1].log_prob, reverse=True)[:k] + + ans = HypothesisList(dict(hyps)) + return ans + + def __contains__(self, key: str): + return key in self._data + + def __iter__(self): + return iter(self._data.values()) + + def __len__(self) -> int: + return len(self._data) + + def __str__(self) -> str: + s = [] + for key in self: + s.append(key) + return ", ".join(s) + + +def run_decoder( + ys: List[int], + model: Transducer, + decoder_cache: Dict[str, torch.Tensor], +) -> torch.Tensor: + """Run the neural decoder model for a given hypothesis. + + Args: + ys: + The current hypothesis. + model: + The transducer model. + decoder_cache: + Cache to save computations. + Returns: + Return a 1-D tensor of shape (decoder_out_dim,) containing + output of `model.decoder`. 
+ """ + context_size = model.decoder.context_size + key = "_".join(map(str, ys[-context_size:])) + if key in decoder_cache: + return decoder_cache[key] + + device = model.device + + decoder_input = torch.tensor([ys[-context_size:]], device=device).reshape( + 1, context_size + ) + + decoder_out = model.decoder(decoder_input, need_pad=False) + decoder_cache[key] = decoder_out + + return decoder_out + + +def run_joiner( + key: str, + model: Transducer, + encoder_out: torch.Tensor, + decoder_out: torch.Tensor, + encoder_out_len: torch.Tensor, + decoder_out_len: torch.Tensor, + joint_cache: Dict[str, torch.Tensor], +): + """Run the joint network given outputs from the encoder and decoder. + + Args: + key: + A key into the `joint_cache`. + model: + The transducer model. + encoder_out: + A tensor of shape (1, 1, encoder_out_dim). + decoder_out: + A tensor of shape (1, 1, decoder_out_dim). + encoder_out_len: + A tensor with value [1]. + decoder_out_len: + A tensor with value [1]. + joint_cache: + A dict to save computations. + Returns: + Return a tensor from the output of log-softmax. + Its shape is (vocab_size,). + """ + if key in joint_cache: + return joint_cache[key] + + logits = model.joiner( + encoder_out, + decoder_out, + encoder_out_len, + decoder_out_len, + ) + + # TODO(fangjun): Scale the blank posterior + log_prob = logits.log_softmax(dim=-1) + # log_prob is (1, 1, 1, vocab_size) + + log_prob = log_prob.squeeze() + # Now log_prob is (vocab_size,) + + joint_cache[key] = log_prob + + return log_prob + + +def modified_beam_search( + model: Transducer, + encoder_out: torch.Tensor, + beam: int = 4, +) -> List[int]: + """It limits the maximum number of symbols per frame to 1. + + Args: + model: + An instance of `Transducer`. + encoder_out: + A tensor of shape (N, T, C) from the encoder. Support only N==1 for now. + beam: + Beam size. + Returns: + Return the decoded result. 
+ """ + + assert encoder_out.ndim == 3 + + # support only batch_size == 1 for now + assert encoder_out.size(0) == 1, encoder_out.size(0) + blank_id = model.decoder.blank_id + context_size = model.decoder.context_size + + device = model.device + + decoder_input = torch.tensor( + [blank_id] * context_size, device=device + ).reshape(1, context_size) + + decoder_out = model.decoder(decoder_input, need_pad=False) + + T = encoder_out.size(1) + + B = HypothesisList() + B.add( + Hypothesis( + ys=[blank_id] * context_size, + log_prob=torch.zeros(1, dtype=torch.float32, device=device), + ) + ) + + encoder_out_len = torch.tensor([1]) + decoder_out_len = torch.tensor([1]) + + for t in range(T): + # fmt: off + current_encoder_out = encoder_out[:, t:t+1, :] + # current_encoder_out is of shape (1, 1, encoder_out_dim) + # fmt: on + A = list(B) + B = HypothesisList() + + ys_log_probs = torch.cat([hyp.log_prob.reshape(1, 1) for hyp in A]) + # ys_log_probs is of shape (num_hyps, 1) + + decoder_input = torch.tensor( + [hyp.ys[-context_size:] for hyp in A], + device=device, + ) + # decoder_input is of shape (num_hyps, context_size) + + decoder_out = model.decoder(decoder_input, need_pad=False) + # decoder_output is of shape (num_hyps, 1, decoder_output_dim) + + current_encoder_out = current_encoder_out.expand( + decoder_out.size(0), 1, -1 + ) + + logits = model.joiner( + current_encoder_out, + decoder_out, + encoder_out_len.expand(decoder_out.size(0)), + decoder_out_len.expand(decoder_out.size(0)), + ) + # logits is of shape (num_hyps, vocab_size) + log_probs = logits.log_softmax(dim=-1) + + log_probs.add_(ys_log_probs) + + log_probs = log_probs.reshape(-1) + topk_log_probs, topk_indexes = log_probs.topk(beam) + + # topk_hyp_indexes are indexes into `A` + topk_hyp_indexes = topk_indexes // logits.size(-1) + topk_token_indexes = topk_indexes % logits.size(-1) + + topk_hyp_indexes = topk_hyp_indexes.tolist() + topk_token_indexes = topk_token_indexes.tolist() + + for i in range(len(topk_hyp_indexes)): + hyp = A[topk_hyp_indexes[i]] + new_ys = hyp.ys[:] + new_token = topk_token_indexes[i] + if new_token != blank_id: + new_ys.append(new_token) + new_log_prob = topk_log_probs[i] + new_hyp = Hypothesis(ys=new_ys, log_prob=new_log_prob) + B.add(new_hyp) + + best_hyp = B.get_most_probable(length_norm=True) + ys = best_hyp.ys[context_size:] # [context_size:] to remove blanks + + return ys + + +def beam_search( + model: Transducer, + encoder_out: torch.Tensor, + beam: int = 4, +) -> List[int]: + """ + It implements Algorithm 1 in https://arxiv.org/pdf/1211.3711.pdf + + espnet/nets/beam_search_transducer.py#L247 is used as a reference. + + Args: + model: + An instance of `Transducer`. + encoder_out: + A tensor of shape (N, T, C) from the encoder. Support only N==1 for now. + beam: + Beam size. + Returns: + Return the decoded result. 
+ """ + assert encoder_out.ndim == 3 + + # support only batch_size == 1 for now + assert encoder_out.size(0) == 1, encoder_out.size(0) + blank_id = model.decoder.blank_id + context_size = model.decoder.context_size + + device = model.device + + decoder_input = torch.tensor( + [blank_id] * context_size, device=device + ).reshape(1, context_size) + + decoder_out = model.decoder(decoder_input, need_pad=False) + + T = encoder_out.size(1) + t = 0 + + B = HypothesisList() + B.add( + Hypothesis( + ys=[blank_id] * context_size, + log_prob=torch.zeros(1, dtype=torch.float32, device=device), + ) + ) + + max_sym_per_utt = 20000 + + sym_per_utt = 0 + + encoder_out_len = torch.tensor([1]) + decoder_out_len = torch.tensor([1]) + + decoder_cache: Dict[str, torch.Tensor] = {} + + while t < T and sym_per_utt < max_sym_per_utt: + # fmt: off + current_encoder_out = encoder_out[:, t:t+1, :] + # fmt: on + A = B + B = HypothesisList() + + joint_cache: Dict[str, torch.Tensor] = {} + + while True: + y_star = A.get_most_probable() + A.remove(y_star) + + decoder_out = run_decoder( + ys=y_star.ys, model=model, decoder_cache=decoder_cache + ) + + key = "_".join(map(str, y_star.ys[-context_size:])) + key += f"-t-{t}" + log_prob = run_joiner( + key=key, + model=model, + encoder_out=current_encoder_out, + decoder_out=decoder_out, + encoder_out_len=encoder_out_len, + decoder_out_len=decoder_out_len, + joint_cache=joint_cache, + ) + + # First, process the blank symbol + skip_log_prob = log_prob[blank_id] + new_y_star_log_prob = y_star.log_prob + skip_log_prob + + # ys[:] returns a copy of ys + B.add(Hypothesis(ys=y_star.ys[:], log_prob=new_y_star_log_prob)) + + # Second, process other non-blank labels + values, indices = log_prob.topk(beam + 1) + for idx in range(values.size(0)): + i = indices[idx].item() + if i == blank_id: + continue + + new_ys = y_star.ys + [i] + + new_log_prob = y_star.log_prob + values[idx] + A.add(Hypothesis(ys=new_ys, log_prob=new_log_prob)) + + # Check whether B contains more than "beam" elements more probable + # than the most probable in A + A_most_probable = A.get_most_probable() + + kept_B = B.filter(A_most_probable.log_prob) + + if len(kept_B) >= beam: + B = kept_B.topk(beam) + break + + t += 1 + + best_hyp = B.get_most_probable(length_norm=True) + ys = best_hyp.ys[context_size:] # [context_size:] to remove blanks + return ys diff --git a/egs/librispeech/ASR/transducer_stateless_multi_datasets/conformer.py b/egs/librispeech/ASR/transducer_stateless_multi_datasets/conformer.py new file mode 100644 index 000000000..81d7708f9 --- /dev/null +++ b/egs/librispeech/ASR/transducer_stateless_multi_datasets/conformer.py @@ -0,0 +1,920 @@ +#!/usr/bin/env python3 +# Copyright (c) 2021 University of Chinese Academy of Sciences (author: Han Zhu) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
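The three search functions above (greedy_search, beam_search and modified_beam_search) share one calling convention: each takes a Transducer model plus a (1, T, C) encoder output for a single utterance and returns the decoded token IDs. A minimal usage sketch, assuming a trained model whose `device` attribute has been set; the `features` tensor and the helper name are illustrative only, not part of this patch:

    import torch

    @torch.no_grad()
    def decode_utterance(model, features: torch.Tensor):
        # features: (1, num_frames, 80) fbank features on model.device
        feature_lens = torch.tensor([features.size(1)], device=model.device)
        encoder_out, encoder_out_lens = model.encoder(
            x=features, x_lens=feature_lens
        )
        # keep only the frames that are valid for this utterance
        encoder_out = encoder_out[:, : encoder_out_lens[0]]
        hyp = greedy_search(model, encoder_out, max_sym_per_frame=1)
        # or: hyp = beam_search(model, encoder_out, beam=4)
        # or: hyp = modified_beam_search(model, encoder_out, beam=4)
        return hyp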
+ + +import math +import warnings +from typing import Optional, Tuple + +import torch +from torch import Tensor, nn +from transformer import Transformer + +from icefall.utils import make_pad_mask + + +class Conformer(Transformer): + """ + Args: + num_features (int): Number of input features + output_dim (int): Number of output dimension + subsampling_factor (int): subsampling factor of encoder (the convolution layers before transformers) + d_model (int): attention dimension + nhead (int): number of head + dim_feedforward (int): feedforward dimention + num_encoder_layers (int): number of encoder layers + dropout (float): dropout rate + cnn_module_kernel (int): Kernel size of convolution module + normalize_before (bool): whether to use layer_norm before the first block. + vgg_frontend (bool): whether to use vgg frontend. + """ + + def __init__( + self, + num_features: int, + output_dim: int, + subsampling_factor: int = 4, + d_model: int = 256, + nhead: int = 4, + dim_feedforward: int = 2048, + num_encoder_layers: int = 12, + dropout: float = 0.1, + cnn_module_kernel: int = 31, + normalize_before: bool = True, + vgg_frontend: bool = False, + ) -> None: + super(Conformer, self).__init__( + num_features=num_features, + output_dim=output_dim, + subsampling_factor=subsampling_factor, + d_model=d_model, + nhead=nhead, + dim_feedforward=dim_feedforward, + num_encoder_layers=num_encoder_layers, + dropout=dropout, + normalize_before=normalize_before, + vgg_frontend=vgg_frontend, + ) + + self.encoder_pos = RelPositionalEncoding(d_model, dropout) + + encoder_layer = ConformerEncoderLayer( + d_model, + nhead, + dim_feedforward, + dropout, + cnn_module_kernel, + normalize_before, + ) + self.encoder = ConformerEncoder(encoder_layer, num_encoder_layers) + self.normalize_before = normalize_before + if self.normalize_before: + self.after_norm = nn.LayerNorm(d_model) + else: + # Note: TorchScript detects that self.after_norm could be used inside forward() + # and throws an error without this change. + self.after_norm = identity + + def forward( + self, x: torch.Tensor, x_lens: torch.Tensor + ) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Args: + x: + The input tensor. Its shape is (batch_size, seq_len, feature_dim). + x_lens: + A tensor of shape (batch_size,) containing the number of frames in + `x` before padding. + Returns: + Return a tuple containing 2 tensors: + - logits, its shape is (batch_size, output_seq_len, output_dim) + - logit_lens, a tensor of shape (batch_size,) containing the number + of frames in `logits` before padding. + """ + x = self.encoder_embed(x) + x, pos_emb = self.encoder_pos(x) + x = x.permute(1, 0, 2) # (N, T, C) -> (T, N, C) + + # Caution: We assume the subsampling factor is 4! + lengths = ((x_lens - 1) // 2 - 1) // 2 + assert x.size(0) == lengths.max().item() + mask = make_pad_mask(lengths) + + x = self.encoder(x, pos_emb, src_key_padding_mask=mask) # (T, N, C) + + if self.normalize_before: + x = self.after_norm(x) + + logits = self.encoder_output_layer(x) + logits = logits.permute(1, 0, 2) # (T, N, C) ->(N, T, C) + + return logits, lengths + + +class ConformerEncoderLayer(nn.Module): + """ + ConformerEncoderLayer is made up of self-attn, feedforward and convolution networks. + See: "Conformer: Convolution-augmented Transformer for Speech Recognition" + + Args: + d_model: the number of expected features in the input (required). + nhead: the number of heads in the multiheadattention models (required). 
+ dim_feedforward: the dimension of the feedforward network model (default=2048). + dropout: the dropout value (default=0.1). + cnn_module_kernel (int): Kernel size of convolution module. + normalize_before: whether to use layer_norm before the first block. + + Examples:: + >>> encoder_layer = ConformerEncoderLayer(d_model=512, nhead=8) + >>> src = torch.rand(10, 32, 512) + >>> pos_emb = torch.rand(32, 19, 512) + >>> out = encoder_layer(src, pos_emb) + """ + + def __init__( + self, + d_model: int, + nhead: int, + dim_feedforward: int = 2048, + dropout: float = 0.1, + cnn_module_kernel: int = 31, + normalize_before: bool = True, + ) -> None: + super(ConformerEncoderLayer, self).__init__() + self.self_attn = RelPositionMultiheadAttention( + d_model, nhead, dropout=0.0 + ) + + self.feed_forward = nn.Sequential( + nn.Linear(d_model, dim_feedforward), + Swish(), + nn.Dropout(dropout), + nn.Linear(dim_feedforward, d_model), + ) + + self.feed_forward_macaron = nn.Sequential( + nn.Linear(d_model, dim_feedforward), + Swish(), + nn.Dropout(dropout), + nn.Linear(dim_feedforward, d_model), + ) + + self.conv_module = ConvolutionModule(d_model, cnn_module_kernel) + + self.norm_ff_macaron = nn.LayerNorm( + d_model + ) # for the macaron style FNN module + self.norm_ff = nn.LayerNorm(d_model) # for the FNN module + self.norm_mha = nn.LayerNorm(d_model) # for the MHA module + + self.ff_scale = 0.5 + + self.norm_conv = nn.LayerNorm(d_model) # for the CNN module + self.norm_final = nn.LayerNorm( + d_model + ) # for the final output of the block + + self.dropout = nn.Dropout(dropout) + + self.normalize_before = normalize_before + + def forward( + self, + src: Tensor, + pos_emb: Tensor, + src_mask: Optional[Tensor] = None, + src_key_padding_mask: Optional[Tensor] = None, + ) -> Tensor: + """ + Pass the input through the encoder layer. + + Args: + src: the sequence to the encoder layer (required). + pos_emb: Positional embedding tensor (required). + src_mask: the mask for the src sequence (optional). + src_key_padding_mask: the mask for the src keys per batch (optional). + + Shape: + src: (S, N, E). + pos_emb: (N, 2*S-1, E) + src_mask: (S, S). + src_key_padding_mask: (N, S). 
+ S is the source sequence length, N is the batch size, E is the feature number + """ + + # macaron style feed forward module + residual = src + if self.normalize_before: + src = self.norm_ff_macaron(src) + src = residual + self.ff_scale * self.dropout( + self.feed_forward_macaron(src) + ) + if not self.normalize_before: + src = self.norm_ff_macaron(src) + + # multi-headed self-attention module + residual = src + if self.normalize_before: + src = self.norm_mha(src) + src_att = self.self_attn( + src, + src, + src, + pos_emb=pos_emb, + attn_mask=src_mask, + key_padding_mask=src_key_padding_mask, + )[0] + src = residual + self.dropout(src_att) + if not self.normalize_before: + src = self.norm_mha(src) + + # convolution module + residual = src + if self.normalize_before: + src = self.norm_conv(src) + src = residual + self.dropout(self.conv_module(src)) + if not self.normalize_before: + src = self.norm_conv(src) + + # feed forward module + residual = src + if self.normalize_before: + src = self.norm_ff(src) + src = residual + self.ff_scale * self.dropout(self.feed_forward(src)) + if not self.normalize_before: + src = self.norm_ff(src) + + if self.normalize_before: + src = self.norm_final(src) + + return src + + +class ConformerEncoder(nn.TransformerEncoder): + r"""ConformerEncoder is a stack of N encoder layers + + Args: + encoder_layer: an instance of the ConformerEncoderLayer() class (required). + num_layers: the number of sub-encoder-layers in the encoder (required). + norm: the layer normalization component (optional). + + Examples:: + >>> encoder_layer = ConformerEncoderLayer(d_model=512, nhead=8) + >>> conformer_encoder = ConformerEncoder(encoder_layer, num_layers=6) + >>> src = torch.rand(10, 32, 512) + >>> pos_emb = torch.rand(32, 19, 512) + >>> out = conformer_encoder(src, pos_emb) + """ + + def __init__( + self, encoder_layer: nn.Module, num_layers: int, norm: nn.Module = None + ) -> None: + super(ConformerEncoder, self).__init__( + encoder_layer=encoder_layer, num_layers=num_layers, norm=norm + ) + + def forward( + self, + src: Tensor, + pos_emb: Tensor, + mask: Optional[Tensor] = None, + src_key_padding_mask: Optional[Tensor] = None, + ) -> Tensor: + r"""Pass the input through the encoder layers in turn. + + Args: + src: the sequence to the encoder (required). + pos_emb: Positional embedding tensor (required). + mask: the mask for the src sequence (optional). + src_key_padding_mask: the mask for the src keys per batch (optional). + + Shape: + src: (S, N, E). + pos_emb: (N, 2*S-1, E) + mask: (S, S). + src_key_padding_mask: (N, S). + S is the source sequence length, T is the target sequence length, N is the batch size, E is the feature number + + """ + output = src + + for mod in self.layers: + output = mod( + output, + pos_emb, + src_mask=mask, + src_key_padding_mask=src_key_padding_mask, + ) + + if self.norm is not None: + output = self.norm(output) + + return output + + +class RelPositionalEncoding(torch.nn.Module): + """Relative positional encoding module. + + See : Appendix B in "Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context" + Modified from https://github.com/espnet/espnet/blob/master/espnet/nets/pytorch_backend/transformer/embedding.py + + Args: + d_model: Embedding dimension. + dropout_rate: Dropout rate. + max_len: Maximum input length. 
+
+    """
+
+    def __init__(
+        self, d_model: int, dropout_rate: float, max_len: int = 5000
+    ) -> None:
+        """Construct a PositionalEncoding object."""
+        super(RelPositionalEncoding, self).__init__()
+        self.d_model = d_model
+        self.xscale = math.sqrt(self.d_model)
+        self.dropout = torch.nn.Dropout(p=dropout_rate)
+        self.pe = None
+        self.extend_pe(torch.tensor(0.0).expand(1, max_len))
+
+    def extend_pe(self, x: Tensor) -> None:
+        """Reset the positional encodings."""
+        if self.pe is not None:
+            # self.pe contains both positive and negative parts
+            # the length of self.pe is 2 * input_len - 1
+            if self.pe.size(1) >= x.size(1) * 2 - 1:
+                # Note: TorchScript doesn't implement operator== for torch.Device
+                if self.pe.dtype != x.dtype or str(self.pe.device) != str(
+                    x.device
+                ):
+                    self.pe = self.pe.to(dtype=x.dtype, device=x.device)
+                return
+        # Suppose `i` is the position of the query vector and `j` is the
+        # position of the key vector. We use positive relative positions when
+        # keys are to the left (i>j) and negative relative positions
+        # otherwise (i<j).
+        pe_positive = torch.zeros(x.size(1), self.d_model)
+        pe_negative = torch.zeros(x.size(1), self.d_model)
+        position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1)
+        div_term = torch.exp(
+            torch.arange(0, self.d_model, 2, dtype=torch.float32)
+            * -(math.log(10000.0) / self.d_model)
+        )
+        pe_positive[:, 0::2] = torch.sin(position * div_term)
+        pe_positive[:, 1::2] = torch.cos(position * div_term)
+        pe_negative[:, 0::2] = torch.sin(-1 * position * div_term)
+        pe_negative[:, 1::2] = torch.cos(-1 * position * div_term)
+
+        # Reverse the order of positive indices and concat both positive and
+        # negative indices. This is used to support the shifting trick
+        # as in "Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context"
+        pe_positive = torch.flip(pe_positive, [0]).unsqueeze(0)
+        pe_negative = pe_negative[1:].unsqueeze(0)
+        pe = torch.cat([pe_positive, pe_negative], dim=1)
+        self.pe = pe.to(device=x.device, dtype=x.dtype)
+
+    def forward(self, x: torch.Tensor) -> Tuple[Tensor, Tensor]:
+        """Add positional encoding.
+
+        Args:
+            x (torch.Tensor): Input tensor (batch, time, `*`).
+
+        Returns:
+            torch.Tensor: Encoded tensor (batch, time, `*`).
+            torch.Tensor: Encoded tensor (batch, 2*time-1, `*`).
+
+        """
+        self.extend_pe(x)
+        x = x * self.xscale
+        pos_emb = self.pe[
+            :,
+            self.pe.size(1) // 2
+            - x.size(1)
+            + 1 : self.pe.size(1) // 2  # noqa E203
+            + x.size(1),
+        ]
+        return self.dropout(x), self.dropout(pos_emb)
+
+
+class RelPositionMultiheadAttention(nn.Module):
+    r"""Multi-Head Attention layer with relative position encoding
+
+    See reference: "Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context"
+
+    Args:
+        embed_dim: total dimension of the model.
+        num_heads: parallel attention heads.
+        dropout: a Dropout layer on attn_output_weights. Default: 0.0.
+
+    Examples::
+
+        >>> rel_pos_multihead_attn = RelPositionMultiheadAttention(embed_dim, num_heads)
+        >>> attn_output, attn_output_weights = multihead_attn(query, key, value, pos_emb)
+    """
+
+    def __init__(
+        self,
+        embed_dim: int,
+        num_heads: int,
+        dropout: float = 0.0,
+    ) -> None:
+        super(RelPositionMultiheadAttention, self).__init__()
+        self.embed_dim = embed_dim
+        self.num_heads = num_heads
+        self.dropout = dropout
+        self.head_dim = embed_dim // num_heads
+        assert (
+            self.head_dim * num_heads == self.embed_dim
+        ), "embed_dim must be divisible by num_heads"
+
+        self.in_proj = nn.Linear(embed_dim, 3 * embed_dim, bias=True)
+        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=True)
+
+        # linear transformation for positional encoding.
+ self.linear_pos = nn.Linear(embed_dim, embed_dim, bias=False) + # these two learnable bias are used in matrix c and matrix d + # as described in "Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context" Section 3.3 + self.pos_bias_u = nn.Parameter(torch.Tensor(num_heads, self.head_dim)) + self.pos_bias_v = nn.Parameter(torch.Tensor(num_heads, self.head_dim)) + + self._reset_parameters() + + def _reset_parameters(self) -> None: + nn.init.xavier_uniform_(self.in_proj.weight) + nn.init.constant_(self.in_proj.bias, 0.0) + nn.init.constant_(self.out_proj.bias, 0.0) + + nn.init.xavier_uniform_(self.pos_bias_u) + nn.init.xavier_uniform_(self.pos_bias_v) + + def forward( + self, + query: Tensor, + key: Tensor, + value: Tensor, + pos_emb: Tensor, + key_padding_mask: Optional[Tensor] = None, + need_weights: bool = True, + attn_mask: Optional[Tensor] = None, + ) -> Tuple[Tensor, Optional[Tensor]]: + r""" + Args: + query, key, value: map a query and a set of key-value pairs to an output. + pos_emb: Positional embedding tensor + key_padding_mask: if provided, specified padding elements in the key will + be ignored by the attention. When given a binary mask and a value is True, + the corresponding value on the attention layer will be ignored. When given + a byte mask and a value is non-zero, the corresponding value on the attention + layer will be ignored + need_weights: output attn_output_weights. + attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all + the batches while a 3D mask allows to specify a different mask for the entries of each batch. + + Shape: + - Inputs: + - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is + the embedding dimension. + - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - pos_emb: :math:`(N, 2*L-1, E)` where L is the target sequence length, N is the batch size, E is + the embedding dimension. + - key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length. + If a ByteTensor is provided, the non-zero positions will be ignored while the position + with the zero positions will be unchanged. If a BoolTensor is provided, the positions with the + value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged. + - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length. + 3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length, + S is the source sequence length. attn_mask ensure that position i is allowed to attend the unmasked + positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend + while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True`` + is not allowed to attend while ``False`` values will be unchanged. If a FloatTensor + is provided, it will be added to the attention weight. + + - Outputs: + - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, + E is the embedding dimension. + - attn_output_weights: :math:`(N, L, S)` where N is the batch size, + L is the target sequence length, S is the source sequence length. 
+ """ + return self.multi_head_attention_forward( + query, + key, + value, + pos_emb, + self.embed_dim, + self.num_heads, + self.in_proj.weight, + self.in_proj.bias, + self.dropout, + self.out_proj.weight, + self.out_proj.bias, + training=self.training, + key_padding_mask=key_padding_mask, + need_weights=need_weights, + attn_mask=attn_mask, + ) + + def rel_shift(self, x: Tensor) -> Tensor: + """Compute relative positional encoding. + + Args: + x: Input tensor (batch, head, time1, 2*time1-1). + time1 means the length of query vector. + + Returns: + Tensor: tensor of shape (batch, head, time1, time2) + (note: time2 has the same value as time1, but it is for + the key, while time1 is for the query). + """ + (batch_size, num_heads, time1, n) = x.shape + assert n == 2 * time1 - 1 + # Note: TorchScript requires explicit arg for stride() + batch_stride = x.stride(0) + head_stride = x.stride(1) + time1_stride = x.stride(2) + n_stride = x.stride(3) + return x.as_strided( + (batch_size, num_heads, time1, time1), + (batch_stride, head_stride, time1_stride - n_stride, n_stride), + storage_offset=n_stride * (time1 - 1), + ) + + def multi_head_attention_forward( + self, + query: Tensor, + key: Tensor, + value: Tensor, + pos_emb: Tensor, + embed_dim_to_check: int, + num_heads: int, + in_proj_weight: Tensor, + in_proj_bias: Tensor, + dropout_p: float, + out_proj_weight: Tensor, + out_proj_bias: Tensor, + training: bool = True, + key_padding_mask: Optional[Tensor] = None, + need_weights: bool = True, + attn_mask: Optional[Tensor] = None, + ) -> Tuple[Tensor, Optional[Tensor]]: + r""" + Args: + query, key, value: map a query and a set of key-value pairs to an output. + pos_emb: Positional embedding tensor + embed_dim_to_check: total dimension of the model. + num_heads: parallel attention heads. + in_proj_weight, in_proj_bias: input projection weight and bias. + dropout_p: probability of an element to be zeroed. + out_proj_weight, out_proj_bias: the output projection weight and bias. + training: apply dropout if is ``True``. + key_padding_mask: if provided, specified padding elements in the key will + be ignored by the attention. This is an binary mask. When the value is True, + the corresponding value on the attention layer will be filled with -inf. + need_weights: output attn_output_weights. + attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all + the batches while a 3D mask allows to specify a different mask for the entries of each batch. + + Shape: + Inputs: + - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is + the embedding dimension. + - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - pos_emb: :math:`(N, 2*L-1, E)` or :math:`(1, 2*L-1, E)` where L is the target sequence + length, N is the batch size, E is the embedding dimension. + - key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length. + If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions + will be unchanged. If a BoolTensor is provided, the positions with the + value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged. + - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length. 
+ 3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length, + S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked + positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend + while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True`` + are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor + is provided, it will be added to the attention weight. + + Outputs: + - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, + E is the embedding dimension. + - attn_output_weights: :math:`(N, L, S)` where N is the batch size, + L is the target sequence length, S is the source sequence length. + """ + + tgt_len, bsz, embed_dim = query.size() + assert embed_dim == embed_dim_to_check + assert key.size(0) == value.size(0) and key.size(1) == value.size(1) + + head_dim = embed_dim // num_heads + assert ( + head_dim * num_heads == embed_dim + ), "embed_dim must be divisible by num_heads" + scaling = float(head_dim) ** -0.5 + + if torch.equal(query, key) and torch.equal(key, value): + # self-attention + q, k, v = nn.functional.linear( + query, in_proj_weight, in_proj_bias + ).chunk(3, dim=-1) + + elif torch.equal(key, value): + # encoder-decoder attention + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = 0 + _end = embed_dim + _w = in_proj_weight[_start:_end, :] + if _b is not None: + _b = _b[_start:_end] + q = nn.functional.linear(query, _w, _b) + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = embed_dim + _end = None + _w = in_proj_weight[_start:, :] + if _b is not None: + _b = _b[_start:] + k, v = nn.functional.linear(key, _w, _b).chunk(2, dim=-1) + + else: + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = 0 + _end = embed_dim + _w = in_proj_weight[_start:_end, :] + if _b is not None: + _b = _b[_start:_end] + q = nn.functional.linear(query, _w, _b) + + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = embed_dim + _end = embed_dim * 2 + _w = in_proj_weight[_start:_end, :] + if _b is not None: + _b = _b[_start:_end] + k = nn.functional.linear(key, _w, _b) + + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = embed_dim * 2 + _end = None + _w = in_proj_weight[_start:, :] + if _b is not None: + _b = _b[_start:] + v = nn.functional.linear(value, _w, _b) + + if attn_mask is not None: + assert ( + attn_mask.dtype == torch.float32 + or attn_mask.dtype == torch.float64 + or attn_mask.dtype == torch.float16 + or attn_mask.dtype == torch.uint8 + or attn_mask.dtype == torch.bool + ), "Only float, byte, and bool types are supported for attn_mask, not {}".format( + attn_mask.dtype + ) + if attn_mask.dtype == torch.uint8: + warnings.warn( + "Byte tensor for attn_mask is deprecated. Use bool tensor instead." + ) + attn_mask = attn_mask.to(torch.bool) + + if attn_mask.dim() == 2: + attn_mask = attn_mask.unsqueeze(0) + if list(attn_mask.size()) != [1, query.size(0), key.size(0)]: + raise RuntimeError( + "The size of the 2D attn_mask is not correct." + ) + elif attn_mask.dim() == 3: + if list(attn_mask.size()) != [ + bsz * num_heads, + query.size(0), + key.size(0), + ]: + raise RuntimeError( + "The size of the 3D attn_mask is not correct." 
+ ) + else: + raise RuntimeError( + "attn_mask's dimension {} is not supported".format( + attn_mask.dim() + ) + ) + # attn_mask's dim is 3 now. + + # convert ByteTensor key_padding_mask to bool + if ( + key_padding_mask is not None + and key_padding_mask.dtype == torch.uint8 + ): + warnings.warn( + "Byte tensor for key_padding_mask is deprecated. Use bool tensor instead." + ) + key_padding_mask = key_padding_mask.to(torch.bool) + + q = q.contiguous().view(tgt_len, bsz, num_heads, head_dim) + k = k.contiguous().view(-1, bsz, num_heads, head_dim) + v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1) + + src_len = k.size(0) + + if key_padding_mask is not None: + assert key_padding_mask.size(0) == bsz, "{} == {}".format( + key_padding_mask.size(0), bsz + ) + assert key_padding_mask.size(1) == src_len, "{} == {}".format( + key_padding_mask.size(1), src_len + ) + + q = q.transpose(0, 1) # (batch, time1, head, d_k) + + pos_emb_bsz = pos_emb.size(0) + assert pos_emb_bsz in (1, bsz) # actually it is 1 + p = self.linear_pos(pos_emb).view(pos_emb_bsz, -1, num_heads, head_dim) + p = p.transpose(1, 2) # (batch, head, 2*time1-1, d_k) + + q_with_bias_u = (q + self.pos_bias_u).transpose( + 1, 2 + ) # (batch, head, time1, d_k) + + q_with_bias_v = (q + self.pos_bias_v).transpose( + 1, 2 + ) # (batch, head, time1, d_k) + + # compute attention score + # first compute matrix a and matrix c + # as described in "Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context" Section 3.3 + k = k.permute(1, 2, 3, 0) # (batch, head, d_k, time2) + matrix_ac = torch.matmul( + q_with_bias_u, k + ) # (batch, head, time1, time2) + + # compute matrix b and matrix d + matrix_bd = torch.matmul( + q_with_bias_v, p.transpose(-2, -1) + ) # (batch, head, time1, 2*time1-1) + matrix_bd = self.rel_shift(matrix_bd) + + attn_output_weights = ( + matrix_ac + matrix_bd + ) * scaling # (batch, head, time1, time2) + + attn_output_weights = attn_output_weights.view( + bsz * num_heads, tgt_len, -1 + ) + + assert list(attn_output_weights.size()) == [ + bsz * num_heads, + tgt_len, + src_len, + ] + + if attn_mask is not None: + if attn_mask.dtype == torch.bool: + attn_output_weights.masked_fill_(attn_mask, float("-inf")) + else: + attn_output_weights += attn_mask + + if key_padding_mask is not None: + attn_output_weights = attn_output_weights.view( + bsz, num_heads, tgt_len, src_len + ) + attn_output_weights = attn_output_weights.masked_fill( + key_padding_mask.unsqueeze(1).unsqueeze(2), + float("-inf"), + ) + attn_output_weights = attn_output_weights.view( + bsz * num_heads, tgt_len, src_len + ) + + attn_output_weights = nn.functional.softmax(attn_output_weights, dim=-1) + attn_output_weights = nn.functional.dropout( + attn_output_weights, p=dropout_p, training=training + ) + + attn_output = torch.bmm(attn_output_weights, v) + assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim] + attn_output = ( + attn_output.transpose(0, 1) + .contiguous() + .view(tgt_len, bsz, embed_dim) + ) + attn_output = nn.functional.linear( + attn_output, out_proj_weight, out_proj_bias + ) + + if need_weights: + # average attention weights over heads + attn_output_weights = attn_output_weights.view( + bsz, num_heads, tgt_len, src_len + ) + return attn_output, attn_output_weights.sum(dim=1) / num_heads + else: + return attn_output, None + + +class ConvolutionModule(nn.Module): + """ConvolutionModule in Conformer model. 
+    Modified from https://github.com/espnet/espnet/blob/master/espnet/nets/pytorch_backend/conformer/convolution.py
+
+    Args:
+        channels (int): The number of channels of conv layers.
+        kernel_size (int): Kernel size of conv layers.
+        bias (bool): Whether to use bias in conv layers (default=True).
+
+    """
+
+    def __init__(
+        self, channels: int, kernel_size: int, bias: bool = True
+    ) -> None:
+        """Construct a ConvolutionModule object."""
+        super(ConvolutionModule, self).__init__()
+        # kernel_size should be an odd number for 'SAME' padding
+        assert (kernel_size - 1) % 2 == 0
+
+        self.pointwise_conv1 = nn.Conv1d(
+            channels,
+            2 * channels,
+            kernel_size=1,
+            stride=1,
+            padding=0,
+            bias=bias,
+        )
+        self.depthwise_conv = nn.Conv1d(
+            channels,
+            channels,
+            kernel_size,
+            stride=1,
+            padding=(kernel_size - 1) // 2,
+            groups=channels,
+            bias=bias,
+        )
+        self.norm = nn.LayerNorm(channels)
+        self.pointwise_conv2 = nn.Conv1d(
+            channels,
+            channels,
+            kernel_size=1,
+            stride=1,
+            padding=0,
+            bias=bias,
+        )
+        self.activation = Swish()
+
+    def forward(self, x: Tensor) -> Tensor:
+        """Compute convolution module.
+
+        Args:
+            x: Input tensor (#time, batch, channels).
+
+        Returns:
+            Tensor: Output tensor (#time, batch, channels).
+
+        """
+        # exchange the temporal dimension and the feature dimension
+        x = x.permute(1, 2, 0)  # (#batch, channels, time).
+
+        # GLU mechanism
+        x = self.pointwise_conv1(x)  # (batch, 2*channels, time)
+        x = nn.functional.glu(x, dim=1)  # (batch, channels, time)
+
+        # 1D Depthwise Conv
+        x = self.depthwise_conv(x)
+        # x is (batch, channels, time)
+        x = x.permute(0, 2, 1)
+        x = self.norm(x)
+        x = x.permute(0, 2, 1)
+
+        x = self.activation(x)
+
+        x = self.pointwise_conv2(x)  # (batch, channel, time)
+
+        return x.permute(2, 0, 1)
+
+
+class Swish(torch.nn.Module):
+    """Construct a Swish object."""
+
+    def forward(self, x: Tensor) -> Tensor:
+        """Return the Swish activation of the input."""
+        return x * torch.sigmoid(x)
+
+
+def identity(x):
+    return x
diff --git a/egs/librispeech/ASR/transducer_stateless_multi_datasets/decode.py b/egs/librispeech/ASR/transducer_stateless_multi_datasets/decode.py
new file mode 100755
index 000000000..136afe9c0
--- /dev/null
+++ b/egs/librispeech/ASR/transducer_stateless_multi_datasets/decode.py
@@ -0,0 +1,490 @@
+#!/usr/bin/env python3
+#
+# Copyright 2021 Xiaomi Corporation (Author: Fangjun Kuang)
+#
+# See ../../../../LICENSE for clarification regarding multiple authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
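The Conformer building blocks defined above in conformer.py fit together as sketched below. This is only a shape check under assumed sizes (batch of 8, 100 frames, a 256-dim model) and uses nothing beyond conformer.py and PyTorch:

    import torch

    d_model, nhead = 256, 4
    pos_enc = RelPositionalEncoding(d_model, dropout_rate=0.1)
    encoder = ConformerEncoder(
        ConformerEncoderLayer(d_model, nhead), num_layers=2
    )

    x = torch.rand(8, 100, d_model)  # (N, T, C), e.g. subsampled fbank frames
    x, pos_emb = pos_enc(x)          # x: (N, T, C), pos_emb: (1, 2*T-1, C)
    x = x.permute(1, 0, 2)           # (T, N, C), the layout the encoder expects
    out = encoder(x, pos_emb)        # (T, N, C), same shape as the input
    assert out.shape == (100, 8, d_model)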
+""" +Usage: +(1) greedy search +./transducer_stateless_multi_datasets/decode.py \ + --epoch 14 \ + --avg 7 \ + --exp-dir ./transducer_stateless_multi_datasets/exp \ + --max-duration 100 \ + --decoding-method greedy_search + +(2) beam search +./transducer_stateless_multi_datasets/decode.py \ + --epoch 14 \ + --avg 7 \ + --exp-dir ./transducer_stateless_multi_datasets/exp \ + --max-duration 100 \ + --decoding-method beam_search \ + --beam-size 4 +""" + + +import argparse +import logging +from collections import defaultdict +from pathlib import Path +from typing import Dict, List, Tuple + +import sentencepiece as spm +import torch +import torch.nn as nn +from asr_datamodule import AsrDataModule +from beam_search import beam_search, greedy_search, modified_beam_search +from conformer import Conformer +from decoder import Decoder +from joiner import Joiner +from librispeech import LibriSpeech +from model import Transducer + +from icefall.checkpoint import average_checkpoints, load_checkpoint +from icefall.env import get_env_info +from icefall.utils import ( + AttributeDict, + setup_logger, + store_transcripts, + write_error_stats, +) + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--epoch", + type=int, + default=29, + help="It specifies the checkpoint to use for decoding." + "Note: Epoch counts from 0.", + ) + parser.add_argument( + "--avg", + type=int, + default=13, + help="Number of checkpoints to average. Automatically select " + "consecutive checkpoints before the checkpoint specified by " + "'--epoch'. ", + ) + + parser.add_argument( + "--exp-dir", + type=str, + default="transducer_stateless_multi_datasets/exp", + help="The experiment dir", + ) + + parser.add_argument( + "--bpe-model", + type=str, + default="data/lang_bpe_500/bpe.model", + help="Path to the BPE model", + ) + + parser.add_argument( + "--decoding-method", + type=str, + default="greedy_search", + help="""Possible values are: + - greedy_search + - beam_search + - modified_beam_search + """, + ) + + parser.add_argument( + "--beam-size", + type=int, + default=4, + help="""Used only when --decoding-method is + beam_search or modified_beam_search""", + ) + + parser.add_argument( + "--context-size", + type=int, + default=2, + help="The context size in the decoder. 1 means bigram; " + "2 means tri-gram", + ) + parser.add_argument( + "--max-sym-per-frame", + type=int, + default=3, + help="""Maximum number of symbols per frame. 
+ Used only when --decoding_method is greedy_search""", + ) + + return parser + + +def get_params() -> AttributeDict: + params = AttributeDict( + { + # parameters for conformer + "feature_dim": 80, + "encoder_out_dim": 512, + "subsampling_factor": 4, + "attention_dim": 512, + "nhead": 8, + "dim_feedforward": 2048, + "num_encoder_layers": 12, + "vgg_frontend": False, + "env_info": get_env_info(), + } + ) + return params + + +def get_encoder_model(params: AttributeDict): + # TODO: We can add an option to switch between Conformer and Transformer + encoder = Conformer( + num_features=params.feature_dim, + output_dim=params.encoder_out_dim, + subsampling_factor=params.subsampling_factor, + d_model=params.attention_dim, + nhead=params.nhead, + dim_feedforward=params.dim_feedforward, + num_encoder_layers=params.num_encoder_layers, + vgg_frontend=params.vgg_frontend, + ) + return encoder + + +def get_decoder_model(params: AttributeDict): + decoder = Decoder( + vocab_size=params.vocab_size, + embedding_dim=params.encoder_out_dim, + blank_id=params.blank_id, + context_size=params.context_size, + ) + return decoder + + +def get_joiner_model(params: AttributeDict): + joiner = Joiner( + input_dim=params.encoder_out_dim, + output_dim=params.vocab_size, + ) + return joiner + + +def get_transducer_model(params: AttributeDict): + encoder = get_encoder_model(params) + decoder = get_decoder_model(params) + joiner = get_joiner_model(params) + + model = Transducer( + encoder=encoder, + decoder=decoder, + joiner=joiner, + ) + + return model + + +def decode_one_batch( + params: AttributeDict, + model: nn.Module, + sp: spm.SentencePieceProcessor, + batch: dict, +) -> Dict[str, List[List[str]]]: + """Decode one batch and return the result in a dict. The dict has the + following format: + + - key: It indicates the setting used for decoding. For example, + if greedy_search is used, it would be "greedy_search" + If beam search with a beam size of 7 is used, it would be + "beam_7" + - value: It contains the decoding result. `len(value)` equals to + batch size. `value[i]` is the decoding result for the i-th + utterance in the given batch. + Args: + params: + It's the return value of :func:`get_params`. + model: + The neural model. + sp: + The BPE model. + batch: + It is the return value from iterating + `lhotse.dataset.K2SpeechRecognitionDataset`. See its documentation + for the format of the `batch`. + Returns: + Return the decoding result. See above description for the format of + the returned dict. 
+ """ + device = model.device + feature = batch["inputs"] + assert feature.ndim == 3 + + feature = feature.to(device) + # at entry, feature is (N, T, C) + + supervisions = batch["supervisions"] + feature_lens = supervisions["num_frames"].to(device) + + encoder_out, encoder_out_lens = model.encoder( + x=feature, x_lens=feature_lens + ) + hyps = [] + batch_size = encoder_out.size(0) + + for i in range(batch_size): + # fmt: off + encoder_out_i = encoder_out[i:i+1, :encoder_out_lens[i]] + # fmt: on + if params.decoding_method == "greedy_search": + hyp = greedy_search( + model=model, + encoder_out=encoder_out_i, + max_sym_per_frame=params.max_sym_per_frame, + ) + elif params.decoding_method == "beam_search": + hyp = beam_search( + model=model, encoder_out=encoder_out_i, beam=params.beam_size + ) + elif params.decoding_method == "modified_beam_search": + hyp = modified_beam_search( + model=model, encoder_out=encoder_out_i, beam=params.beam_size + ) + else: + raise ValueError( + f"Unsupported decoding method: {params.decoding_method}" + ) + hyps.append(sp.decode(hyp).split()) + + if params.decoding_method == "greedy_search": + return {"greedy_search": hyps} + else: + return {f"beam_{params.beam_size}": hyps} + + +def decode_dataset( + dl: torch.utils.data.DataLoader, + params: AttributeDict, + model: nn.Module, + sp: spm.SentencePieceProcessor, +) -> Dict[str, List[Tuple[List[str], List[str]]]]: + """Decode dataset. + + Args: + dl: + PyTorch's dataloader containing the dataset to decode. + params: + It is returned by :func:`get_params`. + model: + The neural model. + sp: + The BPE model. + Returns: + Return a dict, whose key may be "greedy_search" if greedy search + is used, or it may be "beam_7" if beam size of 7 is used. + Its value is a list of tuples. Each tuple contains two elements: + The first is the reference transcript, and the second is the + predicted result. + """ + num_cuts = 0 + + try: + num_batches = len(dl) + except TypeError: + num_batches = "?" + + if params.decoding_method == "greedy_search": + log_interval = 100 + else: + log_interval = 2 + + results = defaultdict(list) + for batch_idx, batch in enumerate(dl): + texts = batch["supervisions"]["text"] + + hyps_dict = decode_one_batch( + params=params, + model=model, + sp=sp, + batch=batch, + ) + + for name, hyps in hyps_dict.items(): + this_batch = [] + assert len(hyps) == len(texts) + for hyp_words, ref_text in zip(hyps, texts): + ref_words = ref_text.split() + this_batch.append((ref_words, hyp_words)) + + results[name].extend(this_batch) + + num_cuts += len(texts) + + if batch_idx % log_interval == 0: + batch_str = f"{batch_idx}/{num_batches}" + + logging.info( + f"batch {batch_str}, cuts processed until now is {num_cuts}" + ) + return results + + +def save_results( + params: AttributeDict, + test_set_name: str, + results_dict: Dict[str, List[Tuple[List[int], List[int]]]], +): + test_set_wers = dict() + for key, results in results_dict.items(): + recog_path = ( + params.res_dir / f"recogs-{test_set_name}-{key}-{params.suffix}.txt" + ) + store_transcripts(filename=recog_path, texts=results) + logging.info(f"The transcripts are stored in {recog_path}") + + # The following prints out WERs, per-word error statistics and aligned + # ref/hyp pairs. 
+        errs_filename = (
+            params.res_dir / f"errs-{test_set_name}-{key}-{params.suffix}.txt"
+        )
+        with open(errs_filename, "w") as f:
+            wer = write_error_stats(
+                f, f"{test_set_name}-{key}", results, enable_log=True
+            )
+            test_set_wers[key] = wer
+
+        logging.info("Wrote detailed error stats to {}".format(errs_filename))
+
+    test_set_wers = sorted(test_set_wers.items(), key=lambda x: x[1])
+    errs_info = (
+        params.res_dir
+        / f"wer-summary-{test_set_name}-{key}-{params.suffix}.txt"
+    )
+    with open(errs_info, "w") as f:
+        print("settings\tWER", file=f)
+        for key, val in test_set_wers:
+            print("{}\t{}".format(key, val), file=f)
+
+    s = "\nFor {}, WER of different settings are:\n".format(test_set_name)
+    note = "\tbest for {}".format(test_set_name)
+    for key, val in test_set_wers:
+        s += "{}\t{}{}\n".format(key, val, note)
+        note = ""
+    logging.info(s)
+
+
+@torch.no_grad()
+def main():
+    parser = get_parser()
+    AsrDataModule.add_arguments(parser)
+    args = parser.parse_args()
+    args.exp_dir = Path(args.exp_dir)
+
+    params = get_params()
+    params.update(vars(args))
+
+    assert params.decoding_method in (
+        "greedy_search",
+        "beam_search",
+        "modified_beam_search",
+    )
+    params.res_dir = params.exp_dir / params.decoding_method
+
+    params.suffix = f"epoch-{params.epoch}-avg-{params.avg}"
+    if "beam_search" in params.decoding_method:
+        params.suffix += f"-beam-{params.beam_size}"
+    else:
+        params.suffix += f"-context-{params.context_size}"
+        params.suffix += f"-max-sym-per-frame-{params.max_sym_per_frame}"
+
+    setup_logger(f"{params.res_dir}/log-decode-{params.suffix}")
+    logging.info("Decoding started")
+
+    device = torch.device("cpu")
+    if torch.cuda.is_available():
+        device = torch.device("cuda", 0)
+
+    logging.info(f"Device: {device}")
+
+    sp = spm.SentencePieceProcessor()
+    sp.load(params.bpe_model)
+
+    # <blk> is defined in local/train_bpe_model.py
+    params.blank_id = sp.piece_to_id("<blk>")
+    params.vocab_size = sp.get_piece_size()
+
+    logging.info(params)
+
+    logging.info("About to create model")
+    model = get_transducer_model(params)
+
+    if params.avg == 1:
+        load_checkpoint(f"{params.exp_dir}/epoch-{params.epoch}.pt", model)
+    else:
+        start = params.epoch - params.avg + 1
+        filenames = []
+        for i in range(start, params.epoch + 1):
+            if start >= 0:
+                filenames.append(f"{params.exp_dir}/epoch-{i}.pt")
+        logging.info(f"averaging {filenames}")
+        model.to(device)
+        model.load_state_dict(
+            average_checkpoints(filenames, device=device), strict=False
+        )
+
+    model.to(device)
+    model.eval()
+    model.device = device
+
+    num_param = sum([p.numel() for p in model.parameters()])
+    logging.info(f"Number of model parameters: {num_param}")
+
+    asr_datamodule = AsrDataModule(args)
+    librispeech = LibriSpeech(manifest_dir=args.manifest_dir)
+
+    test_clean_cuts = librispeech.test_clean_cuts()
+    test_other_cuts = librispeech.test_other_cuts()
+
+    test_clean_dl = asr_datamodule.test_dataloaders(test_clean_cuts)
+    test_other_dl = asr_datamodule.test_dataloaders(test_other_cuts)
+
+    test_sets = ["test-clean", "test-other"]
+    test_dl = [test_clean_dl, test_other_dl]
+
+    for test_set, test_dl in zip(test_sets, test_dl):
+        results_dict = decode_dataset(
+            dl=test_dl,
+            params=params,
+            model=model,
+            sp=sp,
+        )
+
+        save_results(
+            params=params,
+            test_set_name=test_set,
+            results_dict=results_dict,
+        )
+
+    logging.info("Done!")
+
+
+torch.set_num_threads(1)
+torch.set_num_interop_threads(1)
+
+if __name__ == "__main__":
+    main()
diff --git a/egs/librispeech/ASR/transducer_stateless_multi_datasets/decoder.py
b/egs/librispeech/ASR/transducer_stateless_multi_datasets/decoder.py new file mode 100644 index 000000000..b82fed37b --- /dev/null +++ b/egs/librispeech/ASR/transducer_stateless_multi_datasets/decoder.py @@ -0,0 +1,98 @@ +# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class Decoder(nn.Module): + """This class modifies the stateless decoder from the following paper: + + RNN-transducer with stateless prediction network + https://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=9054419 + + It removes the recurrent connection from the decoder, i.e., the prediction + network. Different from the above paper, it adds an extra Conv1d + right after the embedding layer. + + TODO: Implement https://arxiv.org/pdf/2109.07513.pdf + """ + + def __init__( + self, + vocab_size: int, + embedding_dim: int, + blank_id: int, + context_size: int, + ): + """ + Args: + vocab_size: + Number of tokens of the modeling unit including blank. + embedding_dim: + Dimension of the input embedding. + blank_id: + The ID of the blank symbol. + context_size: + Number of previous words to use to predict the next word. + 1 means bigram; 2 means trigram. n means (n+1)-gram. + """ + super().__init__() + self.embedding = nn.Embedding( + num_embeddings=vocab_size, + embedding_dim=embedding_dim, + padding_idx=blank_id, + ) + self.blank_id = blank_id + + assert context_size >= 1, context_size + self.context_size = context_size + if context_size > 1: + self.conv = nn.Conv1d( + in_channels=embedding_dim, + out_channels=embedding_dim, + kernel_size=context_size, + padding=0, + groups=embedding_dim, + bias=False, + ) + + def forward(self, y: torch.Tensor, need_pad: bool = True) -> torch.Tensor: + """ + Args: + y: + A 2-D tensor of shape (N, U). + need_pad: + True to left pad the input. Should be True during training. + False to not pad the input. Should be False during inference. + Returns: + Return a tensor of shape (N, U, embedding_dim). + """ + embedding_out = self.embedding(y) + if self.context_size > 1: + embedding_out = embedding_out.permute(0, 2, 1) + if need_pad is True: + embedding_out = F.pad( + embedding_out, pad=(self.context_size - 1, 0) + ) + else: + # During inference time, there is no need to do extra padding + # as we only need one output + assert embedding_out.size(-1) == self.context_size + embedding_out = self.conv(embedding_out) + embedding_out = embedding_out.permute(0, 2, 1) + return embedding_out diff --git a/egs/librispeech/ASR/transducer_stateless_multi_datasets/encoder_interface.py b/egs/librispeech/ASR/transducer_stateless_multi_datasets/encoder_interface.py new file mode 100644 index 000000000..257facce4 --- /dev/null +++ b/egs/librispeech/ASR/transducer_stateless_multi_datasets/encoder_interface.py @@ -0,0 +1,43 @@ +# Copyright 2021 Xiaomi Corp. 
(authors: Fangjun Kuang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Tuple + +import torch +import torch.nn as nn + + +class EncoderInterface(nn.Module): + def forward( + self, x: torch.Tensor, x_lens: torch.Tensor + ) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Args: + x: + A tensor of shape (batch_size, input_seq_len, num_features) + containing the input features. + x_lens: + A tensor of shape (batch_size,) containing the number of frames + in `x` before padding. + Returns: + Return a tuple containing two tensors: + - encoder_out, a tensor of (batch_size, out_seq_len, output_dim) + containing unnormalized probabilities, i.e., the output of a + linear layer. + - encoder_out_lens, a tensor of shape (batch_size,) containing + the number of frames in `encoder_out` before padding. + """ + raise NotImplementedError("Please implement it in a subclass") diff --git a/egs/librispeech/ASR/transducer_stateless_multi_datasets/export.py b/egs/librispeech/ASR/transducer_stateless_multi_datasets/export.py new file mode 100755 index 000000000..7d14d011d --- /dev/null +++ b/egs/librispeech/ASR/transducer_stateless_multi_datasets/export.py @@ -0,0 +1,252 @@ +#!/usr/bin/env python3 +# +# Copyright 2021 Xiaomi Corporation (Author: Fangjun Kuang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script converts several saved checkpoints +# to a single one using model averaging. 
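Model averaging here is essentially an element-wise average of the per-epoch "model" state dicts; the actual work is done by icefall.checkpoint.average_checkpoints, imported below. A rough standalone sketch of the idea, assuming each epoch-*.pt stores its weights under a "model" key (the same layout this script uses when it writes pretrained.pt):

    import torch

    def average_state_dicts(filenames, device="cpu"):
        n = len(filenames)
        avg = torch.load(filenames[0], map_location=device)["model"]
        for f in filenames[1:]:
            state = torch.load(f, map_location=device)["model"]
            for k in avg:
                avg[k] += state[k]
        for k in avg:
            if avg[k].is_floating_point():
                avg[k] /= n
            else:
                # integer tensors (e.g. counters) fall back to floor division
                avg[k] //= n
        return avg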
+""" +Usage: +./transducer_stateless_multi_datasets/export.py \ + --exp-dir ./transducer_stateless_multi_datasets/exp \ + --bpe-model data/lang_bpe_500/bpe.model \ + --epoch 20 \ + --avg 10 + +It will generate a file exp_dir/pretrained.pt + +To use the generated file with `transducer_stateless_multi_datasets/decode.py`, +you can do:: + + cd /path/to/exp_dir + ln -s pretrained.pt epoch-9999.pt + + cd /path/to/egs/librispeech/ASR + ./transducer_stateless_multi_datasets/decode.py \ + --exp-dir ./transducer_stateless_multi_datasets/exp \ + --epoch 9999 \ + --avg 1 \ + --max-duration 1 \ + --bpe-model data/lang_bpe_500/bpe.model +""" + +import argparse +import logging +from pathlib import Path + +import sentencepiece as spm +import torch +import torch.nn as nn +from conformer import Conformer +from decoder import Decoder +from joiner import Joiner +from model import Transducer + +from icefall.checkpoint import average_checkpoints, load_checkpoint +from icefall.env import get_env_info +from icefall.utils import AttributeDict, str2bool + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--epoch", + type=int, + default=20, + help="It specifies the checkpoint to use for decoding." + "Note: Epoch counts from 0.", + ) + + parser.add_argument( + "--avg", + type=int, + default=10, + help="Number of checkpoints to average. Automatically select " + "consecutive checkpoints before the checkpoint specified by " + "'--epoch'. ", + ) + + parser.add_argument( + "--exp-dir", + type=str, + default="transducer_stateless_multi_datasets/exp", + help="""It specifies the directory where all training related + files, e.g., checkpoints, log, etc, are saved + """, + ) + + parser.add_argument( + "--bpe-model", + type=str, + default="data/lang_bpe_500/bpe.model", + help="Path to the BPE model", + ) + + parser.add_argument( + "--jit", + type=str2bool, + default=False, + help="""True to save a model after applying torch.jit.script. + """, + ) + + parser.add_argument( + "--context-size", + type=int, + default=2, + help="The context size in the decoder. 
1 means bigram; " + "2 means tri-gram", + ) + + return parser + + +def get_params() -> AttributeDict: + params = AttributeDict( + { + # parameters for conformer + "feature_dim": 80, + "encoder_out_dim": 512, + "subsampling_factor": 4, + "attention_dim": 512, + "nhead": 8, + "dim_feedforward": 2048, + "num_encoder_layers": 12, + "vgg_frontend": False, + "env_info": get_env_info(), + } + ) + return params + + +def get_encoder_model(params: AttributeDict) -> nn.Module: + encoder = Conformer( + num_features=params.feature_dim, + output_dim=params.encoder_out_dim, + subsampling_factor=params.subsampling_factor, + d_model=params.attention_dim, + nhead=params.nhead, + dim_feedforward=params.dim_feedforward, + num_encoder_layers=params.num_encoder_layers, + vgg_frontend=params.vgg_frontend, + ) + return encoder + + +def get_decoder_model(params: AttributeDict) -> nn.Module: + decoder = Decoder( + vocab_size=params.vocab_size, + embedding_dim=params.encoder_out_dim, + blank_id=params.blank_id, + context_size=params.context_size, + ) + return decoder + + +def get_joiner_model(params: AttributeDict) -> nn.Module: + joiner = Joiner( + input_dim=params.encoder_out_dim, + output_dim=params.vocab_size, + ) + return joiner + + +def get_transducer_model(params: AttributeDict) -> nn.Module: + encoder = get_encoder_model(params) + decoder = get_decoder_model(params) + joiner = get_joiner_model(params) + + model = Transducer( + encoder=encoder, + decoder=decoder, + joiner=joiner, + ) + return model + + +def main(): + args = get_parser().parse_args() + args.exp_dir = Path(args.exp_dir) + + assert args.jit is False, "Support torchscript will be added later" + + params = get_params() + params.update(vars(args)) + + device = torch.device("cpu") + if torch.cuda.is_available(): + device = torch.device("cuda", 0) + + logging.info(f"device: {device}") + + sp = spm.SentencePieceProcessor() + sp.load(params.bpe_model) + + # is defined in local/train_bpe_model.py + params.blank_id = sp.piece_to_id("") + params.vocab_size = sp.get_piece_size() + + logging.info(params) + + logging.info("About to create model") + model = get_transducer_model(params) + + model.to(device) + + if params.avg == 1: + load_checkpoint(f"{params.exp_dir}/epoch-{params.epoch}.pt", model) + else: + start = params.epoch - params.avg + 1 + filenames = [] + for i in range(start, params.epoch + 1): + if start >= 0: + filenames.append(f"{params.exp_dir}/epoch-{i}.pt") + logging.info(f"averaging {filenames}") + model.to(device) + model.load_state_dict( + average_checkpoints(filenames, device=device), strict=False + ) + + model.eval() + + model.to("cpu") + model.eval() + + if params.jit: + logging.info("Using torch.jit.script") + model = torch.jit.script(model) + filename = params.exp_dir / "cpu_jit.pt" + model.save(str(filename)) + logging.info(f"Saved to {filename}") + else: + logging.info("Not using torch.jit.script") + # Save it using a format so that it can be loaded + # by :func:`load_checkpoint` + filename = params.exp_dir / "pretrained.pt" + torch.save({"model": model.state_dict()}, str(filename)) + logging.info(f"Saved to {filename}") + + +if __name__ == "__main__": + formatter = ( + "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s" + ) + + logging.basicConfig(format=formatter, level=logging.INFO) + main() diff --git a/egs/librispeech/ASR/transducer_stateless_multi_datasets/gigaspeech.py b/egs/librispeech/ASR/transducer_stateless_multi_datasets/gigaspeech.py new file mode 100644 index 000000000..286771d7d --- /dev/null +++ 
b/egs/librispeech/ASR/transducer_stateless_multi_datasets/gigaspeech.py @@ -0,0 +1,75 @@ +# Copyright 2021 Piotr Żelasko +# 2022 Xiaomi Corp. (authors: Fangjun Kuang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import logging +from pathlib import Path + +from lhotse import CutSet, load_manifest + + +class GigaSpeech: + def __init__(self, manifest_dir: str): + """ + Args: + manifest_dir: + It is expected to contain the following files:: + + - cuts_XL_raw.jsonl.gz + - cuts_L_raw.jsonl.gz + - cuts_M_raw.jsonl.gz + - cuts_S_raw.jsonl.gz + - cuts_XS_raw.jsonl.gz + - cuts_DEV_raw.jsonl.gz + - cuts_TEST_raw.jsonl.gz + """ + self.manifest_dir = Path(manifest_dir) + + def train_XL_cuts(self) -> CutSet: + f = self.manifest_dir / "cuts_XL_raw.jsonl.gz" + logging.info(f"About to get train-XL cuts from {f}") + return CutSet.from_jsonl_lazy(f) + + def train_L_cuts(self) -> CutSet: + f = self.manifest_dir / "cuts_L_raw.jsonl.gz" + logging.info(f"About to get train-L cuts from {f}") + return CutSet.from_jsonl_lazy(f) + + def train_M_cuts(self) -> CutSet: + f = self.manifest_dir / "cuts_M_raw.jsonl.gz" + logging.info(f"About to get train-M cuts from {f}") + return CutSet.from_jsonl_lazy(f) + + def train_S_cuts(self) -> CutSet: + f = self.manifest_dir / "cuts_S_raw.jsonl.gz" + logging.info(f"About to get train-S cuts from {f}") + return CutSet.from_jsonl_lazy(f) + + def train_XS_cuts(self) -> CutSet: + f = self.manifest_dir / "cuts_XS_raw.jsonl.gz" + logging.info(f"About to get train-XS cuts from {f}") + return CutSet.from_jsonl_lazy(f) + + def test_cuts(self) -> CutSet: + f = self.manifest_dir / "cuts_TEST.jsonl.gz" + logging.info(f"About to get TEST cuts from {f}") + return load_manifest(f) + + def dev_cuts(self) -> CutSet: + f = self.manifest_dir / "cuts_DEV.jsonl.gz" + logging.info(f"About to get DEV cuts from {f}") + return load_manifest(f) diff --git a/egs/librispeech/ASR/transducer_stateless_multi_datasets/joiner.py b/egs/librispeech/ASR/transducer_stateless_multi_datasets/joiner.py new file mode 100644 index 000000000..9fd9da4f1 --- /dev/null +++ b/egs/librispeech/ASR/transducer_stateless_multi_datasets/joiner.py @@ -0,0 +1,72 @@ +# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
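+
+# Note: the Joiner below combines the encoder output (N, T, C) and the
+# decoder output (N, U, C) with a per-utterance broadcast sum, using
+# encoder_out_len and decoder_out_len to drop padded positions, and
+# returns flattened logits of shape (sum_all_TU, vocab_size), the
+# layout consumed by optimized_transducer.transducer_loss() in model.py.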
+ +import torch +import torch.nn as nn + + +class Joiner(nn.Module): + def __init__(self, input_dim: int, output_dim: int): + super().__init__() + + self.input_dim = input_dim + self.output_dim = output_dim + self.output_linear = nn.Linear(input_dim, output_dim) + + def forward( + self, + encoder_out: torch.Tensor, + decoder_out: torch.Tensor, + encoder_out_len: torch.Tensor, + decoder_out_len: torch.Tensor, + ) -> torch.Tensor: + """ + Args: + encoder_out: + Output from the encoder. Its shape is (N, T, self.input_dim). + decoder_out: + Output from the decoder. Its shape is (N, U, self.input_dim). + Returns: + Return a tensor of shape (sum_all_TU, self.output_dim). + """ + assert encoder_out.ndim == decoder_out.ndim == 3 + assert encoder_out.size(0) == decoder_out.size(0) + assert encoder_out.size(2) == self.input_dim + assert decoder_out.size(2) == self.input_dim + + N = encoder_out.size(0) + + encoder_out_list = [ + encoder_out[i, : encoder_out_len[i], :] for i in range(N) + ] + + decoder_out_list = [ + decoder_out[i, : decoder_out_len[i], :] for i in range(N) + ] + + x = [ + e.unsqueeze(1) + d.unsqueeze(0) + for e, d in zip(encoder_out_list, decoder_out_list) + ] + + x = [p.reshape(-1, self.input_dim) for p in x] + x = torch.cat(x) + + activations = torch.tanh(x) + + logits = self.output_linear(activations) + + return logits diff --git a/egs/librispeech/ASR/transducer_stateless_multi_datasets/librispeech.py b/egs/librispeech/ASR/transducer_stateless_multi_datasets/librispeech.py new file mode 100644 index 000000000..00b7c8334 --- /dev/null +++ b/egs/librispeech/ASR/transducer_stateless_multi_datasets/librispeech.py @@ -0,0 +1,74 @@ +# Copyright 2021 Piotr Żelasko +# 2022 Xiaomi Corp. (authors: Fangjun Kuang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
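+
+# Typical usage (mirrors train.py in this directory):
+#
+#   librispeech = LibriSpeech(manifest_dir=args.manifest_dir)
+#   train_cuts = librispeech.train_clean_100_cuts()
+#   train_cuts += librispeech.train_clean_360_cuts()
+#   train_cuts += librispeech.train_other_500_cuts()
+#
+# where args.manifest_dir comes from the AsrDataModule command-line
+# arguments.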
+ +import logging +from pathlib import Path + +from lhotse import CutSet, load_manifest + + +class LibriSpeech: + def __init__(self, manifest_dir: str): + """ + Args: + manifest_dir: + It is expected to contain the following files:: + + - cuts_dev-clean.json.gz + - cuts_dev-other.json.gz + - cuts_test-clean.json.gz + - cuts_test-other.json.gz + - cuts_train-clean-100.json.gz + - cuts_train-clean-360.json.gz + - cuts_train-other-500.json.gz + """ + self.manifest_dir = Path(manifest_dir) + + def train_clean_100_cuts(self) -> CutSet: + f = self.manifest_dir / "cuts_train-clean-100.json.gz" + logging.info(f"About to get train-clean-100 cuts from {f}") + return load_manifest(f) + + def train_clean_360_cuts(self) -> CutSet: + f = self.manifest_dir / "cuts_train-clean-360.json.gz" + logging.info(f"About to get train-clean-360 cuts from {f}") + return load_manifest(f) + + def train_other_500_cuts(self) -> CutSet: + f = self.manifest_dir / "cuts_train-other-500.json.gz" + logging.info(f"About to get train-other-500 cuts from {f}") + return load_manifest(f) + + def test_clean_cuts(self) -> CutSet: + f = self.manifest_dir / "cuts_test-clean.json.gz" + logging.info(f"About to get test-clean cuts from {f}") + return load_manifest(f) + + def test_other_cuts(self) -> CutSet: + f = self.manifest_dir / "cuts_test-other.json.gz" + logging.info(f"About to get test-other cuts from {f}") + return load_manifest(f) + + def dev_clean_cuts(self) -> CutSet: + f = self.manifest_dir / "cuts_dev-clean.json.gz" + logging.info(f"About to get dev-clean cuts from {f}") + return load_manifest(f) + + def dev_other_cuts(self) -> CutSet: + f = self.manifest_dir / "cuts_dev-other.json.gz" + logging.info(f"About to get dev-other cuts from {f}") + return load_manifest(f) diff --git a/egs/librispeech/ASR/transducer_stateless_multi_datasets/model.py b/egs/librispeech/ASR/transducer_stateless_multi_datasets/model.py new file mode 100644 index 000000000..8141f9a83 --- /dev/null +++ b/egs/librispeech/ASR/transducer_stateless_multi_datasets/model.py @@ -0,0 +1,168 @@ +# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import random +from typing import Optional + +import k2 +import torch +import torch.nn as nn +from encoder_interface import EncoderInterface + +from icefall.utils import add_sos + + +class Transducer(nn.Module): + """It implements https://arxiv.org/pdf/1211.3711.pdf + "Sequence Transduction with Recurrent Neural Networks" + """ + + def __init__( + self, + encoder: EncoderInterface, + decoder: nn.Module, + joiner: nn.Module, + decoder_giga: Optional[nn.Module] = None, + joiner_giga: Optional[nn.Module] = None, + ): + """ + Args: + encoder: + It is the transcription network in the paper. Its accepts + two inputs: `x` of (N, T, C) and `x_lens` of shape (N,). + It returns two tensors: `logits` of shape (N, T, C) and + `logit_lens` of shape (N,). + decoder: + It is the prediction network in the paper. 
Its input shape + is (N, U) and its output shape is (N, U, C). It should contain + one attribute: `blank_id`. + joiner: + It has two inputs with shapes: (N, T, C) and (N, U, C). Its + output shape is (N, T, U, C). Note that its output contains + unnormalized probs, i.e., not processed by log-softmax. + decoder_giga: + The decoder for the GigaSpeech dataset. + joiner_giga: + The joiner for the GigaSpeech dataset. + """ + super().__init__() + assert isinstance(encoder, EncoderInterface), type(encoder) + assert hasattr(decoder, "blank_id") + + if decoder_giga is not None: + assert hasattr(decoder_giga, "blank_id") + + self.encoder = encoder + + self.decoder = decoder + self.joiner = joiner + + self.decoder_giga = decoder_giga + self.joiner_giga = joiner_giga + + def forward( + self, + x: torch.Tensor, + x_lens: torch.Tensor, + y: k2.RaggedTensor, + libri: bool = True, + modified_transducer_prob: float = 0.0, + ) -> torch.Tensor: + """ + Args: + x: + A 3-D tensor of shape (N, T, C). + x_lens: + A 1-D tensor of shape (N,). It contains the number of frames in `x` + before padding. + y: + A ragged tensor with 2 axes [utt][label]. It contains labels of each + utterance. + libri: + True to use the decoder and joiner for the LibriSpeech dataset. + False to use the decoder and joiner for the GigaSpeech dataset. + modified_transducer_prob: + The probability to use modified transducer loss. + Returns: + Return the transducer loss. + """ + assert x.ndim == 3, x.shape + assert x_lens.ndim == 1, x_lens.shape + assert y.num_axes == 2, y.num_axes + + assert x.size(0) == x_lens.size(0) == y.dim0 + + encoder_out, x_lens = self.encoder(x, x_lens) + assert torch.all(x_lens > 0) + + # Now for the decoder, i.e., the prediction network + row_splits = y.shape.row_splits(1) + y_lens = row_splits[1:] - row_splits[:-1] + + blank_id = self.decoder.blank_id + sos_y = add_sos(y, sos_id=blank_id) + + sos_y_padded = sos_y.pad(mode="constant", padding_value=blank_id) + sos_y_padded = sos_y_padded.to(torch.int64) + + if libri: + decoder = self.decoder + joiner = self.joiner + else: + decoder = self.decoder_giga + joiner = self.joiner_giga + + decoder_out = decoder(sos_y_padded) + + # +1 here since a blank is prepended to each utterance. 
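+        # encoder_out is (N, T, C) and decoder_out is (N, U+1, C).
+        # The joiner strips padding using x_lens and (y_lens + 1) and
+        # returns flattened logits of shape (sum_all_TU, vocab_size),
+        # the layout consumed by optimized_transducer.transducer_loss()
+        # below.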
+ logits = joiner( + encoder_out=encoder_out, + decoder_out=decoder_out, + encoder_out_len=x_lens, + decoder_out_len=y_lens + 1, + ) + + # rnnt_loss requires 0 padded targets + # Note: y does not start with SOS + y_padded = y.pad(mode="constant", padding_value=0) + + # We don't put this `import` at the beginning of the file + # as it is required only in the training, not during the + # reference stage + import optimized_transducer + + assert 0 <= modified_transducer_prob <= 1 + + if modified_transducer_prob == 0: + one_sym_per_frame = False + elif random.random() < modified_transducer_prob: + # random.random() returns a float in the range [0, 1) + one_sym_per_frame = True + else: + one_sym_per_frame = False + + loss = optimized_transducer.transducer_loss( + logits=logits, + targets=y_padded, + logit_lengths=x_lens, + target_lengths=y_lens, + blank=blank_id, + reduction="sum", + one_sym_per_frame=one_sym_per_frame, + from_log_softmax=False, + ) + + return loss diff --git a/egs/librispeech/ASR/transducer_stateless_multi_datasets/pretrained.py b/egs/librispeech/ASR/transducer_stateless_multi_datasets/pretrained.py new file mode 100755 index 000000000..5ba3acea1 --- /dev/null +++ b/egs/librispeech/ASR/transducer_stateless_multi_datasets/pretrained.py @@ -0,0 +1,340 @@ +#!/usr/bin/env python3 +# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Usage: + +(1) greedy search +./transducer_stateless_multi_datasets/pretrained.py \ + --checkpoint ./transducer_stateless_multi_datasets/exp/pretrained.pt \ + --bpe-model ./data/lang_bpe_500/bpe.model \ + --method greedy_search \ + --max-sym-per-frame 1 \ + /path/to/foo.wav \ + /path/to/bar.wav + +(2) beam search +./transducer_stateless_multi_datasets/pretrained.py \ + --checkpoint ./transducer_stateless_multi_datasets/exp/pretrained.pt \ + --bpe-model ./data/lang_bpe_500/bpe.model \ + --method beam_search \ + --beam-size 4 \ + /path/to/foo.wav \ + /path/to/bar.wav + +(3) modified beam search +./transducer_stateless_multi_datasets/pretrained.py \ + --checkpoint ./transducer_stateless_multi_datasets/exp/pretrained.pt \ + --bpe-model ./data/lang_bpe_500/bpe.model \ + --method modified_beam_search \ + --beam-size 4 \ + /path/to/foo.wav \ + /path/to/bar.wav + +You can also use `./transducer_stateless_multi_datasets/exp/epoch-xx.pt`. 
+ +Note: ./transducer_stateless_multi_datasets/exp/pretrained.pt is generated by +./transducer_stateless_multi_datasets/export.py +""" + + +import argparse +import logging +import math +from typing import List + +import kaldifeat +import sentencepiece as spm +import torch +import torch.nn as nn +import torchaudio +from beam_search import beam_search, greedy_search, modified_beam_search +from conformer import Conformer +from decoder import Decoder +from joiner import Joiner +from model import Transducer +from torch.nn.utils.rnn import pad_sequence + +from icefall.env import get_env_info +from icefall.utils import AttributeDict + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--checkpoint", + type=str, + required=True, + help="Path to the checkpoint. " + "The checkpoint is assumed to be saved by " + "icefall.checkpoint.save_checkpoint().", + ) + + parser.add_argument( + "--bpe-model", + type=str, + help="""Path to bpe.model. + Used only when method is ctc-decoding. + """, + ) + + parser.add_argument( + "--method", + type=str, + default="greedy_search", + help="""Possible values are: + - greedy_search + - beam_search + - modified_beam_search + """, + ) + + parser.add_argument( + "sound_files", + type=str, + nargs="+", + help="The input sound file(s) to transcribe. " + "Supported formats are those supported by torchaudio.load(). " + "For example, wav and flac are supported. " + "The sample rate has to be 16kHz.", + ) + + parser.add_argument( + "--beam-size", + type=int, + default=4, + help="Used only when --method is beam_search and modified_beam_search ", + ) + + parser.add_argument( + "--context-size", + type=int, + default=2, + help="The context size in the decoder. 1 means bigram; " + "2 means tri-gram", + ) + parser.add_argument( + "--max-sym-per-frame", + type=int, + default=3, + help="""Maximum number of symbols per frame. Used only when + --method is greedy_search. 
+ """, + ) + + return parser + + +def get_params() -> AttributeDict: + params = AttributeDict( + { + "sample_rate": 16000, + # parameters for conformer + "feature_dim": 80, + "encoder_out_dim": 512, + "subsampling_factor": 4, + "attention_dim": 512, + "nhead": 8, + "dim_feedforward": 2048, + "num_encoder_layers": 12, + "vgg_frontend": False, + "env_info": get_env_info(), + } + ) + return params + + +def get_encoder_model(params: AttributeDict) -> nn.Module: + encoder = Conformer( + num_features=params.feature_dim, + output_dim=params.encoder_out_dim, + subsampling_factor=params.subsampling_factor, + d_model=params.attention_dim, + nhead=params.nhead, + dim_feedforward=params.dim_feedforward, + num_encoder_layers=params.num_encoder_layers, + vgg_frontend=params.vgg_frontend, + ) + return encoder + + +def get_decoder_model(params: AttributeDict) -> nn.Module: + decoder = Decoder( + vocab_size=params.vocab_size, + embedding_dim=params.encoder_out_dim, + blank_id=params.blank_id, + context_size=params.context_size, + ) + return decoder + + +def get_joiner_model(params: AttributeDict) -> nn.Module: + joiner = Joiner( + input_dim=params.encoder_out_dim, + output_dim=params.vocab_size, + ) + return joiner + + +def get_transducer_model(params: AttributeDict) -> nn.Module: + encoder = get_encoder_model(params) + decoder = get_decoder_model(params) + joiner = get_joiner_model(params) + + model = Transducer( + encoder=encoder, + decoder=decoder, + joiner=joiner, + ) + return model + + +def read_sound_files( + filenames: List[str], expected_sample_rate: float +) -> List[torch.Tensor]: + """Read a list of sound files into a list 1-D float32 torch tensors. + Args: + filenames: + A list of sound filenames. + expected_sample_rate: + The expected sample rate of the sound files. + Returns: + Return a list of 1-D float32 torch tensors. + """ + ans = [] + for f in filenames: + wave, sample_rate = torchaudio.load(f) + assert sample_rate == expected_sample_rate, ( + f"expected sample rate: {expected_sample_rate}. 
" + f"Given: {sample_rate}" + ) + # We use only the first channel + ans.append(wave[0]) + return ans + + +@torch.no_grad() +def main(): + parser = get_parser() + args = parser.parse_args() + + params = get_params() + + params.update(vars(args)) + + sp = spm.SentencePieceProcessor() + sp.load(params.bpe_model) + + # is defined in local/train_bpe_model.py + params.blank_id = sp.piece_to_id("") + params.vocab_size = sp.get_piece_size() + + logging.info(f"{params}") + + device = torch.device("cpu") + if torch.cuda.is_available(): + device = torch.device("cuda", 0) + + logging.info(f"device: {device}") + + logging.info("Creating model") + model = get_transducer_model(params) + + checkpoint = torch.load(args.checkpoint, map_location="cpu") + model.load_state_dict(checkpoint["model"], strict=False) + model.to(device) + model.eval() + model.device = device + + logging.info("Constructing Fbank computer") + opts = kaldifeat.FbankOptions() + opts.device = device + opts.frame_opts.dither = 0 + opts.frame_opts.snip_edges = False + opts.frame_opts.samp_freq = params.sample_rate + opts.mel_opts.num_bins = params.feature_dim + + fbank = kaldifeat.Fbank(opts) + + logging.info(f"Reading sound files: {params.sound_files}") + waves = read_sound_files( + filenames=params.sound_files, expected_sample_rate=params.sample_rate + ) + waves = [w.to(device) for w in waves] + + logging.info("Decoding started") + features = fbank(waves) + feature_lengths = [f.size(0) for f in features] + + features = pad_sequence( + features, batch_first=True, padding_value=math.log(1e-10) + ) + + feature_lengths = torch.tensor(feature_lengths, device=device) + + with torch.no_grad(): + encoder_out, encoder_out_lens = model.encoder( + x=features, x_lens=feature_lengths + ) + + num_waves = encoder_out.size(0) + hyps = [] + msg = f"Using {params.method}" + if params.method == "beam_search": + msg += f" with beam size {params.beam_size}" + logging.info(msg) + for i in range(num_waves): + # fmt: off + encoder_out_i = encoder_out[i:i+1, :encoder_out_lens[i]] + # fmt: on + if params.method == "greedy_search": + hyp = greedy_search( + model=model, + encoder_out=encoder_out_i, + max_sym_per_frame=params.max_sym_per_frame, + ) + elif params.method == "beam_search": + hyp = beam_search( + model=model, encoder_out=encoder_out_i, beam=params.beam_size + ) + elif params.method == "modified_beam_search": + hyp = modified_beam_search( + model=model, encoder_out=encoder_out_i, beam=params.beam_size + ) + else: + raise ValueError(f"Unsupported method: {params.method}") + + hyps.append(sp.decode(hyp).split()) + + s = "\n" + for filename, hyp in zip(params.sound_files, hyps): + words = " ".join(hyp) + s += f"{filename}:\n{words}\n\n" + logging.info(s) + + logging.info("Decoding Done") + + +if __name__ == "__main__": + formatter = ( + "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s" + ) + + logging.basicConfig(format=formatter, level=logging.INFO) + main() diff --git a/egs/librispeech/ASR/transducer_stateless_multi_datasets/subsampling.py b/egs/librispeech/ASR/transducer_stateless_multi_datasets/subsampling.py new file mode 120000 index 000000000..73068da26 --- /dev/null +++ b/egs/librispeech/ASR/transducer_stateless_multi_datasets/subsampling.py @@ -0,0 +1 @@ +../transducer/subsampling.py \ No newline at end of file diff --git a/egs/librispeech/ASR/transducer_stateless_multi_datasets/test_asr_datamodule.py b/egs/librispeech/ASR/transducer_stateless_multi_datasets/test_asr_datamodule.py new file mode 100755 index 000000000..e1833b841 --- 
/dev/null +++ b/egs/librispeech/ASR/transducer_stateless_multi_datasets/test_asr_datamodule.py @@ -0,0 +1,102 @@ +#!/usr/bin/env python3 +# Copyright 2022 Xiaomi Corp. (authors: Fangjun Kuang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +To run this file, do: + + cd icefall/egs/librispeech/ASR + python ./transducer_stateless_multi_datasets/test_asr_datamodule.py +""" + +import argparse +import random +from pathlib import Path + +from asr_datamodule import AsrDataModule +from gigaspeech import GigaSpeech +from lhotse import load_manifest +from librispeech import LibriSpeech + + +def test_dataset(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + AsrDataModule.add_arguments(parser) + args = parser.parse_args() + print(args) + + if args.enable_musan: + cuts_musan = load_manifest( + Path(args.manifest_dir) / "cuts_musan.json.gz" + ) + else: + cuts_musan = None + + librispeech = LibriSpeech(manifest_dir=args.manifest_dir) + gigaspeech = GigaSpeech(manifest_dir=args.manifest_dir) + + train_clean_100 = librispeech.train_clean_100_cuts() + train_S = gigaspeech.train_S_cuts() + + asr_datamodule = AsrDataModule(args) + + libri_train_dl = asr_datamodule.train_dataloaders( + train_clean_100, + dynamic_bucketing=False, + on_the_fly_feats=False, + cuts_musan=cuts_musan, + ) + + giga_train_dl = asr_datamodule.train_dataloaders( + train_S, + dynamic_bucketing=True, + on_the_fly_feats=True, + cuts_musan=cuts_musan, + ) + + seed = 20220216 + rng = random.Random(seed) + + for epoch in range(2): + print("epoch", epoch) + batch_idx = 0 + libri_train_dl.sampler.set_epoch(epoch) + giga_train_dl.sampler.set_epoch(epoch) + + iter_libri = iter(libri_train_dl) + iter_giga = iter(giga_train_dl) + while True: + idx = rng.choices((0, 1), weights=[0.8, 0.2], k=1)[0] + dl = iter_libri if idx == 0 else iter_giga + batch_idx += 1 + + print("dl idx", idx, "batch_idx", batch_idx) + try: + _ = next(dl) + except StopIteration: + print("dl idx", idx) + print("Go to the next epoch") + break + + +def main(): + test_dataset() + + +if __name__ == "__main__": + main() diff --git a/egs/librispeech/ASR/transducer_stateless_multi_datasets/test_decoder.py b/egs/librispeech/ASR/transducer_stateless_multi_datasets/test_decoder.py new file mode 100755 index 000000000..9ee197ee8 --- /dev/null +++ b/egs/librispeech/ASR/transducer_stateless_multi_datasets/test_decoder.py @@ -0,0 +1,58 @@ +#!/usr/bin/env python3 +# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +To run this file, do: + + cd icefall/egs/librispeech/ASR + python ./transducer_stateless_multi_datasets/test_decoder.py +""" + +import torch +from decoder import Decoder + + +def test_decoder(): + vocab_size = 3 + blank_id = 0 + embedding_dim = 128 + context_size = 4 + + decoder = Decoder( + vocab_size=vocab_size, + embedding_dim=embedding_dim, + blank_id=blank_id, + context_size=context_size, + ) + N = 100 + U = 20 + x = torch.randint(low=0, high=vocab_size, size=(N, U)) + y = decoder(x) + assert y.shape == (N, U, embedding_dim) + + # for inference + x = torch.randint(low=0, high=vocab_size, size=(N, context_size)) + y = decoder(x, need_pad=False) + assert y.shape == (N, 1, embedding_dim) + + +def main(): + test_decoder() + + +if __name__ == "__main__": + main() diff --git a/egs/librispeech/ASR/transducer_stateless_multi_datasets/train.py b/egs/librispeech/ASR/transducer_stateless_multi_datasets/train.py new file mode 100755 index 000000000..720151ea0 --- /dev/null +++ b/egs/librispeech/ASR/transducer_stateless_multi_datasets/train.py @@ -0,0 +1,890 @@ +#!/usr/bin/env python3 +# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang, +# Wei Kang +# Mingshuang Luo) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +Usage: + +export CUDA_VISIBLE_DEVICES="0,1,2,3" + +./transducer_stateless_multi_datasets/train.py \ + --world-size 4 \ + --num-epochs 30 \ + --start-epoch 0 \ + --exp-dir transducer_stateless_multi_datasets/exp \ + --full-libri 1 \ + --max-duration 250 \ + --lr-factor 2.5 +""" + + +import argparse +import logging +import random +from pathlib import Path +from shutil import copyfile +from typing import Optional, Tuple + +import k2 +import sentencepiece as spm +import torch +import torch.multiprocessing as mp +import torch.nn as nn +from asr_datamodule import AsrDataModule +from conformer import Conformer +from decoder import Decoder +from gigaspeech import GigaSpeech +from joiner import Joiner +from lhotse import CutSet, load_manifest +from lhotse.cut import Cut +from lhotse.utils import fix_random_seed +from librispeech import LibriSpeech +from model import Transducer +from torch import Tensor +from torch.nn.parallel import DistributedDataParallel as DDP +from torch.nn.utils import clip_grad_norm_ +from torch.utils.tensorboard import SummaryWriter +from transformer import Noam + +from icefall.checkpoint import load_checkpoint +from icefall.checkpoint import save_checkpoint as save_checkpoint_impl +from icefall.dist import cleanup_dist, setup_dist +from icefall.env import get_env_info +from icefall.utils import AttributeDict, MetricsTracker, setup_logger, str2bool + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--world-size", + type=int, + default=1, + help="Number of GPUs for DDP training.", + ) + + parser.add_argument( + "--master-port", + type=int, + default=12354, + help="Master port to use for DDP training.", + ) + + parser.add_argument( + "--full-libri", + type=str2bool, + default=True, + help="When enabled, use 960h LibriSpeech. " + "Otherwise, use 100h subset.", + ) + + parser.add_argument( + "--tensorboard", + type=str2bool, + default=True, + help="Should various information be logged in tensorboard.", + ) + + parser.add_argument( + "--num-epochs", + type=int, + default=30, + help="Number of epochs to train.", + ) + + parser.add_argument( + "--start-epoch", + type=int, + default=0, + help="""Resume training from from this epoch. + If it is positive, it will load checkpoint from + transducer_stateless/exp/epoch-{start_epoch-1}.pt + """, + ) + + parser.add_argument( + "--exp-dir", + type=str, + default="transducer_stateless_multi_datasets/exp", + help="""The experiment dir. + It specifies the directory where all training related + files, e.g., checkpoints, log, etc, are saved + """, + ) + + parser.add_argument( + "--bpe-model", + type=str, + default="data/lang_bpe_500/bpe.model", + help="Path to the BPE model", + ) + + parser.add_argument( + "--lr-factor", + type=float, + default=5.0, + help="The lr_factor for Noam optimizer", + ) + + parser.add_argument( + "--context-size", + type=int, + default=2, + help="The context size in the decoder. 1 means bigram; " + "2 means tri-gram", + ) + + parser.add_argument( + "--modified-transducer-prob", + type=float, + default=0.25, + help="""The probability to use modified transducer loss. + In modified transduer, it limits the maximum number of symbols + per frame to 1. 
See also the option --max-sym-per-frame in + transducer_stateless/decode.py + """, + ) + + parser.add_argument( + "--giga-prob", + type=float, + default=0.2, + help="The probability to select a batch from the GigaSpeech dataset", + ) + + return parser + + +def get_params() -> AttributeDict: + """Return a dict containing training parameters. + + All training related parameters that are not passed from the commandline + are saved in the variable `params`. + + Commandline options are merged into `params` after they are parsed, so + you can also access them via `params`. + + Explanation of options saved in `params`: + + - best_train_loss: Best training loss so far. It is used to select + the model that has the lowest training loss. It is + updated during the training. + + - best_valid_loss: Best validation loss so far. It is used to select + the model that has the lowest validation loss. It is + updated during the training. + + - best_train_epoch: It is the epoch that has the best training loss. + + - best_valid_epoch: It is the epoch that has the best validation loss. + + - batch_idx_train: Used to writing statistics to tensorboard. It + contains number of batches trained so far across + epochs. + + - log_interval: Print training loss if batch_idx % log_interval` is 0 + + - reset_interval: Reset statistics if batch_idx % reset_interval is 0 + + - valid_interval: Run validation if batch_idx % valid_interval is 0 + + - feature_dim: The model input dim. It has to match the one used + in computing features. + + - subsampling_factor: The subsampling factor for the model. + + - attention_dim: Hidden dim for multi-head attention model. + + - num_decoder_layers: Number of decoder layer of transformer decoder. + + - warm_step: The warm_step for Noam optimizer. 
+ """ + params = AttributeDict( + { + "best_train_loss": float("inf"), + "best_valid_loss": float("inf"), + "best_train_epoch": -1, + "best_valid_epoch": -1, + "batch_idx_train": 0, + "log_interval": 50, + "reset_interval": 200, + "valid_interval": 3000, # For the 100h subset, use 800 + # parameters for conformer + "feature_dim": 80, + "encoder_out_dim": 512, + "subsampling_factor": 4, + "attention_dim": 512, + "nhead": 8, + "dim_feedforward": 2048, + "num_encoder_layers": 12, + "vgg_frontend": False, + # parameters for Noam + "warm_step": 80000, # For the 100h subset, use 8k + "env_info": get_env_info(), + } + ) + + return params + + +def get_encoder_model(params: AttributeDict) -> nn.Module: + # TODO: We can add an option to switch between Conformer and Transformer + encoder = Conformer( + num_features=params.feature_dim, + output_dim=params.encoder_out_dim, + subsampling_factor=params.subsampling_factor, + d_model=params.attention_dim, + nhead=params.nhead, + dim_feedforward=params.dim_feedforward, + num_encoder_layers=params.num_encoder_layers, + vgg_frontend=params.vgg_frontend, + ) + return encoder + + +def get_decoder_model(params: AttributeDict) -> nn.Module: + decoder = Decoder( + vocab_size=params.vocab_size, + embedding_dim=params.encoder_out_dim, + blank_id=params.blank_id, + context_size=params.context_size, + ) + return decoder + + +def get_joiner_model(params: AttributeDict) -> nn.Module: + joiner = Joiner( + input_dim=params.encoder_out_dim, + output_dim=params.vocab_size, + ) + return joiner + + +def get_transducer_model(params: AttributeDict) -> nn.Module: + encoder = get_encoder_model(params) + + decoder = get_decoder_model(params) + joiner = get_joiner_model(params) + + decoder_giga = get_decoder_model(params) + joiner_giga = get_joiner_model(params) + + model = Transducer( + encoder=encoder, + decoder=decoder, + joiner=joiner, + decoder_giga=decoder_giga, + joiner_giga=joiner_giga, + ) + return model + + +def load_checkpoint_if_available( + params: AttributeDict, + model: nn.Module, + optimizer: Optional[torch.optim.Optimizer] = None, + scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None, +) -> None: + """Load checkpoint from file. + + If params.start_epoch is positive, it will load the checkpoint from + `params.start_epoch - 1`. Otherwise, this function does nothing. + + Apart from loading state dict for `model`, `optimizer` and `scheduler`, + it also updates `best_train_epoch`, `best_train_loss`, `best_valid_epoch`, + and `best_valid_loss` in `params`. + + Args: + params: + The return value of :func:`get_params`. + model: + The training model. + optimizer: + The optimizer that we are using. + scheduler: + The learning rate scheduler we are using. + Returns: + Return None. + """ + if params.start_epoch <= 0: + return + + filename = params.exp_dir / f"epoch-{params.start_epoch-1}.pt" + saved_params = load_checkpoint( + filename, + model=model, + optimizer=optimizer, + scheduler=scheduler, + ) + + keys = [ + "best_train_epoch", + "best_valid_epoch", + "batch_idx_train", + "best_train_loss", + "best_valid_loss", + ] + for k in keys: + params[k] = saved_params[k] + + return saved_params + + +def save_checkpoint( + params: AttributeDict, + model: nn.Module, + optimizer: Optional[torch.optim.Optimizer] = None, + scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None, + rank: int = 0, +) -> None: + """Save model, optimizer, scheduler and training stats to file. + + Args: + params: + It is returned by :func:`get_params`. + model: + The training model. 
+ """ + if rank != 0: + return + filename = params.exp_dir / f"epoch-{params.cur_epoch}.pt" + save_checkpoint_impl( + filename=filename, + model=model, + params=params, + optimizer=optimizer, + scheduler=scheduler, + rank=rank, + ) + + if params.best_train_epoch == params.cur_epoch: + best_train_filename = params.exp_dir / "best-train-loss.pt" + copyfile(src=filename, dst=best_train_filename) + + if params.best_valid_epoch == params.cur_epoch: + best_valid_filename = params.exp_dir / "best-valid-loss.pt" + copyfile(src=filename, dst=best_valid_filename) + + +def is_libri(c: Cut) -> bool: + """Return True if this cut is from the LibriSpeech dataset. + + Note: + During data preparation, we set the custom field in + the supervision segment of GigaSpeech to dict(origin='giga') + See ../local/preprocess_gigaspeech.py. + """ + return c.supervisions[0].custom is None + + +def compute_loss( + params: AttributeDict, + model: nn.Module, + sp: spm.SentencePieceProcessor, + batch: dict, + is_training: bool, +) -> Tuple[Tensor, MetricsTracker]: + """ + Compute CTC loss given the model and its inputs. + + Args: + params: + Parameters for training. See :func:`get_params`. + model: + The model for training. It is an instance of Conformer in our case. + batch: + A batch of data. See `lhotse.dataset.K2SpeechRecognitionDataset()` + for the content in it. + is_training: + True for training. False for validation. When it is True, this + function enables autograd during computation; when it is False, it + disables autograd. + """ + device = model.device + feature = batch["inputs"] + # at entry, feature is (N, T, C) + assert feature.ndim == 3 + feature = feature.to(device) + + supervisions = batch["supervisions"] + feature_lens = supervisions["num_frames"].to(device) + + libri = is_libri(supervisions["cut"][0]) + + texts = batch["supervisions"]["text"] + y = sp.encode(texts, out_type=int) + y = k2.RaggedTensor(y).to(device) + + with torch.set_grad_enabled(is_training): + loss = model( + x=feature, + x_lens=feature_lens, + y=y, + libri=libri, + modified_transducer_prob=params.modified_transducer_prob, + ) + + assert loss.requires_grad == is_training + + info = MetricsTracker() + info["frames"] = (feature_lens // params.subsampling_factor).sum().item() + + # Note: We use reduction=sum while computing the loss. 
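+    # info["loss"] therefore holds the summed loss over the whole batch;
+    # the per-frame value reported in the logs is
+    # tot_loss["loss"] / tot_loss["frames"], see train_one_epoch() and
+    # compute_validation_loss().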
+ info["loss"] = loss.detach().cpu().item() + + return loss, info + + +def compute_validation_loss( + params: AttributeDict, + model: nn.Module, + sp: spm.SentencePieceProcessor, + valid_dl: torch.utils.data.DataLoader, + world_size: int = 1, +) -> MetricsTracker: + """Run the validation process.""" + model.eval() + + tot_loss = MetricsTracker() + + for batch_idx, batch in enumerate(valid_dl): + loss, loss_info = compute_loss( + params=params, + model=model, + sp=sp, + batch=batch, + is_training=False, + ) + assert loss.requires_grad is False + tot_loss = tot_loss + loss_info + + if world_size > 1: + tot_loss.reduce(loss.device) + + loss_value = tot_loss["loss"] / tot_loss["frames"] + if loss_value < params.best_valid_loss: + params.best_valid_epoch = params.cur_epoch + params.best_valid_loss = loss_value + + return tot_loss + + +def train_one_epoch( + params: AttributeDict, + model: nn.Module, + optimizer: torch.optim.Optimizer, + sp: spm.SentencePieceProcessor, + train_dl: torch.utils.data.DataLoader, + giga_train_dl: torch.utils.data.DataLoader, + valid_dl: torch.utils.data.DataLoader, + rng: random.Random, + tb_writer: Optional[SummaryWriter] = None, + world_size: int = 1, +) -> None: + """Train the model for one epoch. + + The training loss from the mean of all frames is saved in + `params.train_loss`. It runs the validation process every + `params.valid_interval` batches. + + Args: + params: + It is returned by :func:`get_params`. + model: + The model for training. + optimizer: + The optimizer we are using. + train_dl: + Dataloader for the training dataset. + valid_dl: + Dataloader for the validation dataset. + rng: + For select which dataset to use. + tb_writer: + Writer to write log messages to tensorboard. + world_size: + Number of nodes in DDP training. If it is 1, DDP is disabled. + """ + model.train() + + libri_tot_loss = MetricsTracker() + giga_tot_loss = MetricsTracker() + tot_loss = MetricsTracker() + + # index 0: for LibriSpeech + # index 1: for GigaSpeech + # This sets the probabilities for choosing which datasets + dl_weights = [1 - params.giga_prob, params.giga_prob] + + iter_libri = iter(train_dl) + iter_giga = iter(giga_train_dl) + + batch_idx = 0 + + while True: + idx = rng.choices((0, 1), weights=dl_weights, k=1)[0] + dl = iter_libri if idx == 0 else iter_giga + + try: + batch = next(dl) + except StopIteration: + break + + batch_idx += 1 + + params.batch_idx_train += 1 + batch_size = len(batch["supervisions"]["text"]) + + libri = is_libri(batch["supervisions"]["cut"][0]) + + loss, loss_info = compute_loss( + params=params, + model=model, + sp=sp, + batch=batch, + is_training=True, + ) + # summary stats + tot_loss = (tot_loss * (1 - 1 / params.reset_interval)) + loss_info + if libri: + libri_tot_loss = ( + libri_tot_loss * (1 - 1 / params.reset_interval) + ) + loss_info + prefix = "libri" # for logging only + else: + giga_tot_loss = ( + giga_tot_loss * (1 - 1 / params.reset_interval) + ) + loss_info + prefix = "giga" + + # NOTE: We use reduction==sum and loss is computed over utterances + # in the batch and there is no normalization to it so far. 
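+        # The gradient 2-norm is clipped to 5.0 before optimizer.step();
+        # the Noam wrapper (see transformer.py) updates the learning rate
+        # internally, exposed as optimizer._rate.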
+ + optimizer.zero_grad() + loss.backward() + clip_grad_norm_(model.parameters(), 5.0, 2.0) + optimizer.step() + + if batch_idx % params.log_interval == 0: + logging.info( + f"Epoch {params.cur_epoch}, " + f"batch {batch_idx}, {prefix}_loss[{loss_info}], " + f"tot_loss[{tot_loss}], " + f"libri_tot_loss[{libri_tot_loss}], " + f"giga_tot_loss[{giga_tot_loss}], " + f"batch size: {batch_size}" + ) + + if batch_idx % params.log_interval == 0: + if tb_writer is not None: + loss_info.write_summary( + tb_writer, + f"train/current_{prefix}_", + params.batch_idx_train, + ) + tot_loss.write_summary( + tb_writer, "train/tot_", params.batch_idx_train + ) + libri_tot_loss.write_summary( + tb_writer, "train/libri_tot_", params.batch_idx_train + ) + giga_tot_loss.write_summary( + tb_writer, "train/giga_tot_", params.batch_idx_train + ) + + if batch_idx > 0 and batch_idx % params.valid_interval == 0: + logging.info("Computing validation loss") + valid_info = compute_validation_loss( + params=params, + model=model, + sp=sp, + valid_dl=valid_dl, + world_size=world_size, + ) + model.train() + logging.info(f"Epoch {params.cur_epoch}, validation: {valid_info}") + if tb_writer is not None: + valid_info.write_summary( + tb_writer, "train/valid_", params.batch_idx_train + ) + + loss_value = tot_loss["loss"] / tot_loss["frames"] + params.train_loss = loss_value + if params.train_loss < params.best_train_loss: + params.best_train_epoch = params.cur_epoch + params.best_train_loss = params.train_loss + + +def filter_short_and_long_utterances(cuts: CutSet) -> CutSet: + def remove_short_and_long_utt(c: Cut): + # Keep only utterances with duration between 1 second and 20 seconds + return 1.0 <= c.duration <= 20.0 + + num_in_total = len(cuts) + cuts = cuts.filter(remove_short_and_long_utt) + + num_left = len(cuts) + num_removed = num_in_total - num_left + removed_percent = num_removed / num_in_total * 100 + + logging.info(f"Before removing short and long utterances: {num_in_total}") + logging.info(f"After removing short and long utterances: {num_left}") + logging.info(f"Removed {num_removed} utterances ({removed_percent:.5f}%)") + + return cuts + + +def run(rank, world_size, args): + """ + Args: + rank: + It is a value between 0 and `world_size-1`, which is + passed automatically by `mp.spawn()` in :func:`main`. + The node with rank 0 is responsible for saving checkpoint. + world_size: + Number of GPUs for DDP training. 
+ args: + The return value of get_parser().parse_args() + """ + params = get_params() + params.update(vars(args)) + if params.full_libri is False: + params.valid_interval = 800 + params.warm_step = 8000 + + seed = 42 + fix_random_seed(seed) + rng = random.Random(seed) + if world_size > 1: + setup_dist(rank, world_size, params.master_port) + + setup_logger(f"{params.exp_dir}/log/log-train") + logging.info("Training started") + + if args.tensorboard and rank == 0: + tb_writer = SummaryWriter(log_dir=f"{params.exp_dir}/tensorboard") + else: + tb_writer = None + + device = torch.device("cpu") + if torch.cuda.is_available(): + device = torch.device("cuda", rank) + logging.info(f"Device: {device}") + + sp = spm.SentencePieceProcessor() + sp.load(params.bpe_model) + + # is defined in local/train_bpe_model.py + params.blank_id = sp.piece_to_id("") + params.vocab_size = sp.get_piece_size() + + logging.info(params) + + logging.info("About to create model") + model = get_transducer_model(params) + + num_param = sum([p.numel() for p in model.parameters()]) + logging.info(f"Number of model parameters: {num_param}") + + checkpoints = load_checkpoint_if_available(params=params, model=model) + + model.to(device) + if world_size > 1: + logging.info("Using DDP") + model = DDP(model, device_ids=[rank], find_unused_parameters=True) + model.device = device + + optimizer = Noam( + model.parameters(), + model_size=params.attention_dim, + factor=params.lr_factor, + warm_step=params.warm_step, + ) + + if checkpoints and "optimizer" in checkpoints: + logging.info("Loading optimizer state dict") + optimizer.load_state_dict(checkpoints["optimizer"]) + + librispeech = LibriSpeech(manifest_dir=args.manifest_dir) + + train_cuts = librispeech.train_clean_100_cuts() + if params.full_libri: + train_cuts += librispeech.train_clean_360_cuts() + train_cuts += librispeech.train_other_500_cuts() + + train_cuts = filter_short_and_long_utterances(train_cuts) + + gigaspeech = GigaSpeech(manifest_dir=args.manifest_dir) + # XL 10k hours + # L 2.5k hours + # M 1k hours + # S 250 hours + # XS 10 hours + # DEV 12 hours + # Test 40 hours + if params.full_libri: + logging.info("Using the L subset of GigaSpeech (2.5k hours)") + train_giga_cuts = gigaspeech.train_L_cuts() + else: + logging.info("Using the S subset of GigaSpeech (250 hours)") + train_giga_cuts = gigaspeech.train_S_cuts() + + train_giga_cuts = filter_short_and_long_utterances(train_giga_cuts) + + if args.enable_musan: + cuts_musan = load_manifest( + Path(args.manifest_dir) / "cuts_musan.json.gz" + ) + else: + cuts_musan = None + + asr_datamodule = AsrDataModule(args) + + train_dl = asr_datamodule.train_dataloaders( + train_cuts, + dynamic_bucketing=False, + on_the_fly_feats=False, + cuts_musan=cuts_musan, + ) + + giga_train_dl = asr_datamodule.train_dataloaders( + train_giga_cuts, + dynamic_bucketing=True, + on_the_fly_feats=True, + cuts_musan=cuts_musan, + ) + + valid_cuts = librispeech.dev_clean_cuts() + valid_cuts += librispeech.dev_other_cuts() + valid_dl = asr_datamodule.valid_dataloaders(valid_cuts) + + # It's time consuming to include `giga_train_dl` here + # for dl in [train_dl, giga_train_dl]: + for dl in [train_dl]: + scan_pessimistic_batches_for_oom( + model=model, + train_dl=dl, + optimizer=optimizer, + sp=sp, + params=params, + ) + + for epoch in range(params.start_epoch, params.num_epochs): + train_dl.sampler.set_epoch(epoch) + giga_train_dl.sampler.set_epoch(epoch) + + cur_lr = optimizer._rate + if tb_writer is not None: + tb_writer.add_scalar( + 
"train/learning_rate", cur_lr, params.batch_idx_train + ) + tb_writer.add_scalar("train/epoch", epoch, params.batch_idx_train) + + if rank == 0: + logging.info("epoch {}, learning rate {}".format(epoch, cur_lr)) + + params.cur_epoch = epoch + + train_one_epoch( + params=params, + model=model, + optimizer=optimizer, + sp=sp, + train_dl=train_dl, + giga_train_dl=giga_train_dl, + valid_dl=valid_dl, + rng=rng, + tb_writer=tb_writer, + world_size=world_size, + ) + + save_checkpoint( + params=params, + model=model, + optimizer=optimizer, + rank=rank, + ) + + logging.info("Done!") + + if world_size > 1: + torch.distributed.barrier() + cleanup_dist() + + +def scan_pessimistic_batches_for_oom( + model: nn.Module, + train_dl: torch.utils.data.DataLoader, + optimizer: torch.optim.Optimizer, + sp: spm.SentencePieceProcessor, + params: AttributeDict, +): + from lhotse.dataset import find_pessimistic_batches + + logging.info( + "Sanity check -- see if any of the batches in epoch 0 would cause OOM." + ) + batches, crit_values = find_pessimistic_batches(train_dl.sampler) + for criterion, cuts in batches.items(): + batch = train_dl.dataset[cuts] + try: + optimizer.zero_grad() + loss, _ = compute_loss( + params=params, + model=model, + sp=sp, + batch=batch, + is_training=True, + ) + loss.backward() + clip_grad_norm_(model.parameters(), 5.0, 2.0) + optimizer.step() + except RuntimeError as e: + if "CUDA out of memory" in str(e): + logging.error( + "Your GPU ran out of memory with the current " + "max_duration setting. We recommend decreasing " + "max_duration and trying again.\n" + f"Failing criterion: {criterion} " + f"(={crit_values[criterion]}) ..." + ) + raise + + +def main(): + parser = get_parser() + AsrDataModule.add_arguments(parser) + args = parser.parse_args() + args.exp_dir = Path(args.exp_dir) + + assert 0 <= args.giga_prob < 1, args.giga_prob + + world_size = args.world_size + assert world_size >= 1 + if world_size > 1: + mp.spawn(run, args=(world_size, args), nprocs=world_size, join=True) + else: + run(rank=0, world_size=1, args=args) + + +torch.set_num_threads(1) +torch.set_num_interop_threads(1) + +if __name__ == "__main__": + main() diff --git a/egs/librispeech/ASR/transducer_stateless_multi_datasets/transformer.py b/egs/librispeech/ASR/transducer_stateless_multi_datasets/transformer.py new file mode 100644 index 000000000..e851dcc32 --- /dev/null +++ b/egs/librispeech/ASR/transducer_stateless_multi_datasets/transformer.py @@ -0,0 +1,418 @@ +# Copyright 2021 University of Chinese Academy of Sciences (author: Han Zhu) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +import math +from typing import Optional, Tuple + +import torch +import torch.nn as nn +from encoder_interface import EncoderInterface +from subsampling import Conv2dSubsampling, VggSubsampling + +from icefall.utils import make_pad_mask + + +class Transformer(EncoderInterface): + def __init__( + self, + num_features: int, + output_dim: int, + subsampling_factor: int = 4, + d_model: int = 256, + nhead: int = 4, + dim_feedforward: int = 2048, + num_encoder_layers: int = 12, + dropout: float = 0.1, + normalize_before: bool = True, + vgg_frontend: bool = False, + ) -> None: + """ + Args: + num_features: + The input dimension of the model. + output_dim: + The output dimension of the model. + subsampling_factor: + Number of output frames is num_in_frames // subsampling_factor. + Currently, subsampling_factor MUST be 4. + d_model: + Attention dimension. + nhead: + Number of heads in multi-head attention. + Must satisfy d_model // nhead == 0. + dim_feedforward: + The output dimension of the feedforward layers in encoder. + num_encoder_layers: + Number of encoder layers. + dropout: + Dropout in encoder. + normalize_before: + If True, use pre-layer norm; False to use post-layer norm. + vgg_frontend: + True to use vgg style frontend for subsampling. + """ + super().__init__() + + self.num_features = num_features + self.output_dim = output_dim + self.subsampling_factor = subsampling_factor + if subsampling_factor != 4: + raise NotImplementedError("Support only 'subsampling_factor=4'.") + + # self.encoder_embed converts the input of shape (N, T, num_features) + # to the shape (N, T//subsampling_factor, d_model). + # That is, it does two things simultaneously: + # (1) subsampling: T -> T//subsampling_factor + # (2) embedding: num_features -> d_model + if vgg_frontend: + self.encoder_embed = VggSubsampling(num_features, d_model) + else: + self.encoder_embed = Conv2dSubsampling(num_features, d_model) + + self.encoder_pos = PositionalEncoding(d_model, dropout) + + encoder_layer = TransformerEncoderLayer( + d_model=d_model, + nhead=nhead, + dim_feedforward=dim_feedforward, + dropout=dropout, + normalize_before=normalize_before, + ) + + if normalize_before: + encoder_norm = nn.LayerNorm(d_model) + else: + encoder_norm = None + + self.encoder = nn.TransformerEncoder( + encoder_layer=encoder_layer, + num_layers=num_encoder_layers, + norm=encoder_norm, + ) + + # TODO(fangjun): remove dropout + self.encoder_output_layer = nn.Sequential( + nn.Dropout(p=dropout), nn.Linear(d_model, output_dim) + ) + + def forward( + self, x: torch.Tensor, x_lens: torch.Tensor + ) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Args: + x: + The input tensor. Its shape is (batch_size, seq_len, feature_dim). + x_lens: + A tensor of shape (batch_size,) containing the number of frames in + `x` before padding. + Returns: + Return a tuple containing 2 tensors: + - logits, its shape is (batch_size, output_seq_len, output_dim) + - logit_lens, a tensor of shape (batch_size,) containing the number + of frames in `logits` before padding. + """ + x = self.encoder_embed(x) + x = self.encoder_pos(x) + x = x.permute(1, 0, 2) # (N, T, C) -> (T, N, C) + + # Caution: We assume the subsampling factor is 4! 
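+        # The subsampling frontend reduces T by a factor of 4, and the
+        # formula below gives the exact number of output frames, e.g.
+        # x_lens = 100 -> ((100 - 1) // 2 - 1) // 2 = 24.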
+ lengths = ((x_lens - 1) // 2 - 1) // 2 + assert x.size(0) == lengths.max().item() + + mask = make_pad_mask(lengths) + x = self.encoder(x, src_key_padding_mask=mask) # (T, N, C) + + logits = self.encoder_output_layer(x) + logits = logits.permute(1, 0, 2) # (T, N, C) ->(N, T, C) + + return logits, lengths + + +class TransformerEncoderLayer(nn.Module): + """ + Modified from torch.nn.TransformerEncoderLayer. + Add support of normalize_before, + i.e., use layer_norm before the first block. + + Args: + d_model: + the number of expected features in the input (required). + nhead: + the number of heads in the multiheadattention models (required). + dim_feedforward: + the dimension of the feedforward network model (default=2048). + dropout: + the dropout value (default=0.1). + activation: + the activation function of intermediate layer, relu or + gelu (default=relu). + normalize_before: + whether to use layer_norm before the first block. + + Examples:: + >>> encoder_layer = TransformerEncoderLayer(d_model=512, nhead=8) + >>> src = torch.rand(10, 32, 512) + >>> out = encoder_layer(src) + """ + + def __init__( + self, + d_model: int, + nhead: int, + dim_feedforward: int = 2048, + dropout: float = 0.1, + activation: str = "relu", + normalize_before: bool = True, + ) -> None: + super(TransformerEncoderLayer, self).__init__() + self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=0.0) + # Implementation of Feedforward model + self.linear1 = nn.Linear(d_model, dim_feedforward) + self.dropout = nn.Dropout(dropout) + self.linear2 = nn.Linear(dim_feedforward, d_model) + + self.norm1 = nn.LayerNorm(d_model) + self.norm2 = nn.LayerNorm(d_model) + self.dropout1 = nn.Dropout(dropout) + self.dropout2 = nn.Dropout(dropout) + + self.activation = _get_activation_fn(activation) + + self.normalize_before = normalize_before + + def __setstate__(self, state): + if "activation" not in state: + state["activation"] = nn.functional.relu + super(TransformerEncoderLayer, self).__setstate__(state) + + def forward( + self, + src: torch.Tensor, + src_mask: Optional[torch.Tensor] = None, + src_key_padding_mask: Optional[torch.Tensor] = None, + ) -> torch.Tensor: + """ + Pass the input through the encoder layer. + + Args: + src: the sequence to the encoder layer (required). + src_mask: the mask for the src sequence (optional). + src_key_padding_mask: the mask for the src keys per batch (optional) + + Shape: + src: (S, N, E). + src_mask: (S, S). + src_key_padding_mask: (N, S). 
+ S is the source sequence length, T is the target sequence length, + N is the batch size, E is the feature number + """ + residual = src + if self.normalize_before: + src = self.norm1(src) + src2 = self.self_attn( + src, + src, + src, + attn_mask=src_mask, + key_padding_mask=src_key_padding_mask, + )[0] + src = residual + self.dropout1(src2) + if not self.normalize_before: + src = self.norm1(src) + + residual = src + if self.normalize_before: + src = self.norm2(src) + src2 = self.linear2(self.dropout(self.activation(self.linear1(src)))) + src = residual + self.dropout2(src2) + if not self.normalize_before: + src = self.norm2(src) + return src + + +def _get_activation_fn(activation: str): + if activation == "relu": + return nn.functional.relu + elif activation == "gelu": + return nn.functional.gelu + + raise RuntimeError( + "activation should be relu/gelu, not {}".format(activation) + ) + + +class PositionalEncoding(nn.Module): + """This class implements the positional encoding + proposed in the following paper: + + - Attention Is All You Need: https://arxiv.org/pdf/1706.03762.pdf + + PE(pos, 2i) = sin(pos / (10000^(2i/d_modle)) + PE(pos, 2i+1) = cos(pos / (10000^(2i/d_modle)) + + Note:: + + 1 / (10000^(2i/d_model)) = exp(-log(10000^(2i/d_model))) + = exp(-1* 2i / d_model * log(100000)) + = exp(2i * -(log(10000) / d_model)) + """ + + def __init__(self, d_model: int, dropout: float = 0.1) -> None: + """ + Args: + d_model: + Embedding dimension. + dropout: + Dropout probability to be applied to the output of this module. + """ + super().__init__() + self.d_model = d_model + self.xscale = math.sqrt(self.d_model) + self.dropout = nn.Dropout(p=dropout) + # not doing: self.pe = None because of errors thrown by torchscript + self.pe = torch.zeros(1, 0, self.d_model, dtype=torch.float32) + + def extend_pe(self, x: torch.Tensor) -> None: + """Extend the time t in the positional encoding if required. + + The shape of `self.pe` is (1, T1, d_model). The shape of the input x + is (N, T, d_model). If T > T1, then we change the shape of self.pe + to (N, T, d_model). Otherwise, nothing is done. + + Args: + x: + It is a tensor of shape (N, T, C). + Returns: + Return None. + """ + if self.pe is not None: + if self.pe.size(1) >= x.size(1): + self.pe = self.pe.to(dtype=x.dtype, device=x.device) + return + pe = torch.zeros(x.size(1), self.d_model, dtype=torch.float32) + position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1) + div_term = torch.exp( + torch.arange(0, self.d_model, 2, dtype=torch.float32) + * -(math.log(10000.0) / self.d_model) + ) + pe[:, 0::2] = torch.sin(position * div_term) + pe[:, 1::2] = torch.cos(position * div_term) + pe = pe.unsqueeze(0) + # Now pe is of shape (1, T, d_model), where T is x.size(1) + self.pe = pe.to(device=x.device, dtype=x.dtype) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """ + Add positional encoding. + + Args: + x: + Its shape is (N, T, C) + + Returns: + Return a tensor of shape (N, T, C) + """ + self.extend_pe(x) + x = x * self.xscale + self.pe[:, : x.size(1), :] + return self.dropout(x) + + +class Noam(object): + """ + Implements Noam optimizer. 
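+
+    The learning rate at step ``step`` is
+
+        factor * model_size**(-0.5) * min(step**(-0.5), step * warm_step**(-1.5))
+
+    i.e. it grows linearly for ``warm_step`` steps and then decays
+    proportionally to the inverse square root of the step number.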
+ + Proposed in + "Attention Is All You Need", https://arxiv.org/pdf/1706.03762.pdf + + Modified from + https://github.com/espnet/espnet/blob/master/espnet/nets/pytorch_backend/transformer/optimizer.py # noqa + + Args: + params: + iterable of parameters to optimize or dicts defining parameter groups + model_size: + attention dimension of the transformer model + factor: + learning rate factor + warm_step: + warmup steps + """ + + def __init__( + self, + params, + model_size: int = 256, + factor: float = 10.0, + warm_step: int = 25000, + weight_decay=0, + ) -> None: + """Construct an Noam object.""" + self.optimizer = torch.optim.Adam( + params, lr=0, betas=(0.9, 0.98), eps=1e-9, weight_decay=weight_decay + ) + self._step = 0 + self.warmup = warm_step + self.factor = factor + self.model_size = model_size + self._rate = 0 + + @property + def param_groups(self): + """Return param_groups.""" + return self.optimizer.param_groups + + def step(self): + """Update parameters and rate.""" + self._step += 1 + rate = self.rate() + for p in self.optimizer.param_groups: + p["lr"] = rate + self._rate = rate + self.optimizer.step() + + def rate(self, step=None): + """Implement `lrate` above.""" + if step is None: + step = self._step + return ( + self.factor + * self.model_size ** (-0.5) + * min(step ** (-0.5), step * self.warmup ** (-1.5)) + ) + + def zero_grad(self): + """Reset gradient.""" + self.optimizer.zero_grad() + + def state_dict(self): + """Return state_dict.""" + return { + "_step": self._step, + "warmup": self.warmup, + "factor": self.factor, + "model_size": self.model_size, + "_rate": self._rate, + "optimizer": self.optimizer.state_dict(), + } + + def load_state_dict(self, state_dict): + """Load state_dict.""" + for key, value in state_dict.items(): + if key == "optimizer": + self.optimizer.load_state_dict(state_dict["optimizer"]) + else: + setattr(self, key, value) From ac7c2d84bc44dd83a5653f68677ae1ef16551eea Mon Sep 17 00:00:00 2001 From: PF Luo Date: Wed, 23 Feb 2022 08:33:20 +0800 Subject: [PATCH 12/25] minor fix for aishell recipe (#223) * just remove unnecessary torch.sum * minor fixs for aishell --- README.md | 2 +- egs/aishell/ASR/RESULTS.md | 14 +++++++------- egs/aishell/ASR/transducer_stateless/model.py | 2 +- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index 214e85ad0..6363aa2be 100644 --- a/README.md +++ b/README.md @@ -113,7 +113,7 @@ The best CER we currently have is: | | test | |-----|------| -| CER | 5.4 | +| CER | 5.05 | We provide a Colab notebook to run a pre-trained TransducerStateless model: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/14XaT2MhnBkK-3_RqqWq3K90Xlbin-GZC?usp=sharing) diff --git a/egs/aishell/ASR/RESULTS.md b/egs/aishell/ASR/RESULTS.md index ceb63b4cf..53cc394a1 100644 --- a/egs/aishell/ASR/RESULTS.md +++ b/egs/aishell/ASR/RESULTS.md @@ -46,12 +46,12 @@ python3 ./transducer_stateless/decode.py \ ### Aishell training results (Transducer-stateless) #### 2022-02-18 -(Pingfeng Luo) : The tensorboard log for training is available at +(Pingfeng Luo) : The tensorboard log for training is available at And pretrained model is available at ||test| |--|--| -|CER| 5.4% | +|CER| 5.05% | You can use the following commands to reproduce our results: @@ -61,17 +61,17 @@ export CUDA_VISIBLE_DEVICES="0,1,2,3,4,5,6,7,8" --bucketing-sampler True \ --world-size 8 \ --lang-dir data/lang_char \ - --num-epochs 40 \ + --num-epochs 60 \ --start-epoch 0 \ - --exp-dir 
transducer_stateless/exp_char \ - --max-duration 160 \ + --exp-dir transducer_stateless/exp_rnnt_k2 \ + --max-duration 80 \ --lr-factor 3 ./transducer_stateless/decode.py \ - --epoch 39 \ + --epoch 59 \ --avg 10 \ --lang-dir data/lang_char \ - --exp-dir transducer_stateless/exp_char \ + --exp-dir transducer_stateless/exp_rnnt_k2 \ --max-duration 100 \ --decoding-method beam_search \ --beam-size 4 diff --git a/egs/aishell/ASR/transducer_stateless/model.py b/egs/aishell/ASR/transducer_stateless/model.py index 0322edeed..c19325a15 100644 --- a/egs/aishell/ASR/transducer_stateless/model.py +++ b/egs/aishell/ASR/transducer_stateless/model.py @@ -122,4 +122,4 @@ class Transducer(nn.Module): loss = k2.rnnt_loss(logits, y_padded, blank_id, boundary) - return torch.sum(loss) + return loss From 72f838dee1d95f3194ba9dad4a202be80b1e295c Mon Sep 17 00:00:00 2001 From: Fangjun Kuang Date: Tue, 1 Mar 2022 16:35:02 +0800 Subject: [PATCH 13/25] Update results for transducer_stateless after training for more epochs. (#207) --- README.md | 2 +- egs/librispeech/ASR/RESULTS.md | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 6363aa2be..aa77b5aa7 100644 --- a/README.md +++ b/README.md @@ -84,7 +84,7 @@ The best WER using modified beam search with beam size 4 is: | | test-clean | test-other | |-----|------------|------------| -| WER | 2.67 | 6.64 | +| WER | 2.67 | 6.57 | Note: No auxiliary losses are used in the training and no LMs are used in the decoding. diff --git a/egs/librispeech/ASR/RESULTS.md b/egs/librispeech/ASR/RESULTS.md index a62434184..45f23e95e 100644 --- a/egs/librispeech/ASR/RESULTS.md +++ b/egs/librispeech/ASR/RESULTS.md @@ -61,10 +61,10 @@ The WERs are | | test-clean | test-other | comment | |-------------------------------------|------------|------------|------------------------------------------| -| greedy search (max sym per frame 1) | 2.68 | 6.71 | --epoch 61, --avg 18, --max-duration 100 | -| greedy search (max sym per frame 2) | 2.69 | 6.71 | --epoch 61, --avg 18, --max-duration 100 | -| greedy search (max sym per frame 3) | 2.69 | 6.71 | --epoch 61, --avg 18, --max-duration 100 | -| modified beam search (beam size 4) | 2.67 | 6.64 | --epoch 61, --avg 18, --max-duration 100 | +| greedy search (max sym per frame 1) | 2.67 | 6.67 | --epoch 63, --avg 19, --max-duration 100 | +| greedy search (max sym per frame 2) | 2.67 | 6.67 | --epoch 63, --avg 19, --max-duration 100 | +| greedy search (max sym per frame 3) | 2.67 | 6.67 | --epoch 63, --avg 19, --max-duration 100 | +| modified beam search (beam size 4) | 2.67 | 6.57 | --epoch 63, --avg 19, --max-duration 100 | The training command for reproducing is given below: @@ -90,8 +90,8 @@ The tensorboard training log can be found at The decoding command is: ``` -epoch=61 -avg=18 +epoch=63 +avg=19 ## greedy search for sym in 1 2 3; do From 05cb29785876b8594cc6be3d683a3da3f5c89c05 Mon Sep 17 00:00:00 2001 From: Fangjun Kuang Date: Tue, 1 Mar 2022 17:01:46 +0800 Subject: [PATCH 14/25] Update result for full libri + GigaSpeech using transducer_stateless. 
(#231) --- ...r-stateless-librispeech-multi-datasets.yml | 154 ++++++++++++++++++ README.md | 2 +- egs/librispeech/ASR/RESULTS-100hours.md | 2 + egs/librispeech/ASR/RESULTS.md | 82 +++++++++- .../train.py | 31 +++- 5 files changed, 264 insertions(+), 7 deletions(-) create mode 100644 .github/workflows/run-pretrained-transducer-stateless-librispeech-multi-datasets.yml diff --git a/.github/workflows/run-pretrained-transducer-stateless-librispeech-multi-datasets.yml b/.github/workflows/run-pretrained-transducer-stateless-librispeech-multi-datasets.yml new file mode 100644 index 000000000..ccf9028cb --- /dev/null +++ b/.github/workflows/run-pretrained-transducer-stateless-librispeech-multi-datasets.yml @@ -0,0 +1,154 @@ +# Copyright 2021 Fangjun Kuang (csukuangfj@gmail.com) + +# See ../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +name: run-pre-trained-trandsucer-stateless-multi-datasets-librispeech-960h + +on: + push: + branches: + - master + pull_request: + types: [labeled] + +jobs: + run_pre_trained_transducer_stateless_multi_datasets_librispeech_960h: + if: github.event.label.name == 'ready' || github.event_name == 'push' + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-18.04] + python-version: [3.7, 3.8, 3.9] + torch: ["1.10.0"] + torchaudio: ["0.10.0"] + k2-version: ["1.9.dev20211101"] + + fail-fast: false + + steps: + - uses: actions/checkout@v2 + with: + fetch-depth: 0 + + - name: Setup Python ${{ matrix.python-version }} + uses: actions/setup-python@v1 + with: + python-version: ${{ matrix.python-version }} + + - name: Install Python dependencies + run: | + python3 -m pip install --upgrade pip pytest + # numpy 1.20.x does not support python 3.6 + pip install numpy==1.19 + pip install torch==${{ matrix.torch }}+cpu torchaudio==${{ matrix.torchaudio }}+cpu -f https://download.pytorch.org/whl/cpu/torch_stable.html + pip install k2==${{ matrix.k2-version }}+cpu.torch${{ matrix.torch }} -f https://k2-fsa.org/nightly/ + + python3 -m pip install git+https://github.com/lhotse-speech/lhotse + python3 -m pip install kaldifeat + # We are in ./icefall and there is a file: requirements.txt in it + pip install -r requirements.txt + + - name: Install graphviz + shell: bash + run: | + python3 -m pip install -qq graphviz + sudo apt-get -qq install graphviz + + - name: Download pre-trained model + shell: bash + run: | + sudo apt-get -qq install git-lfs tree sox + cd egs/librispeech/ASR + mkdir tmp + cd tmp + git lfs install + git clone https://huggingface.co/csukuangfj/icefall-asr-librispeech-transducer-stateless-multi-datasets-bpe-500-2022-03-01 + + + cd .. 
+ tree tmp + soxi tmp/icefall-asr-librispeech-transducer-stateless-multi-datasets-bpe-500-2022-03-01/test_wavs/*.wav + ls -lh tmp/icefall-asr-librispeech-transducer-stateless-multi-datasets-bpe-500-2022-03-01/test_wavs/*.wav + + - name: Run greedy search decoding (max-sym-per-frame 1) + shell: bash + run: | + export PYTHONPATH=$PWD:PYTHONPATH + cd egs/librispeech/ASR + ./transducer_stateless_multi_datasets/pretrained.py \ + --method greedy_search \ + --max-sym-per-frame 1 \ + --checkpoint ./tmp/icefall-asr-librispeech-transducer-stateless-multi-datasets-bpe-500-2022-03-01/exp/pretrained.pt \ + --bpe-model ./tmp/icefall-asr-librispeech-transducer-stateless-multi-datasets-bpe-500-2022-03-01/data/lang_bpe_500/bpe.model \ + ./tmp/icefall-asr-librispeech-transducer-stateless-multi-datasets-bpe-500-2022-03-01/test_wavs/1089-134686-0001.wav \ + ./tmp/icefall-asr-librispeech-transducer-stateless-multi-datasets-bpe-500-2022-03-01/test_wavs/1221-135766-0001.wav \ + ./tmp/icefall-asr-librispeech-transducer-stateless-multi-datasets-bpe-500-2022-03-01/test_wavs/1221-135766-0002.wav + + - name: Run greedy search decoding (max-sym-per-frame 2) + shell: bash + run: | + export PYTHONPATH=$PWD:PYTHONPATH + cd egs/librispeech/ASR + ./transducer_stateless_multi_datasets/pretrained.py \ + --method greedy_search \ + --max-sym-per-frame 2 \ + --checkpoint ./tmp/icefall-asr-librispeech-transducer-stateless-multi-datasets-bpe-500-2022-03-01/exp/pretrained.pt \ + --bpe-model ./tmp/icefall-asr-librispeech-transducer-stateless-multi-datasets-bpe-500-2022-03-01/data/lang_bpe_500/bpe.model \ + ./tmp/icefall-asr-librispeech-transducer-stateless-multi-datasets-bpe-500-2022-03-01/test_wavs/1089-134686-0001.wav \ + ./tmp/icefall-asr-librispeech-transducer-stateless-multi-datasets-bpe-500-2022-03-01/test_wavs/1221-135766-0001.wav \ + ./tmp/icefall-asr-librispeech-transducer-stateless-multi-datasets-bpe-500-2022-03-01/test_wavs/1221-135766-0002.wav + + - name: Run greedy search decoding (max-sym-per-frame 3) + shell: bash + run: | + export PYTHONPATH=$PWD:PYTHONPATH + cd egs/librispeech/ASR + ./transducer_stateless_multi_datasets/pretrained.py \ + --method greedy_search \ + --max-sym-per-frame 3 \ + --checkpoint ./tmp/icefall-asr-librispeech-transducer-stateless-multi-datasets-bpe-500-2022-03-01/exp/pretrained.pt \ + --bpe-model ./tmp/icefall-asr-librispeech-transducer-stateless-multi-datasets-bpe-500-2022-03-01/data/lang_bpe_500/bpe.model \ + ./tmp/icefall-asr-librispeech-transducer-stateless-multi-datasets-bpe-500-2022-03-01/test_wavs/1089-134686-0001.wav \ + ./tmp/icefall-asr-librispeech-transducer-stateless-multi-datasets-bpe-500-2022-03-01/test_wavs/1221-135766-0001.wav \ + ./tmp/icefall-asr-librispeech-transducer-stateless-multi-datasets-bpe-500-2022-03-01/test_wavs/1221-135766-0002.wav + + - name: Run beam search decoding + shell: bash + run: | + export PYTHONPATH=$PWD:$PYTHONPATH + cd egs/librispeech/ASR + ./transducer_stateless_multi_datasets/pretrained.py \ + --method beam_search \ + --beam-size 4 \ + --checkpoint ./tmp/icefall-asr-librispeech-transducer-stateless-multi-datasets-bpe-500-2022-03-01/exp/pretrained.pt \ + --bpe-model ./tmp/icefall-asr-librispeech-transducer-stateless-multi-datasets-bpe-500-2022-03-01/data/lang_bpe_500/bpe.model \ + ./tmp/icefall-asr-librispeech-transducer-stateless-multi-datasets-bpe-500-2022-03-01/test_wavs/1089-134686-0001.wav \ + ./tmp/icefall-asr-librispeech-transducer-stateless-multi-datasets-bpe-500-2022-03-01/test_wavs/1221-135766-0001.wav \ + 
./tmp/icefall-asr-librispeech-transducer-stateless-multi-datasets-bpe-500-2022-03-01/test_wavs/1221-135766-0002.wav + + + - name: Run modified beam search decoding + shell: bash + run: | + export PYTHONPATH=$PWD:$PYTHONPATH + cd egs/librispeech/ASR + ./transducer_stateless_multi_datasets/pretrained.py \ + --method modified_beam_search \ + --beam-size 4 \ + --checkpoint ./tmp/icefall-asr-librispeech-transducer-stateless-multi-datasets-bpe-500-2022-03-01/exp/pretrained.pt \ + --bpe-model ./tmp/icefall-asr-librispeech-transducer-stateless-multi-datasets-bpe-500-2022-03-01/data/lang_bpe_500/bpe.model \ + ./tmp/icefall-asr-librispeech-transducer-stateless-multi-datasets-bpe-500-2022-03-01/test_wavs/1089-134686-0001.wav \ + ./tmp/icefall-asr-librispeech-transducer-stateless-multi-datasets-bpe-500-2022-03-01/test_wavs/1221-135766-0001.wav \ + ./tmp/icefall-asr-librispeech-transducer-stateless-multi-datasets-bpe-500-2022-03-01/test_wavs/1221-135766-0002.wav diff --git a/README.md b/README.md index aa77b5aa7..ec9d7e69c 100644 --- a/README.md +++ b/README.md @@ -84,7 +84,7 @@ The best WER using modified beam search with beam size 4 is: | | test-clean | test-other | |-----|------------|------------| -| WER | 2.67 | 6.57 | +| WER | 2.61 | 6.46 | Note: No auxiliary losses are used in the training and no LMs are used in the decoding. diff --git a/egs/librispeech/ASR/RESULTS-100hours.md b/egs/librispeech/ASR/RESULTS-100hours.md index 40245c917..2e1bbd687 100644 --- a/egs/librispeech/ASR/RESULTS-100hours.md +++ b/egs/librispeech/ASR/RESULTS-100hours.md @@ -7,6 +7,8 @@ train-clean-100 subset as training data. ### 2022-02-21 +Using commit `2332ba312d7ce72f08c7bac1e3312f7e3dd722dc`. + | | test-clean | test-other | comment | |-------------------------------------|------------|------------|------------------------------------------| | greedy search (max sym per frame 1) | 6.34 | 16.7 | --epoch 57, --avg 17, --max-duration 100 | diff --git a/egs/librispeech/ASR/RESULTS.md b/egs/librispeech/ASR/RESULTS.md index 45f23e95e..cc2aebac1 100644 --- a/egs/librispeech/ASR/RESULTS.md +++ b/egs/librispeech/ASR/RESULTS.md @@ -52,11 +52,89 @@ avg=15 #### Conformer encoder + embedding decoder -Using commit `a8150021e01d34ecbd6198fe03a57eacf47a16f2`. - Conformer encoder + non-recurrent decoder. The decoder contains only an embedding layer and a Conv1d (with kernel size 2). +See + +- [./transducer_stateless](./transducer_stateless) +- [./transducer_stateless_multi_datasets](./transducer_stateless_multi_datasets) + +##### 2022-03-01 + +Using commit `fill in it after merging`. + +It uses [GigaSpeech](https://github.com/SpeechColab/GigaSpeech) +as extra training data. 20% of the time it selects a batch from L subset of +GigaSpeech and 80% of the time it selects a batch from LibriSpeech. 
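+
+The batch-level mixing can be pictured with the toy sketch below; it is only
+an illustration of the sampling scheme (the names and the loop are made up,
+not the actual icefall training code):
+
+```python
+import random
+
+
+def mixed_batches(libri_batches, giga_batches, giga_prob=0.2, seed=0):
+    """Yield batches, drawing from GigaSpeech with probability ``giga_prob``."""
+    rng = random.Random(seed)
+    libri_it, giga_it = iter(libri_batches), iter(giga_batches)
+    while True:
+        try:
+            if rng.random() < giga_prob:
+                yield next(giga_it)  # ~20% of batches: GigaSpeech L subset
+            else:
+                yield next(libri_it)  # ~80% of batches: LibriSpeech
+        except StopIteration:
+            return  # stop when either corpus is exhausted
+
+
+libri = (f"libri-{i}" for i in range(800))
+giga = (f"giga-{i}" for i in range(200))
+batches = list(mixed_batches(libri, giga))
+print(sum(b.startswith("giga") for b in batches) / len(batches))  # roughly 0.2
+```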
+ +The WERs are + +| | test-clean | test-other | comment | +|-------------------------------------|------------|------------|------------------------------------------| +| greedy search (max sym per frame 1) | 2.64 | 6.55 | --epoch 39, --avg 15, --max-duration 100 | +| modified beam search (beam size 4) | 2.61 | 6.46 | --epoch 39, --avg 15, --max-duration 100 | + +The training command for reproducing is given below: + +```bash +cd egs/librispeech/ASR/ +./prepare.sh +./prepare_giga_speech.sh + +export CUDA_VISIBLE_DEVICES="0,1,2,3" + +./transducer_stateless_multi_datasets/train.py \ + --world-size 4 \ + --num-epochs 40 \ + --start-epoch 0 \ + --exp-dir transducer_stateless_multi_datasets/exp-full-2 \ + --full-libri 1 \ + --max-duration 300 \ + --lr-factor 5 \ + --bpe-model data/lang_bpe_500/bpe.model \ + --modified-transducer-prob 0.25 \ + --giga-prob 0.2 +``` + +The tensorboard training log can be found at + + +The decoding command is: + +```bash +epoch=39 +avg=15 +sym=1 + +# greedy search +./transducer_stateless_multi_datasets/decode.py \ + --epoch $epoch \ + --avg $avg \ + --exp-dir transducer_stateless_multi_datasets/exp-full-2 \ + --bpe-model ./data/lang_bpe_500/bpe.model \ + --max-duration 100 \ + --context-size 2 \ + --max-sym-per-frame $sym + +# modified beam search +./transducer_stateless_multi_datasets/decode.py \ + --epoch $epoch \ + --avg $avg \ + --exp-dir transducer_stateless_multi_datasets/exp-full-2 \ + --bpe-model ./data/lang_bpe_500/bpe.model \ + --max-duration 100 \ + --context-size 2 \ + --decoding-method modified_beam_search \ + --beam-size 4 +``` + + +##### 2022-02-07 + +Using commit `a8150021e01d34ecbd6198fe03a57eacf47a16f2`. + + The WERs are | | test-clean | test-other | comment | diff --git a/egs/librispeech/ASR/transducer_stateless_multi_datasets/train.py b/egs/librispeech/ASR/transducer_stateless_multi_datasets/train.py index 720151ea0..105f82417 100755 --- a/egs/librispeech/ASR/transducer_stateless_multi_datasets/train.py +++ b/egs/librispeech/ASR/transducer_stateless_multi_datasets/train.py @@ -19,16 +19,39 @@ """ Usage: +cd egs/librispeech/ASR/ +./prepare.sh +./prepare_giga_speech.sh + +# 100-hours +export CUDA_VISIBLE_DEVICES="0,1" + +./transducer_stateless_multi_datasets/train.py \ + --world-size 2 \ + --num-epochs 60 \ + --start-epoch 0 \ + --exp-dir transducer_stateless_multi_datasets/exp-100-2 \ + --full-libri 0 \ + --max-duration 300 \ + --lr-factor 1 \ + --bpe-model data/lang_bpe_500/bpe.model \ + --modified-transducer-prob 0.25 + --giga-prob 0.2 + +# 960-hours export CUDA_VISIBLE_DEVICES="0,1,2,3" ./transducer_stateless_multi_datasets/train.py \ --world-size 4 \ - --num-epochs 30 \ + --num-epochs 40 \ --start-epoch 0 \ - --exp-dir transducer_stateless_multi_datasets/exp \ + --exp-dir transducer_stateless_multi_datasets/exp-full-2 \ --full-libri 1 \ - --max-duration 250 \ - --lr-factor 2.5 + --max-duration 300 \ + --lr-factor 5 \ + --bpe-model data/lang_bpe_500/bpe.model \ + --modified-transducer-prob 0.25 \ + --giga-prob 0.2 """ From 50d2281524651c4179de2f57d3af518d8ae29dd3 Mon Sep 17 00:00:00 2001 From: Fangjun Kuang Date: Wed, 2 Mar 2022 16:02:38 +0800 Subject: [PATCH 15/25] Add modified transducer loss for AIShell dataset (#219) * Add modified transducer for aishell. * Minor fixes. * Add extra data in transducer training. The extra data is from http://www.openslr.org/62/ * Update export.py and pretrained.py * Update CI to install pretrained models with aishell. * Update results. * Update results. * Update README. * Use symlinks to avoid copies. 
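
This patch applies the same idea to AIShell: with probability
`--modified-transducer-prob` a batch is trained with the "modified" transducer
recursion (at most one non-blank symbol per output frame) instead of the
standard one. The snippet below is only a toy illustration of that per-batch
coin flip; the actual loss functions come from k2 / optimized_transducer and
are not shown here:

```python
import random


def use_modified_loss(p: float, rng: random.Random) -> bool:
    # With probability p this batch uses the modified transducer recursion,
    # otherwise the standard recursion.
    return rng.random() < p


rng = random.Random(0)
picks = [use_modified_loss(0.25, rng) for _ in range(100_000)]
print(f"fraction of modified-loss batches: {sum(picks) / len(picks):.3f}")  # ~0.25
```
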
--- ...ransducer-stateless-modified-2-aishell.yml | 153 +++ ...-transducer-stateless-modified-aishell.yml | 153 +++ README.md | 2 +- egs/aishell/ASR/README.md | 17 + egs/aishell/ASR/RESULTS.md | 151 ++- .../ASR/local/display_manifest_statistics.py | 80 +- .../ASR/local/process_aidatatang_200zh.py | 72 ++ egs/aishell/ASR/prepare_aidatatang_200zh.sh | 59 ++ egs/aishell/ASR/transducer_stateless/model.py | 11 - .../transducer_stateless_modified-2/README.md | 59 ++ .../__init__.py | 0 .../aidatatang_200zh.py | 53 ++ .../aishell.py | 53 ++ .../asr_datamodule.py | 304 ++++++ .../beam_search.py | 1 + .../conformer.py | 1 + .../transducer_stateless_modified-2/decode.py | 491 ++++++++++ .../decoder.py | 1 + .../encoder_interface.py | 1 + .../transducer_stateless_modified-2/export.py | 246 +++++ .../transducer_stateless_modified-2/joiner.py | 1 + .../transducer_stateless_modified-2/model.py | 163 ++++ .../pretrained.py | 331 +++++++ .../subsampling.py | 1 + .../test_decoder.py | 1 + .../transducer_stateless_modified-2/train.py | 875 ++++++++++++++++++ .../transformer.py | 1 + .../transducer_stateless_modified/README.md | 21 + .../transducer_stateless_modified/__init__.py | 0 .../asr_datamodule.py | 1 + .../beam_search.py | 1 + .../conformer.py | 1 + .../transducer_stateless_modified/decode.py | 486 ++++++++++ .../transducer_stateless_modified/decoder.py | 1 + .../encoder_interface.py | 1 + .../transducer_stateless_modified/export.py | 246 +++++ .../transducer_stateless_modified/joiner.py | 1 + .../transducer_stateless_modified/model.py | 1 + .../pretrained.py | 331 +++++++ .../subsampling.py | 1 + .../test_decoder.py | 58 ++ .../transducer_stateless_modified/train.py | 751 +++++++++++++++ .../transformer.py | 1 + .../ASR/transducer_stateless/decode.py | 9 + .../ASR/transducer_stateless/joiner.py | 6 + 45 files changed, 5180 insertions(+), 18 deletions(-) create mode 100644 .github/workflows/run-pretrained-transducer-stateless-modified-2-aishell.yml create mode 100644 .github/workflows/run-pretrained-transducer-stateless-modified-aishell.yml create mode 100755 egs/aishell/ASR/local/process_aidatatang_200zh.py create mode 100755 egs/aishell/ASR/prepare_aidatatang_200zh.sh create mode 100644 egs/aishell/ASR/transducer_stateless_modified-2/README.md create mode 100644 egs/aishell/ASR/transducer_stateless_modified-2/__init__.py create mode 100644 egs/aishell/ASR/transducer_stateless_modified-2/aidatatang_200zh.py create mode 100644 egs/aishell/ASR/transducer_stateless_modified-2/aishell.py create mode 100644 egs/aishell/ASR/transducer_stateless_modified-2/asr_datamodule.py create mode 120000 egs/aishell/ASR/transducer_stateless_modified-2/beam_search.py create mode 120000 egs/aishell/ASR/transducer_stateless_modified-2/conformer.py create mode 100755 egs/aishell/ASR/transducer_stateless_modified-2/decode.py create mode 120000 egs/aishell/ASR/transducer_stateless_modified-2/decoder.py create mode 120000 egs/aishell/ASR/transducer_stateless_modified-2/encoder_interface.py create mode 100755 egs/aishell/ASR/transducer_stateless_modified-2/export.py create mode 120000 egs/aishell/ASR/transducer_stateless_modified-2/joiner.py create mode 100644 egs/aishell/ASR/transducer_stateless_modified-2/model.py create mode 100755 egs/aishell/ASR/transducer_stateless_modified-2/pretrained.py create mode 120000 egs/aishell/ASR/transducer_stateless_modified-2/subsampling.py create mode 120000 egs/aishell/ASR/transducer_stateless_modified-2/test_decoder.py create mode 100755 
egs/aishell/ASR/transducer_stateless_modified-2/train.py create mode 120000 egs/aishell/ASR/transducer_stateless_modified-2/transformer.py create mode 100644 egs/aishell/ASR/transducer_stateless_modified/README.md create mode 100644 egs/aishell/ASR/transducer_stateless_modified/__init__.py create mode 120000 egs/aishell/ASR/transducer_stateless_modified/asr_datamodule.py create mode 120000 egs/aishell/ASR/transducer_stateless_modified/beam_search.py create mode 120000 egs/aishell/ASR/transducer_stateless_modified/conformer.py create mode 100755 egs/aishell/ASR/transducer_stateless_modified/decode.py create mode 120000 egs/aishell/ASR/transducer_stateless_modified/decoder.py create mode 120000 egs/aishell/ASR/transducer_stateless_modified/encoder_interface.py create mode 100755 egs/aishell/ASR/transducer_stateless_modified/export.py create mode 120000 egs/aishell/ASR/transducer_stateless_modified/joiner.py create mode 120000 egs/aishell/ASR/transducer_stateless_modified/model.py create mode 100755 egs/aishell/ASR/transducer_stateless_modified/pretrained.py create mode 120000 egs/aishell/ASR/transducer_stateless_modified/subsampling.py create mode 100755 egs/aishell/ASR/transducer_stateless_modified/test_decoder.py create mode 100755 egs/aishell/ASR/transducer_stateless_modified/train.py create mode 120000 egs/aishell/ASR/transducer_stateless_modified/transformer.py diff --git a/.github/workflows/run-pretrained-transducer-stateless-modified-2-aishell.yml b/.github/workflows/run-pretrained-transducer-stateless-modified-2-aishell.yml new file mode 100644 index 000000000..c27ffc374 --- /dev/null +++ b/.github/workflows/run-pretrained-transducer-stateless-modified-2-aishell.yml @@ -0,0 +1,153 @@ +# Copyright 2021 Fangjun Kuang (csukuangfj@gmail.com) + +# See ../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
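+
+# This workflow installs the Python dependencies (torch, k2, lhotse,
+# kaldifeat), downloads the pretrained transducer_stateless_modified-2
+# AIShell model from Hugging Face, and decodes three test wavs with greedy
+# search (max-sym-per-frame 1/2/3), beam search and modified beam search.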
+ +name: run-pre-trained-trandsucer-stateless-modified-2-aishell + +on: + push: + branches: + - master + pull_request: + types: [labeled] + +jobs: + run_pre_trained_transducer_stateless_modified_2_aishell: + if: github.event.label.name == 'ready' || github.event_name == 'push' + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-18.04] + python-version: [3.7, 3.8, 3.9] + torch: ["1.10.0"] + torchaudio: ["0.10.0"] + k2-version: ["1.9.dev20211101"] + + fail-fast: false + + steps: + - uses: actions/checkout@v2 + with: + fetch-depth: 0 + + - name: Setup Python ${{ matrix.python-version }} + uses: actions/setup-python@v1 + with: + python-version: ${{ matrix.python-version }} + + - name: Install Python dependencies + run: | + python3 -m pip install --upgrade pip pytest + # numpy 1.20.x does not support python 3.6 + pip install numpy==1.19 + pip install torch==${{ matrix.torch }}+cpu torchaudio==${{ matrix.torchaudio }}+cpu -f https://download.pytorch.org/whl/cpu/torch_stable.html + pip install k2==${{ matrix.k2-version }}+cpu.torch${{ matrix.torch }} -f https://k2-fsa.org/nightly/ + + python3 -m pip install git+https://github.com/lhotse-speech/lhotse + python3 -m pip install kaldifeat + # We are in ./icefall and there is a file: requirements.txt in it + pip install -r requirements.txt + + - name: Install graphviz + shell: bash + run: | + python3 -m pip install -qq graphviz + sudo apt-get -qq install graphviz + + - name: Download pre-trained model + shell: bash + run: | + sudo apt-get -qq install git-lfs tree sox + cd egs/aishell/ASR + mkdir tmp + cd tmp + git lfs install + git clone https://huggingface.co/csukuangfj/icefall-aishell-transducer-stateless-modified-2-2022-03-01 + + cd .. + tree tmp + soxi tmp/icefall-aishell-transducer-stateless-modified-2-2022-03-01/test_wavs/*.wav + ls -lh tmp/icefall-aishell-transducer-stateless-modified-2-2022-03-01/test_wavs/*.wav + + - name: Run greedy search decoding (max-sym-per-frame 1) + shell: bash + run: | + export PYTHONPATH=$PWD:PYTHONPATH + cd egs/aishell/ASR + ./transducer_stateless_modified-2/pretrained.py \ + --method greedy_search \ + --max-sym-per-frame 1 \ + --checkpoint ./tmp/icefall-aishell-transducer-stateless-modified-2-2022-03-01/exp/pretrained.pt \ + --lang-dir ./tmp/icefall-aishell-transducer-stateless-modified-2-2022-03-01/data/lang_char \ + ./tmp/icefall-aishell-transducer-stateless-modified-2-2022-03-01/test_wavs/BAC009S0764W0121.wav \ + ./tmp/icefall-aishell-transducer-stateless-modified-2-2022-03-01/test_wavs/BAC009S0764W0122.wav \ + ./tmp/icefall-aishell-transducer-stateless-modified-2-2022-03-01/test_wavs/BAC009S0764W0123.wav + + - name: Run greedy search decoding (max-sym-per-frame 2) + shell: bash + run: | + export PYTHONPATH=$PWD:PYTHONPATH + cd egs/aishell/ASR + ./transducer_stateless_modified-2/pretrained.py \ + --method greedy_search \ + --max-sym-per-frame 2 \ + --checkpoint ./tmp/icefall-aishell-transducer-stateless-modified-2-2022-03-01/exp/pretrained.pt \ + --lang-dir ./tmp/icefall-aishell-transducer-stateless-modified-2-2022-03-01/data/lang_char \ + ./tmp/icefall-aishell-transducer-stateless-modified-2-2022-03-01/test_wavs/BAC009S0764W0121.wav \ + ./tmp/icefall-aishell-transducer-stateless-modified-2-2022-03-01/test_wavs/BAC009S0764W0122.wav \ + ./tmp/icefall-aishell-transducer-stateless-modified-2-2022-03-01/test_wavs/BAC009S0764W0123.wav + + - name: Run greedy search decoding (max-sym-per-frame 3) + shell: bash + run: | + export PYTHONPATH=$PWD:PYTHONPATH + cd egs/aishell/ASR + 
./transducer_stateless_modified-2/pretrained.py \ + --method greedy_search \ + --max-sym-per-frame 3 \ + --checkpoint ./tmp/icefall-aishell-transducer-stateless-modified-2-2022-03-01/exp/pretrained.pt \ + --lang-dir ./tmp/icefall-aishell-transducer-stateless-modified-2-2022-03-01/data/lang_char \ + ./tmp/icefall-aishell-transducer-stateless-modified-2-2022-03-01/test_wavs/BAC009S0764W0121.wav \ + ./tmp/icefall-aishell-transducer-stateless-modified-2-2022-03-01/test_wavs/BAC009S0764W0122.wav \ + ./tmp/icefall-aishell-transducer-stateless-modified-2-2022-03-01/test_wavs/BAC009S0764W0123.wav + + - name: Run beam search decoding + shell: bash + run: | + export PYTHONPATH=$PWD:$PYTHONPATH + cd egs/aishell/ASR + ./transducer_stateless_modified-2/pretrained.py \ + --method beam_search \ + --beam-size 4 \ + --checkpoint ./tmp/icefall-aishell-transducer-stateless-modified-2-2022-03-01/exp/pretrained.pt \ + --lang-dir ./tmp/icefall-aishell-transducer-stateless-modified-2-2022-03-01/data/lang_char \ + ./tmp/icefall-aishell-transducer-stateless-modified-2-2022-03-01/test_wavs/BAC009S0764W0121.wav \ + ./tmp/icefall-aishell-transducer-stateless-modified-2-2022-03-01/test_wavs/BAC009S0764W0122.wav \ + ./tmp/icefall-aishell-transducer-stateless-modified-2-2022-03-01/test_wavs/BAC009S0764W0123.wav + + + - name: Run modified beam search decoding + shell: bash + run: | + export PYTHONPATH=$PWD:$PYTHONPATH + cd egs/aishell/ASR + ./transducer_stateless_modified-2/pretrained.py \ + --method modified_beam_search \ + --beam-size 4 \ + --checkpoint ./tmp/icefall-aishell-transducer-stateless-modified-2-2022-03-01/exp/pretrained.pt \ + --lang-dir ./tmp/icefall-aishell-transducer-stateless-modified-2-2022-03-01/data/lang_char \ + ./tmp/icefall-aishell-transducer-stateless-modified-2-2022-03-01/test_wavs/BAC009S0764W0121.wav \ + ./tmp/icefall-aishell-transducer-stateless-modified-2-2022-03-01/test_wavs/BAC009S0764W0122.wav \ + ./tmp/icefall-aishell-transducer-stateless-modified-2-2022-03-01/test_wavs/BAC009S0764W0123.wav diff --git a/.github/workflows/run-pretrained-transducer-stateless-modified-aishell.yml b/.github/workflows/run-pretrained-transducer-stateless-modified-aishell.yml new file mode 100644 index 000000000..2e38abb5a --- /dev/null +++ b/.github/workflows/run-pretrained-transducer-stateless-modified-aishell.yml @@ -0,0 +1,153 @@ +# Copyright 2021 Fangjun Kuang (csukuangfj@gmail.com) + +# See ../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +name: run-pre-trained-trandsucer-stateless-modified-aishell + +on: + push: + branches: + - master + pull_request: + types: [labeled] + +jobs: + run_pre_trained_transducer_stateless_modified_aishell: + if: github.event.label.name == 'ready' || github.event_name == 'push' + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-18.04] + python-version: [3.7, 3.8, 3.9] + torch: ["1.10.0"] + torchaudio: ["0.10.0"] + k2-version: ["1.9.dev20211101"] + + fail-fast: false + + steps: + - uses: actions/checkout@v2 + with: + fetch-depth: 0 + + - name: Setup Python ${{ matrix.python-version }} + uses: actions/setup-python@v1 + with: + python-version: ${{ matrix.python-version }} + + - name: Install Python dependencies + run: | + python3 -m pip install --upgrade pip pytest + # numpy 1.20.x does not support python 3.6 + pip install numpy==1.19 + pip install torch==${{ matrix.torch }}+cpu torchaudio==${{ matrix.torchaudio }}+cpu -f https://download.pytorch.org/whl/cpu/torch_stable.html + pip install k2==${{ matrix.k2-version }}+cpu.torch${{ matrix.torch }} -f https://k2-fsa.org/nightly/ + + python3 -m pip install git+https://github.com/lhotse-speech/lhotse + python3 -m pip install kaldifeat + # We are in ./icefall and there is a file: requirements.txt in it + pip install -r requirements.txt + + - name: Install graphviz + shell: bash + run: | + python3 -m pip install -qq graphviz + sudo apt-get -qq install graphviz + + - name: Download pre-trained model + shell: bash + run: | + sudo apt-get -qq install git-lfs tree sox + cd egs/aishell/ASR + mkdir tmp + cd tmp + git lfs install + git clone https://huggingface.co/csukuangfj/icefall-aishell-transducer-stateless-modified-2022-03-01 + + cd .. + tree tmp + soxi tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/test_wavs/*.wav + ls -lh tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/test_wavs/*.wav + + - name: Run greedy search decoding (max-sym-per-frame 1) + shell: bash + run: | + export PYTHONPATH=$PWD:PYTHONPATH + cd egs/aishell/ASR + ./transducer_stateless_modified/pretrained.py \ + --method greedy_search \ + --max-sym-per-frame 1 \ + --checkpoint ./tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/exp/pretrained.pt \ + --lang-dir ./tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/data/lang_char \ + ./tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/test_wavs/BAC009S0764W0121.wav \ + ./tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/test_wavs/BAC009S0764W0122.wav \ + ./tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/test_wavs/BAC009S0764W0123.wav + + - name: Run greedy search decoding (max-sym-per-frame 2) + shell: bash + run: | + export PYTHONPATH=$PWD:PYTHONPATH + cd egs/aishell/ASR + ./transducer_stateless_modified/pretrained.py \ + --method greedy_search \ + --max-sym-per-frame 2 \ + --checkpoint ./tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/exp/pretrained.pt \ + --lang-dir ./tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/data/lang_char \ + ./tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/test_wavs/BAC009S0764W0121.wav \ + ./tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/test_wavs/BAC009S0764W0122.wav \ + ./tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/test_wavs/BAC009S0764W0123.wav + + - name: Run greedy search decoding (max-sym-per-frame 3) + shell: bash + run: | + export PYTHONPATH=$PWD:PYTHONPATH + cd egs/aishell/ASR + ./transducer_stateless_modified/pretrained.py \ + --method 
greedy_search \ + --max-sym-per-frame 3 \ + --checkpoint ./tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/exp/pretrained.pt \ + --lang-dir ./tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/data/lang_char \ + ./tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/test_wavs/BAC009S0764W0121.wav \ + ./tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/test_wavs/BAC009S0764W0122.wav \ + ./tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/test_wavs/BAC009S0764W0123.wav + + - name: Run beam search decoding + shell: bash + run: | + export PYTHONPATH=$PWD:$PYTHONPATH + cd egs/aishell/ASR + ./transducer_stateless_modified/pretrained.py \ + --method beam_search \ + --beam-size 4 \ + --checkpoint ./tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/exp/pretrained.pt \ + --lang-dir ./tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/data/lang_char \ + ./tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/test_wavs/BAC009S0764W0121.wav \ + ./tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/test_wavs/BAC009S0764W0122.wav \ + ./tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/test_wavs/BAC009S0764W0123.wav + + + - name: Run modified beam search decoding + shell: bash + run: | + export PYTHONPATH=$PWD:$PYTHONPATH + cd egs/aishell/ASR + ./transducer_stateless_modified/pretrained.py \ + --method modified_beam_search \ + --beam-size 4 \ + --checkpoint ./tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/exp/pretrained.pt \ + --lang-dir ./tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/data/lang_char \ + ./tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/test_wavs/BAC009S0764W0121.wav \ + ./tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/test_wavs/BAC009S0764W0122.wav \ + ./tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/test_wavs/BAC009S0764W0123.wav diff --git a/README.md b/README.md index ec9d7e69c..a49b30df0 100644 --- a/README.md +++ b/README.md @@ -113,7 +113,7 @@ The best CER we currently have is: | | test | |-----|------| -| CER | 5.05 | +| CER | 4.68 | We provide a Colab notebook to run a pre-trained TransducerStateless model: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/14XaT2MhnBkK-3_RqqWq3K90Xlbin-GZC?usp=sharing) diff --git a/egs/aishell/ASR/README.md b/egs/aishell/ASR/README.md index 3fd177376..1b3c5a2e3 100644 --- a/egs/aishell/ASR/README.md +++ b/egs/aishell/ASR/README.md @@ -1,3 +1,20 @@ +# Introduction + Please refer to for how to run models in this recipe. + +# Transducers + +There are various folders containing the name `transducer` in this folder. +The following table lists the differences among them. + +| | Encoder | Decoder | Comment | +|------------------------------------|-----------|--------------------|-----------------------------------------------------------------------------------| +| `transducer_stateless` | Conformer | Embedding + Conv1d | with `k2.rnnt_loss` | +| `transducer_stateless_modified` | Conformer | Embedding + Conv1d | with modified transducer from `optimized_transducer` | +| `transducer_stateless_modified-2` | Conformer | Embedding + Conv1d | with modified transducer from `optimized_transducer` + extra data | + +The decoder in `transducer_stateless` is modified from the paper +[Rnn-Transducer with Stateless Prediction Network](https://ieeexplore.ieee.org/document/9054419/). 
+We place an additional Conv1d layer right after the input embedding layer. diff --git a/egs/aishell/ASR/RESULTS.md b/egs/aishell/ASR/RESULTS.md index 53cc394a1..ecc93c21b 100644 --- a/egs/aishell/ASR/RESULTS.md +++ b/egs/aishell/ASR/RESULTS.md @@ -1,12 +1,153 @@ ## Results ### Aishell training result(Transducer-stateless) + +#### 2022-03-01 + +[./transducer_stateless_modified-2](./transducer_stateless_modified-2) + +Stateless transducer + modified transducer + using [aidatatang_200zh](http://www.openslr.org/62/) as extra training data. + + +| | test |comment | +|------------------------|------|----------------------------------------------------------------| +| greedy search | 4.94 |--epoch 89, --avg 38, --max-duration 100, --max-sym-per-frame 1 | +| modified beam search | 4.68 |--epoch 89, --avg 38, --max-duration 100 --beam-size 4 | + +The training commands are: + +```bash +cd egs/aishell/ASR +./prepare.sh --stop-stage 6 +./prepare_aidatatang_200zh.sh + +export CUDA_VISIBLE_DEVICES="0,1,2" + +./transducer_stateless_modified-2/train.py \ + --world-size 3 \ + --num-epochs 90 \ + --start-epoch 0 \ + --exp-dir transducer_stateless_modified-2/exp-2 \ + --max-duration 250 \ + --lr-factor 2.0 \ + --context-size 2 \ + --modified-transducer-prob 0.25 \ + --datatang-prob 0.2 +``` + +The tensorboard log is available at + + +The commands for decoding are + +```bash +# greedy search +for epoch in 89; do + for avg in 38; do + ./transducer_stateless_modified-2/decode.py \ + --epoch $epoch \ + --avg $avg \ + --exp-dir transducer_stateless_modified-2/exp-2 \ + --max-duration 100 \ + --context-size 2 \ + --decoding-method greedy_search \ + --max-sym-per-frame 1 + done +done + +# modified beam search +for epoch in 89; do + for avg in 38; do + ./transducer_stateless_modified-2/decode.py \ + --epoch $epoch \ + --avg $avg \ + --exp-dir transducer_stateless_modified-2/exp-2 \ + --max-duration 100 \ + --context-size 2 \ + --decoding-method modified_beam_search \ + --beam-size 4 + done +done +``` + +You can find a pre-trained model, decoding logs, and decoding results at + + +#### 2022-03-01 + +[./transducer_stateless_modified](./transducer_stateless_modified) + +Stateless transducer + modified transducer. 
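+
+"Stateless" refers to the prediction network: an embedding followed by a 1-D
+convolution over the last `--context-size` labels, rather than an RNN/LSTM
+state. The sketch below is a simplified stand-in for that decoder (it is not
+the exact icefall `Decoder` class):
+
+```python
+import torch
+import torch.nn as nn
+
+
+class StatelessDecoder(nn.Module):
+    """Embedding + causal Conv1d over the last ``context_size`` labels."""
+
+    def __init__(self, vocab_size: int, embed_dim: int, context_size: int = 2):
+        super().__init__()
+        self.embedding = nn.Embedding(vocab_size, embed_dim)
+        self.conv = nn.Conv1d(embed_dim, embed_dim, kernel_size=context_size)
+
+    def forward(self, y: torch.Tensor) -> torch.Tensor:
+        # y: (N, U) label indices.  Pad on the left so the convolution only
+        # looks at the current and previous labels (causal).
+        embed = self.embedding(y).permute(0, 2, 1)  # (N, C, U)
+        embed = nn.functional.pad(embed, (self.conv.kernel_size[0] - 1, 0))
+        return torch.relu(self.conv(embed)).permute(0, 2, 1)  # (N, U, C)
+
+
+out = StatelessDecoder(vocab_size=500, embed_dim=256)(torch.randint(0, 500, (4, 10)))
+print(out.shape)  # torch.Size([4, 10, 256])
+```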
+ +| | test |comment | +|------------------------|------|----------------------------------------------------------------| +| greedy search | 5.22 |--epoch 64, --avg 33, --max-duration 100, --max-sym-per-frame 1 | +| modified beam search | 5.02 |--epoch 64, --avg 33, --max-duration 100 --beam-size 4 | + +The training commands are: + +```bash +cd egs/aishell/ASR +./prepare.sh --stop-stage 6 + +export CUDA_VISIBLE_DEVICES="0,1,2" + +./transducer_stateless_modified/train.py \ + --world-size 3 \ + --num-epochs 90 \ + --start-epoch 0 \ + --exp-dir transducer_stateless_modified/exp-4 \ + --max-duration 250 \ + --lr-factor 2.0 \ + --context-size 2 \ + --modified-transducer-prob 0.25 +``` + +The tensorboard log is available at + + +The commands for decoding are + +```bash +# greedy search +for epoch in 64; do + for avg in 33; do + ./transducer_stateless_modified/decode.py \ + --epoch $epoch \ + --avg $avg \ + --exp-dir transducer_stateless_modified/exp-4 \ + --max-duration 100 \ + --context-size 2 \ + --decoding-method greedy_search \ + --max-sym-per-frame 1 + done +done + +# modified beam search +for epoch in 64; do + for avg in 33; do + ./transducer_stateless_modified/decode.py \ + --epoch $epoch \ + --avg $avg \ + --exp-dir transducer_stateless_modified/exp-4 \ + --max-duration 100 \ + --context-size 2 \ + --decoding-method modified_beam_search \ + --beam-size 4 + done +done +``` + +You can find a pre-trained model, decoding logs, and decoding results at + + + #### 2022-2-19 (Duo Ma): The tensorboard log for training is available at https://tensorboard.dev/experiment/25PmX3MxSVGTdvIdhOwllw/#scalars You can find a pretrained model by visiting https://huggingface.co/shuanguanma/icefall_aishell_transducer_stateless_context_size2_epoch60_2022_2_19 | | test |comment | |---------------------------|------|-----------------------------------------| -| greedy search | 5.4 |--epoch 59, --avg 10, --max-duration 100 | -| beam search | 5.05|--epoch 59, --avg 10, --max-duration 100 | +| greedy search | 5.4 |--epoch 59, --avg 10, --max-duration 100 | +| beam search | 5.05|--epoch 59, --avg 10, --max-duration 100 | You can use the following commands to reproduce our results: @@ -23,7 +164,7 @@ python3 ./transducer_stateless/train.py \ lang_dir=data/lang_char dir=exp/transducer_stateless_context_size2 -python3 ./transducer_stateless/decode.py\ +python3 ./transducer_stateless/decode.py \ --epoch 59 \ --avg 10 \ --exp-dir $dir \ @@ -35,8 +176,8 @@ python3 ./transducer_stateless/decode.py\ lang_dir=data/lang_char dir=exp/transducer_stateless_context_size2 python3 ./transducer_stateless/decode.py \ - --epoch 59\ - --avg 10\ + --epoch 59 \ + --avg 10 \ --exp-dir $dir \ --lang-dir $lang_dir \ --decoding-method beam_search \ diff --git a/egs/aishell/ASR/local/display_manifest_statistics.py b/egs/aishell/ASR/local/display_manifest_statistics.py index 5e8b5cd3a..0ae731a1d 100755 --- a/egs/aishell/ASR/local/display_manifest_statistics.py +++ b/egs/aishell/ASR/local/display_manifest_statistics.py @@ -31,7 +31,10 @@ from lhotse import load_manifest def main(): # path = "./data/fbank/cuts_train.json.gz" # path = "./data/fbank/cuts_test.json.gz" - path = "./data/fbank/cuts_dev.json.gz" + # path = "./data/fbank/cuts_dev.json.gz" + # path = "./data/fbank/aidatatang_200zh/cuts_train_raw.jsonl.gz" + # path = "./data/fbank/aidatatang_200zh/cuts_test_raw.jsonl.gz" + path = "./data/fbank/aidatatang_200zh/cuts_dev_raw.jsonl.gz" cuts = load_manifest(path) cuts.describe() @@ -115,4 +118,79 @@ min 1.6 99.5% 8.9 99.9% 10.3 max 12.5 + 
+## aidatatang_200zh (train) +Cuts count: 164905 +Total duration (hours): 139.9 +Speech duration (hours): 139.9 (100.0%) +*** +Duration statistics (seconds): +mean 3.1 +std 1.1 +min 1.1 +0.1% 1.5 +0.5% 1.7 +1% 1.8 +5% 2.0 +10% 2.1 +10% 2.1 +25% 2.3 +50% 2.7 +75% 3.4 +90% 4.6 +95% 5.4 +99% 7.1 +99.5% 7.8 +99.9% 9.1 +max 16.3 + +## aidatatang_200zh (test) +Cuts count: 48144 +Total duration (hours): 40.2 +Speech duration (hours): 40.2 (100.0%) +*** +Duration statistics (seconds): +mean 3.0 +std 1.1 +min 0.9 +0.1% 1.5 +0.5% 1.8 +1% 1.8 +5% 2.0 +10% 2.1 +10% 2.1 +25% 2.3 +50% 2.6 +75% 3.4 +90% 4.4 +95% 5.2 +99% 6.9 +99.5% 7.5 +99.9% 9.0 +max 21.8 + +## aidatatang_200zh (dev) +Cuts count: 24216 +Total duration (hours): 20.2 +Speech duration (hours): 20.2 (100.0%) +*** +Duration statistics (seconds): +mean 3.0 +std 1.0 +min 1.2 +0.1% 1.6 +0.5% 1.7 +1% 1.8 +5% 2.0 +10% 2.1 +10% 2.1 +25% 2.3 +50% 2.7 +75% 3.4 +90% 4.4 +95% 5.1 +99% 6.7 +99.5% 7.3 +99.9% 8.8 +max 11.3 """ diff --git a/egs/aishell/ASR/local/process_aidatatang_200zh.py b/egs/aishell/ASR/local/process_aidatatang_200zh.py new file mode 100755 index 000000000..2c6951d42 --- /dev/null +++ b/egs/aishell/ASR/local/process_aidatatang_200zh.py @@ -0,0 +1,72 @@ +#!/usr/bin/env python3 +# Copyright 2022 Xiaomi Corp. (Fangjun Kuang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
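+
+"""
+This script reads the aidatatang_200zh manifests prepared by lhotse from
+data/manifests/aidatatang_200zh, tags every supervision with its origin,
+and writes raw (feature-less) CutSets to data/fbank/aidatatang_200zh.
+"""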
+ +import logging +from pathlib import Path + +from lhotse import CutSet +from lhotse.recipes.utils import read_manifests_if_cached + + +def preprocess_aidatatang_200zh(): + src_dir = Path("data/manifests/aidatatang_200zh") + output_dir = Path("data/fbank/aidatatang_200zh") + output_dir.mkdir(exist_ok=True, parents=True) + + dataset_parts = ( + "train", + "test", + "dev", + ) + + logging.info("Loading manifest") + manifests = read_manifests_if_cached( + dataset_parts=dataset_parts, + output_dir=src_dir, + ) + assert len(manifests) > 0 + + for partition, m in manifests.items(): + logging.info(f"Processing {partition}") + raw_cuts_path = output_dir / f"cuts_{partition}_raw.jsonl.gz" + if raw_cuts_path.is_file(): + logging.info(f"{partition} already exists - skipping") + continue + + for sup in m["supervisions"]: + sup.custom = {"origin": "aidatatang_200zh"} + + cut_set = CutSet.from_manifests( + recordings=m["recordings"], + supervisions=m["supervisions"], + ) + + logging.info(f"Saving to {raw_cuts_path}") + cut_set.to_file(raw_cuts_path) + + +def main(): + formatter = ( + "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s" + ) + logging.basicConfig(format=formatter, level=logging.INFO) + + preprocess_aidatatang_200zh() + + +if __name__ == "__main__": + main() diff --git a/egs/aishell/ASR/prepare_aidatatang_200zh.sh b/egs/aishell/ASR/prepare_aidatatang_200zh.sh new file mode 100755 index 000000000..60b2060ec --- /dev/null +++ b/egs/aishell/ASR/prepare_aidatatang_200zh.sh @@ -0,0 +1,59 @@ +#!/usr/bin/env bash + +set -eou pipefail + +stage=-1 +stop_stage=100 + +# We assume dl_dir (download dir) contains the following +# directories and files. If not, they will be downloaded +# by this script automatically. +# +# - $dl_dir/aidatatang_200zh +# You can find "corpus" and "transcript" inside it. +# You can download it at +# https://openslr.org/62/ + +dl_dir=$PWD/download + +. shared/parse_options.sh || exit 1 + +# All files generated by this script are saved in "data". +# You can safely remove "data" and rerun this script to regenerate it. +mkdir -p data + +log() { + # This function is from espnet + local fname=${BASH_SOURCE[1]##*/} + echo -e "$(date '+%Y-%m-%d %H:%M:%S') (${fname}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*" +} + +log "dl_dir: $dl_dir" + +if [ $stage -le 0 ] && [ $stop_stage -ge 0 ]; then + log "Stage 0: Download data" + + if [ ! -f $dl_dir/aidatatang_200zh/transcript/aidatatang_200_zh_transcript.txt ]; then + lhotse download aidatatang-200zh $dl_dir + fi +fi + +if [ $stage -le 1 ] && [ $stop_stage -ge 1 ]; then + log "Stage 1: Prepare manifest" + # We assume that you have downloaded the aidatatang_200zh corpus + # to $dl_dir/aidatatang_200zh + if [ ! -f data/manifests/aidatatang_200zh/.manifests.done ]; then + mkdir -p data/manifests/aidatatang_200zh + lhotse prepare aidatatang-200zh $dl_dir data/manifests/aidatatang_200zh + touch data/manifests/aidatatang_200zh/.manifests.done + fi +fi + +if [ $stage -le 2 ] && [ $stop_stage -ge 2 ]; then + log "Stage 2: Process aidatatang_200zh" + if [ ! 
-f data/fbank/aidatatang_200zh/.fbank.done ]; then + mkdir -p data/fbank/aidatatang_200zh + lhotse prepare aidatatang-200zh $dl_dir data/manifests/aidatatang_200zh + touch data/fbank/aidatatang_200zh/.fbank.done + fi +fi diff --git a/egs/aishell/ASR/transducer_stateless/model.py b/egs/aishell/ASR/transducer_stateless/model.py index c19325a15..994305fc1 100644 --- a/egs/aishell/ASR/transducer_stateless/model.py +++ b/egs/aishell/ASR/transducer_stateless/model.py @@ -14,15 +14,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -""" -Note we use `rnnt_loss` from torchaudio, which exists only in -torchaudio >= v0.10.0. It also means you have to use torch >= v1.10.0 -""" import k2 import torch import torch.nn as nn -import torchaudio -import torchaudio.functional from encoder_interface import EncoderInterface from icefall.utils import add_sos @@ -115,11 +109,6 @@ class Transducer(nn.Module): boundary[:, 2] = y_lens boundary[:, 3] = x_lens - assert hasattr(torchaudio.functional, "rnnt_loss"), ( - f"Current torchaudio version: {torchaudio.__version__}\n" - "Please install a version >= 0.10.0" - ) - loss = k2.rnnt_loss(logits, y_padded, blank_id, boundary) return loss diff --git a/egs/aishell/ASR/transducer_stateless_modified-2/README.md b/egs/aishell/ASR/transducer_stateless_modified-2/README.md new file mode 100644 index 000000000..b3c539670 --- /dev/null +++ b/egs/aishell/ASR/transducer_stateless_modified-2/README.md @@ -0,0 +1,59 @@ +## Introduction + +The decoder, i.e., the prediction network, is from +https://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=9054419 +(Rnn-Transducer with Stateless Prediction Network) + +Different from `../transducer_stateless_modified`, this folder +uses extra data, i.e., http://www.openslr.org/62/, during training. + +You can use the following command to start the training: + +```bash +cd egs/aishell/ASR +./prepare.sh --stop-stage 6 +./prepare_aidatatang_200zh.sh + +export CUDA_VISIBLE_DEVICES="0,1,2" + +./transducer_stateless_modified-2/train.py \ + --world-size 3 \ + --num-epochs 90 \ + --start-epoch 0 \ + --exp-dir transducer_stateless_modified-2/exp-2 \ + --max-duration 250 \ + --lr-factor 2.0 \ + --context-size 2 \ + --modified-transducer-prob 0.25 \ + --datatang-prob 0.2 +``` + +To decode, you can use + +```bash +for epoch in 89; do + for avg in 30 38; do + ./transducer_stateless_modified-2/decode.py \ + --epoch $epoch \ + --avg $avg \ + --exp-dir transducer_stateless_modified-2/exp-2 \ + --max-duration 100 \ + --context-size 2 \ + --decoding-method greedy_search \ + --max-sym-per-frame 1 + done +done + +for epoch in 89; do + for avg in 38; do + ./transducer_stateless_modified-2/decode.py \ + --epoch $epoch \ + --avg $avg \ + --exp-dir transducer_stateless_modified-2/exp-2 \ + --max-duration 100 \ + --context-size 2 \ + --decoding-method modified_beam_search \ + --beam-size 4 + done +done +``` diff --git a/egs/aishell/ASR/transducer_stateless_modified-2/__init__.py b/egs/aishell/ASR/transducer_stateless_modified-2/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/egs/aishell/ASR/transducer_stateless_modified-2/aidatatang_200zh.py b/egs/aishell/ASR/transducer_stateless_modified-2/aidatatang_200zh.py new file mode 100644 index 000000000..84ca64c89 --- /dev/null +++ b/egs/aishell/ASR/transducer_stateless_modified-2/aidatatang_200zh.py @@ -0,0 +1,53 @@ +# Copyright 2021 Piotr Żelasko +# 2022 Xiaomi Corp. 
(authors: Fangjun Kuang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +from pathlib import Path + +from lhotse import CutSet, load_manifest + + +class AIDatatang200zh: + def __init__(self, manifest_dir: str): + """ + Args: + manifest_dir: + It is expected to contain the following files:: + + - cuts_dev_raw.jsonl.gz + - cuts_train_raw.jsonl.gz + - cuts_test_raw.jsonl.gz + """ + self.manifest_dir = Path(manifest_dir) + + def train_cuts(self) -> CutSet: + f = self.manifest_dir / "cuts_train_raw.jsonl.gz" + logging.info(f"About to get train cuts from {f}") + cuts_train = load_manifest(f) + return cuts_train + + def valid_cuts(self) -> CutSet: + f = self.manifest_dir / "cuts_valid_raw.jsonl.gz" + logging.info(f"About to get valid cuts from {f}") + cuts_valid = load_manifest(f) + return cuts_valid + + def test_cuts(self) -> CutSet: + f = self.manifest_dir / "cuts_test_raw.jsonl.gz" + logging.info(f"About to get test cuts from {f}") + cuts_test = load_manifest(f) + return cuts_test diff --git a/egs/aishell/ASR/transducer_stateless_modified-2/aishell.py b/egs/aishell/ASR/transducer_stateless_modified-2/aishell.py new file mode 100644 index 000000000..94d1da066 --- /dev/null +++ b/egs/aishell/ASR/transducer_stateless_modified-2/aishell.py @@ -0,0 +1,53 @@ +# Copyright 2021 Piotr Żelasko +# 2022 Xiaomi Corp. (authors: Fangjun Kuang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
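+
+# A thin wrapper around lhotse.load_manifest() that returns the AIShell
+# train/dev/test CutSets from precomputed manifests. A minimal usage
+# sketch, assuming the manifests already exist under data/fbank:
+#
+#   aishell = AIShell(manifest_dir="data/fbank")
+#   train_cuts = aishell.train_cuts()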
+ +import logging +from pathlib import Path + +from lhotse import CutSet, load_manifest + + +class AIShell: + def __init__(self, manifest_dir: str): + """ + Args: + manifest_dir: + It is expected to contain the following files:: + + - cuts_dev.json.gz + - cuts_train.json.gz + - cuts_test.json.gz + """ + self.manifest_dir = Path(manifest_dir) + + def train_cuts(self) -> CutSet: + f = self.manifest_dir / "cuts_train.json.gz" + logging.info(f"About to get train cuts from {f}") + cuts_train = load_manifest(f) + return cuts_train + + def valid_cuts(self) -> CutSet: + f = self.manifest_dir / "cuts_dev.json.gz" + logging.info(f"About to get valid cuts from {f}") + cuts_valid = load_manifest(f) + return cuts_valid + + def test_cuts(self) -> CutSet: + f = self.manifest_dir / "cuts_test.json.gz" + logging.info(f"About to get test cuts from {f}") + cuts_test = load_manifest(f) + return cuts_test diff --git a/egs/aishell/ASR/transducer_stateless_modified-2/asr_datamodule.py b/egs/aishell/ASR/transducer_stateless_modified-2/asr_datamodule.py new file mode 100644 index 000000000..fe0d0a872 --- /dev/null +++ b/egs/aishell/ASR/transducer_stateless_modified-2/asr_datamodule.py @@ -0,0 +1,304 @@ +# Copyright 2021 Piotr Żelasko +# 2022 Xiaomi Corp. (authors: Fangjun Kuang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import logging +from pathlib import Path +from typing import Optional + +from lhotse import CutSet, Fbank, FbankConfig +from lhotse.dataset import ( + BucketingSampler, + CutMix, + DynamicBucketingSampler, + K2SpeechRecognitionDataset, + SpecAugment, +) +from lhotse.dataset.input_strategies import ( + OnTheFlyFeatures, + PrecomputedFeatures, +) +from torch.utils.data import DataLoader + +from icefall.utils import str2bool + + +class AsrDataModule: + def __init__(self, args: argparse.Namespace): + self.args = args + + @classmethod + def add_arguments(cls, parser: argparse.ArgumentParser): + group = parser.add_argument_group( + title="ASR data related options", + description="These options are used for the preparation of " + "PyTorch DataLoaders from Lhotse CutSet's -- they control the " + "effective batch sizes, sampling strategies, applied data " + "augmentations, etc.", + ) + + group.add_argument( + "--max-duration", + type=int, + default=200.0, + help="Maximum pooled recordings duration (seconds) in a " + "single batch. You can reduce it if it causes CUDA OOM.", + ) + + group.add_argument( + "--bucketing-sampler", + type=str2bool, + default=True, + help="When enabled, the batches will come from buckets of " + "similar duration (saves padding frames).", + ) + + group.add_argument( + "--num-buckets", + type=int, + default=30, + help="The number of buckets for the BucketingSampler " + "and DynamicBucketingSampler." 
+ "(you might want to increase it for larger datasets).", + ) + + group.add_argument( + "--shuffle", + type=str2bool, + default=True, + help="When enabled (=default), the examples will be " + "shuffled for each epoch.", + ) + + group.add_argument( + "--return-cuts", + type=str2bool, + default=True, + help="When enabled, each batch will have the " + "field: batch['supervisions']['cut'] with the cuts that " + "were used to construct it.", + ) + + group.add_argument( + "--num-workers", + type=int, + default=2, + help="The number of training dataloader workers that " + "collect the batches.", + ) + + group.add_argument( + "--enable-spec-aug", + type=str2bool, + default=True, + help="When enabled, use SpecAugment for training dataset.", + ) + + group.add_argument( + "--spec-aug-time-warp-factor", + type=int, + default=80, + help="Used only when --enable-spec-aug is True. " + "It specifies the factor for time warping in SpecAugment. " + "Larger values mean more warping. " + "A value less than 1 means to disable time warp.", + ) + + group.add_argument( + "--enable-musan", + type=str2bool, + default=True, + help="When enabled, select noise from MUSAN and mix it" + "with training dataset. ", + ) + + group.add_argument( + "--manifest-dir", + type=Path, + default=Path("data/fbank"), + help="Path to directory with train/valid/test cuts.", + ) + + group.add_argument( + "--on-the-fly-feats", + type=str2bool, + default=False, + help="When enabled, use on-the-fly cut mixing and feature " + "extraction. Will drop existing precomputed feature manifests " + "if available. Used only in dev/test CutSet", + ) + + def train_dataloaders( + self, + cuts_train: CutSet, + dynamic_bucketing: bool, + on_the_fly_feats: bool, + cuts_musan: Optional[CutSet] = None, + ) -> DataLoader: + """ + Args: + cuts_train: + Cuts for training. + cuts_musan: + If not None, it is the cuts for mixing. + dynamic_bucketing: + True to use DynamicBucketingSampler; + False to use BucketingSampler. + on_the_fly_feats: + True to use OnTheFlyFeatures; + False to use PrecomputedFeatures. + """ + transforms = [] + if cuts_musan is not None: + logging.info("Enable MUSAN") + transforms.append( + CutMix( + cuts=cuts_musan, prob=0.5, snr=(10, 20), preserve_id=True + ) + ) + else: + logging.info("Disable MUSAN") + + input_transforms = [] + + if self.args.enable_spec_aug: + logging.info("Enable SpecAugment") + logging.info( + f"Time warp factor: {self.args.spec_aug_time_warp_factor}" + ) + input_transforms.append( + SpecAugment( + time_warp_factor=self.args.spec_aug_time_warp_factor, + num_frame_masks=2, + features_mask_size=27, + num_feature_masks=2, + frames_mask_size=100, + ) + ) + else: + logging.info("Disable SpecAugment") + + logging.info("About to create train dataset") + train = K2SpeechRecognitionDataset( + cut_transforms=transforms, + input_transforms=input_transforms, + return_cuts=self.args.return_cuts, + ) + + # NOTE: the PerturbSpeed transform should be added only if we + # remove it from data prep stage. + # Add on-the-fly speed perturbation; since originally it would + # have increased epoch size by 3, we will apply prob 2/3 and use + # 3x more epochs. + # Speed perturbation probably should come first before + # concatenation, but in principle the transforms order doesn't have + # to be strict (e.g. could be randomized) + # transforms = [PerturbSpeed(factors=[0.9, 1.1], p=2/3)] + transforms # noqa + # Drop feats to be on the safe side. 
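+        # Re-create the dataset with an explicit input strategy: extract
+        # 80-dim fbank features on the fly, or read the precomputed
+        # features referenced by the manifests, depending on
+        # `on_the_fly_feats`.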
+ train = K2SpeechRecognitionDataset( + cut_transforms=transforms, + input_strategy=( + OnTheFlyFeatures(Fbank(FbankConfig(num_mel_bins=80))) + if on_the_fly_feats + else PrecomputedFeatures() + ), + input_transforms=input_transforms, + return_cuts=self.args.return_cuts, + ) + + if dynamic_bucketing: + logging.info("Using DynamicBucketingSampler.") + train_sampler = DynamicBucketingSampler( + cuts_train, + max_duration=self.args.max_duration, + shuffle=self.args.shuffle, + num_buckets=self.args.num_buckets, + drop_last=True, + ) + else: + logging.info("Using BucketingSampler.") + train_sampler = BucketingSampler( + cuts_train, + max_duration=self.args.max_duration, + shuffle=self.args.shuffle, + num_buckets=self.args.num_buckets, + bucket_method="equal_duration", + drop_last=True, + ) + + logging.info("About to create train dataloader") + train_dl = DataLoader( + train, + sampler=train_sampler, + batch_size=None, + num_workers=self.args.num_workers, + persistent_workers=False, + ) + return train_dl + + def valid_dataloaders(self, cuts_valid: CutSet) -> DataLoader: + transforms = [] + + logging.info("About to create dev dataset") + if self.args.on_the_fly_feats: + validate = K2SpeechRecognitionDataset( + cut_transforms=transforms, + input_strategy=OnTheFlyFeatures( + Fbank(FbankConfig(num_mel_bins=80)) + ), + return_cuts=self.args.return_cuts, + ) + else: + validate = K2SpeechRecognitionDataset( + cut_transforms=transforms, + return_cuts=self.args.return_cuts, + ) + valid_sampler = BucketingSampler( + cuts_valid, + max_duration=self.args.max_duration, + shuffle=False, + ) + logging.info("About to create dev dataloader") + valid_dl = DataLoader( + validate, + sampler=valid_sampler, + batch_size=None, + num_workers=2, + persistent_workers=False, + ) + + return valid_dl + + def test_dataloaders(self, cuts: CutSet) -> DataLoader: + logging.debug("About to create test dataset") + test = K2SpeechRecognitionDataset( + input_strategy=OnTheFlyFeatures(Fbank(FbankConfig(num_mel_bins=80))) + if self.args.on_the_fly_feats + else PrecomputedFeatures(), + return_cuts=self.args.return_cuts, + ) + sampler = BucketingSampler( + cuts, max_duration=self.args.max_duration, shuffle=False + ) + logging.debug("About to create test dataloader") + test_dl = DataLoader( + test, + batch_size=None, + sampler=sampler, + num_workers=self.args.num_workers, + ) + return test_dl diff --git a/egs/aishell/ASR/transducer_stateless_modified-2/beam_search.py b/egs/aishell/ASR/transducer_stateless_modified-2/beam_search.py new file mode 120000 index 000000000..e188617a8 --- /dev/null +++ b/egs/aishell/ASR/transducer_stateless_modified-2/beam_search.py @@ -0,0 +1 @@ +../../../librispeech/ASR/transducer_stateless/beam_search.py \ No newline at end of file diff --git a/egs/aishell/ASR/transducer_stateless_modified-2/conformer.py b/egs/aishell/ASR/transducer_stateless_modified-2/conformer.py new file mode 120000 index 000000000..88975988f --- /dev/null +++ b/egs/aishell/ASR/transducer_stateless_modified-2/conformer.py @@ -0,0 +1 @@ +../transducer_stateless_modified/conformer.py \ No newline at end of file diff --git a/egs/aishell/ASR/transducer_stateless_modified-2/decode.py b/egs/aishell/ASR/transducer_stateless_modified-2/decode.py new file mode 100755 index 000000000..8b851bd17 --- /dev/null +++ b/egs/aishell/ASR/transducer_stateless_modified-2/decode.py @@ -0,0 +1,491 @@ +#!/usr/bin/env python3 +# +# Copyright 2021 Xiaomi Corporation (Author: Fangjun Kuang) +# +# See ../../../../LICENSE for clarification regarding multiple 
authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Usage: +(1) greedy search +./transducer_stateless_modified-2/decode.py \ + --epoch 89 \ + --avg 38 \ + --exp-dir ./transducer_stateless_modified-2/exp \ + --max-duration 100 \ + --decoding-method greedy_search + +(2) beam search +./transducer_stateless_modified/decode.py \ + --epoch 89 \ + --avg 38 \ + --exp-dir ./transducer_stateless_modified-2/exp \ + --max-duration 100 \ + --decoding-method beam_search \ + --beam-size 4 + +(3) modified beam search +./transducer_stateless_modified-2/decode.py \ + --epoch 89 \ + --avg 38 \ + --exp-dir ./transducer_stateless_modified/exp \ + --max-duration 100 \ + --decoding-method modified_beam_search \ + --beam-size 4 +""" + +import argparse +import logging +from collections import defaultdict +from pathlib import Path +from typing import Dict, List, Tuple + +import torch +import torch.nn as nn +from aishell import AIShell +from asr_datamodule import AsrDataModule +from beam_search import beam_search, greedy_search, modified_beam_search +from conformer import Conformer +from decoder import Decoder +from joiner import Joiner +from model import Transducer + +from icefall.checkpoint import average_checkpoints, load_checkpoint +from icefall.env import get_env_info +from icefall.lexicon import Lexicon +from icefall.utils import ( + AttributeDict, + setup_logger, + store_transcripts, + write_error_stats, +) + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--epoch", + type=int, + default=30, + help="It specifies the checkpoint to use for decoding." + "Note: Epoch counts from 0.", + ) + parser.add_argument( + "--avg", + type=int, + default=10, + help="Number of checkpoints to average. Automatically select " + "consecutive checkpoints before the checkpoint specified by " + "'--epoch'. ", + ) + + parser.add_argument( + "--exp-dir", + type=str, + default="transducer_stateless_modified-2/exp", + help="The experiment dir", + ) + + parser.add_argument( + "--lang-dir", + type=str, + default="data/lang_char", + help="The lang dir", + ) + + parser.add_argument( + "--decoding-method", + type=str, + default="greedy_search", + help="""Possible values are: + - greedy_search + - beam_search + - modified_beam_search + """, + ) + + parser.add_argument( + "--beam-size", + type=int, + default=4, + help="Used only when --decoding-method is beam_search " + "and modified_beam_search", + ) + + parser.add_argument( + "--context-size", + type=int, + default=2, + help="The context size in the decoder. 
1 means bigram; " + "2 means tri-gram", + ) + parser.add_argument( + "--max-sym-per-frame", + type=int, + default=3, + help="Maximum number of symbols per frame", + ) + return parser + + +def get_params() -> AttributeDict: + params = AttributeDict( + { + # parameters for conformer + "feature_dim": 80, + "encoder_out_dim": 512, + "subsampling_factor": 4, + "attention_dim": 512, + "nhead": 8, + "dim_feedforward": 2048, + "num_encoder_layers": 12, + "vgg_frontend": False, + "env_info": get_env_info(), + } + ) + return params + + +def get_encoder_model(params: AttributeDict): + # TODO: We can add an option to switch between Conformer and Transformer + encoder = Conformer( + num_features=params.feature_dim, + output_dim=params.encoder_out_dim, + subsampling_factor=params.subsampling_factor, + d_model=params.attention_dim, + nhead=params.nhead, + dim_feedforward=params.dim_feedforward, + num_encoder_layers=params.num_encoder_layers, + vgg_frontend=params.vgg_frontend, + ) + return encoder + + +def get_decoder_model(params: AttributeDict): + decoder = Decoder( + vocab_size=params.vocab_size, + embedding_dim=params.encoder_out_dim, + blank_id=params.blank_id, + context_size=params.context_size, + ) + return decoder + + +def get_joiner_model(params: AttributeDict): + joiner = Joiner( + input_dim=params.encoder_out_dim, + output_dim=params.vocab_size, + ) + return joiner + + +def get_transducer_model(params: AttributeDict): + encoder = get_encoder_model(params) + decoder = get_decoder_model(params) + joiner = get_joiner_model(params) + + model = Transducer( + encoder=encoder, + decoder=decoder, + joiner=joiner, + ) + return model + + +def decode_one_batch( + params: AttributeDict, + model: nn.Module, + lexicon: Lexicon, + batch: dict, +) -> Dict[str, List[List[str]]]: + """Decode one batch and return the result in a dict. The dict has the + following format: + + - key: It indicates the setting used for decoding. For example, + if greedy_search is used, it would be "greedy_search" + If beam search with a beam size of 7 is used, it would be + "beam_7" + - value: It contains the decoding result. `len(value)` equals to + batch size. `value[i]` is the decoding result for the i-th + utterance in the given batch. + Args: + params: + It's the return value of :func:`get_params`. + model: + The neural model. + batch: + It is the return value from iterating + `lhotse.dataset.K2SpeechRecognitionDataset`. See its documentation + for the format of the `batch`. + lexicon: + It contains the token symbol table and the word symbol table. + Returns: + Return the decoding result. See above description for the format of + the returned dict. 
+ """ + device = model.device + feature = batch["inputs"] + assert feature.ndim == 3 + + feature = feature.to(device) + # at entry, feature is (N, T, C) + + supervisions = batch["supervisions"] + feature_lens = supervisions["num_frames"].to(device) + + encoder_out, encoder_out_lens = model.encoder( + x=feature, x_lens=feature_lens + ) + hyps = [] + batch_size = encoder_out.size(0) + + for i in range(batch_size): + # fmt: off + encoder_out_i = encoder_out[i:i+1, :encoder_out_lens[i]] + # fmt: on + if params.decoding_method == "greedy_search": + hyp = greedy_search( + model=model, + encoder_out=encoder_out_i, + max_sym_per_frame=params.max_sym_per_frame, + ) + elif params.decoding_method == "beam_search": + hyp = beam_search( + model=model, encoder_out=encoder_out_i, beam=params.beam_size + ) + elif params.decoding_method == "modified_beam_search": + hyp = modified_beam_search( + model=model, encoder_out=encoder_out_i, beam=params.beam_size + ) + else: + raise ValueError( + f"Unsupported decoding method: {params.decoding_method}" + ) + hyps.append([lexicon.token_table[i] for i in hyp]) + + if params.decoding_method == "greedy_search": + return {"greedy_search": hyps} + else: + return {f"beam_{params.beam_size}": hyps} + + +def decode_dataset( + dl: torch.utils.data.DataLoader, + params: AttributeDict, + model: nn.Module, + lexicon: Lexicon, +) -> Dict[str, List[Tuple[List[str], List[str]]]]: + """Decode dataset. + + Args: + dl: + PyTorch's dataloader containing the dataset to decode. + params: + It is returned by :func:`get_params`. + model: + The neural model. + Returns: + Return a dict, whose key may be "greedy_search" if greedy search + is used, or it may be "beam_7" if beam size of 7 is used. + Its value is a list of tuples. Each tuple contains two elements: + The first is the reference transcript, and the second is the + predicted result. + """ + num_cuts = 0 + + try: + num_batches = len(dl) + except TypeError: + num_batches = "?" + + if params.decoding_method == "greedy_search": + log_interval = 100 + else: + log_interval = 2 + + results = defaultdict(list) + for batch_idx, batch in enumerate(dl): + texts = batch["supervisions"]["text"] + + hyps_dict = decode_one_batch( + params=params, + model=model, + lexicon=lexicon, + batch=batch, + ) + + for name, hyps in hyps_dict.items(): + this_batch = [] + assert len(hyps) == len(texts) + for hyp_words, ref_text in zip(hyps, texts): + ref_words = ref_text.split() + this_batch.append((ref_words, hyp_words)) + + results[name].extend(this_batch) + + num_cuts += len(texts) + + if batch_idx % log_interval == 0: + batch_str = f"{batch_idx}/{num_batches}" + + logging.info( + f"batch {batch_str}, cuts processed until now is {num_cuts}" + ) + return results + + +def save_results( + params: AttributeDict, + test_set_name: str, + results_dict: Dict[str, List[Tuple[List[int], List[int]]]], +): + test_set_wers = dict() + for key, results in results_dict.items(): + recog_path = ( + params.res_dir / f"recogs-{test_set_name}-{key}-{params.suffix}.txt" + ) + store_transcripts(filename=recog_path, texts=results) + + # The following prints out WERs, per-word error statistics and aligned + # ref/hyp pairs. + errs_filename = ( + params.res_dir / f"errs-{test_set_name}-{key}-{params.suffix}.txt" + ) + # we compute CER for aishell dataset. 
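+    # Each word-level transcript is flattened into a character sequence
+    # before calling write_error_stats(), so the reported error rate is
+    # effectively a CER.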
+ results_char = [] + for res in results: + results_char.append((list("".join(res[0])), list("".join(res[1])))) + with open(errs_filename, "w") as f: + wer = write_error_stats( + f, f"{test_set_name}-{key}", results_char, enable_log=True + ) + test_set_wers[key] = wer + + logging.info("Wrote detailed error stats to {}".format(errs_filename)) + + test_set_wers = sorted(test_set_wers.items(), key=lambda x: x[1]) + errs_info = ( + params.res_dir + / f"wer-summary-{test_set_name}-{key}-{params.suffix}.txt" + ) + with open(errs_info, "w") as f: + print("settings\tCER", file=f) + for key, val in test_set_wers: + print("{}\t{}".format(key, val), file=f) + + s = "\nFor {}, CER of different settings are:\n".format(test_set_name) + note = "\tbest for {}".format(test_set_name) + for key, val in test_set_wers: + s += "{}\t{}{}\n".format(key, val, note) + note = "" + logging.info(s) + + +@torch.no_grad() +def main(): + parser = get_parser() + AsrDataModule.add_arguments(parser) + args = parser.parse_args() + args.exp_dir = Path(args.exp_dir) + args.lang_dir = Path(args.lang_dir) + + params = get_params() + params.update(vars(args)) + + assert params.decoding_method in ( + "greedy_search", + "beam_search", + "modified_beam_search", + ) + params.res_dir = params.exp_dir / params.decoding_method + + params.suffix = f"epoch-{params.epoch}-avg-{params.avg}" + if "beam_search" in params.decoding_method: + params.suffix += f"-beam-{params.beam_size}" + else: + params.suffix += f"-context-{params.context_size}" + params.suffix += f"-max-sym-per-frame-{params.max_sym_per_frame}" + + setup_logger(f"{params.res_dir}/log-decode-{params.suffix}") + logging.info("Decoding started") + + device = torch.device("cpu") + if torch.cuda.is_available(): + device = torch.device("cuda", 0) + + logging.info(f"Device: {device}") + + lexicon = Lexicon(params.lang_dir) + + params.blank_id = 0 + params.vocab_size = max(lexicon.tokens) + 1 + + logging.info(params) + + logging.info("About to create model") + model = get_transducer_model(params) + + if params.avg == 1: + load_checkpoint(f"{params.exp_dir}/epoch-{params.epoch}.pt", model) + else: + start = params.epoch - params.avg + 1 + filenames = [] + for i in range(start, params.epoch + 1): + if start >= 0: + filenames.append(f"{params.exp_dir}/epoch-{i}.pt") + logging.info(f"averaging {filenames}") + model.to(device) + model.load_state_dict( + average_checkpoints(filenames, device=device), strict=False + ) + + model.to(device) + model.eval() + model.device = device + + num_param = sum([p.numel() for p in model.parameters()]) + logging.info(f"Number of model parameters: {num_param}") + + asr_datamodule = AsrDataModule(args) + aishell = AIShell(manifest_dir=args.manifest_dir) + test_cuts = aishell.test_cuts() + test_dl = asr_datamodule.test_dataloaders(test_cuts) + + test_sets = ["test"] + test_dls = [test_dl] + + for test_set, test_dl in zip(test_sets, test_dls): + results_dict = decode_dataset( + dl=test_dl, + params=params, + model=model, + lexicon=lexicon, + ) + + save_results( + params=params, + test_set_name=test_set, + results_dict=results_dict, + ) + + logging.info("Done!") + + +torch.set_num_threads(1) +torch.set_num_interop_threads(1) + +if __name__ == "__main__": + main() diff --git a/egs/aishell/ASR/transducer_stateless_modified-2/decoder.py b/egs/aishell/ASR/transducer_stateless_modified-2/decoder.py new file mode 120000 index 000000000..bdfcea5c2 --- /dev/null +++ b/egs/aishell/ASR/transducer_stateless_modified-2/decoder.py @@ -0,0 +1 @@ 
+../transducer_stateless_modified/decoder.py \ No newline at end of file diff --git a/egs/aishell/ASR/transducer_stateless_modified-2/encoder_interface.py b/egs/aishell/ASR/transducer_stateless_modified-2/encoder_interface.py new file mode 120000 index 000000000..a2a5f22cf --- /dev/null +++ b/egs/aishell/ASR/transducer_stateless_modified-2/encoder_interface.py @@ -0,0 +1 @@ +../transducer_stateless_modified/encoder_interface.py \ No newline at end of file diff --git a/egs/aishell/ASR/transducer_stateless_modified-2/export.py b/egs/aishell/ASR/transducer_stateless_modified-2/export.py new file mode 100755 index 000000000..d009de603 --- /dev/null +++ b/egs/aishell/ASR/transducer_stateless_modified-2/export.py @@ -0,0 +1,246 @@ +#!/usr/bin/env python3 +# +# Copyright 2021 Xiaomi Corporation (Author: Fangjun Kuang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script converts several saved checkpoints +# to a single one using model averaging. +""" +Usage: +./transducer_stateless_modified-2/export.py \ + --exp-dir ./transducer_stateless_modified-2/exp \ + --epoch 89 \ + --avg 38 + +It will generate a file exp_dir/pretrained.pt + +To use the generated file with `transducer_stateless_modified-2/decode.py`, +you can do:: + + cd /path/to/exp_dir + ln -s pretrained.pt epoch-9999.pt + + cd /path/to/egs/aishell/ASR + ./transducer_stateless_modified-2/decode.py \ + --exp-dir ./transducer_stateless_modified-2/exp \ + --epoch 9999 \ + --avg 1 \ + --max-duration 100 \ + --lang-dir data/lang_char +""" + +import argparse +import logging +from pathlib import Path + +import torch +import torch.nn as nn +from conformer import Conformer +from decoder import Decoder +from joiner import Joiner +from model import Transducer + +from icefall.checkpoint import average_checkpoints, load_checkpoint +from icefall.env import get_env_info +from icefall.lexicon import Lexicon +from icefall.utils import AttributeDict, str2bool + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--epoch", + type=int, + default=20, + help="It specifies the checkpoint to use for decoding." + "Note: Epoch counts from 0.", + ) + + parser.add_argument( + "--avg", + type=int, + default=10, + help="Number of checkpoints to average. Automatically select " + "consecutive checkpoints before the checkpoint specified by " + "'--epoch'. ", + ) + + parser.add_argument( + "--exp-dir", + type=Path, + default=Path("transducer_stateless_modified-2/exp"), + help="""It specifies the directory where all training related + files, e.g., checkpoints, log, etc, are saved + """, + ) + + parser.add_argument( + "--jit", + type=str2bool, + default=False, + help="""True to save a model after applying torch.jit.script. 
+ """, + ) + + parser.add_argument( + "--lang-dir", + type=Path, + default=Path("data/lang_char"), + help="The lang dir", + ) + + parser.add_argument( + "--context-size", + type=int, + default=2, + help="The context size in the decoder. 1 means bigram; " + "2 means tri-gram", + ) + + return parser + + +def get_params() -> AttributeDict: + params = AttributeDict( + { + # parameters for conformer + "feature_dim": 80, + "encoder_out_dim": 512, + "subsampling_factor": 4, + "attention_dim": 512, + "nhead": 8, + "dim_feedforward": 2048, + "num_encoder_layers": 12, + "vgg_frontend": False, + "env_info": get_env_info(), + } + ) + return params + + +def get_encoder_model(params: AttributeDict) -> nn.Module: + encoder = Conformer( + num_features=params.feature_dim, + output_dim=params.encoder_out_dim, + subsampling_factor=params.subsampling_factor, + d_model=params.attention_dim, + nhead=params.nhead, + dim_feedforward=params.dim_feedforward, + num_encoder_layers=params.num_encoder_layers, + vgg_frontend=params.vgg_frontend, + ) + return encoder + + +def get_decoder_model(params: AttributeDict) -> nn.Module: + decoder = Decoder( + vocab_size=params.vocab_size, + embedding_dim=params.encoder_out_dim, + blank_id=params.blank_id, + context_size=params.context_size, + ) + return decoder + + +def get_joiner_model(params: AttributeDict) -> nn.Module: + joiner = Joiner( + input_dim=params.encoder_out_dim, + output_dim=params.vocab_size, + ) + return joiner + + +def get_transducer_model(params: AttributeDict) -> nn.Module: + encoder = get_encoder_model(params) + decoder = get_decoder_model(params) + joiner = get_joiner_model(params) + + model = Transducer( + encoder=encoder, + decoder=decoder, + joiner=joiner, + ) + return model + + +def main(): + args = get_parser().parse_args() + + assert args.jit is False, "torchscript support will be added later" + + params = get_params() + params.update(vars(args)) + + device = torch.device("cpu") + if torch.cuda.is_available(): + device = torch.device("cuda", 0) + + logging.info(f"device: {device}") + + lexicon = Lexicon(params.lang_dir) + + params.blank_id = 0 + params.vocab_size = max(lexicon.tokens) + 1 + + logging.info(params) + + logging.info("About to create model") + model = get_transducer_model(params) + + model.to(device) + + if params.avg == 1: + load_checkpoint(f"{params.exp_dir}/epoch-{params.epoch}.pt", model) + else: + start = params.epoch - params.avg + 1 + filenames = [] + for i in range(start, params.epoch + 1): + if start >= 0: + filenames.append(f"{params.exp_dir}/epoch-{i}.pt") + logging.info(f"averaging {filenames}") + model.to(device) + model.load_state_dict( + average_checkpoints(filenames, device=device), strict=False + ) + + model.to("cpu") + model.eval() + + if params.jit: + logging.info("Using torch.jit.script") + model = torch.jit.script(model) + filename = params.exp_dir / "cpu_jit.pt" + model.save(str(filename)) + logging.info(f"Saved to {filename}") + else: + logging.info("Not using torch.jit.script") + # Save it using a format so that it can be loaded + # by :func:`load_checkpoint` + filename = params.exp_dir / "pretrained.pt" + torch.save({"model": model.state_dict()}, str(filename)) + logging.info(f"Saved to {filename}") + + +if __name__ == "__main__": + formatter = ( + "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s" + ) + + logging.basicConfig(format=formatter, level=logging.INFO) + main() diff --git a/egs/aishell/ASR/transducer_stateless_modified-2/joiner.py 
b/egs/aishell/ASR/transducer_stateless_modified-2/joiner.py new file mode 120000 index 000000000..e9e435ecd --- /dev/null +++ b/egs/aishell/ASR/transducer_stateless_modified-2/joiner.py @@ -0,0 +1 @@ +../transducer_stateless_modified/joiner.py \ No newline at end of file diff --git a/egs/aishell/ASR/transducer_stateless_modified-2/model.py b/egs/aishell/ASR/transducer_stateless_modified-2/model.py new file mode 100644 index 000000000..086957d0b --- /dev/null +++ b/egs/aishell/ASR/transducer_stateless_modified-2/model.py @@ -0,0 +1,163 @@ +# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import random +from typing import Optional + +import k2 +import torch +import torch.nn as nn +from encoder_interface import EncoderInterface + +from icefall.utils import add_sos + + +class Transducer(nn.Module): + """It implements https://arxiv.org/pdf/1211.3711.pdf + "Sequence Transduction with Recurrent Neural Networks" + """ + + def __init__( + self, + encoder: EncoderInterface, + decoder: nn.Module, + joiner: nn.Module, + decoder_datatang: Optional[nn.Module] = None, + joiner_datatang: Optional[nn.Module] = None, + ): + """ + Args: + encoder: + It is the transcription network in the paper. Its accepts + two inputs: `x` of (N, T, C) and `x_lens` of shape (N,). + It returns two tensors: `logits` of shape (N, T, C) and + `logit_lens` of shape (N,). + decoder: + It is the prediction network in the paper. Its input shape + is (N, U) and its output shape is (N, U, C). It should contain + one attribute: `blank_id`. + joiner: + It has two inputs with shapes: (N, T, C) and (N, U, C). Its + output shape is (N, T, U, C). Note that its output contains + unnormalized probs, i.e., not processed by log-softmax. + decoder_datatang: + The decoder for the aidatatang_200zh dataset. + joiner_datatang: + The joiner for the aidatatang_200zh dataset. + """ + super().__init__() + assert isinstance(encoder, EncoderInterface), type(encoder) + assert hasattr(decoder, "blank_id") + if decoder_datatang is not None: + assert hasattr(decoder_datatang, "blank_id") + + self.encoder = encoder + self.decoder = decoder + self.joiner = joiner + + self.decoder_datatang = decoder_datatang + self.joiner_datatang = joiner_datatang + + def forward( + self, + x: torch.Tensor, + x_lens: torch.Tensor, + y: k2.RaggedTensor, + aishell: bool = True, + modified_transducer_prob: float = 0.0, + ) -> torch.Tensor: + """ + Args: + x: + A 3-D tensor of shape (N, T, C). + x_lens: + A 1-D tensor of shape (N,). It contains the number of frames in `x` + before padding. + y: + A ragged tensor with 2 axes [utt][label]. It contains labels of each + utterance. + modified_transducer_prob: + The probability to use modified transducer loss. + Returns: + Return the transducer loss. 
+ """ + assert x.ndim == 3, x.shape + assert x_lens.ndim == 1, x_lens.shape + assert y.num_axes == 2, y.num_axes + + assert x.size(0) == x_lens.size(0) == y.dim0 + + encoder_out, x_lens = self.encoder(x, x_lens) + assert torch.all(x_lens > 0) + + # Now for the decoder, i.e., the prediction network + row_splits = y.shape.row_splits(1) + y_lens = row_splits[1:] - row_splits[:-1] + + blank_id = self.decoder.blank_id + sos_y = add_sos(y, sos_id=blank_id) + + sos_y_padded = sos_y.pad(mode="constant", padding_value=blank_id) + sos_y_padded = sos_y_padded.to(torch.int64) + + if aishell: + decoder = self.decoder + joiner = self.joiner + else: + decoder = self.decoder_datatang + joiner = self.joiner_datatang + + decoder_out = decoder(sos_y_padded) + + # +1 here since a blank is prepended to each utterance. + logits = joiner( + encoder_out=encoder_out, + decoder_out=decoder_out, + encoder_out_len=x_lens, + decoder_out_len=y_lens + 1, + ) + + # rnnt_loss requires 0 padded targets + # Note: y does not start with SOS + y_padded = y.pad(mode="constant", padding_value=0) + + # We don't put this `import` at the beginning of the file + # as it is required only in the training, not during the + # reference stage + import optimized_transducer + + assert 0 <= modified_transducer_prob <= 1 + + if modified_transducer_prob == 0: + one_sym_per_frame = False + elif random.random() < modified_transducer_prob: + # random.random() returns a float in the range [0, 1) + one_sym_per_frame = True + else: + one_sym_per_frame = False + + loss = optimized_transducer.transducer_loss( + logits=logits, + targets=y_padded, + logit_lengths=x_lens, + target_lengths=y_lens, + blank=blank_id, + reduction="sum", + one_sym_per_frame=one_sym_per_frame, + from_log_softmax=False, + ) + + return loss diff --git a/egs/aishell/ASR/transducer_stateless_modified-2/pretrained.py b/egs/aishell/ASR/transducer_stateless_modified-2/pretrained.py new file mode 100755 index 000000000..31bab122c --- /dev/null +++ b/egs/aishell/ASR/transducer_stateless_modified-2/pretrained.py @@ -0,0 +1,331 @@ +#!/usr/bin/env python3 +# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang, +# Wei Kang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
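+
+# Note: the --checkpoint argument used below expects the `pretrained.pt`
+# file written by ./transducer_stateless_modified-2/export.py, i.e. a
+# dict of the form {"model": model.state_dict()}.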
+ +""" +Usage: + +# greedy search +./transducer_stateless_modified-2/pretrained.py \ + --checkpoint /path/to/pretrained.pt \ + --lang-dir /path/to/lang_char \ + --method greedy_search \ + /path/to/foo.wav \ + /path/to/bar.wav + +# beam search +./transducer_stateless_modified-2/pretrained.py \ + --checkpoint /path/to/pretrained.pt \ + --lang-dir /path/to/lang_char \ + --method beam_search \ + --beam-size 4 \ + /path/to/foo.wav \ + /path/to/bar.wav + +# modified beam search +./transducer_stateless_modified-2/pretrained.py \ + --checkpoint /path/to/pretrained.pt \ + --lang-dir /path/to/lang_char \ + --method modified_beam_search \ + --beam-size 4 \ + /path/to/foo.wav \ + /path/to/bar.wav + +""" + +import argparse +import logging +import math +from pathlib import Path +from typing import List + +import kaldifeat +import torch +import torch.nn as nn +import torchaudio +from beam_search import beam_search, greedy_search, modified_beam_search +from conformer import Conformer +from decoder import Decoder +from joiner import Joiner +from model import Transducer +from torch.nn.utils.rnn import pad_sequence + +from icefall.env import get_env_info +from icefall.lexicon import Lexicon +from icefall.utils import AttributeDict + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--checkpoint", + type=str, + required=True, + help="Path to the checkpoint. " + "The checkpoint is assumed to be saved by " + "icefall.checkpoint.save_checkpoint().", + ) + + parser.add_argument( + "--lang-dir", + type=Path, + default=Path("data/lang_char"), + help="The lang dir", + ) + + parser.add_argument( + "--method", + type=str, + default="greedy_search", + help="""Possible values are: + - greedy_search + - beam_search + - modified_beam_search + """, + ) + + parser.add_argument( + "sound_files", + type=str, + nargs="+", + help="The input sound file(s) to transcribe. " + "Supported formats are those supported by torchaudio.load(). " + "For example, wav and flac are supported. " + "The sample rate has to be 16kHz.", + ) + + parser.add_argument( + "--beam-size", + type=int, + default=4, + help="Used only when --method is beam_search and modified_beam_search", + ) + + parser.add_argument( + "--context-size", + type=int, + default=2, + help="The context size in the decoder. 1 means bigram; " + "2 means tri-gram", + ) + parser.add_argument( + "--max-sym-per-frame", + type=int, + default=3, + help="Maximum number of symbols per frame. 
" + "Use only when --method is greedy_search", + ) + return parser + + return parser + + +def get_params() -> AttributeDict: + params = AttributeDict( + { + # parameters for conformer + "feature_dim": 80, + "encoder_out_dim": 512, + "subsampling_factor": 4, + "attention_dim": 512, + "nhead": 8, + "dim_feedforward": 2048, + "num_encoder_layers": 12, + "vgg_frontend": False, + "env_info": get_env_info(), + "sample_rate": 16000, + } + ) + return params + + +def get_encoder_model(params: AttributeDict) -> nn.Module: + encoder = Conformer( + num_features=params.feature_dim, + output_dim=params.encoder_out_dim, + subsampling_factor=params.subsampling_factor, + d_model=params.attention_dim, + nhead=params.nhead, + dim_feedforward=params.dim_feedforward, + num_encoder_layers=params.num_encoder_layers, + vgg_frontend=params.vgg_frontend, + ) + return encoder + + +def get_decoder_model(params: AttributeDict) -> nn.Module: + decoder = Decoder( + vocab_size=params.vocab_size, + embedding_dim=params.encoder_out_dim, + blank_id=params.blank_id, + context_size=params.context_size, + ) + return decoder + + +def get_joiner_model(params: AttributeDict) -> nn.Module: + joiner = Joiner( + input_dim=params.encoder_out_dim, + output_dim=params.vocab_size, + ) + return joiner + + +def get_transducer_model(params: AttributeDict) -> nn.Module: + encoder = get_encoder_model(params) + decoder = get_decoder_model(params) + joiner = get_joiner_model(params) + + model = Transducer( + encoder=encoder, + decoder=decoder, + joiner=joiner, + ) + return model + + +def read_sound_files( + filenames: List[str], expected_sample_rate: float +) -> List[torch.Tensor]: + """Read a list of sound files into a list 1-D float32 torch tensors. + Args: + filenames: + A list of sound filenames. + expected_sample_rate: + The expected sample rate of the sound files. + Returns: + Return a list of 1-D float32 torch tensors. + """ + ans = [] + for f in filenames: + wave, sample_rate = torchaudio.load(f) + assert sample_rate == expected_sample_rate, ( + f"expected sample rate: {expected_sample_rate}. 
" + f"Given: {sample_rate}" + ) + # We use only the first channel + ans.append(wave[0]) + return ans + + +def main(): + parser = get_parser() + args = parser.parse_args() + + params = get_params() + params.update(vars(args)) + + device = torch.device("cpu") + if torch.cuda.is_available(): + device = torch.device("cuda", 0) + + logging.info(f"device: {device}") + + lexicon = Lexicon(params.lang_dir) + + params.blank_id = 0 + params.vocab_size = max(lexicon.tokens) + 1 + + logging.info(params) + + logging.info("About to create model") + model = get_transducer_model(params) + + checkpoint = torch.load(args.checkpoint, map_location="cpu") + model.load_state_dict(checkpoint["model"]) + model.to(device) + model.eval() + model.device = device + + logging.info("Constructing Fbank computer") + opts = kaldifeat.FbankOptions() + opts.device = device + opts.frame_opts.dither = 0 + opts.frame_opts.snip_edges = False + opts.frame_opts.samp_freq = params.sample_rate + opts.mel_opts.num_bins = params.feature_dim + + fbank = kaldifeat.Fbank(opts) + + logging.info(f"Reading sound files: {params.sound_files}") + waves = read_sound_files( + filenames=params.sound_files, expected_sample_rate=params.sample_rate + ) + waves = [w.to(device) for w in waves] + + logging.info("Decoding started") + features = fbank(waves) + feature_lens = [f.size(0) for f in features] + feature_lens = torch.tensor(feature_lens, device=device) + + features = pad_sequence( + features, batch_first=True, padding_value=math.log(1e-10) + ) + + hyps = [] + with torch.no_grad(): + encoder_out, encoder_out_lens = model.encoder( + x=features, x_lens=feature_lens + ) + + for i in range(encoder_out.size(0)): + # fmt: off + encoder_out_i = encoder_out[i:i+1, :encoder_out_lens[i]] + # fmt: on + if params.method == "greedy_search": + hyp = greedy_search( + model=model, + encoder_out=encoder_out_i, + max_sym_per_frame=params.max_sym_per_frame, + ) + elif params.method == "beam_search": + hyp = beam_search( + model=model, + encoder_out=encoder_out_i, + beam=params.beam_size, + ) + elif params.method == "modified_beam_search": + hyp = modified_beam_search( + model=model, + encoder_out=encoder_out_i, + beam=params.beam_size, + ) + else: + raise ValueError( + f"Unsupported decoding method: {params.method}" + ) + hyps.append([lexicon.token_table[i] for i in hyp]) + + s = "\n" + for filename, hyp in zip(params.sound_files, hyps): + words = " ".join(hyp) + s += f"{filename}:\n{words}\n\n" + logging.info(s) + + logging.info("Decoding Done") + + +if __name__ == "__main__": + formatter = ( + "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s" + ) + + logging.basicConfig(format=formatter, level=logging.INFO) + main() diff --git a/egs/aishell/ASR/transducer_stateless_modified-2/subsampling.py b/egs/aishell/ASR/transducer_stateless_modified-2/subsampling.py new file mode 120000 index 000000000..6fee09e58 --- /dev/null +++ b/egs/aishell/ASR/transducer_stateless_modified-2/subsampling.py @@ -0,0 +1 @@ +../conformer_ctc/subsampling.py \ No newline at end of file diff --git a/egs/aishell/ASR/transducer_stateless_modified-2/test_decoder.py b/egs/aishell/ASR/transducer_stateless_modified-2/test_decoder.py new file mode 120000 index 000000000..fbe1679ea --- /dev/null +++ b/egs/aishell/ASR/transducer_stateless_modified-2/test_decoder.py @@ -0,0 +1 @@ +../transducer_stateless_modified/test_decoder.py \ No newline at end of file diff --git a/egs/aishell/ASR/transducer_stateless_modified-2/train.py b/egs/aishell/ASR/transducer_stateless_modified-2/train.py 
new file mode 100755 index 000000000..53d4e455f --- /dev/null +++ b/egs/aishell/ASR/transducer_stateless_modified-2/train.py @@ -0,0 +1,875 @@ +#!/usr/bin/env python3 +# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang, +# Wei Kang +# Mingshuang Luo) +# Copyright 2021 (Pingfeng Luo) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Usage: +./prepare.sh +./prepare_aidatatang_200zh.sh + +export CUDA_VISIBLE_DEVICES="0,1,2" + +./transducer_stateless_modified-2/train.py \ + --world-size 3 \ + --num-epochs 90 \ + --start-epoch 0 \ + --exp-dir transducer_stateless_modified-2/exp-2 \ + --max-duration 250 \ + --lr-factor 2.0 \ + --context-size 2 \ + --modified-transducer-prob 0.25 \ + --datatang-prob 0.2 +""" + + +import argparse +import logging +import random +from pathlib import Path +from shutil import copyfile +from typing import Optional, Tuple + +import k2 +import torch +import torch.multiprocessing as mp +import torch.nn as nn +from aidatatang_200zh import AIDatatang200zh +from aishell import AIShell +from asr_datamodule import AsrDataModule +from conformer import Conformer +from decoder import Decoder +from joiner import Joiner +from lhotse import CutSet, load_manifest +from lhotse.cut import Cut +from lhotse.utils import fix_random_seed +from model import Transducer +from torch import Tensor +from torch.nn.parallel import DistributedDataParallel as DDP +from torch.nn.utils import clip_grad_norm_ +from torch.utils.tensorboard import SummaryWriter +from transformer import Noam + +from icefall.char_graph_compiler import CharCtcTrainingGraphCompiler +from icefall.checkpoint import load_checkpoint +from icefall.checkpoint import save_checkpoint as save_checkpoint_impl +from icefall.dist import cleanup_dist, setup_dist +from icefall.env import get_env_info +from icefall.lexicon import Lexicon +from icefall.utils import AttributeDict, MetricsTracker, setup_logger, str2bool + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--world-size", + type=int, + default=1, + help="Number of GPUs for DDP training.", + ) + + parser.add_argument( + "--master-port", + type=int, + default=12354, + help="Master port to use for DDP training.", + ) + + parser.add_argument( + "--tensorboard", + type=str2bool, + default=True, + help="Should various information be logged in tensorboard.", + ) + + parser.add_argument( + "--num-epochs", + type=int, + default=30, + help="Number of epochs to train.", + ) + + parser.add_argument( + "--start-epoch", + type=int, + default=0, + help="""Resume training from from this epoch. + If it is positive, it will load checkpoint from + transducer_stateless/exp/epoch-{start_epoch-1}.pt + """, + ) + + parser.add_argument( + "--exp-dir", + type=str, + default="transducer_stateless_modified-2/exp", + help="""The experiment dir. 
+ It specifies the directory where all training related + files, e.g., checkpoints, log, etc, are saved + """, + ) + + parser.add_argument( + "--lang-dir", + type=str, + default="data/lang_char", + help="""The lang dir + It contains language related input files such as + "lexicon.txt" + """, + ) + + parser.add_argument( + "--lr-factor", + type=float, + default=5.0, + help="The lr_factor for Noam optimizer", + ) + + parser.add_argument( + "--context-size", + type=int, + default=2, + help="The context size in the decoder. 1 means bigram; " + "2 means tri-gram", + ) + + parser.add_argument( + "--modified-transducer-prob", + type=float, + default=0.25, + help="""The probability to use modified transducer loss. + In modified transduer, it limits the maximum number of symbols + per frame to 1. See also the option --max-sym-per-frame in + transducer_stateless/decode.py + """, + ) + + parser.add_argument( + "--datatang-prob", + type=float, + default=0.2, + help="The probability to select a batch from the " + "aidatatang_200zh dataset", + ) + + return parser + + +def get_params() -> AttributeDict: + """Return a dict containing training parameters. + + All training related parameters that are not passed from the commandline + are saved in the variable `params`. + + Commandline options are merged into `params` after they are parsed, so + you can also access them via `params`. + + Explanation of options saved in `params`: + + - best_train_loss: Best training loss so far. It is used to select + the model that has the lowest training loss. It is + updated during the training. + + - best_valid_loss: Best validation loss so far. It is used to select + the model that has the lowest validation loss. It is + updated during the training. + + - best_train_epoch: It is the epoch that has the best training loss. + + - best_valid_epoch: It is the epoch that has the best validation loss. + + - batch_idx_train: Used to writing statistics to tensorboard. It + contains number of batches trained so far across + epochs. + + - log_interval: Print training loss if batch_idx % log_interval` is 0 + + - reset_interval: Reset statistics if batch_idx % reset_interval is 0 + + - valid_interval: Run validation if batch_idx % valid_interval is 0 + + - feature_dim: The model input dim. It has to match the one used + in computing features. + + - subsampling_factor: The subsampling factor for the model. + + - attention_dim: Hidden dim for multi-head attention model. + + - num_decoder_layers: Number of decoder layer of transformer decoder. + + - warm_step: The warm_step for Noam optimizer. 
+ """ + params = AttributeDict( + { + "best_train_loss": float("inf"), + "best_valid_loss": float("inf"), + "best_train_epoch": -1, + "best_valid_epoch": -1, + "batch_idx_train": 0, + "log_interval": 50, + "reset_interval": 200, + "valid_interval": 800, # For the 100h subset, use 800 + # parameters for conformer + "feature_dim": 80, + "encoder_out_dim": 512, + "subsampling_factor": 4, + "attention_dim": 512, + "nhead": 8, + "dim_feedforward": 2048, + "num_encoder_layers": 12, + "vgg_frontend": False, + # parameters for Noam + "warm_step": 80000, # For the 100h subset, use 8k + "env_info": get_env_info(), + } + ) + + return params + + +def get_encoder_model(params: AttributeDict) -> nn.Module: + # TODO: We can add an option to switch between Conformer and Transformer + encoder = Conformer( + num_features=params.feature_dim, + output_dim=params.encoder_out_dim, + subsampling_factor=params.subsampling_factor, + d_model=params.attention_dim, + nhead=params.nhead, + dim_feedforward=params.dim_feedforward, + num_encoder_layers=params.num_encoder_layers, + vgg_frontend=params.vgg_frontend, + ) + return encoder + + +def get_decoder_model(params: AttributeDict) -> nn.Module: + decoder = Decoder( + vocab_size=params.vocab_size, + embedding_dim=params.encoder_out_dim, + blank_id=params.blank_id, + context_size=params.context_size, + ) + return decoder + + +def get_joiner_model(params: AttributeDict) -> nn.Module: + joiner = Joiner( + input_dim=params.encoder_out_dim, + output_dim=params.vocab_size, + ) + return joiner + + +def get_transducer_model(params: AttributeDict) -> nn.Module: + encoder = get_encoder_model(params) + decoder = get_decoder_model(params) + joiner = get_joiner_model(params) + + decoder_datatang = get_decoder_model(params) + joiner_datatang = get_joiner_model(params) + + model = Transducer( + encoder=encoder, + decoder=decoder, + joiner=joiner, + decoder_datatang=decoder_datatang, + joiner_datatang=joiner_datatang, + ) + return model + + +def load_checkpoint_if_available( + params: AttributeDict, + model: nn.Module, + optimizer: Optional[torch.optim.Optimizer] = None, + scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None, +) -> None: + """Load checkpoint from file. + + If params.start_epoch is positive, it will load the checkpoint from + `params.start_epoch - 1`. Otherwise, this function does nothing. + + Apart from loading state dict for `model`, `optimizer` and `scheduler`, + it also updates `best_train_epoch`, `best_train_loss`, `best_valid_epoch`, + and `best_valid_loss` in `params`. + + Args: + params: + The return value of :func:`get_params`. + model: + The training model. + optimizer: + The optimizer that we are using. + scheduler: + The learning rate scheduler we are using. + Returns: + Return None. + """ + if params.start_epoch <= 0: + return + + filename = params.exp_dir / f"epoch-{params.start_epoch-1}.pt" + saved_params = load_checkpoint( + filename, + model=model, + optimizer=optimizer, + scheduler=scheduler, + ) + + keys = [ + "best_train_epoch", + "best_valid_epoch", + "batch_idx_train", + "best_train_loss", + "best_valid_loss", + ] + for k in keys: + params[k] = saved_params[k] + + return saved_params + + +def save_checkpoint( + params: AttributeDict, + model: nn.Module, + optimizer: Optional[torch.optim.Optimizer] = None, + scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None, + rank: int = 0, +) -> None: + """Save model, optimizer, scheduler and training stats to file. + + Args: + params: + It is returned by :func:`get_params`. 
+ model: + The training model. + """ + if rank != 0: + return + filename = params.exp_dir / f"epoch-{params.cur_epoch}.pt" + save_checkpoint_impl( + filename=filename, + model=model, + params=params, + optimizer=optimizer, + scheduler=scheduler, + rank=rank, + ) + + if params.best_train_epoch == params.cur_epoch: + best_train_filename = params.exp_dir / "best-train-loss.pt" + copyfile(src=filename, dst=best_train_filename) + + if params.best_valid_epoch == params.cur_epoch: + best_valid_filename = params.exp_dir / "best-valid-loss.pt" + copyfile(src=filename, dst=best_valid_filename) + + +def is_aishell(c: Cut) -> bool: + """Return True if this cut is from the AIShell dataset. + + Note: + During data preparation, we set the custom field in + the supervision segment of aidatatang_200zh to + dict(origin='aidatatang_200zh') + See ../local/process_aidatatang_200zh.py. + """ + return c.supervisions[0].custom is None + + +def compute_loss( + params: AttributeDict, + model: nn.Module, + graph_compiler: CharCtcTrainingGraphCompiler, + batch: dict, + is_training: bool, +) -> Tuple[Tensor, MetricsTracker]: + """ + Compute CTC loss given the model and its inputs. + + Args: + params: + Parameters for training. See :func:`get_params`. + model: + The model for training. It is an instance of Conformer in our case. + batch: + A batch of data. See `lhotse.dataset.K2SpeechRecognitionDataset()` + for the content in it. + is_training: + True for training. False for validation. When it is True, this + function enables autograd during computation; when it is False, it + disables autograd. + """ + device = model.device + feature = batch["inputs"] + # at entry, feature is (N, T, C) + assert feature.ndim == 3 + feature = feature.to(device) + + supervisions = batch["supervisions"] + feature_lens = supervisions["num_frames"].to(device) + + aishell = is_aishell(supervisions["cut"][0]) + + texts = batch["supervisions"]["text"] + y = graph_compiler.texts_to_ids(texts) + y = k2.RaggedTensor(y).to(device) + + with torch.set_grad_enabled(is_training): + loss = model( + x=feature, + x_lens=feature_lens, + y=y, + aishell=aishell, + modified_transducer_prob=params.modified_transducer_prob, + ) + + assert loss.requires_grad == is_training + + info = MetricsTracker() + info["frames"] = (feature_lens // params.subsampling_factor).sum().item() + + # Note: We use reduction=sum while computing the loss. 
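+    # `loss` is therefore a sum over utterances in the batch; it is divided
+    # by info["frames"] only later, e.g. when params.train_loss and the
+    # validation loss are computed.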
+    info["loss"] = loss.detach().cpu().item()
+
+    return loss, info
+
+
+def compute_validation_loss(
+    params: AttributeDict,
+    model: nn.Module,
+    graph_compiler: CharCtcTrainingGraphCompiler,
+    valid_dl: torch.utils.data.DataLoader,
+    world_size: int = 1,
+) -> MetricsTracker:
+    """Run the validation process."""
+    model.eval()
+
+    tot_loss = MetricsTracker()
+
+    for batch_idx, batch in enumerate(valid_dl):
+        loss, loss_info = compute_loss(
+            params=params,
+            model=model,
+            graph_compiler=graph_compiler,
+            batch=batch,
+            is_training=False,
+        )
+        assert loss.requires_grad is False
+        tot_loss = tot_loss + loss_info
+
+    if world_size > 1:
+        tot_loss.reduce(loss.device)
+
+    loss_value = tot_loss["loss"] / tot_loss["frames"]
+    if loss_value < params.best_valid_loss:
+        params.best_valid_epoch = params.cur_epoch
+        params.best_valid_loss = loss_value
+
+    return tot_loss
+
+
+def train_one_epoch(
+    params: AttributeDict,
+    model: nn.Module,
+    optimizer: torch.optim.Optimizer,
+    graph_compiler: CharCtcTrainingGraphCompiler,
+    train_dl: torch.utils.data.DataLoader,
+    datatang_train_dl: torch.utils.data.DataLoader,
+    valid_dl: torch.utils.data.DataLoader,
+    rng: random.Random,
+    tb_writer: Optional[SummaryWriter] = None,
+    world_size: int = 1,
+) -> None:
+    """Train the model for one epoch.
+
+    The training loss from the mean of all frames is saved in
+    `params.train_loss`. It runs the validation process every
+    `params.valid_interval` batches.
+
+    Args:
+      params:
+        It is returned by :func:`get_params`.
+      model:
+        The model for training.
+      optimizer:
+        The optimizer we are using.
+      train_dl:
+        Dataloader for the training dataset.
+      datatang_train_dl:
+        Dataloader for the aidatatang_200zh training dataset.
+      valid_dl:
+        Dataloader for the validation dataset.
+      tb_writer:
+        Writer to write log messages to tensorboard.
+      world_size:
+        Number of nodes in DDP training. If it is 1, DDP is disabled.
+    """
+    model.train()
+
+    aishell_tot_loss = MetricsTracker()
+    datatang_tot_loss = MetricsTracker()
+    tot_loss = MetricsTracker()
+
+    # index 0: for AIShell
+    # index 1: for aidatatang_200zh
+    # This sets the probabilities for choosing which dataset to draw
+    # the next batch from.
+    dl_weights = [1 - params.datatang_prob, params.datatang_prob]
+
+    iter_aishell = iter(train_dl)
+    iter_datatang = iter(datatang_train_dl)
+
+    batch_idx = 0
+
+    while True:
+        idx = rng.choices((0, 1), weights=dl_weights, k=1)[0]
+        dl = iter_aishell if idx == 0 else iter_datatang
+
+        try:
+            batch = next(dl)
+        except StopIteration:
+            break
+        batch_idx += 1
+
+        params.batch_idx_train += 1
+        batch_size = len(batch["supervisions"]["text"])
+
+        aishell = is_aishell(batch["supervisions"]["cut"][0])
+
+        loss, loss_info = compute_loss(
+            params=params,
+            model=model,
+            graph_compiler=graph_compiler,
+            batch=batch,
+            is_training=True,
+        )
+        # summary stats
+        tot_loss = (tot_loss * (1 - 1 / params.reset_interval)) + loss_info
+        if aishell:
+            aishell_tot_loss = (
+                aishell_tot_loss * (1 - 1 / params.reset_interval)
+            ) + loss_info
+            prefix = "aishell"  # for logging only
+        else:
+            datatang_tot_loss = (
+                datatang_tot_loss * (1 - 1 / params.reset_interval)
+            ) + loss_info
+            prefix = "datatang"
+
+        # NOTE: We use reduction==sum and loss is computed over utterances
+        # in the batch and there is no normalization to it so far.
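+        # The (1 - 1 / reset_interval) factor above keeps these statistics
+        # as an exponentially decaying running sum over recent batches.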
+
+        optimizer.zero_grad()
+        loss.backward()
+        clip_grad_norm_(model.parameters(), 5.0, 2.0)
+        optimizer.step()
+
+        if batch_idx % params.log_interval == 0:
+            logging.info(
+                f"Epoch {params.cur_epoch}, "
+                f"batch {batch_idx}, {prefix}_loss[{loss_info}], "
+                f"tot_loss[{tot_loss}], "
+                f"aishell_tot_loss[{aishell_tot_loss}], "
+                f"datatang_tot_loss[{datatang_tot_loss}], "
+                f"batch size: {batch_size}"
+            )
+
+        if batch_idx % params.log_interval == 0:
+            if tb_writer is not None:
+                loss_info.write_summary(
+                    tb_writer,
+                    f"train/current_{prefix}_",
+                    params.batch_idx_train,
+                )
+                tot_loss.write_summary(
+                    tb_writer, "train/tot_", params.batch_idx_train
+                )
+                aishell_tot_loss.write_summary(
+                    tb_writer, "train/aishell_tot_", params.batch_idx_train
+                )
+                datatang_tot_loss.write_summary(
+                    tb_writer, "train/datatang_tot_", params.batch_idx_train
+                )
+
+        if batch_idx > 0 and batch_idx % params.valid_interval == 0:
+            logging.info("Computing validation loss")
+            valid_info = compute_validation_loss(
+                params=params,
+                model=model,
+                graph_compiler=graph_compiler,
+                valid_dl=valid_dl,
+                world_size=world_size,
+            )
+            model.train()
+            logging.info(f"Epoch {params.cur_epoch}, validation: {valid_info}")
+            if tb_writer is not None:
+                valid_info.write_summary(
+                    tb_writer, "train/valid_", params.batch_idx_train
+                )
+
+    loss_value = tot_loss["loss"] / tot_loss["frames"]
+    params.train_loss = loss_value
+    if params.train_loss < params.best_train_loss:
+        params.best_train_epoch = params.cur_epoch
+        params.best_train_loss = params.train_loss
+
+
+def filter_short_and_long_utterances(cuts: CutSet) -> CutSet:
+    def remove_short_and_long_utt(c: Cut):
+        # Keep only utterances with duration between 1 second and 12 seconds
+        return 1.0 <= c.duration <= 12.0
+
+    num_in_total = len(cuts)
+    cuts = cuts.filter(remove_short_and_long_utt)
+
+    num_left = len(cuts)
+    num_removed = num_in_total - num_left
+    removed_percent = num_removed / num_in_total * 100
+
+    logging.info(f"Before removing short and long utterances: {num_in_total}")
+    logging.info(f"After removing short and long utterances: {num_left}")
+    logging.info(f"Removed {num_removed} utterances ({removed_percent:.5f}%)")
+
+    return cuts
+
+
+def run(rank, world_size, args):
+    """
+    Args:
+      rank:
+        It is a value between 0 and `world_size-1`, which is
+        passed automatically by `mp.spawn()` in :func:`main`.
+        The node with rank 0 is responsible for saving checkpoint.
+      world_size:
+        Number of GPUs for DDP training.
+ args: + The return value of get_parser().parse_args() + """ + params = get_params() + params.update(vars(args)) + + seed = 42 + fix_random_seed(seed) + rng = random.Random(seed) + if world_size > 1: + setup_dist(rank, world_size, params.master_port) + + setup_logger(f"{params.exp_dir}/log/log-train") + logging.info("Training started") + + if args.tensorboard and rank == 0: + tb_writer = SummaryWriter(log_dir=f"{params.exp_dir}/tensorboard") + else: + tb_writer = None + + device = torch.device("cpu") + if torch.cuda.is_available(): + device = torch.device("cuda", rank) + logging.info(f"Device: {device}") + + lexicon = Lexicon(params.lang_dir) + graph_compiler = CharCtcTrainingGraphCompiler( + lexicon=lexicon, + device=device, + oov="", + ) + + params.blank_id = 0 + params.vocab_size = max(lexicon.tokens) + 1 + + logging.info(params) + + logging.info("About to create model") + model = get_transducer_model(params) + + num_param = sum([p.numel() for p in model.parameters()]) + logging.info(f"Number of model parameters: {num_param}") + + checkpoints = load_checkpoint_if_available(params=params, model=model) + + model.to(device) + if world_size > 1: + logging.info("Using DDP") + model = DDP(model, device_ids=[rank], find_unused_parameters=True) + model.device = device + + optimizer = Noam( + model.parameters(), + model_size=params.attention_dim, + factor=params.lr_factor, + warm_step=params.warm_step, + ) + + if checkpoints and "optimizer" in checkpoints: + logging.info("Loading optimizer state dict") + optimizer.load_state_dict(checkpoints["optimizer"]) + + aishell = AIShell(manifest_dir=args.manifest_dir) + + train_cuts = aishell.train_cuts() + train_cuts = filter_short_and_long_utterances(train_cuts) + + datatang = AIDatatang200zh( + manifest_dir=f"{args.manifest_dir}/aidatatang_200zh" + ) + train_datatang_cuts = datatang.train_cuts() + train_datatang_cuts = filter_short_and_long_utterances(train_datatang_cuts) + + if args.enable_musan: + cuts_musan = load_manifest( + Path(args.manifest_dir) / "cuts_musan.json.gz" + ) + else: + cuts_musan = None + + asr_datamodule = AsrDataModule(args) + + train_dl = asr_datamodule.train_dataloaders( + train_cuts, + dynamic_bucketing=False, + on_the_fly_feats=False, + cuts_musan=cuts_musan, + ) + + datatang_train_dl = asr_datamodule.train_dataloaders( + train_datatang_cuts, + dynamic_bucketing=True, + on_the_fly_feats=True, + cuts_musan=cuts_musan, + ) + + valid_cuts = aishell.valid_cuts() + valid_dl = asr_datamodule.valid_dataloaders(valid_cuts) + + for dl in [train_dl, datatang_train_dl]: + scan_pessimistic_batches_for_oom( + model=model, + train_dl=dl, + optimizer=optimizer, + graph_compiler=graph_compiler, + params=params, + ) + + for epoch in range(params.start_epoch, params.num_epochs): + train_dl.sampler.set_epoch(epoch) + datatang_train_dl.sampler.set_epoch(epoch) + + cur_lr = optimizer._rate + if tb_writer is not None: + tb_writer.add_scalar( + "train/learning_rate", cur_lr, params.batch_idx_train + ) + tb_writer.add_scalar("train/epoch", epoch, params.batch_idx_train) + + if rank == 0: + logging.info("epoch {}, learning rate {}".format(epoch, cur_lr)) + + params.cur_epoch = epoch + + train_one_epoch( + params=params, + model=model, + optimizer=optimizer, + graph_compiler=graph_compiler, + train_dl=train_dl, + datatang_train_dl=datatang_train_dl, + valid_dl=valid_dl, + rng=rng, + tb_writer=tb_writer, + world_size=world_size, + ) + + save_checkpoint( + params=params, + model=model, + optimizer=optimizer, + rank=rank, + ) + + logging.info("Done!") + 
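+    # Wait for all ranks to finish before tearing down the process group.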
+ if world_size > 1: + torch.distributed.barrier() + cleanup_dist() + + +def scan_pessimistic_batches_for_oom( + model: nn.Module, + train_dl: torch.utils.data.DataLoader, + optimizer: torch.optim.Optimizer, + graph_compiler: CharCtcTrainingGraphCompiler, + params: AttributeDict, +): + from lhotse.dataset import find_pessimistic_batches + + logging.info( + "Sanity check -- see if any of the batches in epoch 0 would cause OOM." + ) + batches, crit_values = find_pessimistic_batches(train_dl.sampler) + for criterion, cuts in batches.items(): + batch = train_dl.dataset[cuts] + try: + optimizer.zero_grad() + loss, _ = compute_loss( + params=params, + model=model, + graph_compiler=graph_compiler, + batch=batch, + is_training=True, + ) + loss.backward() + clip_grad_norm_(model.parameters(), 5.0, 2.0) + optimizer.step() + except RuntimeError as e: + if "CUDA out of memory" in str(e): + logging.error( + "Your GPU ran out of memory with the current " + "max_duration setting. We recommend decreasing " + "max_duration and trying again.\n" + f"Failing criterion: {criterion} " + f"(={crit_values[criterion]}) ..." + ) + raise + + +def main(): + parser = get_parser() + AsrDataModule.add_arguments(parser) + args = parser.parse_args() + args.exp_dir = Path(args.exp_dir) + args.lang_dir = Path(args.lang_dir) + + assert 0 <= args.datatang_prob < 1, args.datatang_prob + + world_size = args.world_size + assert world_size >= 1 + if world_size > 1: + mp.spawn(run, args=(world_size, args), nprocs=world_size, join=True) + else: + run(rank=0, world_size=1, args=args) + + +torch.set_num_threads(1) +torch.set_num_interop_threads(1) + +if __name__ == "__main__": + main() diff --git a/egs/aishell/ASR/transducer_stateless_modified-2/transformer.py b/egs/aishell/ASR/transducer_stateless_modified-2/transformer.py new file mode 120000 index 000000000..4320d1105 --- /dev/null +++ b/egs/aishell/ASR/transducer_stateless_modified-2/transformer.py @@ -0,0 +1 @@ +../transducer_stateless_modified/transformer.py \ No newline at end of file diff --git a/egs/aishell/ASR/transducer_stateless_modified/README.md b/egs/aishell/ASR/transducer_stateless_modified/README.md new file mode 100644 index 000000000..9709eb9a0 --- /dev/null +++ b/egs/aishell/ASR/transducer_stateless_modified/README.md @@ -0,0 +1,21 @@ +## Introduction + +The decoder, i.e., the prediction network, is from +https://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=9054419 +(Rnn-Transducer with Stateless Prediction Network) + +You can use the following command to start the training: + +```bash +cd egs/aishell/ASR + +export CUDA_VISIBLE_DEVICES="0,1,2,3,4,5,6,7" + +./transducer_stateless_modified/train.py \ + --world-size 8 \ + --num-epochs 30 \ + --start-epoch 0 \ + --exp-dir transducer_stateless_modified/exp \ + --max-duration 250 \ + --lr-factor 2.5 +``` diff --git a/egs/aishell/ASR/transducer_stateless_modified/__init__.py b/egs/aishell/ASR/transducer_stateless_modified/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/egs/aishell/ASR/transducer_stateless_modified/asr_datamodule.py b/egs/aishell/ASR/transducer_stateless_modified/asr_datamodule.py new file mode 120000 index 000000000..a73848de9 --- /dev/null +++ b/egs/aishell/ASR/transducer_stateless_modified/asr_datamodule.py @@ -0,0 +1 @@ +../conformer_ctc/asr_datamodule.py \ No newline at end of file diff --git a/egs/aishell/ASR/transducer_stateless_modified/beam_search.py b/egs/aishell/ASR/transducer_stateless_modified/beam_search.py new file mode 120000 index 000000000..e188617a8 --- 
/dev/null +++ b/egs/aishell/ASR/transducer_stateless_modified/beam_search.py @@ -0,0 +1 @@ +../../../librispeech/ASR/transducer_stateless/beam_search.py \ No newline at end of file diff --git a/egs/aishell/ASR/transducer_stateless_modified/conformer.py b/egs/aishell/ASR/transducer_stateless_modified/conformer.py new file mode 120000 index 000000000..8be0dc864 --- /dev/null +++ b/egs/aishell/ASR/transducer_stateless_modified/conformer.py @@ -0,0 +1 @@ +../../../librispeech/ASR/transducer_stateless/conformer.py \ No newline at end of file diff --git a/egs/aishell/ASR/transducer_stateless_modified/decode.py b/egs/aishell/ASR/transducer_stateless_modified/decode.py new file mode 100755 index 000000000..5b5fe6ffa --- /dev/null +++ b/egs/aishell/ASR/transducer_stateless_modified/decode.py @@ -0,0 +1,486 @@ +#!/usr/bin/env python3 +# +# Copyright 2021 Xiaomi Corporation (Author: Fangjun Kuang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Usage: +(1) greedy search +./transducer_stateless_modified/decode.py \ + --epoch 64 \ + --avg 33 \ + --exp-dir ./transducer_stateless_modified/exp \ + --max-duration 100 \ + --decoding-method greedy_search + +(2) beam search +./transducer_stateless_modified/decode.py \ + --epoch 14 \ + --avg 7 \ + --exp-dir ./transducer_stateless_modified/exp \ + --max-duration 100 \ + --decoding-method beam_search \ + --beam-size 4 + +(3) modified beam search +./transducer_stateless_modified/decode.py \ + --epoch 14 \ + --avg 7 \ + --exp-dir ./transducer_stateless_modified/exp \ + --max-duration 100 \ + --decoding-method modified_beam_search \ + --beam-size 4 +""" + +import argparse +import logging +from collections import defaultdict +from pathlib import Path +from typing import Dict, List, Tuple + +import torch +import torch.nn as nn +from asr_datamodule import AishellAsrDataModule +from beam_search import beam_search, greedy_search, modified_beam_search +from conformer import Conformer +from decoder import Decoder +from joiner import Joiner +from model import Transducer + +from icefall.checkpoint import average_checkpoints, load_checkpoint +from icefall.env import get_env_info +from icefall.lexicon import Lexicon +from icefall.utils import ( + AttributeDict, + setup_logger, + store_transcripts, + write_error_stats, +) + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--epoch", + type=int, + default=30, + help="It specifies the checkpoint to use for decoding." + "Note: Epoch counts from 0.", + ) + parser.add_argument( + "--avg", + type=int, + default=10, + help="Number of checkpoints to average. Automatically select " + "consecutive checkpoints before the checkpoint specified by " + "'--epoch'. 
", + ) + + parser.add_argument( + "--exp-dir", + type=str, + default="transducer_stateless_modified/exp", + help="The experiment dir", + ) + + parser.add_argument( + "--lang-dir", + type=str, + default="data/lang_char", + help="The lang dir", + ) + + parser.add_argument( + "--decoding-method", + type=str, + default="greedy_search", + help="""Possible values are: + - greedy_search + - beam_search + - modified_beam_search + """, + ) + + parser.add_argument( + "--beam-size", + type=int, + default=4, + help="Used only when --decoding-method is beam_search", + ) + + parser.add_argument( + "--context-size", + type=int, + default=2, + help="The context size in the decoder. 1 means bigram; " + "2 means tri-gram", + ) + parser.add_argument( + "--max-sym-per-frame", + type=int, + default=3, + help="Maximum number of symbols per frame", + ) + return parser + + +def get_params() -> AttributeDict: + params = AttributeDict( + { + # parameters for conformer + "feature_dim": 80, + "encoder_out_dim": 512, + "subsampling_factor": 4, + "attention_dim": 512, + "nhead": 8, + "dim_feedforward": 2048, + "num_encoder_layers": 12, + "vgg_frontend": False, + "env_info": get_env_info(), + } + ) + return params + + +def get_encoder_model(params: AttributeDict): + # TODO: We can add an option to switch between Conformer and Transformer + encoder = Conformer( + num_features=params.feature_dim, + output_dim=params.encoder_out_dim, + subsampling_factor=params.subsampling_factor, + d_model=params.attention_dim, + nhead=params.nhead, + dim_feedforward=params.dim_feedforward, + num_encoder_layers=params.num_encoder_layers, + vgg_frontend=params.vgg_frontend, + ) + return encoder + + +def get_decoder_model(params: AttributeDict): + decoder = Decoder( + vocab_size=params.vocab_size, + embedding_dim=params.encoder_out_dim, + blank_id=params.blank_id, + context_size=params.context_size, + ) + return decoder + + +def get_joiner_model(params: AttributeDict): + joiner = Joiner( + input_dim=params.encoder_out_dim, + output_dim=params.vocab_size, + ) + return joiner + + +def get_transducer_model(params: AttributeDict): + encoder = get_encoder_model(params) + decoder = get_decoder_model(params) + joiner = get_joiner_model(params) + + model = Transducer( + encoder=encoder, + decoder=decoder, + joiner=joiner, + ) + return model + + +def decode_one_batch( + params: AttributeDict, + model: nn.Module, + lexicon: Lexicon, + batch: dict, +) -> Dict[str, List[List[str]]]: + """Decode one batch and return the result in a dict. The dict has the + following format: + + - key: It indicates the setting used for decoding. For example, + if greedy_search is used, it would be "greedy_search" + If beam search with a beam size of 7 is used, it would be + "beam_7" + - value: It contains the decoding result. `len(value)` equals to + batch size. `value[i]` is the decoding result for the i-th + utterance in the given batch. + Args: + params: + It's the return value of :func:`get_params`. + model: + The neural model. + batch: + It is the return value from iterating + `lhotse.dataset.K2SpeechRecognitionDataset`. See its documentation + for the format of the `batch`. + lexicon: + It contains the token symbol table and the word symbol table. + Returns: + Return the decoding result. See above description for the format of + the returned dict. 
+ """ + device = model.device + feature = batch["inputs"] + assert feature.ndim == 3 + + feature = feature.to(device) + # at entry, feature is (N, T, C) + + supervisions = batch["supervisions"] + feature_lens = supervisions["num_frames"].to(device) + + encoder_out, encoder_out_lens = model.encoder( + x=feature, x_lens=feature_lens + ) + hyps = [] + batch_size = encoder_out.size(0) + + for i in range(batch_size): + # fmt: off + encoder_out_i = encoder_out[i:i+1, :encoder_out_lens[i]] + # fmt: on + if params.decoding_method == "greedy_search": + hyp = greedy_search( + model=model, + encoder_out=encoder_out_i, + max_sym_per_frame=params.max_sym_per_frame, + ) + elif params.decoding_method == "beam_search": + hyp = beam_search( + model=model, encoder_out=encoder_out_i, beam=params.beam_size + ) + elif params.decoding_method == "modified_beam_search": + hyp = modified_beam_search( + model=model, encoder_out=encoder_out_i, beam=params.beam_size + ) + else: + raise ValueError( + f"Unsupported decoding method: {params.decoding_method}" + ) + hyps.append([lexicon.token_table[i] for i in hyp]) + + if params.decoding_method == "greedy_search": + return {"greedy_search": hyps} + else: + return {f"beam_{params.beam_size}": hyps} + + +def decode_dataset( + dl: torch.utils.data.DataLoader, + params: AttributeDict, + model: nn.Module, + lexicon: Lexicon, +) -> Dict[str, List[Tuple[List[str], List[str]]]]: + """Decode dataset. + + Args: + dl: + PyTorch's dataloader containing the dataset to decode. + params: + It is returned by :func:`get_params`. + model: + The neural model. + Returns: + Return a dict, whose key may be "greedy_search" if greedy search + is used, or it may be "beam_7" if beam size of 7 is used. + Its value is a list of tuples. Each tuple contains two elements: + The first is the reference transcript, and the second is the + predicted result. + """ + num_cuts = 0 + + try: + num_batches = len(dl) + except TypeError: + num_batches = "?" + + if params.decoding_method == "greedy_search": + log_interval = 100 + else: + log_interval = 2 + + results = defaultdict(list) + for batch_idx, batch in enumerate(dl): + texts = batch["supervisions"]["text"] + + hyps_dict = decode_one_batch( + params=params, + model=model, + lexicon=lexicon, + batch=batch, + ) + + for name, hyps in hyps_dict.items(): + this_batch = [] + assert len(hyps) == len(texts) + for hyp_words, ref_text in zip(hyps, texts): + ref_words = ref_text.split() + this_batch.append((ref_words, hyp_words)) + + results[name].extend(this_batch) + + num_cuts += len(texts) + + if batch_idx % log_interval == 0: + batch_str = f"{batch_idx}/{num_batches}" + + logging.info( + f"batch {batch_str}, cuts processed until now is {num_cuts}" + ) + return results + + +def save_results( + params: AttributeDict, + test_set_name: str, + results_dict: Dict[str, List[Tuple[List[int], List[int]]]], +): + test_set_wers = dict() + for key, results in results_dict.items(): + recog_path = ( + params.res_dir / f"recogs-{test_set_name}-{key}-{params.suffix}.txt" + ) + store_transcripts(filename=recog_path, texts=results) + + # The following prints out WERs, per-word error statistics and aligned + # ref/hyp pairs. + errs_filename = ( + params.res_dir / f"errs-{test_set_name}-{key}-{params.suffix}.txt" + ) + # we compute CER for aishell dataset. 
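+        # This is done by splitting every reference/hypothesis into single
+        # characters so that write_error_stats() effectively reports CER.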
+ results_char = [] + for res in results: + results_char.append((list("".join(res[0])), list("".join(res[1])))) + with open(errs_filename, "w") as f: + wer = write_error_stats( + f, f"{test_set_name}-{key}", results_char, enable_log=True + ) + test_set_wers[key] = wer + + logging.info("Wrote detailed error stats to {}".format(errs_filename)) + + test_set_wers = sorted(test_set_wers.items(), key=lambda x: x[1]) + errs_info = ( + params.res_dir + / f"wer-summary-{test_set_name}-{key}-{params.suffix}.txt" + ) + with open(errs_info, "w") as f: + print("settings\tCER", file=f) + for key, val in test_set_wers: + print("{}\t{}".format(key, val), file=f) + + s = "\nFor {}, CER of different settings are:\n".format(test_set_name) + note = "\tbest for {}".format(test_set_name) + for key, val in test_set_wers: + s += "{}\t{}{}\n".format(key, val, note) + note = "" + logging.info(s) + + +@torch.no_grad() +def main(): + parser = get_parser() + AishellAsrDataModule.add_arguments(parser) + args = parser.parse_args() + args.exp_dir = Path(args.exp_dir) + args.lang_dir = Path(args.lang_dir) + + params = get_params() + params.update(vars(args)) + + assert params.decoding_method in ( + "greedy_search", + "beam_search", + "modified_beam_search", + ) + params.res_dir = params.exp_dir / params.decoding_method + + params.suffix = f"epoch-{params.epoch}-avg-{params.avg}" + if "beam_search" in params.decoding_method: + params.suffix += f"-beam-{params.beam_size}" + else: + params.suffix += f"-context-{params.context_size}" + params.suffix += f"-max-sym-per-frame-{params.max_sym_per_frame}" + + setup_logger(f"{params.res_dir}/log-decode-{params.suffix}") + logging.info("Decoding started") + + device = torch.device("cpu") + if torch.cuda.is_available(): + device = torch.device("cuda", 0) + + logging.info(f"Device: {device}") + + lexicon = Lexicon(params.lang_dir) + + params.blank_id = 0 + params.vocab_size = max(lexicon.tokens) + 1 + + logging.info(params) + + logging.info("About to create model") + model = get_transducer_model(params) + + if params.avg == 1: + load_checkpoint(f"{params.exp_dir}/epoch-{params.epoch}.pt", model) + else: + start = params.epoch - params.avg + 1 + filenames = [] + for i in range(start, params.epoch + 1): + if start >= 0: + filenames.append(f"{params.exp_dir}/epoch-{i}.pt") + logging.info(f"averaging {filenames}") + model.to(device) + model.load_state_dict(average_checkpoints(filenames, device=device)) + + model.to(device) + model.eval() + model.device = device + + num_param = sum([p.numel() for p in model.parameters()]) + logging.info(f"Number of model parameters: {num_param}") + + aishell = AishellAsrDataModule(args) + test_cuts = aishell.test_cuts() + test_dl = aishell.test_dataloaders(test_cuts) + + test_sets = ["test"] + test_dls = [test_dl] + + for test_set, test_dl in zip(test_sets, test_dls): + results_dict = decode_dataset( + dl=test_dl, + params=params, + model=model, + lexicon=lexicon, + ) + + save_results( + params=params, + test_set_name=test_set, + results_dict=results_dict, + ) + + logging.info("Done!") + + +torch.set_num_threads(1) +torch.set_num_interop_threads(1) + +if __name__ == "__main__": + main() diff --git a/egs/aishell/ASR/transducer_stateless_modified/decoder.py b/egs/aishell/ASR/transducer_stateless_modified/decoder.py new file mode 120000 index 000000000..82337f7ef --- /dev/null +++ b/egs/aishell/ASR/transducer_stateless_modified/decoder.py @@ -0,0 +1 @@ +../../../librispeech/ASR/transducer_stateless/decoder.py \ No newline at end of file diff --git 
a/egs/aishell/ASR/transducer_stateless_modified/encoder_interface.py b/egs/aishell/ASR/transducer_stateless_modified/encoder_interface.py new file mode 120000 index 000000000..653c5b09a --- /dev/null +++ b/egs/aishell/ASR/transducer_stateless_modified/encoder_interface.py @@ -0,0 +1 @@ +../../../librispeech/ASR/transducer_stateless/encoder_interface.py \ No newline at end of file diff --git a/egs/aishell/ASR/transducer_stateless_modified/export.py b/egs/aishell/ASR/transducer_stateless_modified/export.py new file mode 100755 index 000000000..9a20fab6f --- /dev/null +++ b/egs/aishell/ASR/transducer_stateless_modified/export.py @@ -0,0 +1,246 @@ +#!/usr/bin/env python3 +# +# Copyright 2021 Xiaomi Corporation (Author: Fangjun Kuang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script converts several saved checkpoints +# to a single one using model averaging. +""" +Usage: +./transducer_stateless_modified/export.py \ + --exp-dir ./transducer_stateless_modified/exp \ + --epoch 64 \ + --avg 33 + +It will generate a file exp_dir/pretrained.pt + +To use the generated file with `transducer_stateless_modified/decode.py`, +you can do:: + + cd /path/to/exp_dir + ln -s pretrained.pt epoch-9999.pt + + cd /path/to/egs/aishell/ASR + ./transducer_stateless_modified/decode.py \ + --exp-dir ./transducer_stateless_modified/exp \ + --epoch 9999 \ + --avg 1 \ + --max-duration 100 \ + --lang-dir data/lang_char +""" + +import argparse +import logging +from pathlib import Path + +import torch +import torch.nn as nn +from conformer import Conformer +from decoder import Decoder +from joiner import Joiner +from model import Transducer + +from icefall.checkpoint import average_checkpoints, load_checkpoint +from icefall.env import get_env_info +from icefall.lexicon import Lexicon +from icefall.utils import AttributeDict, str2bool + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--epoch", + type=int, + default=20, + help="It specifies the checkpoint to use for decoding." + "Note: Epoch counts from 0.", + ) + + parser.add_argument( + "--avg", + type=int, + default=10, + help="Number of checkpoints to average. Automatically select " + "consecutive checkpoints before the checkpoint specified by " + "'--epoch'. ", + ) + + parser.add_argument( + "--exp-dir", + type=Path, + default=Path("transducer_stateless_modified/exp"), + help="""It specifies the directory where all training related + files, e.g., checkpoints, log, etc, are saved + """, + ) + + parser.add_argument( + "--jit", + type=str2bool, + default=False, + help="""True to save a model after applying torch.jit.script. + """, + ) + + parser.add_argument( + "--lang-dir", + type=Path, + default=Path("data/lang_char"), + help="The lang dir", + ) + + parser.add_argument( + "--context-size", + type=int, + default=2, + help="The context size in the decoder. 
1 means bigram; " + "2 means tri-gram", + ) + + return parser + + +def get_params() -> AttributeDict: + params = AttributeDict( + { + # parameters for conformer + "feature_dim": 80, + "encoder_out_dim": 512, + "subsampling_factor": 4, + "attention_dim": 512, + "nhead": 8, + "dim_feedforward": 2048, + "num_encoder_layers": 12, + "vgg_frontend": False, + "env_info": get_env_info(), + } + ) + return params + + +def get_encoder_model(params: AttributeDict) -> nn.Module: + encoder = Conformer( + num_features=params.feature_dim, + output_dim=params.encoder_out_dim, + subsampling_factor=params.subsampling_factor, + d_model=params.attention_dim, + nhead=params.nhead, + dim_feedforward=params.dim_feedforward, + num_encoder_layers=params.num_encoder_layers, + vgg_frontend=params.vgg_frontend, + ) + return encoder + + +def get_decoder_model(params: AttributeDict) -> nn.Module: + decoder = Decoder( + vocab_size=params.vocab_size, + embedding_dim=params.encoder_out_dim, + blank_id=params.blank_id, + context_size=params.context_size, + ) + return decoder + + +def get_joiner_model(params: AttributeDict) -> nn.Module: + joiner = Joiner( + input_dim=params.encoder_out_dim, + output_dim=params.vocab_size, + ) + return joiner + + +def get_transducer_model(params: AttributeDict) -> nn.Module: + encoder = get_encoder_model(params) + decoder = get_decoder_model(params) + joiner = get_joiner_model(params) + + model = Transducer( + encoder=encoder, + decoder=decoder, + joiner=joiner, + ) + return model + + +def main(): + args = get_parser().parse_args() + + assert args.jit is False, "torchscript support will be added later" + + params = get_params() + params.update(vars(args)) + + device = torch.device("cpu") + if torch.cuda.is_available(): + device = torch.device("cuda", 0) + + logging.info(f"device: {device}") + + lexicon = Lexicon(params.lang_dir) + + params.blank_id = 0 + params.vocab_size = max(lexicon.tokens) + 1 + + logging.info(params) + + logging.info("About to create model") + model = get_transducer_model(params) + + model.to(device) + + if params.avg == 1: + load_checkpoint(f"{params.exp_dir}/epoch-{params.epoch}.pt", model) + else: + start = params.epoch - params.avg + 1 + filenames = [] + for i in range(start, params.epoch + 1): + if start >= 0: + filenames.append(f"{params.exp_dir}/epoch-{i}.pt") + logging.info(f"averaging {filenames}") + model.to(device) + model.load_state_dict( + average_checkpoints(filenames, device=device), strict=False + ) + + model.to("cpu") + model.eval() + + if params.jit: + logging.info("Using torch.jit.script") + model = torch.jit.script(model) + filename = params.exp_dir / "cpu_jit.pt" + model.save(str(filename)) + logging.info(f"Saved to {filename}") + else: + logging.info("Not using torch.jit.script") + # Save it using a format so that it can be loaded + # by :func:`load_checkpoint` + filename = params.exp_dir / "pretrained.pt" + torch.save({"model": model.state_dict()}, str(filename)) + logging.info(f"Saved to {filename}") + + +if __name__ == "__main__": + formatter = ( + "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s" + ) + + logging.basicConfig(format=formatter, level=logging.INFO) + main() diff --git a/egs/aishell/ASR/transducer_stateless_modified/joiner.py b/egs/aishell/ASR/transducer_stateless_modified/joiner.py new file mode 120000 index 000000000..1aec6bfaf --- /dev/null +++ b/egs/aishell/ASR/transducer_stateless_modified/joiner.py @@ -0,0 +1 @@ +../../../librispeech/ASR/transducer_stateless/joiner.py \ No newline at end of file diff --git 
a/egs/aishell/ASR/transducer_stateless_modified/model.py b/egs/aishell/ASR/transducer_stateless_modified/model.py new file mode 120000 index 000000000..16ddd93f0 --- /dev/null +++ b/egs/aishell/ASR/transducer_stateless_modified/model.py @@ -0,0 +1 @@ +../../../librispeech/ASR/transducer_stateless/model.py \ No newline at end of file diff --git a/egs/aishell/ASR/transducer_stateless_modified/pretrained.py b/egs/aishell/ASR/transducer_stateless_modified/pretrained.py new file mode 100755 index 000000000..698594e92 --- /dev/null +++ b/egs/aishell/ASR/transducer_stateless_modified/pretrained.py @@ -0,0 +1,331 @@ +#!/usr/bin/env python3 +# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang, +# Wei Kang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Usage: + +# greedy search +./transducer_stateless_modified/pretrained.py \ + --checkpoint /path/to/pretrained.pt \ + --lang-dir /path/to/lang_char \ + --method greedy_search \ + /path/to/foo.wav \ + /path/to/bar.wav + +# beam search +./transducer_stateless_modified/pretrained.py \ + --checkpoint /path/to/pretrained.pt \ + --lang-dir /path/to/lang_char \ + --method beam_search \ + --beam-size 4 \ + /path/to/foo.wav \ + /path/to/bar.wav + +# modified beam search +./transducer_stateless_modified/pretrained.py \ + --checkpoint /path/to/pretrained.pt \ + --lang-dir /path/to/lang_char \ + --method modified_beam_search \ + --beam-size 4 \ + /path/to/foo.wav \ + /path/to/bar.wav + +""" + +import argparse +import logging +import math +from pathlib import Path +from typing import List + +import kaldifeat +import torch +import torch.nn as nn +import torchaudio +from beam_search import beam_search, greedy_search, modified_beam_search +from conformer import Conformer +from decoder import Decoder +from joiner import Joiner +from model import Transducer +from torch.nn.utils.rnn import pad_sequence + +from icefall.env import get_env_info +from icefall.lexicon import Lexicon +from icefall.utils import AttributeDict + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--checkpoint", + type=str, + required=True, + help="Path to the checkpoint. " + "The checkpoint is assumed to be saved by " + "icefall.checkpoint.save_checkpoint().", + ) + + parser.add_argument( + "--lang-dir", + type=Path, + default=Path("data/lang_char"), + help="The lang dir", + ) + + parser.add_argument( + "--method", + type=str, + default="greedy_search", + help="""Possible values are: + - greedy_search + - beam_search + - modified_beam_search + """, + ) + + parser.add_argument( + "sound_files", + type=str, + nargs="+", + help="The input sound file(s) to transcribe. " + "Supported formats are those supported by torchaudio.load(). " + "For example, wav and flac are supported. 
" + "The sample rate has to be 16kHz.", + ) + + parser.add_argument( + "--beam-size", + type=int, + default=4, + help="Used only when --method is beam_search and modified_beam_search", + ) + + parser.add_argument( + "--context-size", + type=int, + default=2, + help="The context size in the decoder. 1 means bigram; " + "2 means tri-gram", + ) + parser.add_argument( + "--max-sym-per-frame", + type=int, + default=3, + help="Maximum number of symbols per frame. " + "Use only when --method is greedy_search", + ) + return parser + + return parser + + +def get_params() -> AttributeDict: + params = AttributeDict( + { + # parameters for conformer + "feature_dim": 80, + "encoder_out_dim": 512, + "subsampling_factor": 4, + "attention_dim": 512, + "nhead": 8, + "dim_feedforward": 2048, + "num_encoder_layers": 12, + "vgg_frontend": False, + "env_info": get_env_info(), + "sample_rate": 16000, + } + ) + return params + + +def get_encoder_model(params: AttributeDict) -> nn.Module: + encoder = Conformer( + num_features=params.feature_dim, + output_dim=params.encoder_out_dim, + subsampling_factor=params.subsampling_factor, + d_model=params.attention_dim, + nhead=params.nhead, + dim_feedforward=params.dim_feedforward, + num_encoder_layers=params.num_encoder_layers, + vgg_frontend=params.vgg_frontend, + ) + return encoder + + +def get_decoder_model(params: AttributeDict) -> nn.Module: + decoder = Decoder( + vocab_size=params.vocab_size, + embedding_dim=params.encoder_out_dim, + blank_id=params.blank_id, + context_size=params.context_size, + ) + return decoder + + +def get_joiner_model(params: AttributeDict) -> nn.Module: + joiner = Joiner( + input_dim=params.encoder_out_dim, + output_dim=params.vocab_size, + ) + return joiner + + +def get_transducer_model(params: AttributeDict) -> nn.Module: + encoder = get_encoder_model(params) + decoder = get_decoder_model(params) + joiner = get_joiner_model(params) + + model = Transducer( + encoder=encoder, + decoder=decoder, + joiner=joiner, + ) + return model + + +def read_sound_files( + filenames: List[str], expected_sample_rate: float +) -> List[torch.Tensor]: + """Read a list of sound files into a list 1-D float32 torch tensors. + Args: + filenames: + A list of sound filenames. + expected_sample_rate: + The expected sample rate of the sound files. + Returns: + Return a list of 1-D float32 torch tensors. + """ + ans = [] + for f in filenames: + wave, sample_rate = torchaudio.load(f) + assert sample_rate == expected_sample_rate, ( + f"expected sample rate: {expected_sample_rate}. 
" + f"Given: {sample_rate}" + ) + # We use only the first channel + ans.append(wave[0]) + return ans + + +def main(): + parser = get_parser() + args = parser.parse_args() + + params = get_params() + params.update(vars(args)) + + device = torch.device("cpu") + if torch.cuda.is_available(): + device = torch.device("cuda", 0) + + logging.info(f"device: {device}") + + lexicon = Lexicon(params.lang_dir) + + params.blank_id = 0 + params.vocab_size = max(lexicon.tokens) + 1 + + logging.info(params) + + logging.info("About to create model") + model = get_transducer_model(params) + + checkpoint = torch.load(args.checkpoint, map_location="cpu") + model.load_state_dict(checkpoint["model"]) + model.to(device) + model.eval() + model.device = device + + logging.info("Constructing Fbank computer") + opts = kaldifeat.FbankOptions() + opts.device = device + opts.frame_opts.dither = 0 + opts.frame_opts.snip_edges = False + opts.frame_opts.samp_freq = params.sample_rate + opts.mel_opts.num_bins = params.feature_dim + + fbank = kaldifeat.Fbank(opts) + + logging.info(f"Reading sound files: {params.sound_files}") + waves = read_sound_files( + filenames=params.sound_files, expected_sample_rate=params.sample_rate + ) + waves = [w.to(device) for w in waves] + + logging.info("Decoding started") + features = fbank(waves) + feature_lens = [f.size(0) for f in features] + feature_lens = torch.tensor(feature_lens, device=device) + + features = pad_sequence( + features, batch_first=True, padding_value=math.log(1e-10) + ) + + hyps = [] + with torch.no_grad(): + encoder_out, encoder_out_lens = model.encoder( + x=features, x_lens=feature_lens + ) + + for i in range(encoder_out.size(0)): + # fmt: off + encoder_out_i = encoder_out[i:i+1, :encoder_out_lens[i]] + # fmt: on + if params.method == "greedy_search": + hyp = greedy_search( + model=model, + encoder_out=encoder_out_i, + max_sym_per_frame=params.max_sym_per_frame, + ) + elif params.method == "beam_search": + hyp = beam_search( + model=model, + encoder_out=encoder_out_i, + beam=params.beam_size, + ) + elif params.method == "modified_beam_search": + hyp = modified_beam_search( + model=model, + encoder_out=encoder_out_i, + beam=params.beam_size, + ) + else: + raise ValueError( + f"Unsupported decoding method: {params.method}" + ) + hyps.append([lexicon.token_table[i] for i in hyp]) + + s = "\n" + for filename, hyp in zip(params.sound_files, hyps): + words = " ".join(hyp) + s += f"{filename}:\n{words}\n\n" + logging.info(s) + + logging.info("Decoding Done") + + +if __name__ == "__main__": + formatter = ( + "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s" + ) + + logging.basicConfig(format=formatter, level=logging.INFO) + main() diff --git a/egs/aishell/ASR/transducer_stateless_modified/subsampling.py b/egs/aishell/ASR/transducer_stateless_modified/subsampling.py new file mode 120000 index 000000000..6fee09e58 --- /dev/null +++ b/egs/aishell/ASR/transducer_stateless_modified/subsampling.py @@ -0,0 +1 @@ +../conformer_ctc/subsampling.py \ No newline at end of file diff --git a/egs/aishell/ASR/transducer_stateless_modified/test_decoder.py b/egs/aishell/ASR/transducer_stateless_modified/test_decoder.py new file mode 100755 index 000000000..fe0bdee70 --- /dev/null +++ b/egs/aishell/ASR/transducer_stateless_modified/test_decoder.py @@ -0,0 +1,58 @@ +#!/usr/bin/env python3 +# Copyright 2021 Xiaomi Corp. 
(authors: Fangjun Kuang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +To run this file, do: + + cd icefall/egs/aishell/ASR + python ./transducer_stateless/test_decoder.py +""" + +import torch +from decoder import Decoder + + +def test_decoder(): + vocab_size = 3 + blank_id = 0 + embedding_dim = 128 + context_size = 4 + + decoder = Decoder( + vocab_size=vocab_size, + embedding_dim=embedding_dim, + blank_id=blank_id, + context_size=context_size, + ) + N = 100 + U = 20 + x = torch.randint(low=0, high=vocab_size, size=(N, U)) + y = decoder(x) + assert y.shape == (N, U, embedding_dim) + + # for inference + x = torch.randint(low=0, high=vocab_size, size=(N, context_size)) + y = decoder(x, need_pad=False) + assert y.shape == (N, 1, embedding_dim) + + +def main(): + test_decoder() + + +if __name__ == "__main__": + main() diff --git a/egs/aishell/ASR/transducer_stateless_modified/train.py b/egs/aishell/ASR/transducer_stateless_modified/train.py new file mode 100755 index 000000000..524854b73 --- /dev/null +++ b/egs/aishell/ASR/transducer_stateless_modified/train.py @@ -0,0 +1,751 @@ +#!/usr/bin/env python3 +# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang, +# Wei Kang +# Mingshuang Luo) +# Copyright 2021 (Pingfeng Luo) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +""" +Usage: + +export CUDA_VISIBLE_DEVICES="0,1,2" + +./transducer_stateless_modified/train.py \ + --world-size 3 \ + --num-epochs 65 \ + --start-epoch 0 \ + --exp-dir transducer_stateless_modified/exp \ + --max-duration 250 \ + --lr-factor 2.0 \ + --context-size 2 \ + --modified-transducer-prob 0.25 +""" + + +import argparse +import logging +from pathlib import Path +from shutil import copyfile +from typing import Optional, Tuple + +import k2 +import torch +import torch.multiprocessing as mp +import torch.nn as nn +from asr_datamodule import AishellAsrDataModule +from conformer import Conformer +from decoder import Decoder +from joiner import Joiner +from lhotse.cut import Cut +from lhotse.utils import fix_random_seed +from model import Transducer +from torch import Tensor +from torch.nn.parallel import DistributedDataParallel as DDP +from torch.nn.utils import clip_grad_norm_ +from torch.utils.tensorboard import SummaryWriter +from transformer import Noam + +from icefall.char_graph_compiler import CharCtcTrainingGraphCompiler +from icefall.checkpoint import load_checkpoint +from icefall.checkpoint import save_checkpoint as save_checkpoint_impl +from icefall.dist import cleanup_dist, setup_dist +from icefall.env import get_env_info +from icefall.lexicon import Lexicon +from icefall.utils import AttributeDict, MetricsTracker, setup_logger, str2bool + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--world-size", + type=int, + default=1, + help="Number of GPUs for DDP training.", + ) + + parser.add_argument( + "--master-port", + type=int, + default=12354, + help="Master port to use for DDP training.", + ) + + parser.add_argument( + "--tensorboard", + type=str2bool, + default=True, + help="Should various information be logged in tensorboard.", + ) + + parser.add_argument( + "--num-epochs", + type=int, + default=30, + help="Number of epochs to train.", + ) + + parser.add_argument( + "--start-epoch", + type=int, + default=0, + help="""Resume training from from this epoch. + If it is positive, it will load checkpoint from + transducer_stateless/exp/epoch-{start_epoch-1}.pt + """, + ) + + parser.add_argument( + "--exp-dir", + type=str, + default="transducer_stateless_modified/exp", + help="""The experiment dir. + It specifies the directory where all training related + files, e.g., checkpoints, log, etc, are saved + """, + ) + + parser.add_argument( + "--lang-dir", + type=str, + default="data/lang_char", + help="""The lang dir + It contains language related input files such as + "lexicon.txt" + """, + ) + + parser.add_argument( + "--lr-factor", + type=float, + default=5.0, + help="The lr_factor for Noam optimizer", + ) + + parser.add_argument( + "--context-size", + type=int, + default=2, + help="The context size in the decoder. 1 means bigram; " + "2 means tri-gram", + ) + + parser.add_argument( + "--modified-transducer-prob", + type=float, + default=0.25, + help="""The probability to use modified transducer loss. + In modified transduer, it limits the maximum number of symbols + per frame to 1. See also the option --max-sym-per-frame in + transducer_stateless/decode.py + """, + ) + + return parser + + +def get_params() -> AttributeDict: + """Return a dict containing training parameters. + + All training related parameters that are not passed from the commandline + are saved in the variable `params`. 
+ + Commandline options are merged into `params` after they are parsed, so + you can also access them via `params`. + + Explanation of options saved in `params`: + + - best_train_loss: Best training loss so far. It is used to select + the model that has the lowest training loss. It is + updated during the training. + + - best_valid_loss: Best validation loss so far. It is used to select + the model that has the lowest validation loss. It is + updated during the training. + + - best_train_epoch: It is the epoch that has the best training loss. + + - best_valid_epoch: It is the epoch that has the best validation loss. + + - batch_idx_train: Used to writing statistics to tensorboard. It + contains number of batches trained so far across + epochs. + + - log_interval: Print training loss if batch_idx % log_interval` is 0 + + - reset_interval: Reset statistics if batch_idx % reset_interval is 0 + + - valid_interval: Run validation if batch_idx % valid_interval is 0 + + - feature_dim: The model input dim. It has to match the one used + in computing features. + + - subsampling_factor: The subsampling factor for the model. + + - attention_dim: Hidden dim for multi-head attention model. + + - num_decoder_layers: Number of decoder layer of transformer decoder. + + - warm_step: The warm_step for Noam optimizer. + """ + params = AttributeDict( + { + "best_train_loss": float("inf"), + "best_valid_loss": float("inf"), + "best_train_epoch": -1, + "best_valid_epoch": -1, + "batch_idx_train": 0, + "log_interval": 50, + "reset_interval": 200, + "valid_interval": 800, + # parameters for conformer + "feature_dim": 80, + "encoder_out_dim": 512, + "subsampling_factor": 4, + "attention_dim": 512, + "nhead": 8, + "dim_feedforward": 2048, + "num_encoder_layers": 12, + "vgg_frontend": False, + # parameters for Noam + "warm_step": 80000, # For the 100h subset, use 8k + "env_info": get_env_info(), + } + ) + + return params + + +def get_encoder_model(params: AttributeDict) -> nn.Module: + # TODO: We can add an option to switch between Conformer and Transformer + encoder = Conformer( + num_features=params.feature_dim, + output_dim=params.encoder_out_dim, + subsampling_factor=params.subsampling_factor, + d_model=params.attention_dim, + nhead=params.nhead, + dim_feedforward=params.dim_feedforward, + num_encoder_layers=params.num_encoder_layers, + vgg_frontend=params.vgg_frontend, + ) + return encoder + + +def get_decoder_model(params: AttributeDict) -> nn.Module: + decoder = Decoder( + vocab_size=params.vocab_size, + embedding_dim=params.encoder_out_dim, + blank_id=params.blank_id, + context_size=params.context_size, + ) + return decoder + + +def get_joiner_model(params: AttributeDict) -> nn.Module: + joiner = Joiner( + input_dim=params.encoder_out_dim, + output_dim=params.vocab_size, + ) + return joiner + + +def get_transducer_model(params: AttributeDict) -> nn.Module: + encoder = get_encoder_model(params) + decoder = get_decoder_model(params) + joiner = get_joiner_model(params) + + model = Transducer( + encoder=encoder, + decoder=decoder, + joiner=joiner, + ) + return model + + +def load_checkpoint_if_available( + params: AttributeDict, + model: nn.Module, + optimizer: Optional[torch.optim.Optimizer] = None, + scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None, +) -> None: + """Load checkpoint from file. + + If params.start_epoch is positive, it will load the checkpoint from + `params.start_epoch - 1`. Otherwise, this function does nothing. 
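+
+    For example, if params.start_epoch is 10, it loads
+    `params.exp_dir / "epoch-9.pt"`.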
+ + Apart from loading state dict for `model`, `optimizer` and `scheduler`, + it also updates `best_train_epoch`, `best_train_loss`, `best_valid_epoch`, + and `best_valid_loss` in `params`. + + Args: + params: + The return value of :func:`get_params`. + model: + The training model. + optimizer: + The optimizer that we are using. + scheduler: + The learning rate scheduler we are using. + Returns: + Return None. + """ + if params.start_epoch <= 0: + return + + filename = params.exp_dir / f"epoch-{params.start_epoch-1}.pt" + saved_params = load_checkpoint( + filename, + model=model, + optimizer=optimizer, + scheduler=scheduler, + ) + + keys = [ + "best_train_epoch", + "best_valid_epoch", + "batch_idx_train", + "best_train_loss", + "best_valid_loss", + ] + for k in keys: + params[k] = saved_params[k] + + return saved_params + + +def save_checkpoint( + params: AttributeDict, + model: nn.Module, + optimizer: Optional[torch.optim.Optimizer] = None, + scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None, + rank: int = 0, +) -> None: + """Save model, optimizer, scheduler and training stats to file. + + Args: + params: + It is returned by :func:`get_params`. + model: + The training model. + """ + if rank != 0: + return + filename = params.exp_dir / f"epoch-{params.cur_epoch}.pt" + save_checkpoint_impl( + filename=filename, + model=model, + params=params, + optimizer=optimizer, + scheduler=scheduler, + rank=rank, + ) + + if params.best_train_epoch == params.cur_epoch: + best_train_filename = params.exp_dir / "best-train-loss.pt" + copyfile(src=filename, dst=best_train_filename) + + if params.best_valid_epoch == params.cur_epoch: + best_valid_filename = params.exp_dir / "best-valid-loss.pt" + copyfile(src=filename, dst=best_valid_filename) + + +def compute_loss( + params: AttributeDict, + model: nn.Module, + graph_compiler: CharCtcTrainingGraphCompiler, + batch: dict, + is_training: bool, +) -> Tuple[Tensor, MetricsTracker]: + """ + Compute CTC loss given the model and its inputs. + + Args: + params: + Parameters for training. See :func:`get_params`. + model: + The model for training. It is an instance of Conformer in our case. + batch: + A batch of data. See `lhotse.dataset.K2SpeechRecognitionDataset()` + for the content in it. + is_training: + True for training. False for validation. When it is True, this + function enables autograd during computation; when it is False, it + disables autograd. + """ + device = model.device + feature = batch["inputs"] + # at entry, feature is (N, T, C) + assert feature.ndim == 3 + feature = feature.to(device) + + supervisions = batch["supervisions"] + feature_lens = supervisions["num_frames"].to(device) + + texts = batch["supervisions"]["text"] + y = graph_compiler.texts_to_ids(texts) + y = k2.RaggedTensor(y).to(device) + + with torch.set_grad_enabled(is_training): + loss = model( + x=feature, + x_lens=feature_lens, + y=y, + modified_transducer_prob=params.modified_transducer_prob, + ) + + assert loss.requires_grad == is_training + + info = MetricsTracker() + info["frames"] = (feature_lens // params.subsampling_factor).sum().item() + + # Note: We use reduction=sum while computing the loss. 
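+    # The loss here is a sum over all utterances in the batch; a per-frame
+    # average is computed later as tot_loss["loss"] / tot_loss["frames"].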
+ info["loss"] = loss.detach().cpu().item() + + return loss, info + + +def compute_validation_loss( + params: AttributeDict, + model: nn.Module, + graph_compiler: CharCtcTrainingGraphCompiler, + valid_dl: torch.utils.data.DataLoader, + world_size: int = 1, +) -> MetricsTracker: + """Run the validation process.""" + model.eval() + + tot_loss = MetricsTracker() + + for batch_idx, batch in enumerate(valid_dl): + loss, loss_info = compute_loss( + params=params, + model=model, + graph_compiler=graph_compiler, + batch=batch, + is_training=False, + ) + assert loss.requires_grad is False + tot_loss = tot_loss + loss_info + + if world_size > 1: + tot_loss.reduce(loss.device) + + loss_value = tot_loss["loss"] / tot_loss["frames"] + if loss_value < params.best_valid_loss: + params.best_valid_epoch = params.cur_epoch + params.best_valid_loss = loss_value + + return tot_loss + + +def train_one_epoch( + params: AttributeDict, + model: nn.Module, + optimizer: torch.optim.Optimizer, + graph_compiler: CharCtcTrainingGraphCompiler, + train_dl: torch.utils.data.DataLoader, + valid_dl: torch.utils.data.DataLoader, + tb_writer: Optional[SummaryWriter] = None, + world_size: int = 1, +) -> None: + """Train the model for one epoch. + + The training loss from the mean of all frames is saved in + `params.train_loss`. It runs the validation process every + `params.valid_interval` batches. + + Args: + params: + It is returned by :func:`get_params`. + model: + The model for training. + optimizer: + The optimizer we are using. + train_dl: + Dataloader for the training dataset. + valid_dl: + Dataloader for the validation dataset. + tb_writer: + Writer to write log messages to tensorboard. + world_size: + Number of nodes in DDP training. If it is 1, DDP is disabled. + """ + model.train() + + tot_loss = MetricsTracker() + + for batch_idx, batch in enumerate(train_dl): + params.batch_idx_train += 1 + batch_size = len(batch["supervisions"]["text"]) + + loss, loss_info = compute_loss( + params=params, + model=model, + graph_compiler=graph_compiler, + batch=batch, + is_training=True, + ) + # summary stats + tot_loss = (tot_loss * (1 - 1 / params.reset_interval)) + loss_info + + # NOTE: We use reduction==sum and loss is computed over utterances + # in the batch and there is no normalization to it so far. 
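+        # clip_grad_norm_ below clips gradients to a maximum L2 norm of 5.0;
+        # its third positional argument (2.0) selects the norm type.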
+ + optimizer.zero_grad() + loss.backward() + clip_grad_norm_(model.parameters(), 5.0, 2.0) + optimizer.step() + + if batch_idx % params.log_interval == 0: + logging.info( + f"Epoch {params.cur_epoch}, " + f"batch {batch_idx}, loss[{loss_info}], " + f"tot_loss[{tot_loss}], batch size: {batch_size}" + ) + + if batch_idx % params.log_interval == 0: + + if tb_writer is not None: + loss_info.write_summary( + tb_writer, "train/current_", params.batch_idx_train + ) + tot_loss.write_summary( + tb_writer, "train/tot_", params.batch_idx_train + ) + + if batch_idx > 0 and batch_idx % params.valid_interval == 0: + logging.info("Computing validation loss") + valid_info = compute_validation_loss( + params=params, + model=model, + graph_compiler=graph_compiler, + valid_dl=valid_dl, + world_size=world_size, + ) + model.train() + logging.info(f"Epoch {params.cur_epoch}, validation: {valid_info}") + if tb_writer is not None: + valid_info.write_summary( + tb_writer, "train/valid_", params.batch_idx_train + ) + + loss_value = tot_loss["loss"] / tot_loss["frames"] + params.train_loss = loss_value + if params.train_loss < params.best_train_loss: + params.best_train_epoch = params.cur_epoch + params.best_train_loss = params.train_loss + + +def run(rank, world_size, args): + """ + Args: + rank: + It is a value between 0 and `world_size-1`, which is + passed automatically by `mp.spawn()` in :func:`main`. + The node with rank 0 is responsible for saving checkpoint. + world_size: + Number of GPUs for DDP training. + args: + The return value of get_parser().parse_args() + """ + params = get_params() + params.update(vars(args)) + + fix_random_seed(42) + if world_size > 1: + setup_dist(rank, world_size, params.master_port) + + setup_logger(f"{params.exp_dir}/log/log-train") + logging.info("Training started") + + if args.tensorboard and rank == 0: + tb_writer = SummaryWriter(log_dir=f"{params.exp_dir}/tensorboard") + else: + tb_writer = None + + device = torch.device("cpu") + if torch.cuda.is_available(): + device = torch.device("cuda", rank) + logging.info(f"Device: {device}") + + lexicon = Lexicon(params.lang_dir) + graph_compiler = CharCtcTrainingGraphCompiler( + lexicon=lexicon, + device=device, + oov="", + ) + + params.blank_id = 0 + params.vocab_size = max(lexicon.tokens) + 1 + + logging.info(params) + + logging.info("About to create model") + model = get_transducer_model(params) + + num_param = sum([p.numel() for p in model.parameters()]) + logging.info(f"Number of model parameters: {num_param}") + + checkpoints = load_checkpoint_if_available(params=params, model=model) + + model.to(device) + if world_size > 1: + logging.info("Using DDP") + model = DDP(model, device_ids=[rank]) + model.device = device + + optimizer = Noam( + model.parameters(), + model_size=params.attention_dim, + factor=params.lr_factor, + warm_step=params.warm_step, + ) + + if checkpoints and "optimizer" in checkpoints: + logging.info("Loading optimizer state dict") + optimizer.load_state_dict(checkpoints["optimizer"]) + + aishell = AishellAsrDataModule(args) + train_cuts = aishell.train_cuts() + + def remove_short_and_long_utt(c: Cut): + # Keep only utterances with duration between 1 second and 12 seconds + return 1.0 <= c.duration <= 12.0 + + num_in_total = len(train_cuts) + + train_cuts = train_cuts.filter(remove_short_and_long_utt) + + num_left = len(train_cuts) + num_removed = num_in_total - num_left + removed_percent = num_removed / num_in_total * 100 + + logging.info(f"Before removing short and long utterances: {num_in_total}") + 
logging.info(f"After removing short and long utterances: {num_left}") + logging.info(f"Removed {num_removed} utterances ({removed_percent:.5f}%)") + + train_dl = aishell.train_dataloaders(train_cuts) + valid_dl = aishell.valid_dataloaders(aishell.valid_cuts()) + + scan_pessimistic_batches_for_oom( + model=model, + train_dl=train_dl, + optimizer=optimizer, + graph_compiler=graph_compiler, + params=params, + ) + + for epoch in range(params.start_epoch, params.num_epochs): + train_dl.sampler.set_epoch(epoch) + + cur_lr = optimizer._rate + if tb_writer is not None: + tb_writer.add_scalar( + "train/learning_rate", cur_lr, params.batch_idx_train + ) + tb_writer.add_scalar("train/epoch", epoch, params.batch_idx_train) + + if rank == 0: + logging.info("epoch {}, learning rate {}".format(epoch, cur_lr)) + + params.cur_epoch = epoch + + train_one_epoch( + params=params, + model=model, + optimizer=optimizer, + graph_compiler=graph_compiler, + train_dl=train_dl, + valid_dl=valid_dl, + tb_writer=tb_writer, + world_size=world_size, + ) + + save_checkpoint( + params=params, + model=model, + optimizer=optimizer, + rank=rank, + ) + + logging.info("Done!") + + if world_size > 1: + torch.distributed.barrier() + cleanup_dist() + + +def scan_pessimistic_batches_for_oom( + model: nn.Module, + train_dl: torch.utils.data.DataLoader, + optimizer: torch.optim.Optimizer, + graph_compiler: CharCtcTrainingGraphCompiler, + params: AttributeDict, +): + from lhotse.dataset import find_pessimistic_batches + + logging.info( + "Sanity check -- see if any of the batches in epoch 0 would cause OOM." + ) + batches, crit_values = find_pessimistic_batches(train_dl.sampler) + for criterion, cuts in batches.items(): + batch = train_dl.dataset[cuts] + try: + optimizer.zero_grad() + loss, _ = compute_loss( + params=params, + model=model, + graph_compiler=graph_compiler, + batch=batch, + is_training=True, + ) + loss.backward() + clip_grad_norm_(model.parameters(), 5.0, 2.0) + optimizer.step() + except RuntimeError as e: + if "CUDA out of memory" in str(e): + logging.error( + "Your GPU ran out of memory with the current " + "max_duration setting. We recommend decreasing " + "max_duration and trying again.\n" + f"Failing criterion: {criterion} " + f"(={crit_values[criterion]}) ..." 
+ ) + raise + + +def main(): + parser = get_parser() + AishellAsrDataModule.add_arguments(parser) + args = parser.parse_args() + args.exp_dir = Path(args.exp_dir) + args.lang_dir = Path(args.lang_dir) + + world_size = args.world_size + assert world_size >= 1 + if world_size > 1: + mp.spawn(run, args=(world_size, args), nprocs=world_size, join=True) + else: + run(rank=0, world_size=1, args=args) + + +torch.set_num_threads(1) +torch.set_num_interop_threads(1) + +if __name__ == "__main__": + main() diff --git a/egs/aishell/ASR/transducer_stateless_modified/transformer.py b/egs/aishell/ASR/transducer_stateless_modified/transformer.py new file mode 120000 index 000000000..214afed39 --- /dev/null +++ b/egs/aishell/ASR/transducer_stateless_modified/transformer.py @@ -0,0 +1 @@ +../../../librispeech/ASR/transducer_stateless/transformer.py \ No newline at end of file diff --git a/egs/librispeech/ASR/transducer_stateless/decode.py b/egs/librispeech/ASR/transducer_stateless/decode.py index c101d9397..f23a3a300 100755 --- a/egs/librispeech/ASR/transducer_stateless/decode.py +++ b/egs/librispeech/ASR/transducer_stateless/decode.py @@ -33,6 +33,15 @@ Usage: --max-duration 100 \ --decoding-method beam_search \ --beam-size 4 + +(3) modified beam search +./transducer_stateless/decode.py \ + --epoch 14 \ + --avg 7 \ + --exp-dir ./transducer_stateless/exp \ + --max-duration 100 \ + --decoding-method modified_beam_search \ + --beam-size 4 """ diff --git a/egs/librispeech/ASR/transducer_stateless/joiner.py b/egs/librispeech/ASR/transducer_stateless/joiner.py index 9fd9da4f1..55f0a81f1 100644 --- a/egs/librispeech/ASR/transducer_stateless/joiner.py +++ b/egs/librispeech/ASR/transducer_stateless/joiner.py @@ -39,6 +39,12 @@ class Joiner(nn.Module): Output from the encoder. Its shape is (N, T, self.input_dim). decoder_out: Output from the decoder. Its shape is (N, U, self.input_dim). + encoder_out_len: + A 1-D tensor of shape (N,) containing valid number of frames + before padding in `encoder_out`. + decoder_out_len: + A 1-D tensor of shape (N,) containing valid number of frames + before padding in `decoder_out`. Returns: Return a tensor of shape (sum_all_TU, self.output_dim). """ From 1ff6196c445c2eeb491f318448e04626af2dc22f Mon Sep 17 00:00:00 2001 From: Fangjun Kuang Date: Wed, 2 Mar 2022 16:41:14 +0800 Subject: [PATCH 16/25] Fix joiner (#234) * Add tests for Joiner * Remove duplicate files. 
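
The new test exercises the Joiner with per-utterance lengths and checks that the
output has (encoder_out_len * decoder_out_len).sum() rows. As a quick sanity
sketch of that size check (reusing the same lengths as test_joiner.py below):

    import torch

    encoder_out_len = torch.tensor([5, 10, 3])
    decoder_out_len = torch.tensor([6, 8, 7])

    # Each utterance i contributes T_i * U_i rows to the joiner output:
    # 5*6 + 10*8 + 3*7 = 30 + 80 + 21 = 131
    expected_rows = (encoder_out_len * decoder_out_len).sum().item()
    assert expected_rows == 131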
--- egs/librispeech/ASR/transducer/conformer.py | 921 +----------------- .../ASR/transducer/encoder_interface.py | 44 +- egs/librispeech/ASR/transducer/transformer.py | 419 +------- .../ASR/transducer_stateless/joiner.py | 3 + .../ASR/transducer_stateless/test_joiner.py | 57 ++ .../beam_search.py | 542 +---------- .../conformer.py | 921 +----------------- .../decoder.py | 99 +- .../encoder_interface.py | 44 +- .../joiner.py | 73 +- .../transformer.py | 419 +------- 11 files changed, 69 insertions(+), 3473 deletions(-) mode change 100644 => 120000 egs/librispeech/ASR/transducer/conformer.py mode change 100644 => 120000 egs/librispeech/ASR/transducer/encoder_interface.py mode change 100644 => 120000 egs/librispeech/ASR/transducer/transformer.py create mode 100755 egs/librispeech/ASR/transducer_stateless/test_joiner.py mode change 100644 => 120000 egs/librispeech/ASR/transducer_stateless_multi_datasets/beam_search.py mode change 100644 => 120000 egs/librispeech/ASR/transducer_stateless_multi_datasets/conformer.py mode change 100644 => 120000 egs/librispeech/ASR/transducer_stateless_multi_datasets/decoder.py mode change 100644 => 120000 egs/librispeech/ASR/transducer_stateless_multi_datasets/encoder_interface.py mode change 100644 => 120000 egs/librispeech/ASR/transducer_stateless_multi_datasets/joiner.py mode change 100644 => 120000 egs/librispeech/ASR/transducer_stateless_multi_datasets/transformer.py diff --git a/egs/librispeech/ASR/transducer/conformer.py b/egs/librispeech/ASR/transducer/conformer.py deleted file mode 100644 index 81d7708f9..000000000 --- a/egs/librispeech/ASR/transducer/conformer.py +++ /dev/null @@ -1,920 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2021 University of Chinese Academy of Sciences (author: Han Zhu) -# -# See ../../../../LICENSE for clarification regarding multiple authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import math -import warnings -from typing import Optional, Tuple - -import torch -from torch import Tensor, nn -from transformer import Transformer - -from icefall.utils import make_pad_mask - - -class Conformer(Transformer): - """ - Args: - num_features (int): Number of input features - output_dim (int): Number of output dimension - subsampling_factor (int): subsampling factor of encoder (the convolution layers before transformers) - d_model (int): attention dimension - nhead (int): number of head - dim_feedforward (int): feedforward dimention - num_encoder_layers (int): number of encoder layers - dropout (float): dropout rate - cnn_module_kernel (int): Kernel size of convolution module - normalize_before (bool): whether to use layer_norm before the first block. - vgg_frontend (bool): whether to use vgg frontend. 
- """ - - def __init__( - self, - num_features: int, - output_dim: int, - subsampling_factor: int = 4, - d_model: int = 256, - nhead: int = 4, - dim_feedforward: int = 2048, - num_encoder_layers: int = 12, - dropout: float = 0.1, - cnn_module_kernel: int = 31, - normalize_before: bool = True, - vgg_frontend: bool = False, - ) -> None: - super(Conformer, self).__init__( - num_features=num_features, - output_dim=output_dim, - subsampling_factor=subsampling_factor, - d_model=d_model, - nhead=nhead, - dim_feedforward=dim_feedforward, - num_encoder_layers=num_encoder_layers, - dropout=dropout, - normalize_before=normalize_before, - vgg_frontend=vgg_frontend, - ) - - self.encoder_pos = RelPositionalEncoding(d_model, dropout) - - encoder_layer = ConformerEncoderLayer( - d_model, - nhead, - dim_feedforward, - dropout, - cnn_module_kernel, - normalize_before, - ) - self.encoder = ConformerEncoder(encoder_layer, num_encoder_layers) - self.normalize_before = normalize_before - if self.normalize_before: - self.after_norm = nn.LayerNorm(d_model) - else: - # Note: TorchScript detects that self.after_norm could be used inside forward() - # and throws an error without this change. - self.after_norm = identity - - def forward( - self, x: torch.Tensor, x_lens: torch.Tensor - ) -> Tuple[torch.Tensor, torch.Tensor]: - """ - Args: - x: - The input tensor. Its shape is (batch_size, seq_len, feature_dim). - x_lens: - A tensor of shape (batch_size,) containing the number of frames in - `x` before padding. - Returns: - Return a tuple containing 2 tensors: - - logits, its shape is (batch_size, output_seq_len, output_dim) - - logit_lens, a tensor of shape (batch_size,) containing the number - of frames in `logits` before padding. - """ - x = self.encoder_embed(x) - x, pos_emb = self.encoder_pos(x) - x = x.permute(1, 0, 2) # (N, T, C) -> (T, N, C) - - # Caution: We assume the subsampling factor is 4! - lengths = ((x_lens - 1) // 2 - 1) // 2 - assert x.size(0) == lengths.max().item() - mask = make_pad_mask(lengths) - - x = self.encoder(x, pos_emb, src_key_padding_mask=mask) # (T, N, C) - - if self.normalize_before: - x = self.after_norm(x) - - logits = self.encoder_output_layer(x) - logits = logits.permute(1, 0, 2) # (T, N, C) ->(N, T, C) - - return logits, lengths - - -class ConformerEncoderLayer(nn.Module): - """ - ConformerEncoderLayer is made up of self-attn, feedforward and convolution networks. - See: "Conformer: Convolution-augmented Transformer for Speech Recognition" - - Args: - d_model: the number of expected features in the input (required). - nhead: the number of heads in the multiheadattention models (required). - dim_feedforward: the dimension of the feedforward network model (default=2048). - dropout: the dropout value (default=0.1). - cnn_module_kernel (int): Kernel size of convolution module. - normalize_before: whether to use layer_norm before the first block. 
- - Examples:: - >>> encoder_layer = ConformerEncoderLayer(d_model=512, nhead=8) - >>> src = torch.rand(10, 32, 512) - >>> pos_emb = torch.rand(32, 19, 512) - >>> out = encoder_layer(src, pos_emb) - """ - - def __init__( - self, - d_model: int, - nhead: int, - dim_feedforward: int = 2048, - dropout: float = 0.1, - cnn_module_kernel: int = 31, - normalize_before: bool = True, - ) -> None: - super(ConformerEncoderLayer, self).__init__() - self.self_attn = RelPositionMultiheadAttention( - d_model, nhead, dropout=0.0 - ) - - self.feed_forward = nn.Sequential( - nn.Linear(d_model, dim_feedforward), - Swish(), - nn.Dropout(dropout), - nn.Linear(dim_feedforward, d_model), - ) - - self.feed_forward_macaron = nn.Sequential( - nn.Linear(d_model, dim_feedforward), - Swish(), - nn.Dropout(dropout), - nn.Linear(dim_feedforward, d_model), - ) - - self.conv_module = ConvolutionModule(d_model, cnn_module_kernel) - - self.norm_ff_macaron = nn.LayerNorm( - d_model - ) # for the macaron style FNN module - self.norm_ff = nn.LayerNorm(d_model) # for the FNN module - self.norm_mha = nn.LayerNorm(d_model) # for the MHA module - - self.ff_scale = 0.5 - - self.norm_conv = nn.LayerNorm(d_model) # for the CNN module - self.norm_final = nn.LayerNorm( - d_model - ) # for the final output of the block - - self.dropout = nn.Dropout(dropout) - - self.normalize_before = normalize_before - - def forward( - self, - src: Tensor, - pos_emb: Tensor, - src_mask: Optional[Tensor] = None, - src_key_padding_mask: Optional[Tensor] = None, - ) -> Tensor: - """ - Pass the input through the encoder layer. - - Args: - src: the sequence to the encoder layer (required). - pos_emb: Positional embedding tensor (required). - src_mask: the mask for the src sequence (optional). - src_key_padding_mask: the mask for the src keys per batch (optional). - - Shape: - src: (S, N, E). - pos_emb: (N, 2*S-1, E) - src_mask: (S, S). - src_key_padding_mask: (N, S). - S is the source sequence length, N is the batch size, E is the feature number - """ - - # macaron style feed forward module - residual = src - if self.normalize_before: - src = self.norm_ff_macaron(src) - src = residual + self.ff_scale * self.dropout( - self.feed_forward_macaron(src) - ) - if not self.normalize_before: - src = self.norm_ff_macaron(src) - - # multi-headed self-attention module - residual = src - if self.normalize_before: - src = self.norm_mha(src) - src_att = self.self_attn( - src, - src, - src, - pos_emb=pos_emb, - attn_mask=src_mask, - key_padding_mask=src_key_padding_mask, - )[0] - src = residual + self.dropout(src_att) - if not self.normalize_before: - src = self.norm_mha(src) - - # convolution module - residual = src - if self.normalize_before: - src = self.norm_conv(src) - src = residual + self.dropout(self.conv_module(src)) - if not self.normalize_before: - src = self.norm_conv(src) - - # feed forward module - residual = src - if self.normalize_before: - src = self.norm_ff(src) - src = residual + self.ff_scale * self.dropout(self.feed_forward(src)) - if not self.normalize_before: - src = self.norm_ff(src) - - if self.normalize_before: - src = self.norm_final(src) - - return src - - -class ConformerEncoder(nn.TransformerEncoder): - r"""ConformerEncoder is a stack of N encoder layers - - Args: - encoder_layer: an instance of the ConformerEncoderLayer() class (required). - num_layers: the number of sub-encoder-layers in the encoder (required). - norm: the layer normalization component (optional). 
- - Examples:: - >>> encoder_layer = ConformerEncoderLayer(d_model=512, nhead=8) - >>> conformer_encoder = ConformerEncoder(encoder_layer, num_layers=6) - >>> src = torch.rand(10, 32, 512) - >>> pos_emb = torch.rand(32, 19, 512) - >>> out = conformer_encoder(src, pos_emb) - """ - - def __init__( - self, encoder_layer: nn.Module, num_layers: int, norm: nn.Module = None - ) -> None: - super(ConformerEncoder, self).__init__( - encoder_layer=encoder_layer, num_layers=num_layers, norm=norm - ) - - def forward( - self, - src: Tensor, - pos_emb: Tensor, - mask: Optional[Tensor] = None, - src_key_padding_mask: Optional[Tensor] = None, - ) -> Tensor: - r"""Pass the input through the encoder layers in turn. - - Args: - src: the sequence to the encoder (required). - pos_emb: Positional embedding tensor (required). - mask: the mask for the src sequence (optional). - src_key_padding_mask: the mask for the src keys per batch (optional). - - Shape: - src: (S, N, E). - pos_emb: (N, 2*S-1, E) - mask: (S, S). - src_key_padding_mask: (N, S). - S is the source sequence length, T is the target sequence length, N is the batch size, E is the feature number - - """ - output = src - - for mod in self.layers: - output = mod( - output, - pos_emb, - src_mask=mask, - src_key_padding_mask=src_key_padding_mask, - ) - - if self.norm is not None: - output = self.norm(output) - - return output - - -class RelPositionalEncoding(torch.nn.Module): - """Relative positional encoding module. - - See : Appendix B in "Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context" - Modified from https://github.com/espnet/espnet/blob/master/espnet/nets/pytorch_backend/transformer/embedding.py - - Args: - d_model: Embedding dimension. - dropout_rate: Dropout rate. - max_len: Maximum input length. - - """ - - def __init__( - self, d_model: int, dropout_rate: float, max_len: int = 5000 - ) -> None: - """Construct an PositionalEncoding object.""" - super(RelPositionalEncoding, self).__init__() - self.d_model = d_model - self.xscale = math.sqrt(self.d_model) - self.dropout = torch.nn.Dropout(p=dropout_rate) - self.pe = None - self.extend_pe(torch.tensor(0.0).expand(1, max_len)) - - def extend_pe(self, x: Tensor) -> None: - """Reset the positional encodings.""" - if self.pe is not None: - # self.pe contains both positive and negative parts - # the length of self.pe is 2 * input_len - 1 - if self.pe.size(1) >= x.size(1) * 2 - 1: - # Note: TorchScript doesn't implement operator== for torch.Device - if self.pe.dtype != x.dtype or str(self.pe.device) != str( - x.device - ): - self.pe = self.pe.to(dtype=x.dtype, device=x.device) - return - # Suppose `i` means to the position of query vecotr and `j` means the - # position of key vector. We use position relative positions when keys - # are to the left (i>j) and negative relative positions otherwise (i Tuple[Tensor, Tensor]: - """Add positional encoding. - - Args: - x (torch.Tensor): Input tensor (batch, time, `*`). - - Returns: - torch.Tensor: Encoded tensor (batch, time, `*`). - torch.Tensor: Encoded tensor (batch, 2*time-1, `*`). 
- - """ - self.extend_pe(x) - x = x * self.xscale - pos_emb = self.pe[ - :, - self.pe.size(1) // 2 - - x.size(1) - + 1 : self.pe.size(1) // 2 # noqa E203 - + x.size(1), - ] - return self.dropout(x), self.dropout(pos_emb) - - -class RelPositionMultiheadAttention(nn.Module): - r"""Multi-Head Attention layer with relative position encoding - - See reference: "Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context" - - Args: - embed_dim: total dimension of the model. - num_heads: parallel attention heads. - dropout: a Dropout layer on attn_output_weights. Default: 0.0. - - Examples:: - - >>> rel_pos_multihead_attn = RelPositionMultiheadAttention(embed_dim, num_heads) - >>> attn_output, attn_output_weights = multihead_attn(query, key, value, pos_emb) - """ - - def __init__( - self, - embed_dim: int, - num_heads: int, - dropout: float = 0.0, - ) -> None: - super(RelPositionMultiheadAttention, self).__init__() - self.embed_dim = embed_dim - self.num_heads = num_heads - self.dropout = dropout - self.head_dim = embed_dim // num_heads - assert ( - self.head_dim * num_heads == self.embed_dim - ), "embed_dim must be divisible by num_heads" - - self.in_proj = nn.Linear(embed_dim, 3 * embed_dim, bias=True) - self.out_proj = nn.Linear(embed_dim, embed_dim, bias=True) - - # linear transformation for positional encoding. - self.linear_pos = nn.Linear(embed_dim, embed_dim, bias=False) - # these two learnable bias are used in matrix c and matrix d - # as described in "Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context" Section 3.3 - self.pos_bias_u = nn.Parameter(torch.Tensor(num_heads, self.head_dim)) - self.pos_bias_v = nn.Parameter(torch.Tensor(num_heads, self.head_dim)) - - self._reset_parameters() - - def _reset_parameters(self) -> None: - nn.init.xavier_uniform_(self.in_proj.weight) - nn.init.constant_(self.in_proj.bias, 0.0) - nn.init.constant_(self.out_proj.bias, 0.0) - - nn.init.xavier_uniform_(self.pos_bias_u) - nn.init.xavier_uniform_(self.pos_bias_v) - - def forward( - self, - query: Tensor, - key: Tensor, - value: Tensor, - pos_emb: Tensor, - key_padding_mask: Optional[Tensor] = None, - need_weights: bool = True, - attn_mask: Optional[Tensor] = None, - ) -> Tuple[Tensor, Optional[Tensor]]: - r""" - Args: - query, key, value: map a query and a set of key-value pairs to an output. - pos_emb: Positional embedding tensor - key_padding_mask: if provided, specified padding elements in the key will - be ignored by the attention. When given a binary mask and a value is True, - the corresponding value on the attention layer will be ignored. When given - a byte mask and a value is non-zero, the corresponding value on the attention - layer will be ignored - need_weights: output attn_output_weights. - attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all - the batches while a 3D mask allows to specify a different mask for the entries of each batch. - - Shape: - - Inputs: - - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is - the embedding dimension. - - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is - the embedding dimension. - - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is - the embedding dimension. - - pos_emb: :math:`(N, 2*L-1, E)` where L is the target sequence length, N is the batch size, E is - the embedding dimension. 
- - key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length. - If a ByteTensor is provided, the non-zero positions will be ignored while the position - with the zero positions will be unchanged. If a BoolTensor is provided, the positions with the - value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged. - - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length. - 3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length, - S is the source sequence length. attn_mask ensure that position i is allowed to attend the unmasked - positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend - while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True`` - is not allowed to attend while ``False`` values will be unchanged. If a FloatTensor - is provided, it will be added to the attention weight. - - - Outputs: - - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, - E is the embedding dimension. - - attn_output_weights: :math:`(N, L, S)` where N is the batch size, - L is the target sequence length, S is the source sequence length. - """ - return self.multi_head_attention_forward( - query, - key, - value, - pos_emb, - self.embed_dim, - self.num_heads, - self.in_proj.weight, - self.in_proj.bias, - self.dropout, - self.out_proj.weight, - self.out_proj.bias, - training=self.training, - key_padding_mask=key_padding_mask, - need_weights=need_weights, - attn_mask=attn_mask, - ) - - def rel_shift(self, x: Tensor) -> Tensor: - """Compute relative positional encoding. - - Args: - x: Input tensor (batch, head, time1, 2*time1-1). - time1 means the length of query vector. - - Returns: - Tensor: tensor of shape (batch, head, time1, time2) - (note: time2 has the same value as time1, but it is for - the key, while time1 is for the query). - """ - (batch_size, num_heads, time1, n) = x.shape - assert n == 2 * time1 - 1 - # Note: TorchScript requires explicit arg for stride() - batch_stride = x.stride(0) - head_stride = x.stride(1) - time1_stride = x.stride(2) - n_stride = x.stride(3) - return x.as_strided( - (batch_size, num_heads, time1, time1), - (batch_stride, head_stride, time1_stride - n_stride, n_stride), - storage_offset=n_stride * (time1 - 1), - ) - - def multi_head_attention_forward( - self, - query: Tensor, - key: Tensor, - value: Tensor, - pos_emb: Tensor, - embed_dim_to_check: int, - num_heads: int, - in_proj_weight: Tensor, - in_proj_bias: Tensor, - dropout_p: float, - out_proj_weight: Tensor, - out_proj_bias: Tensor, - training: bool = True, - key_padding_mask: Optional[Tensor] = None, - need_weights: bool = True, - attn_mask: Optional[Tensor] = None, - ) -> Tuple[Tensor, Optional[Tensor]]: - r""" - Args: - query, key, value: map a query and a set of key-value pairs to an output. - pos_emb: Positional embedding tensor - embed_dim_to_check: total dimension of the model. - num_heads: parallel attention heads. - in_proj_weight, in_proj_bias: input projection weight and bias. - dropout_p: probability of an element to be zeroed. - out_proj_weight, out_proj_bias: the output projection weight and bias. - training: apply dropout if is ``True``. - key_padding_mask: if provided, specified padding elements in the key will - be ignored by the attention. This is an binary mask. 
When the value is True, - the corresponding value on the attention layer will be filled with -inf. - need_weights: output attn_output_weights. - attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all - the batches while a 3D mask allows to specify a different mask for the entries of each batch. - - Shape: - Inputs: - - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is - the embedding dimension. - - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is - the embedding dimension. - - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is - the embedding dimension. - - pos_emb: :math:`(N, 2*L-1, E)` or :math:`(1, 2*L-1, E)` where L is the target sequence - length, N is the batch size, E is the embedding dimension. - - key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length. - If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions - will be unchanged. If a BoolTensor is provided, the positions with the - value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged. - - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length. - 3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length, - S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked - positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend - while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True`` - are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor - is provided, it will be added to the attention weight. - - Outputs: - - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, - E is the embedding dimension. - - attn_output_weights: :math:`(N, L, S)` where N is the batch size, - L is the target sequence length, S is the source sequence length. 
- """ - - tgt_len, bsz, embed_dim = query.size() - assert embed_dim == embed_dim_to_check - assert key.size(0) == value.size(0) and key.size(1) == value.size(1) - - head_dim = embed_dim // num_heads - assert ( - head_dim * num_heads == embed_dim - ), "embed_dim must be divisible by num_heads" - scaling = float(head_dim) ** -0.5 - - if torch.equal(query, key) and torch.equal(key, value): - # self-attention - q, k, v = nn.functional.linear( - query, in_proj_weight, in_proj_bias - ).chunk(3, dim=-1) - - elif torch.equal(key, value): - # encoder-decoder attention - # This is inline in_proj function with in_proj_weight and in_proj_bias - _b = in_proj_bias - _start = 0 - _end = embed_dim - _w = in_proj_weight[_start:_end, :] - if _b is not None: - _b = _b[_start:_end] - q = nn.functional.linear(query, _w, _b) - # This is inline in_proj function with in_proj_weight and in_proj_bias - _b = in_proj_bias - _start = embed_dim - _end = None - _w = in_proj_weight[_start:, :] - if _b is not None: - _b = _b[_start:] - k, v = nn.functional.linear(key, _w, _b).chunk(2, dim=-1) - - else: - # This is inline in_proj function with in_proj_weight and in_proj_bias - _b = in_proj_bias - _start = 0 - _end = embed_dim - _w = in_proj_weight[_start:_end, :] - if _b is not None: - _b = _b[_start:_end] - q = nn.functional.linear(query, _w, _b) - - # This is inline in_proj function with in_proj_weight and in_proj_bias - _b = in_proj_bias - _start = embed_dim - _end = embed_dim * 2 - _w = in_proj_weight[_start:_end, :] - if _b is not None: - _b = _b[_start:_end] - k = nn.functional.linear(key, _w, _b) - - # This is inline in_proj function with in_proj_weight and in_proj_bias - _b = in_proj_bias - _start = embed_dim * 2 - _end = None - _w = in_proj_weight[_start:, :] - if _b is not None: - _b = _b[_start:] - v = nn.functional.linear(value, _w, _b) - - if attn_mask is not None: - assert ( - attn_mask.dtype == torch.float32 - or attn_mask.dtype == torch.float64 - or attn_mask.dtype == torch.float16 - or attn_mask.dtype == torch.uint8 - or attn_mask.dtype == torch.bool - ), "Only float, byte, and bool types are supported for attn_mask, not {}".format( - attn_mask.dtype - ) - if attn_mask.dtype == torch.uint8: - warnings.warn( - "Byte tensor for attn_mask is deprecated. Use bool tensor instead." - ) - attn_mask = attn_mask.to(torch.bool) - - if attn_mask.dim() == 2: - attn_mask = attn_mask.unsqueeze(0) - if list(attn_mask.size()) != [1, query.size(0), key.size(0)]: - raise RuntimeError( - "The size of the 2D attn_mask is not correct." - ) - elif attn_mask.dim() == 3: - if list(attn_mask.size()) != [ - bsz * num_heads, - query.size(0), - key.size(0), - ]: - raise RuntimeError( - "The size of the 3D attn_mask is not correct." - ) - else: - raise RuntimeError( - "attn_mask's dimension {} is not supported".format( - attn_mask.dim() - ) - ) - # attn_mask's dim is 3 now. - - # convert ByteTensor key_padding_mask to bool - if ( - key_padding_mask is not None - and key_padding_mask.dtype == torch.uint8 - ): - warnings.warn( - "Byte tensor for key_padding_mask is deprecated. Use bool tensor instead." 
- ) - key_padding_mask = key_padding_mask.to(torch.bool) - - q = q.contiguous().view(tgt_len, bsz, num_heads, head_dim) - k = k.contiguous().view(-1, bsz, num_heads, head_dim) - v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1) - - src_len = k.size(0) - - if key_padding_mask is not None: - assert key_padding_mask.size(0) == bsz, "{} == {}".format( - key_padding_mask.size(0), bsz - ) - assert key_padding_mask.size(1) == src_len, "{} == {}".format( - key_padding_mask.size(1), src_len - ) - - q = q.transpose(0, 1) # (batch, time1, head, d_k) - - pos_emb_bsz = pos_emb.size(0) - assert pos_emb_bsz in (1, bsz) # actually it is 1 - p = self.linear_pos(pos_emb).view(pos_emb_bsz, -1, num_heads, head_dim) - p = p.transpose(1, 2) # (batch, head, 2*time1-1, d_k) - - q_with_bias_u = (q + self.pos_bias_u).transpose( - 1, 2 - ) # (batch, head, time1, d_k) - - q_with_bias_v = (q + self.pos_bias_v).transpose( - 1, 2 - ) # (batch, head, time1, d_k) - - # compute attention score - # first compute matrix a and matrix c - # as described in "Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context" Section 3.3 - k = k.permute(1, 2, 3, 0) # (batch, head, d_k, time2) - matrix_ac = torch.matmul( - q_with_bias_u, k - ) # (batch, head, time1, time2) - - # compute matrix b and matrix d - matrix_bd = torch.matmul( - q_with_bias_v, p.transpose(-2, -1) - ) # (batch, head, time1, 2*time1-1) - matrix_bd = self.rel_shift(matrix_bd) - - attn_output_weights = ( - matrix_ac + matrix_bd - ) * scaling # (batch, head, time1, time2) - - attn_output_weights = attn_output_weights.view( - bsz * num_heads, tgt_len, -1 - ) - - assert list(attn_output_weights.size()) == [ - bsz * num_heads, - tgt_len, - src_len, - ] - - if attn_mask is not None: - if attn_mask.dtype == torch.bool: - attn_output_weights.masked_fill_(attn_mask, float("-inf")) - else: - attn_output_weights += attn_mask - - if key_padding_mask is not None: - attn_output_weights = attn_output_weights.view( - bsz, num_heads, tgt_len, src_len - ) - attn_output_weights = attn_output_weights.masked_fill( - key_padding_mask.unsqueeze(1).unsqueeze(2), - float("-inf"), - ) - attn_output_weights = attn_output_weights.view( - bsz * num_heads, tgt_len, src_len - ) - - attn_output_weights = nn.functional.softmax(attn_output_weights, dim=-1) - attn_output_weights = nn.functional.dropout( - attn_output_weights, p=dropout_p, training=training - ) - - attn_output = torch.bmm(attn_output_weights, v) - assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim] - attn_output = ( - attn_output.transpose(0, 1) - .contiguous() - .view(tgt_len, bsz, embed_dim) - ) - attn_output = nn.functional.linear( - attn_output, out_proj_weight, out_proj_bias - ) - - if need_weights: - # average attention weights over heads - attn_output_weights = attn_output_weights.view( - bsz, num_heads, tgt_len, src_len - ) - return attn_output, attn_output_weights.sum(dim=1) / num_heads - else: - return attn_output, None - - -class ConvolutionModule(nn.Module): - """ConvolutionModule in Conformer model. - Modified from https://github.com/espnet/espnet/blob/master/espnet/nets/pytorch_backend/conformer/convolution.py - - Args: - channels (int): The number of channels of conv layers. - kernel_size (int): Kernerl size of conv layers. - bias (bool): Whether to use bias in conv layers (default=True). 
- - """ - - def __init__( - self, channels: int, kernel_size: int, bias: bool = True - ) -> None: - """Construct an ConvolutionModule object.""" - super(ConvolutionModule, self).__init__() - # kernerl_size should be a odd number for 'SAME' padding - assert (kernel_size - 1) % 2 == 0 - - self.pointwise_conv1 = nn.Conv1d( - channels, - 2 * channels, - kernel_size=1, - stride=1, - padding=0, - bias=bias, - ) - self.depthwise_conv = nn.Conv1d( - channels, - channels, - kernel_size, - stride=1, - padding=(kernel_size - 1) // 2, - groups=channels, - bias=bias, - ) - self.norm = nn.LayerNorm(channels) - self.pointwise_conv2 = nn.Conv1d( - channels, - channels, - kernel_size=1, - stride=1, - padding=0, - bias=bias, - ) - self.activation = Swish() - - def forward(self, x: Tensor) -> Tensor: - """Compute convolution module. - - Args: - x: Input tensor (#time, batch, channels). - - Returns: - Tensor: Output tensor (#time, batch, channels). - - """ - # exchange the temporal dimension and the feature dimension - x = x.permute(1, 2, 0) # (#batch, channels, time). - - # GLU mechanism - x = self.pointwise_conv1(x) # (batch, 2*channels, time) - x = nn.functional.glu(x, dim=1) # (batch, channels, time) - - # 1D Depthwise Conv - x = self.depthwise_conv(x) - # x is (batch, channels, time) - x = x.permute(0, 2, 1) - x = self.norm(x) - x = x.permute(0, 2, 1) - - x = self.activation(x) - - x = self.pointwise_conv2(x) # (batch, channel, time) - - return x.permute(2, 0, 1) - - -class Swish(torch.nn.Module): - """Construct an Swish object.""" - - def forward(self, x: Tensor) -> Tensor: - """Return Swich activation function.""" - return x * torch.sigmoid(x) - - -def identity(x): - return x diff --git a/egs/librispeech/ASR/transducer/conformer.py b/egs/librispeech/ASR/transducer/conformer.py new file mode 120000 index 000000000..70a7ddf11 --- /dev/null +++ b/egs/librispeech/ASR/transducer/conformer.py @@ -0,0 +1 @@ +../transducer_stateless/conformer.py \ No newline at end of file diff --git a/egs/librispeech/ASR/transducer/encoder_interface.py b/egs/librispeech/ASR/transducer/encoder_interface.py deleted file mode 100644 index 257facce4..000000000 --- a/egs/librispeech/ASR/transducer/encoder_interface.py +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang) -# -# See ../../../../LICENSE for clarification regarding multiple authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from typing import Tuple - -import torch -import torch.nn as nn - - -class EncoderInterface(nn.Module): - def forward( - self, x: torch.Tensor, x_lens: torch.Tensor - ) -> Tuple[torch.Tensor, torch.Tensor]: - """ - Args: - x: - A tensor of shape (batch_size, input_seq_len, num_features) - containing the input features. - x_lens: - A tensor of shape (batch_size,) containing the number of frames - in `x` before padding. - Returns: - Return a tuple containing two tensors: - - encoder_out, a tensor of (batch_size, out_seq_len, output_dim) - containing unnormalized probabilities, i.e., the output of a - linear layer. 
- - encoder_out_lens, a tensor of shape (batch_size,) containing - the number of frames in `encoder_out` before padding. - """ - raise NotImplementedError("Please implement it in a subclass") diff --git a/egs/librispeech/ASR/transducer/encoder_interface.py b/egs/librispeech/ASR/transducer/encoder_interface.py new file mode 120000 index 000000000..aa5d0217a --- /dev/null +++ b/egs/librispeech/ASR/transducer/encoder_interface.py @@ -0,0 +1 @@ +../transducer_stateless/encoder_interface.py \ No newline at end of file diff --git a/egs/librispeech/ASR/transducer/transformer.py b/egs/librispeech/ASR/transducer/transformer.py deleted file mode 100644 index e851dcc32..000000000 --- a/egs/librispeech/ASR/transducer/transformer.py +++ /dev/null @@ -1,418 +0,0 @@ -# Copyright 2021 University of Chinese Academy of Sciences (author: Han Zhu) -# -# See ../../../../LICENSE for clarification regarding multiple authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import math -from typing import Optional, Tuple - -import torch -import torch.nn as nn -from encoder_interface import EncoderInterface -from subsampling import Conv2dSubsampling, VggSubsampling - -from icefall.utils import make_pad_mask - - -class Transformer(EncoderInterface): - def __init__( - self, - num_features: int, - output_dim: int, - subsampling_factor: int = 4, - d_model: int = 256, - nhead: int = 4, - dim_feedforward: int = 2048, - num_encoder_layers: int = 12, - dropout: float = 0.1, - normalize_before: bool = True, - vgg_frontend: bool = False, - ) -> None: - """ - Args: - num_features: - The input dimension of the model. - output_dim: - The output dimension of the model. - subsampling_factor: - Number of output frames is num_in_frames // subsampling_factor. - Currently, subsampling_factor MUST be 4. - d_model: - Attention dimension. - nhead: - Number of heads in multi-head attention. - Must satisfy d_model // nhead == 0. - dim_feedforward: - The output dimension of the feedforward layers in encoder. - num_encoder_layers: - Number of encoder layers. - dropout: - Dropout in encoder. - normalize_before: - If True, use pre-layer norm; False to use post-layer norm. - vgg_frontend: - True to use vgg style frontend for subsampling. - """ - super().__init__() - - self.num_features = num_features - self.output_dim = output_dim - self.subsampling_factor = subsampling_factor - if subsampling_factor != 4: - raise NotImplementedError("Support only 'subsampling_factor=4'.") - - # self.encoder_embed converts the input of shape (N, T, num_features) - # to the shape (N, T//subsampling_factor, d_model). 
- # That is, it does two things simultaneously: - # (1) subsampling: T -> T//subsampling_factor - # (2) embedding: num_features -> d_model - if vgg_frontend: - self.encoder_embed = VggSubsampling(num_features, d_model) - else: - self.encoder_embed = Conv2dSubsampling(num_features, d_model) - - self.encoder_pos = PositionalEncoding(d_model, dropout) - - encoder_layer = TransformerEncoderLayer( - d_model=d_model, - nhead=nhead, - dim_feedforward=dim_feedforward, - dropout=dropout, - normalize_before=normalize_before, - ) - - if normalize_before: - encoder_norm = nn.LayerNorm(d_model) - else: - encoder_norm = None - - self.encoder = nn.TransformerEncoder( - encoder_layer=encoder_layer, - num_layers=num_encoder_layers, - norm=encoder_norm, - ) - - # TODO(fangjun): remove dropout - self.encoder_output_layer = nn.Sequential( - nn.Dropout(p=dropout), nn.Linear(d_model, output_dim) - ) - - def forward( - self, x: torch.Tensor, x_lens: torch.Tensor - ) -> Tuple[torch.Tensor, torch.Tensor]: - """ - Args: - x: - The input tensor. Its shape is (batch_size, seq_len, feature_dim). - x_lens: - A tensor of shape (batch_size,) containing the number of frames in - `x` before padding. - Returns: - Return a tuple containing 2 tensors: - - logits, its shape is (batch_size, output_seq_len, output_dim) - - logit_lens, a tensor of shape (batch_size,) containing the number - of frames in `logits` before padding. - """ - x = self.encoder_embed(x) - x = self.encoder_pos(x) - x = x.permute(1, 0, 2) # (N, T, C) -> (T, N, C) - - # Caution: We assume the subsampling factor is 4! - lengths = ((x_lens - 1) // 2 - 1) // 2 - assert x.size(0) == lengths.max().item() - - mask = make_pad_mask(lengths) - x = self.encoder(x, src_key_padding_mask=mask) # (T, N, C) - - logits = self.encoder_output_layer(x) - logits = logits.permute(1, 0, 2) # (T, N, C) ->(N, T, C) - - return logits, lengths - - -class TransformerEncoderLayer(nn.Module): - """ - Modified from torch.nn.TransformerEncoderLayer. - Add support of normalize_before, - i.e., use layer_norm before the first block. - - Args: - d_model: - the number of expected features in the input (required). - nhead: - the number of heads in the multiheadattention models (required). - dim_feedforward: - the dimension of the feedforward network model (default=2048). - dropout: - the dropout value (default=0.1). - activation: - the activation function of intermediate layer, relu or - gelu (default=relu). - normalize_before: - whether to use layer_norm before the first block. 
- - Examples:: - >>> encoder_layer = TransformerEncoderLayer(d_model=512, nhead=8) - >>> src = torch.rand(10, 32, 512) - >>> out = encoder_layer(src) - """ - - def __init__( - self, - d_model: int, - nhead: int, - dim_feedforward: int = 2048, - dropout: float = 0.1, - activation: str = "relu", - normalize_before: bool = True, - ) -> None: - super(TransformerEncoderLayer, self).__init__() - self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=0.0) - # Implementation of Feedforward model - self.linear1 = nn.Linear(d_model, dim_feedforward) - self.dropout = nn.Dropout(dropout) - self.linear2 = nn.Linear(dim_feedforward, d_model) - - self.norm1 = nn.LayerNorm(d_model) - self.norm2 = nn.LayerNorm(d_model) - self.dropout1 = nn.Dropout(dropout) - self.dropout2 = nn.Dropout(dropout) - - self.activation = _get_activation_fn(activation) - - self.normalize_before = normalize_before - - def __setstate__(self, state): - if "activation" not in state: - state["activation"] = nn.functional.relu - super(TransformerEncoderLayer, self).__setstate__(state) - - def forward( - self, - src: torch.Tensor, - src_mask: Optional[torch.Tensor] = None, - src_key_padding_mask: Optional[torch.Tensor] = None, - ) -> torch.Tensor: - """ - Pass the input through the encoder layer. - - Args: - src: the sequence to the encoder layer (required). - src_mask: the mask for the src sequence (optional). - src_key_padding_mask: the mask for the src keys per batch (optional) - - Shape: - src: (S, N, E). - src_mask: (S, S). - src_key_padding_mask: (N, S). - S is the source sequence length, T is the target sequence length, - N is the batch size, E is the feature number - """ - residual = src - if self.normalize_before: - src = self.norm1(src) - src2 = self.self_attn( - src, - src, - src, - attn_mask=src_mask, - key_padding_mask=src_key_padding_mask, - )[0] - src = residual + self.dropout1(src2) - if not self.normalize_before: - src = self.norm1(src) - - residual = src - if self.normalize_before: - src = self.norm2(src) - src2 = self.linear2(self.dropout(self.activation(self.linear1(src)))) - src = residual + self.dropout2(src2) - if not self.normalize_before: - src = self.norm2(src) - return src - - -def _get_activation_fn(activation: str): - if activation == "relu": - return nn.functional.relu - elif activation == "gelu": - return nn.functional.gelu - - raise RuntimeError( - "activation should be relu/gelu, not {}".format(activation) - ) - - -class PositionalEncoding(nn.Module): - """This class implements the positional encoding - proposed in the following paper: - - - Attention Is All You Need: https://arxiv.org/pdf/1706.03762.pdf - - PE(pos, 2i) = sin(pos / (10000^(2i/d_modle)) - PE(pos, 2i+1) = cos(pos / (10000^(2i/d_modle)) - - Note:: - - 1 / (10000^(2i/d_model)) = exp(-log(10000^(2i/d_model))) - = exp(-1* 2i / d_model * log(100000)) - = exp(2i * -(log(10000) / d_model)) - """ - - def __init__(self, d_model: int, dropout: float = 0.1) -> None: - """ - Args: - d_model: - Embedding dimension. - dropout: - Dropout probability to be applied to the output of this module. - """ - super().__init__() - self.d_model = d_model - self.xscale = math.sqrt(self.d_model) - self.dropout = nn.Dropout(p=dropout) - # not doing: self.pe = None because of errors thrown by torchscript - self.pe = torch.zeros(1, 0, self.d_model, dtype=torch.float32) - - def extend_pe(self, x: torch.Tensor) -> None: - """Extend the time t in the positional encoding if required. - - The shape of `self.pe` is (1, T1, d_model). 
The shape of the input x - is (N, T, d_model). If T > T1, then we change the shape of self.pe - to (N, T, d_model). Otherwise, nothing is done. - - Args: - x: - It is a tensor of shape (N, T, C). - Returns: - Return None. - """ - if self.pe is not None: - if self.pe.size(1) >= x.size(1): - self.pe = self.pe.to(dtype=x.dtype, device=x.device) - return - pe = torch.zeros(x.size(1), self.d_model, dtype=torch.float32) - position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1) - div_term = torch.exp( - torch.arange(0, self.d_model, 2, dtype=torch.float32) - * -(math.log(10000.0) / self.d_model) - ) - pe[:, 0::2] = torch.sin(position * div_term) - pe[:, 1::2] = torch.cos(position * div_term) - pe = pe.unsqueeze(0) - # Now pe is of shape (1, T, d_model), where T is x.size(1) - self.pe = pe.to(device=x.device, dtype=x.dtype) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - """ - Add positional encoding. - - Args: - x: - Its shape is (N, T, C) - - Returns: - Return a tensor of shape (N, T, C) - """ - self.extend_pe(x) - x = x * self.xscale + self.pe[:, : x.size(1), :] - return self.dropout(x) - - -class Noam(object): - """ - Implements Noam optimizer. - - Proposed in - "Attention Is All You Need", https://arxiv.org/pdf/1706.03762.pdf - - Modified from - https://github.com/espnet/espnet/blob/master/espnet/nets/pytorch_backend/transformer/optimizer.py # noqa - - Args: - params: - iterable of parameters to optimize or dicts defining parameter groups - model_size: - attention dimension of the transformer model - factor: - learning rate factor - warm_step: - warmup steps - """ - - def __init__( - self, - params, - model_size: int = 256, - factor: float = 10.0, - warm_step: int = 25000, - weight_decay=0, - ) -> None: - """Construct an Noam object.""" - self.optimizer = torch.optim.Adam( - params, lr=0, betas=(0.9, 0.98), eps=1e-9, weight_decay=weight_decay - ) - self._step = 0 - self.warmup = warm_step - self.factor = factor - self.model_size = model_size - self._rate = 0 - - @property - def param_groups(self): - """Return param_groups.""" - return self.optimizer.param_groups - - def step(self): - """Update parameters and rate.""" - self._step += 1 - rate = self.rate() - for p in self.optimizer.param_groups: - p["lr"] = rate - self._rate = rate - self.optimizer.step() - - def rate(self, step=None): - """Implement `lrate` above.""" - if step is None: - step = self._step - return ( - self.factor - * self.model_size ** (-0.5) - * min(step ** (-0.5), step * self.warmup ** (-1.5)) - ) - - def zero_grad(self): - """Reset gradient.""" - self.optimizer.zero_grad() - - def state_dict(self): - """Return state_dict.""" - return { - "_step": self._step, - "warmup": self.warmup, - "factor": self.factor, - "model_size": self.model_size, - "_rate": self._rate, - "optimizer": self.optimizer.state_dict(), - } - - def load_state_dict(self, state_dict): - """Load state_dict.""" - for key, value in state_dict.items(): - if key == "optimizer": - self.optimizer.load_state_dict(state_dict["optimizer"]) - else: - setattr(self, key, value) diff --git a/egs/librispeech/ASR/transducer/transformer.py b/egs/librispeech/ASR/transducer/transformer.py new file mode 120000 index 000000000..e43f520f9 --- /dev/null +++ b/egs/librispeech/ASR/transducer/transformer.py @@ -0,0 +1 @@ +../transducer_stateless/transformer.py \ No newline at end of file diff --git a/egs/librispeech/ASR/transducer_stateless/joiner.py b/egs/librispeech/ASR/transducer_stateless/joiner.py index 55f0a81f1..b0ba7fd83 100644 --- 
a/egs/librispeech/ASR/transducer_stateless/joiner.py +++ b/egs/librispeech/ASR/transducer_stateless/joiner.py @@ -55,6 +55,9 @@ class Joiner(nn.Module): N = encoder_out.size(0) + encoder_out_len = encoder_out_len.tolist() + decoder_out_len = decoder_out_len.tolist() + encoder_out_list = [ encoder_out[i, : encoder_out_len[i], :] for i in range(N) ] diff --git a/egs/librispeech/ASR/transducer_stateless/test_joiner.py b/egs/librispeech/ASR/transducer_stateless/test_joiner.py new file mode 100755 index 000000000..593577c7c --- /dev/null +++ b/egs/librispeech/ASR/transducer_stateless/test_joiner.py @@ -0,0 +1,57 @@ +#!/usr/bin/env python3 +# Copyright 2022 Xiaomi Corp. (authors: Fangjun Kuang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +To run this file, do: + + cd icefall/egs/librispeech/ASR + python ./transducer_stateless/test_joiner.py +""" + +import torch +from joiner import Joiner + + +def test_joiner(): + device = torch.device("cpu") + input_dim = 3 + output_dim = 5 + joiner = Joiner(input_dim, output_dim) + joiner.to(device) + + encoder_out = torch.rand(3, 10, input_dim, device=device) + decoder_out = torch.rand(3, 8, input_dim, device=device) + + encoder_out_len = torch.tensor([5, 10, 3], device=device) + decoder_out_len = torch.tensor([6, 8, 7], device=device) + + out = joiner( + encoder_out=encoder_out, + decoder_out=decoder_out, + encoder_out_len=encoder_out_len, + decoder_out_len=decoder_out_len, + ) + assert out.size(0) == (encoder_out_len * decoder_out_len).sum() + assert out.size(1) == output_dim + + +def main(): + test_joiner() + + +if __name__ == "__main__": + main() diff --git a/egs/librispeech/ASR/transducer_stateless_multi_datasets/beam_search.py b/egs/librispeech/ASR/transducer_stateless_multi_datasets/beam_search.py deleted file mode 100644 index c5efb733d..000000000 --- a/egs/librispeech/ASR/transducer_stateless_multi_datasets/beam_search.py +++ /dev/null @@ -1,541 +0,0 @@ -# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang) -# -# See ../../../../LICENSE for clarification regarding multiple authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from dataclasses import dataclass -from typing import Dict, List, Optional - -import torch -from model import Transducer - - -def greedy_search( - model: Transducer, encoder_out: torch.Tensor, max_sym_per_frame: int -) -> List[int]: - """ - Args: - model: - An instance of `Transducer`. - encoder_out: - A tensor of shape (N, T, C) from the encoder. 
Support only N==1 for now. - max_sym_per_frame: - Maximum number of symbols per frame. If it is set to 0, the WER - would be 100%. - Returns: - Return the decoded result. - """ - assert encoder_out.ndim == 3 - - # support only batch_size == 1 for now - assert encoder_out.size(0) == 1, encoder_out.size(0) - - blank_id = model.decoder.blank_id - context_size = model.decoder.context_size - - device = model.device - - decoder_input = torch.tensor( - [blank_id] * context_size, device=device, dtype=torch.int64 - ).reshape(1, context_size) - - decoder_out = model.decoder(decoder_input, need_pad=False) - - T = encoder_out.size(1) - t = 0 - hyp = [blank_id] * context_size - - # Maximum symbols per utterance. - max_sym_per_utt = 1000 - - # symbols per frame - sym_per_frame = 0 - - # symbols per utterance decoded so far - sym_per_utt = 0 - - encoder_out_len = torch.tensor([1]) - decoder_out_len = torch.tensor([1]) - - while t < T and sym_per_utt < max_sym_per_utt: - if sym_per_frame >= max_sym_per_frame: - sym_per_frame = 0 - t += 1 - continue - - # fmt: off - current_encoder_out = encoder_out[:, t:t+1, :] - # fmt: on - logits = model.joiner( - current_encoder_out, decoder_out, encoder_out_len, decoder_out_len - ) - # logits is (1, 1, 1, vocab_size) - - y = logits.argmax().item() - if y != blank_id: - hyp.append(y) - decoder_input = torch.tensor( - [hyp[-context_size:]], device=device - ).reshape(1, context_size) - - decoder_out = model.decoder(decoder_input, need_pad=False) - - sym_per_utt += 1 - sym_per_frame += 1 - else: - sym_per_frame = 0 - t += 1 - hyp = hyp[context_size:] # remove blanks - - return hyp - - -@dataclass -class Hypothesis: - # The predicted tokens so far. - # Newly predicted tokens are appended to `ys`. - ys: List[int] - - # The log prob of ys. - # It contains only one entry. - log_prob: torch.Tensor - - @property - def key(self) -> str: - """Return a string representation of self.ys""" - return "_".join(map(str, self.ys)) - - -class HypothesisList(object): - def __init__(self, data: Optional[Dict[str, Hypothesis]] = None) -> None: - """ - Args: - data: - A dict of Hypotheses. Its key is its `value.key`. - """ - if data is None: - self._data = {} - else: - self._data = data - - @property - def data(self) -> Dict[str, Hypothesis]: - return self._data - - def add(self, hyp: Hypothesis) -> None: - """Add a Hypothesis to `self`. - - If `hyp` already exists in `self`, its probability is updated using - `log-sum-exp` with the existed one. - - Args: - hyp: - The hypothesis to be added. - """ - key = hyp.key - if key in self: - old_hyp = self._data[key] # shallow copy - torch.logaddexp( - old_hyp.log_prob, hyp.log_prob, out=old_hyp.log_prob - ) - else: - self._data[key] = hyp - - def get_most_probable(self, length_norm: bool = False) -> Hypothesis: - """Get the most probable hypothesis, i.e., the one with - the largest `log_prob`. - - Args: - length_norm: - If True, the `log_prob` of a hypothesis is normalized by the - number of tokens in it. - Returns: - Return the hypothesis that has the largest `log_prob`. - """ - if length_norm: - return max( - self._data.values(), key=lambda hyp: hyp.log_prob / len(hyp.ys) - ) - else: - return max(self._data.values(), key=lambda hyp: hyp.log_prob) - - def remove(self, hyp: Hypothesis) -> None: - """Remove a given hypothesis. - - Caution: - `self` is modified **in-place**. - - Args: - hyp: - The hypothesis to be removed from `self`. - Note: It must be contained in `self`. Otherwise, - an exception is raised. 
- """ - key = hyp.key - assert key in self, f"{key} does not exist" - del self._data[key] - - def filter(self, threshold: torch.Tensor) -> "HypothesisList": - """Remove all Hypotheses whose log_prob is less than threshold. - - Caution: - `self` is not modified. Instead, a new HypothesisList is returned. - - Returns: - Return a new HypothesisList containing all hypotheses from `self` - with `log_prob` being greater than the given `threshold`. - """ - ans = HypothesisList() - for _, hyp in self._data.items(): - if hyp.log_prob > threshold: - ans.add(hyp) # shallow copy - return ans - - def topk(self, k: int) -> "HypothesisList": - """Return the top-k hypothesis.""" - hyps = list(self._data.items()) - - hyps = sorted(hyps, key=lambda h: h[1].log_prob, reverse=True)[:k] - - ans = HypothesisList(dict(hyps)) - return ans - - def __contains__(self, key: str): - return key in self._data - - def __iter__(self): - return iter(self._data.values()) - - def __len__(self) -> int: - return len(self._data) - - def __str__(self) -> str: - s = [] - for key in self: - s.append(key) - return ", ".join(s) - - -def run_decoder( - ys: List[int], - model: Transducer, - decoder_cache: Dict[str, torch.Tensor], -) -> torch.Tensor: - """Run the neural decoder model for a given hypothesis. - - Args: - ys: - The current hypothesis. - model: - The transducer model. - decoder_cache: - Cache to save computations. - Returns: - Return a 1-D tensor of shape (decoder_out_dim,) containing - output of `model.decoder`. - """ - context_size = model.decoder.context_size - key = "_".join(map(str, ys[-context_size:])) - if key in decoder_cache: - return decoder_cache[key] - - device = model.device - - decoder_input = torch.tensor([ys[-context_size:]], device=device).reshape( - 1, context_size - ) - - decoder_out = model.decoder(decoder_input, need_pad=False) - decoder_cache[key] = decoder_out - - return decoder_out - - -def run_joiner( - key: str, - model: Transducer, - encoder_out: torch.Tensor, - decoder_out: torch.Tensor, - encoder_out_len: torch.Tensor, - decoder_out_len: torch.Tensor, - joint_cache: Dict[str, torch.Tensor], -): - """Run the joint network given outputs from the encoder and decoder. - - Args: - key: - A key into the `joint_cache`. - model: - The transducer model. - encoder_out: - A tensor of shape (1, 1, encoder_out_dim). - decoder_out: - A tensor of shape (1, 1, decoder_out_dim). - encoder_out_len: - A tensor with value [1]. - decoder_out_len: - A tensor with value [1]. - joint_cache: - A dict to save computations. - Returns: - Return a tensor from the output of log-softmax. - Its shape is (vocab_size,). - """ - if key in joint_cache: - return joint_cache[key] - - logits = model.joiner( - encoder_out, - decoder_out, - encoder_out_len, - decoder_out_len, - ) - - # TODO(fangjun): Scale the blank posterior - log_prob = logits.log_softmax(dim=-1) - # log_prob is (1, 1, 1, vocab_size) - - log_prob = log_prob.squeeze() - # Now log_prob is (vocab_size,) - - joint_cache[key] = log_prob - - return log_prob - - -def modified_beam_search( - model: Transducer, - encoder_out: torch.Tensor, - beam: int = 4, -) -> List[int]: - """It limits the maximum number of symbols per frame to 1. - - Args: - model: - An instance of `Transducer`. - encoder_out: - A tensor of shape (N, T, C) from the encoder. Support only N==1 for now. - beam: - Beam size. - Returns: - Return the decoded result. 
- """ - - assert encoder_out.ndim == 3 - - # support only batch_size == 1 for now - assert encoder_out.size(0) == 1, encoder_out.size(0) - blank_id = model.decoder.blank_id - context_size = model.decoder.context_size - - device = model.device - - decoder_input = torch.tensor( - [blank_id] * context_size, device=device - ).reshape(1, context_size) - - decoder_out = model.decoder(decoder_input, need_pad=False) - - T = encoder_out.size(1) - - B = HypothesisList() - B.add( - Hypothesis( - ys=[blank_id] * context_size, - log_prob=torch.zeros(1, dtype=torch.float32, device=device), - ) - ) - - encoder_out_len = torch.tensor([1]) - decoder_out_len = torch.tensor([1]) - - for t in range(T): - # fmt: off - current_encoder_out = encoder_out[:, t:t+1, :] - # current_encoder_out is of shape (1, 1, encoder_out_dim) - # fmt: on - A = list(B) - B = HypothesisList() - - ys_log_probs = torch.cat([hyp.log_prob.reshape(1, 1) for hyp in A]) - # ys_log_probs is of shape (num_hyps, 1) - - decoder_input = torch.tensor( - [hyp.ys[-context_size:] for hyp in A], - device=device, - ) - # decoder_input is of shape (num_hyps, context_size) - - decoder_out = model.decoder(decoder_input, need_pad=False) - # decoder_output is of shape (num_hyps, 1, decoder_output_dim) - - current_encoder_out = current_encoder_out.expand( - decoder_out.size(0), 1, -1 - ) - - logits = model.joiner( - current_encoder_out, - decoder_out, - encoder_out_len.expand(decoder_out.size(0)), - decoder_out_len.expand(decoder_out.size(0)), - ) - # logits is of shape (num_hyps, vocab_size) - log_probs = logits.log_softmax(dim=-1) - - log_probs.add_(ys_log_probs) - - log_probs = log_probs.reshape(-1) - topk_log_probs, topk_indexes = log_probs.topk(beam) - - # topk_hyp_indexes are indexes into `A` - topk_hyp_indexes = topk_indexes // logits.size(-1) - topk_token_indexes = topk_indexes % logits.size(-1) - - topk_hyp_indexes = topk_hyp_indexes.tolist() - topk_token_indexes = topk_token_indexes.tolist() - - for i in range(len(topk_hyp_indexes)): - hyp = A[topk_hyp_indexes[i]] - new_ys = hyp.ys[:] - new_token = topk_token_indexes[i] - if new_token != blank_id: - new_ys.append(new_token) - new_log_prob = topk_log_probs[i] - new_hyp = Hypothesis(ys=new_ys, log_prob=new_log_prob) - B.add(new_hyp) - - best_hyp = B.get_most_probable(length_norm=True) - ys = best_hyp.ys[context_size:] # [context_size:] to remove blanks - - return ys - - -def beam_search( - model: Transducer, - encoder_out: torch.Tensor, - beam: int = 4, -) -> List[int]: - """ - It implements Algorithm 1 in https://arxiv.org/pdf/1211.3711.pdf - - espnet/nets/beam_search_transducer.py#L247 is used as a reference. - - Args: - model: - An instance of `Transducer`. - encoder_out: - A tensor of shape (N, T, C) from the encoder. Support only N==1 for now. - beam: - Beam size. - Returns: - Return the decoded result. 
- """ - assert encoder_out.ndim == 3 - - # support only batch_size == 1 for now - assert encoder_out.size(0) == 1, encoder_out.size(0) - blank_id = model.decoder.blank_id - context_size = model.decoder.context_size - - device = model.device - - decoder_input = torch.tensor( - [blank_id] * context_size, device=device - ).reshape(1, context_size) - - decoder_out = model.decoder(decoder_input, need_pad=False) - - T = encoder_out.size(1) - t = 0 - - B = HypothesisList() - B.add( - Hypothesis( - ys=[blank_id] * context_size, - log_prob=torch.zeros(1, dtype=torch.float32, device=device), - ) - ) - - max_sym_per_utt = 20000 - - sym_per_utt = 0 - - encoder_out_len = torch.tensor([1]) - decoder_out_len = torch.tensor([1]) - - decoder_cache: Dict[str, torch.Tensor] = {} - - while t < T and sym_per_utt < max_sym_per_utt: - # fmt: off - current_encoder_out = encoder_out[:, t:t+1, :] - # fmt: on - A = B - B = HypothesisList() - - joint_cache: Dict[str, torch.Tensor] = {} - - while True: - y_star = A.get_most_probable() - A.remove(y_star) - - decoder_out = run_decoder( - ys=y_star.ys, model=model, decoder_cache=decoder_cache - ) - - key = "_".join(map(str, y_star.ys[-context_size:])) - key += f"-t-{t}" - log_prob = run_joiner( - key=key, - model=model, - encoder_out=current_encoder_out, - decoder_out=decoder_out, - encoder_out_len=encoder_out_len, - decoder_out_len=decoder_out_len, - joint_cache=joint_cache, - ) - - # First, process the blank symbol - skip_log_prob = log_prob[blank_id] - new_y_star_log_prob = y_star.log_prob + skip_log_prob - - # ys[:] returns a copy of ys - B.add(Hypothesis(ys=y_star.ys[:], log_prob=new_y_star_log_prob)) - - # Second, process other non-blank labels - values, indices = log_prob.topk(beam + 1) - for idx in range(values.size(0)): - i = indices[idx].item() - if i == blank_id: - continue - - new_ys = y_star.ys + [i] - - new_log_prob = y_star.log_prob + values[idx] - A.add(Hypothesis(ys=new_ys, log_prob=new_log_prob)) - - # Check whether B contains more than "beam" elements more probable - # than the most probable in A - A_most_probable = A.get_most_probable() - - kept_B = B.filter(A_most_probable.log_prob) - - if len(kept_B) >= beam: - B = kept_B.topk(beam) - break - - t += 1 - - best_hyp = B.get_most_probable(length_norm=True) - ys = best_hyp.ys[context_size:] # [context_size:] to remove blanks - return ys diff --git a/egs/librispeech/ASR/transducer_stateless_multi_datasets/beam_search.py b/egs/librispeech/ASR/transducer_stateless_multi_datasets/beam_search.py new file mode 120000 index 000000000..08cb32ef7 --- /dev/null +++ b/egs/librispeech/ASR/transducer_stateless_multi_datasets/beam_search.py @@ -0,0 +1 @@ +../transducer_stateless/beam_search.py \ No newline at end of file diff --git a/egs/librispeech/ASR/transducer_stateless_multi_datasets/conformer.py b/egs/librispeech/ASR/transducer_stateless_multi_datasets/conformer.py deleted file mode 100644 index 81d7708f9..000000000 --- a/egs/librispeech/ASR/transducer_stateless_multi_datasets/conformer.py +++ /dev/null @@ -1,920 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2021 University of Chinese Academy of Sciences (author: Han Zhu) -# -# See ../../../../LICENSE for clarification regarding multiple authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import math -import warnings -from typing import Optional, Tuple - -import torch -from torch import Tensor, nn -from transformer import Transformer - -from icefall.utils import make_pad_mask - - -class Conformer(Transformer): - """ - Args: - num_features (int): Number of input features - output_dim (int): Number of output dimension - subsampling_factor (int): subsampling factor of encoder (the convolution layers before transformers) - d_model (int): attention dimension - nhead (int): number of head - dim_feedforward (int): feedforward dimention - num_encoder_layers (int): number of encoder layers - dropout (float): dropout rate - cnn_module_kernel (int): Kernel size of convolution module - normalize_before (bool): whether to use layer_norm before the first block. - vgg_frontend (bool): whether to use vgg frontend. - """ - - def __init__( - self, - num_features: int, - output_dim: int, - subsampling_factor: int = 4, - d_model: int = 256, - nhead: int = 4, - dim_feedforward: int = 2048, - num_encoder_layers: int = 12, - dropout: float = 0.1, - cnn_module_kernel: int = 31, - normalize_before: bool = True, - vgg_frontend: bool = False, - ) -> None: - super(Conformer, self).__init__( - num_features=num_features, - output_dim=output_dim, - subsampling_factor=subsampling_factor, - d_model=d_model, - nhead=nhead, - dim_feedforward=dim_feedforward, - num_encoder_layers=num_encoder_layers, - dropout=dropout, - normalize_before=normalize_before, - vgg_frontend=vgg_frontend, - ) - - self.encoder_pos = RelPositionalEncoding(d_model, dropout) - - encoder_layer = ConformerEncoderLayer( - d_model, - nhead, - dim_feedforward, - dropout, - cnn_module_kernel, - normalize_before, - ) - self.encoder = ConformerEncoder(encoder_layer, num_encoder_layers) - self.normalize_before = normalize_before - if self.normalize_before: - self.after_norm = nn.LayerNorm(d_model) - else: - # Note: TorchScript detects that self.after_norm could be used inside forward() - # and throws an error without this change. - self.after_norm = identity - - def forward( - self, x: torch.Tensor, x_lens: torch.Tensor - ) -> Tuple[torch.Tensor, torch.Tensor]: - """ - Args: - x: - The input tensor. Its shape is (batch_size, seq_len, feature_dim). - x_lens: - A tensor of shape (batch_size,) containing the number of frames in - `x` before padding. - Returns: - Return a tuple containing 2 tensors: - - logits, its shape is (batch_size, output_seq_len, output_dim) - - logit_lens, a tensor of shape (batch_size,) containing the number - of frames in `logits` before padding. - """ - x = self.encoder_embed(x) - x, pos_emb = self.encoder_pos(x) - x = x.permute(1, 0, 2) # (N, T, C) -> (T, N, C) - - # Caution: We assume the subsampling factor is 4! 
- lengths = ((x_lens - 1) // 2 - 1) // 2 - assert x.size(0) == lengths.max().item() - mask = make_pad_mask(lengths) - - x = self.encoder(x, pos_emb, src_key_padding_mask=mask) # (T, N, C) - - if self.normalize_before: - x = self.after_norm(x) - - logits = self.encoder_output_layer(x) - logits = logits.permute(1, 0, 2) # (T, N, C) ->(N, T, C) - - return logits, lengths - - -class ConformerEncoderLayer(nn.Module): - """ - ConformerEncoderLayer is made up of self-attn, feedforward and convolution networks. - See: "Conformer: Convolution-augmented Transformer for Speech Recognition" - - Args: - d_model: the number of expected features in the input (required). - nhead: the number of heads in the multiheadattention models (required). - dim_feedforward: the dimension of the feedforward network model (default=2048). - dropout: the dropout value (default=0.1). - cnn_module_kernel (int): Kernel size of convolution module. - normalize_before: whether to use layer_norm before the first block. - - Examples:: - >>> encoder_layer = ConformerEncoderLayer(d_model=512, nhead=8) - >>> src = torch.rand(10, 32, 512) - >>> pos_emb = torch.rand(32, 19, 512) - >>> out = encoder_layer(src, pos_emb) - """ - - def __init__( - self, - d_model: int, - nhead: int, - dim_feedforward: int = 2048, - dropout: float = 0.1, - cnn_module_kernel: int = 31, - normalize_before: bool = True, - ) -> None: - super(ConformerEncoderLayer, self).__init__() - self.self_attn = RelPositionMultiheadAttention( - d_model, nhead, dropout=0.0 - ) - - self.feed_forward = nn.Sequential( - nn.Linear(d_model, dim_feedforward), - Swish(), - nn.Dropout(dropout), - nn.Linear(dim_feedforward, d_model), - ) - - self.feed_forward_macaron = nn.Sequential( - nn.Linear(d_model, dim_feedforward), - Swish(), - nn.Dropout(dropout), - nn.Linear(dim_feedforward, d_model), - ) - - self.conv_module = ConvolutionModule(d_model, cnn_module_kernel) - - self.norm_ff_macaron = nn.LayerNorm( - d_model - ) # for the macaron style FNN module - self.norm_ff = nn.LayerNorm(d_model) # for the FNN module - self.norm_mha = nn.LayerNorm(d_model) # for the MHA module - - self.ff_scale = 0.5 - - self.norm_conv = nn.LayerNorm(d_model) # for the CNN module - self.norm_final = nn.LayerNorm( - d_model - ) # for the final output of the block - - self.dropout = nn.Dropout(dropout) - - self.normalize_before = normalize_before - - def forward( - self, - src: Tensor, - pos_emb: Tensor, - src_mask: Optional[Tensor] = None, - src_key_padding_mask: Optional[Tensor] = None, - ) -> Tensor: - """ - Pass the input through the encoder layer. - - Args: - src: the sequence to the encoder layer (required). - pos_emb: Positional embedding tensor (required). - src_mask: the mask for the src sequence (optional). - src_key_padding_mask: the mask for the src keys per batch (optional). - - Shape: - src: (S, N, E). - pos_emb: (N, 2*S-1, E) - src_mask: (S, S). - src_key_padding_mask: (N, S). 
- S is the source sequence length, N is the batch size, E is the feature number - """ - - # macaron style feed forward module - residual = src - if self.normalize_before: - src = self.norm_ff_macaron(src) - src = residual + self.ff_scale * self.dropout( - self.feed_forward_macaron(src) - ) - if not self.normalize_before: - src = self.norm_ff_macaron(src) - - # multi-headed self-attention module - residual = src - if self.normalize_before: - src = self.norm_mha(src) - src_att = self.self_attn( - src, - src, - src, - pos_emb=pos_emb, - attn_mask=src_mask, - key_padding_mask=src_key_padding_mask, - )[0] - src = residual + self.dropout(src_att) - if not self.normalize_before: - src = self.norm_mha(src) - - # convolution module - residual = src - if self.normalize_before: - src = self.norm_conv(src) - src = residual + self.dropout(self.conv_module(src)) - if not self.normalize_before: - src = self.norm_conv(src) - - # feed forward module - residual = src - if self.normalize_before: - src = self.norm_ff(src) - src = residual + self.ff_scale * self.dropout(self.feed_forward(src)) - if not self.normalize_before: - src = self.norm_ff(src) - - if self.normalize_before: - src = self.norm_final(src) - - return src - - -class ConformerEncoder(nn.TransformerEncoder): - r"""ConformerEncoder is a stack of N encoder layers - - Args: - encoder_layer: an instance of the ConformerEncoderLayer() class (required). - num_layers: the number of sub-encoder-layers in the encoder (required). - norm: the layer normalization component (optional). - - Examples:: - >>> encoder_layer = ConformerEncoderLayer(d_model=512, nhead=8) - >>> conformer_encoder = ConformerEncoder(encoder_layer, num_layers=6) - >>> src = torch.rand(10, 32, 512) - >>> pos_emb = torch.rand(32, 19, 512) - >>> out = conformer_encoder(src, pos_emb) - """ - - def __init__( - self, encoder_layer: nn.Module, num_layers: int, norm: nn.Module = None - ) -> None: - super(ConformerEncoder, self).__init__( - encoder_layer=encoder_layer, num_layers=num_layers, norm=norm - ) - - def forward( - self, - src: Tensor, - pos_emb: Tensor, - mask: Optional[Tensor] = None, - src_key_padding_mask: Optional[Tensor] = None, - ) -> Tensor: - r"""Pass the input through the encoder layers in turn. - - Args: - src: the sequence to the encoder (required). - pos_emb: Positional embedding tensor (required). - mask: the mask for the src sequence (optional). - src_key_padding_mask: the mask for the src keys per batch (optional). - - Shape: - src: (S, N, E). - pos_emb: (N, 2*S-1, E) - mask: (S, S). - src_key_padding_mask: (N, S). - S is the source sequence length, T is the target sequence length, N is the batch size, E is the feature number - - """ - output = src - - for mod in self.layers: - output = mod( - output, - pos_emb, - src_mask=mask, - src_key_padding_mask=src_key_padding_mask, - ) - - if self.norm is not None: - output = self.norm(output) - - return output - - -class RelPositionalEncoding(torch.nn.Module): - """Relative positional encoding module. - - See : Appendix B in "Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context" - Modified from https://github.com/espnet/espnet/blob/master/espnet/nets/pytorch_backend/transformer/embedding.py - - Args: - d_model: Embedding dimension. - dropout_rate: Dropout rate. - max_len: Maximum input length. 
- 
-    """
- 
-    def __init__(
-        self, d_model: int, dropout_rate: float, max_len: int = 5000
-    ) -> None:
-        """Construct an PositionalEncoding object."""
-        super(RelPositionalEncoding, self).__init__()
-        self.d_model = d_model
-        self.xscale = math.sqrt(self.d_model)
-        self.dropout = torch.nn.Dropout(p=dropout_rate)
-        self.pe = None
-        self.extend_pe(torch.tensor(0.0).expand(1, max_len))
- 
-    def extend_pe(self, x: Tensor) -> None:
-        """Reset the positional encodings."""
-        if self.pe is not None:
-            # self.pe contains both positive and negative parts
-            # the length of self.pe is 2 * input_len - 1
-            if self.pe.size(1) >= x.size(1) * 2 - 1:
-                # Note: TorchScript doesn't implement operator== for torch.Device
-                if self.pe.dtype != x.dtype or str(self.pe.device) != str(
-                    x.device
-                ):
-                    self.pe = self.pe.to(dtype=x.dtype, device=x.device)
-                return
-        # Suppose `i` means to the position of query vecotr and `j` means the
-        # position of key vector. We use positive relative positions when keys
-        # are to the left (i>j) and negative relative positions otherwise (i<j).
-        pe_positive = torch.zeros(x.size(1), self.d_model)
-        pe_negative = torch.zeros(x.size(1), self.d_model)
-        position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1)
-        div_term = torch.exp(
-            torch.arange(0, self.d_model, 2, dtype=torch.float32)
-            * -(math.log(10000.0) / self.d_model)
-        )
-        pe_positive[:, 0::2] = torch.sin(position * div_term)
-        pe_positive[:, 1::2] = torch.cos(position * div_term)
-        pe_negative[:, 0::2] = torch.sin(-1 * position * div_term)
-        pe_negative[:, 1::2] = torch.cos(-1 * position * div_term)
- 
-        # Reverse the order of positive indices and concat both positive and
-        # negative indices. This is used to support the shifting trick
-        # as in "Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context"
-        pe_positive = torch.flip(pe_positive, [0]).unsqueeze(0)
-        pe_negative = pe_negative[1:].unsqueeze(0)
-        pe = torch.cat([pe_positive, pe_negative], dim=1)
-        self.pe = pe.to(device=x.device, dtype=x.dtype)
- 
-    def forward(self, x: torch.Tensor) -> Tuple[Tensor, Tensor]:
-        """Add positional encoding.
- 
-        Args:
-            x (torch.Tensor): Input tensor (batch, time, `*`).
- 
-        Returns:
-            torch.Tensor: Encoded tensor (batch, time, `*`).
-            torch.Tensor: Encoded tensor (batch, 2*time-1, `*`).
- 
-        """
-        self.extend_pe(x)
-        x = x * self.xscale
-        pos_emb = self.pe[
-            :,
-            self.pe.size(1) // 2
-            - x.size(1)
-            + 1 : self.pe.size(1) // 2  # noqa E203
-            + x.size(1),
-        ]
-        return self.dropout(x), self.dropout(pos_emb)
- 
- 
-class RelPositionMultiheadAttention(nn.Module):
-    r"""Multi-Head Attention layer with relative position encoding
- 
-    See reference: "Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context"
- 
-    Args:
-        embed_dim: total dimension of the model.
-        num_heads: parallel attention heads.
-        dropout: a Dropout layer on attn_output_weights. Default: 0.0.
- 
-    Examples::
- 
-        >>> rel_pos_multihead_attn = RelPositionMultiheadAttention(embed_dim, num_heads)
-        >>> attn_output, attn_output_weights = multihead_attn(query, key, value, pos_emb)
-    """
- 
-    def __init__(
-        self,
-        embed_dim: int,
-        num_heads: int,
-        dropout: float = 0.0,
-    ) -> None:
-        super(RelPositionMultiheadAttention, self).__init__()
-        self.embed_dim = embed_dim
-        self.num_heads = num_heads
-        self.dropout = dropout
-        self.head_dim = embed_dim // num_heads
-        assert (
-            self.head_dim * num_heads == self.embed_dim
-        ), "embed_dim must be divisible by num_heads"
- 
-        self.in_proj = nn.Linear(embed_dim, 3 * embed_dim, bias=True)
-        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=True)
- 
-        # linear transformation for positional encoding.
- self.linear_pos = nn.Linear(embed_dim, embed_dim, bias=False) - # these two learnable bias are used in matrix c and matrix d - # as described in "Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context" Section 3.3 - self.pos_bias_u = nn.Parameter(torch.Tensor(num_heads, self.head_dim)) - self.pos_bias_v = nn.Parameter(torch.Tensor(num_heads, self.head_dim)) - - self._reset_parameters() - - def _reset_parameters(self) -> None: - nn.init.xavier_uniform_(self.in_proj.weight) - nn.init.constant_(self.in_proj.bias, 0.0) - nn.init.constant_(self.out_proj.bias, 0.0) - - nn.init.xavier_uniform_(self.pos_bias_u) - nn.init.xavier_uniform_(self.pos_bias_v) - - def forward( - self, - query: Tensor, - key: Tensor, - value: Tensor, - pos_emb: Tensor, - key_padding_mask: Optional[Tensor] = None, - need_weights: bool = True, - attn_mask: Optional[Tensor] = None, - ) -> Tuple[Tensor, Optional[Tensor]]: - r""" - Args: - query, key, value: map a query and a set of key-value pairs to an output. - pos_emb: Positional embedding tensor - key_padding_mask: if provided, specified padding elements in the key will - be ignored by the attention. When given a binary mask and a value is True, - the corresponding value on the attention layer will be ignored. When given - a byte mask and a value is non-zero, the corresponding value on the attention - layer will be ignored - need_weights: output attn_output_weights. - attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all - the batches while a 3D mask allows to specify a different mask for the entries of each batch. - - Shape: - - Inputs: - - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is - the embedding dimension. - - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is - the embedding dimension. - - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is - the embedding dimension. - - pos_emb: :math:`(N, 2*L-1, E)` where L is the target sequence length, N is the batch size, E is - the embedding dimension. - - key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length. - If a ByteTensor is provided, the non-zero positions will be ignored while the position - with the zero positions will be unchanged. If a BoolTensor is provided, the positions with the - value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged. - - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length. - 3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length, - S is the source sequence length. attn_mask ensure that position i is allowed to attend the unmasked - positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend - while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True`` - is not allowed to attend while ``False`` values will be unchanged. If a FloatTensor - is provided, it will be added to the attention weight. - - - Outputs: - - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, - E is the embedding dimension. - - attn_output_weights: :math:`(N, L, S)` where N is the batch size, - L is the target sequence length, S is the source sequence length. 
- """ - return self.multi_head_attention_forward( - query, - key, - value, - pos_emb, - self.embed_dim, - self.num_heads, - self.in_proj.weight, - self.in_proj.bias, - self.dropout, - self.out_proj.weight, - self.out_proj.bias, - training=self.training, - key_padding_mask=key_padding_mask, - need_weights=need_weights, - attn_mask=attn_mask, - ) - - def rel_shift(self, x: Tensor) -> Tensor: - """Compute relative positional encoding. - - Args: - x: Input tensor (batch, head, time1, 2*time1-1). - time1 means the length of query vector. - - Returns: - Tensor: tensor of shape (batch, head, time1, time2) - (note: time2 has the same value as time1, but it is for - the key, while time1 is for the query). - """ - (batch_size, num_heads, time1, n) = x.shape - assert n == 2 * time1 - 1 - # Note: TorchScript requires explicit arg for stride() - batch_stride = x.stride(0) - head_stride = x.stride(1) - time1_stride = x.stride(2) - n_stride = x.stride(3) - return x.as_strided( - (batch_size, num_heads, time1, time1), - (batch_stride, head_stride, time1_stride - n_stride, n_stride), - storage_offset=n_stride * (time1 - 1), - ) - - def multi_head_attention_forward( - self, - query: Tensor, - key: Tensor, - value: Tensor, - pos_emb: Tensor, - embed_dim_to_check: int, - num_heads: int, - in_proj_weight: Tensor, - in_proj_bias: Tensor, - dropout_p: float, - out_proj_weight: Tensor, - out_proj_bias: Tensor, - training: bool = True, - key_padding_mask: Optional[Tensor] = None, - need_weights: bool = True, - attn_mask: Optional[Tensor] = None, - ) -> Tuple[Tensor, Optional[Tensor]]: - r""" - Args: - query, key, value: map a query and a set of key-value pairs to an output. - pos_emb: Positional embedding tensor - embed_dim_to_check: total dimension of the model. - num_heads: parallel attention heads. - in_proj_weight, in_proj_bias: input projection weight and bias. - dropout_p: probability of an element to be zeroed. - out_proj_weight, out_proj_bias: the output projection weight and bias. - training: apply dropout if is ``True``. - key_padding_mask: if provided, specified padding elements in the key will - be ignored by the attention. This is an binary mask. When the value is True, - the corresponding value on the attention layer will be filled with -inf. - need_weights: output attn_output_weights. - attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all - the batches while a 3D mask allows to specify a different mask for the entries of each batch. - - Shape: - Inputs: - - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is - the embedding dimension. - - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is - the embedding dimension. - - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is - the embedding dimension. - - pos_emb: :math:`(N, 2*L-1, E)` or :math:`(1, 2*L-1, E)` where L is the target sequence - length, N is the batch size, E is the embedding dimension. - - key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length. - If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions - will be unchanged. If a BoolTensor is provided, the positions with the - value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged. - - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length. 
- 3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length, - S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked - positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend - while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True`` - are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor - is provided, it will be added to the attention weight. - - Outputs: - - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, - E is the embedding dimension. - - attn_output_weights: :math:`(N, L, S)` where N is the batch size, - L is the target sequence length, S is the source sequence length. - """ - - tgt_len, bsz, embed_dim = query.size() - assert embed_dim == embed_dim_to_check - assert key.size(0) == value.size(0) and key.size(1) == value.size(1) - - head_dim = embed_dim // num_heads - assert ( - head_dim * num_heads == embed_dim - ), "embed_dim must be divisible by num_heads" - scaling = float(head_dim) ** -0.5 - - if torch.equal(query, key) and torch.equal(key, value): - # self-attention - q, k, v = nn.functional.linear( - query, in_proj_weight, in_proj_bias - ).chunk(3, dim=-1) - - elif torch.equal(key, value): - # encoder-decoder attention - # This is inline in_proj function with in_proj_weight and in_proj_bias - _b = in_proj_bias - _start = 0 - _end = embed_dim - _w = in_proj_weight[_start:_end, :] - if _b is not None: - _b = _b[_start:_end] - q = nn.functional.linear(query, _w, _b) - # This is inline in_proj function with in_proj_weight and in_proj_bias - _b = in_proj_bias - _start = embed_dim - _end = None - _w = in_proj_weight[_start:, :] - if _b is not None: - _b = _b[_start:] - k, v = nn.functional.linear(key, _w, _b).chunk(2, dim=-1) - - else: - # This is inline in_proj function with in_proj_weight and in_proj_bias - _b = in_proj_bias - _start = 0 - _end = embed_dim - _w = in_proj_weight[_start:_end, :] - if _b is not None: - _b = _b[_start:_end] - q = nn.functional.linear(query, _w, _b) - - # This is inline in_proj function with in_proj_weight and in_proj_bias - _b = in_proj_bias - _start = embed_dim - _end = embed_dim * 2 - _w = in_proj_weight[_start:_end, :] - if _b is not None: - _b = _b[_start:_end] - k = nn.functional.linear(key, _w, _b) - - # This is inline in_proj function with in_proj_weight and in_proj_bias - _b = in_proj_bias - _start = embed_dim * 2 - _end = None - _w = in_proj_weight[_start:, :] - if _b is not None: - _b = _b[_start:] - v = nn.functional.linear(value, _w, _b) - - if attn_mask is not None: - assert ( - attn_mask.dtype == torch.float32 - or attn_mask.dtype == torch.float64 - or attn_mask.dtype == torch.float16 - or attn_mask.dtype == torch.uint8 - or attn_mask.dtype == torch.bool - ), "Only float, byte, and bool types are supported for attn_mask, not {}".format( - attn_mask.dtype - ) - if attn_mask.dtype == torch.uint8: - warnings.warn( - "Byte tensor for attn_mask is deprecated. Use bool tensor instead." - ) - attn_mask = attn_mask.to(torch.bool) - - if attn_mask.dim() == 2: - attn_mask = attn_mask.unsqueeze(0) - if list(attn_mask.size()) != [1, query.size(0), key.size(0)]: - raise RuntimeError( - "The size of the 2D attn_mask is not correct." - ) - elif attn_mask.dim() == 3: - if list(attn_mask.size()) != [ - bsz * num_heads, - query.size(0), - key.size(0), - ]: - raise RuntimeError( - "The size of the 3D attn_mask is not correct." 
- ) - else: - raise RuntimeError( - "attn_mask's dimension {} is not supported".format( - attn_mask.dim() - ) - ) - # attn_mask's dim is 3 now. - - # convert ByteTensor key_padding_mask to bool - if ( - key_padding_mask is not None - and key_padding_mask.dtype == torch.uint8 - ): - warnings.warn( - "Byte tensor for key_padding_mask is deprecated. Use bool tensor instead." - ) - key_padding_mask = key_padding_mask.to(torch.bool) - - q = q.contiguous().view(tgt_len, bsz, num_heads, head_dim) - k = k.contiguous().view(-1, bsz, num_heads, head_dim) - v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1) - - src_len = k.size(0) - - if key_padding_mask is not None: - assert key_padding_mask.size(0) == bsz, "{} == {}".format( - key_padding_mask.size(0), bsz - ) - assert key_padding_mask.size(1) == src_len, "{} == {}".format( - key_padding_mask.size(1), src_len - ) - - q = q.transpose(0, 1) # (batch, time1, head, d_k) - - pos_emb_bsz = pos_emb.size(0) - assert pos_emb_bsz in (1, bsz) # actually it is 1 - p = self.linear_pos(pos_emb).view(pos_emb_bsz, -1, num_heads, head_dim) - p = p.transpose(1, 2) # (batch, head, 2*time1-1, d_k) - - q_with_bias_u = (q + self.pos_bias_u).transpose( - 1, 2 - ) # (batch, head, time1, d_k) - - q_with_bias_v = (q + self.pos_bias_v).transpose( - 1, 2 - ) # (batch, head, time1, d_k) - - # compute attention score - # first compute matrix a and matrix c - # as described in "Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context" Section 3.3 - k = k.permute(1, 2, 3, 0) # (batch, head, d_k, time2) - matrix_ac = torch.matmul( - q_with_bias_u, k - ) # (batch, head, time1, time2) - - # compute matrix b and matrix d - matrix_bd = torch.matmul( - q_with_bias_v, p.transpose(-2, -1) - ) # (batch, head, time1, 2*time1-1) - matrix_bd = self.rel_shift(matrix_bd) - - attn_output_weights = ( - matrix_ac + matrix_bd - ) * scaling # (batch, head, time1, time2) - - attn_output_weights = attn_output_weights.view( - bsz * num_heads, tgt_len, -1 - ) - - assert list(attn_output_weights.size()) == [ - bsz * num_heads, - tgt_len, - src_len, - ] - - if attn_mask is not None: - if attn_mask.dtype == torch.bool: - attn_output_weights.masked_fill_(attn_mask, float("-inf")) - else: - attn_output_weights += attn_mask - - if key_padding_mask is not None: - attn_output_weights = attn_output_weights.view( - bsz, num_heads, tgt_len, src_len - ) - attn_output_weights = attn_output_weights.masked_fill( - key_padding_mask.unsqueeze(1).unsqueeze(2), - float("-inf"), - ) - attn_output_weights = attn_output_weights.view( - bsz * num_heads, tgt_len, src_len - ) - - attn_output_weights = nn.functional.softmax(attn_output_weights, dim=-1) - attn_output_weights = nn.functional.dropout( - attn_output_weights, p=dropout_p, training=training - ) - - attn_output = torch.bmm(attn_output_weights, v) - assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim] - attn_output = ( - attn_output.transpose(0, 1) - .contiguous() - .view(tgt_len, bsz, embed_dim) - ) - attn_output = nn.functional.linear( - attn_output, out_proj_weight, out_proj_bias - ) - - if need_weights: - # average attention weights over heads - attn_output_weights = attn_output_weights.view( - bsz, num_heads, tgt_len, src_len - ) - return attn_output, attn_output_weights.sum(dim=1) / num_heads - else: - return attn_output, None - - -class ConvolutionModule(nn.Module): - """ConvolutionModule in Conformer model. 
- Modified from https://github.com/espnet/espnet/blob/master/espnet/nets/pytorch_backend/conformer/convolution.py - - Args: - channels (int): The number of channels of conv layers. - kernel_size (int): Kernerl size of conv layers. - bias (bool): Whether to use bias in conv layers (default=True). - - """ - - def __init__( - self, channels: int, kernel_size: int, bias: bool = True - ) -> None: - """Construct an ConvolutionModule object.""" - super(ConvolutionModule, self).__init__() - # kernerl_size should be a odd number for 'SAME' padding - assert (kernel_size - 1) % 2 == 0 - - self.pointwise_conv1 = nn.Conv1d( - channels, - 2 * channels, - kernel_size=1, - stride=1, - padding=0, - bias=bias, - ) - self.depthwise_conv = nn.Conv1d( - channels, - channels, - kernel_size, - stride=1, - padding=(kernel_size - 1) // 2, - groups=channels, - bias=bias, - ) - self.norm = nn.LayerNorm(channels) - self.pointwise_conv2 = nn.Conv1d( - channels, - channels, - kernel_size=1, - stride=1, - padding=0, - bias=bias, - ) - self.activation = Swish() - - def forward(self, x: Tensor) -> Tensor: - """Compute convolution module. - - Args: - x: Input tensor (#time, batch, channels). - - Returns: - Tensor: Output tensor (#time, batch, channels). - - """ - # exchange the temporal dimension and the feature dimension - x = x.permute(1, 2, 0) # (#batch, channels, time). - - # GLU mechanism - x = self.pointwise_conv1(x) # (batch, 2*channels, time) - x = nn.functional.glu(x, dim=1) # (batch, channels, time) - - # 1D Depthwise Conv - x = self.depthwise_conv(x) - # x is (batch, channels, time) - x = x.permute(0, 2, 1) - x = self.norm(x) - x = x.permute(0, 2, 1) - - x = self.activation(x) - - x = self.pointwise_conv2(x) # (batch, channel, time) - - return x.permute(2, 0, 1) - - -class Swish(torch.nn.Module): - """Construct an Swish object.""" - - def forward(self, x: Tensor) -> Tensor: - """Return Swich activation function.""" - return x * torch.sigmoid(x) - - -def identity(x): - return x diff --git a/egs/librispeech/ASR/transducer_stateless_multi_datasets/conformer.py b/egs/librispeech/ASR/transducer_stateless_multi_datasets/conformer.py new file mode 120000 index 000000000..70a7ddf11 --- /dev/null +++ b/egs/librispeech/ASR/transducer_stateless_multi_datasets/conformer.py @@ -0,0 +1 @@ +../transducer_stateless/conformer.py \ No newline at end of file diff --git a/egs/librispeech/ASR/transducer_stateless_multi_datasets/decoder.py b/egs/librispeech/ASR/transducer_stateless_multi_datasets/decoder.py deleted file mode 100644 index b82fed37b..000000000 --- a/egs/librispeech/ASR/transducer_stateless_multi_datasets/decoder.py +++ /dev/null @@ -1,98 +0,0 @@ -# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang) -# -# See ../../../../LICENSE for clarification regarding multiple authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import torch -import torch.nn as nn -import torch.nn.functional as F - - -class Decoder(nn.Module): - """This class modifies the stateless decoder from the following paper: - - RNN-transducer with stateless prediction network - https://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=9054419 - - It removes the recurrent connection from the decoder, i.e., the prediction - network. Different from the above paper, it adds an extra Conv1d - right after the embedding layer. - - TODO: Implement https://arxiv.org/pdf/2109.07513.pdf - """ - - def __init__( - self, - vocab_size: int, - embedding_dim: int, - blank_id: int, - context_size: int, - ): - """ - Args: - vocab_size: - Number of tokens of the modeling unit including blank. - embedding_dim: - Dimension of the input embedding. - blank_id: - The ID of the blank symbol. - context_size: - Number of previous words to use to predict the next word. - 1 means bigram; 2 means trigram. n means (n+1)-gram. - """ - super().__init__() - self.embedding = nn.Embedding( - num_embeddings=vocab_size, - embedding_dim=embedding_dim, - padding_idx=blank_id, - ) - self.blank_id = blank_id - - assert context_size >= 1, context_size - self.context_size = context_size - if context_size > 1: - self.conv = nn.Conv1d( - in_channels=embedding_dim, - out_channels=embedding_dim, - kernel_size=context_size, - padding=0, - groups=embedding_dim, - bias=False, - ) - - def forward(self, y: torch.Tensor, need_pad: bool = True) -> torch.Tensor: - """ - Args: - y: - A 2-D tensor of shape (N, U). - need_pad: - True to left pad the input. Should be True during training. - False to not pad the input. Should be False during inference. - Returns: - Return a tensor of shape (N, U, embedding_dim). - """ - embedding_out = self.embedding(y) - if self.context_size > 1: - embedding_out = embedding_out.permute(0, 2, 1) - if need_pad is True: - embedding_out = F.pad( - embedding_out, pad=(self.context_size - 1, 0) - ) - else: - # During inference time, there is no need to do extra padding - # as we only need one output - assert embedding_out.size(-1) == self.context_size - embedding_out = self.conv(embedding_out) - embedding_out = embedding_out.permute(0, 2, 1) - return embedding_out diff --git a/egs/librispeech/ASR/transducer_stateless_multi_datasets/decoder.py b/egs/librispeech/ASR/transducer_stateless_multi_datasets/decoder.py new file mode 120000 index 000000000..eada91097 --- /dev/null +++ b/egs/librispeech/ASR/transducer_stateless_multi_datasets/decoder.py @@ -0,0 +1 @@ +../transducer_stateless/decoder.py \ No newline at end of file diff --git a/egs/librispeech/ASR/transducer_stateless_multi_datasets/encoder_interface.py b/egs/librispeech/ASR/transducer_stateless_multi_datasets/encoder_interface.py deleted file mode 100644 index 257facce4..000000000 --- a/egs/librispeech/ASR/transducer_stateless_multi_datasets/encoder_interface.py +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang) -# -# See ../../../../LICENSE for clarification regarding multiple authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -from typing import Tuple - -import torch -import torch.nn as nn - - -class EncoderInterface(nn.Module): - def forward( - self, x: torch.Tensor, x_lens: torch.Tensor - ) -> Tuple[torch.Tensor, torch.Tensor]: - """ - Args: - x: - A tensor of shape (batch_size, input_seq_len, num_features) - containing the input features. - x_lens: - A tensor of shape (batch_size,) containing the number of frames - in `x` before padding. - Returns: - Return a tuple containing two tensors: - - encoder_out, a tensor of (batch_size, out_seq_len, output_dim) - containing unnormalized probabilities, i.e., the output of a - linear layer. - - encoder_out_lens, a tensor of shape (batch_size,) containing - the number of frames in `encoder_out` before padding. - """ - raise NotImplementedError("Please implement it in a subclass") diff --git a/egs/librispeech/ASR/transducer_stateless_multi_datasets/encoder_interface.py b/egs/librispeech/ASR/transducer_stateless_multi_datasets/encoder_interface.py new file mode 120000 index 000000000..aa5d0217a --- /dev/null +++ b/egs/librispeech/ASR/transducer_stateless_multi_datasets/encoder_interface.py @@ -0,0 +1 @@ +../transducer_stateless/encoder_interface.py \ No newline at end of file diff --git a/egs/librispeech/ASR/transducer_stateless_multi_datasets/joiner.py b/egs/librispeech/ASR/transducer_stateless_multi_datasets/joiner.py deleted file mode 100644 index 9fd9da4f1..000000000 --- a/egs/librispeech/ASR/transducer_stateless_multi_datasets/joiner.py +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang) -# -# See ../../../../LICENSE for clarification regarding multiple authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import torch -import torch.nn as nn - - -class Joiner(nn.Module): - def __init__(self, input_dim: int, output_dim: int): - super().__init__() - - self.input_dim = input_dim - self.output_dim = output_dim - self.output_linear = nn.Linear(input_dim, output_dim) - - def forward( - self, - encoder_out: torch.Tensor, - decoder_out: torch.Tensor, - encoder_out_len: torch.Tensor, - decoder_out_len: torch.Tensor, - ) -> torch.Tensor: - """ - Args: - encoder_out: - Output from the encoder. Its shape is (N, T, self.input_dim). - decoder_out: - Output from the decoder. Its shape is (N, U, self.input_dim). - Returns: - Return a tensor of shape (sum_all_TU, self.output_dim). 
- """ - assert encoder_out.ndim == decoder_out.ndim == 3 - assert encoder_out.size(0) == decoder_out.size(0) - assert encoder_out.size(2) == self.input_dim - assert decoder_out.size(2) == self.input_dim - - N = encoder_out.size(0) - - encoder_out_list = [ - encoder_out[i, : encoder_out_len[i], :] for i in range(N) - ] - - decoder_out_list = [ - decoder_out[i, : decoder_out_len[i], :] for i in range(N) - ] - - x = [ - e.unsqueeze(1) + d.unsqueeze(0) - for e, d in zip(encoder_out_list, decoder_out_list) - ] - - x = [p.reshape(-1, self.input_dim) for p in x] - x = torch.cat(x) - - activations = torch.tanh(x) - - logits = self.output_linear(activations) - - return logits diff --git a/egs/librispeech/ASR/transducer_stateless_multi_datasets/joiner.py b/egs/librispeech/ASR/transducer_stateless_multi_datasets/joiner.py new file mode 120000 index 000000000..cfc14f0a9 --- /dev/null +++ b/egs/librispeech/ASR/transducer_stateless_multi_datasets/joiner.py @@ -0,0 +1 @@ +../transducer_stateless/joiner.py \ No newline at end of file diff --git a/egs/librispeech/ASR/transducer_stateless_multi_datasets/transformer.py b/egs/librispeech/ASR/transducer_stateless_multi_datasets/transformer.py deleted file mode 100644 index e851dcc32..000000000 --- a/egs/librispeech/ASR/transducer_stateless_multi_datasets/transformer.py +++ /dev/null @@ -1,418 +0,0 @@ -# Copyright 2021 University of Chinese Academy of Sciences (author: Han Zhu) -# -# See ../../../../LICENSE for clarification regarding multiple authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import math -from typing import Optional, Tuple - -import torch -import torch.nn as nn -from encoder_interface import EncoderInterface -from subsampling import Conv2dSubsampling, VggSubsampling - -from icefall.utils import make_pad_mask - - -class Transformer(EncoderInterface): - def __init__( - self, - num_features: int, - output_dim: int, - subsampling_factor: int = 4, - d_model: int = 256, - nhead: int = 4, - dim_feedforward: int = 2048, - num_encoder_layers: int = 12, - dropout: float = 0.1, - normalize_before: bool = True, - vgg_frontend: bool = False, - ) -> None: - """ - Args: - num_features: - The input dimension of the model. - output_dim: - The output dimension of the model. - subsampling_factor: - Number of output frames is num_in_frames // subsampling_factor. - Currently, subsampling_factor MUST be 4. - d_model: - Attention dimension. - nhead: - Number of heads in multi-head attention. - Must satisfy d_model // nhead == 0. - dim_feedforward: - The output dimension of the feedforward layers in encoder. - num_encoder_layers: - Number of encoder layers. - dropout: - Dropout in encoder. - normalize_before: - If True, use pre-layer norm; False to use post-layer norm. - vgg_frontend: - True to use vgg style frontend for subsampling. 
- """ - super().__init__() - - self.num_features = num_features - self.output_dim = output_dim - self.subsampling_factor = subsampling_factor - if subsampling_factor != 4: - raise NotImplementedError("Support only 'subsampling_factor=4'.") - - # self.encoder_embed converts the input of shape (N, T, num_features) - # to the shape (N, T//subsampling_factor, d_model). - # That is, it does two things simultaneously: - # (1) subsampling: T -> T//subsampling_factor - # (2) embedding: num_features -> d_model - if vgg_frontend: - self.encoder_embed = VggSubsampling(num_features, d_model) - else: - self.encoder_embed = Conv2dSubsampling(num_features, d_model) - - self.encoder_pos = PositionalEncoding(d_model, dropout) - - encoder_layer = TransformerEncoderLayer( - d_model=d_model, - nhead=nhead, - dim_feedforward=dim_feedforward, - dropout=dropout, - normalize_before=normalize_before, - ) - - if normalize_before: - encoder_norm = nn.LayerNorm(d_model) - else: - encoder_norm = None - - self.encoder = nn.TransformerEncoder( - encoder_layer=encoder_layer, - num_layers=num_encoder_layers, - norm=encoder_norm, - ) - - # TODO(fangjun): remove dropout - self.encoder_output_layer = nn.Sequential( - nn.Dropout(p=dropout), nn.Linear(d_model, output_dim) - ) - - def forward( - self, x: torch.Tensor, x_lens: torch.Tensor - ) -> Tuple[torch.Tensor, torch.Tensor]: - """ - Args: - x: - The input tensor. Its shape is (batch_size, seq_len, feature_dim). - x_lens: - A tensor of shape (batch_size,) containing the number of frames in - `x` before padding. - Returns: - Return a tuple containing 2 tensors: - - logits, its shape is (batch_size, output_seq_len, output_dim) - - logit_lens, a tensor of shape (batch_size,) containing the number - of frames in `logits` before padding. - """ - x = self.encoder_embed(x) - x = self.encoder_pos(x) - x = x.permute(1, 0, 2) # (N, T, C) -> (T, N, C) - - # Caution: We assume the subsampling factor is 4! - lengths = ((x_lens - 1) // 2 - 1) // 2 - assert x.size(0) == lengths.max().item() - - mask = make_pad_mask(lengths) - x = self.encoder(x, src_key_padding_mask=mask) # (T, N, C) - - logits = self.encoder_output_layer(x) - logits = logits.permute(1, 0, 2) # (T, N, C) ->(N, T, C) - - return logits, lengths - - -class TransformerEncoderLayer(nn.Module): - """ - Modified from torch.nn.TransformerEncoderLayer. - Add support of normalize_before, - i.e., use layer_norm before the first block. - - Args: - d_model: - the number of expected features in the input (required). - nhead: - the number of heads in the multiheadattention models (required). - dim_feedforward: - the dimension of the feedforward network model (default=2048). - dropout: - the dropout value (default=0.1). - activation: - the activation function of intermediate layer, relu or - gelu (default=relu). - normalize_before: - whether to use layer_norm before the first block. 
- - Examples:: - >>> encoder_layer = TransformerEncoderLayer(d_model=512, nhead=8) - >>> src = torch.rand(10, 32, 512) - >>> out = encoder_layer(src) - """ - - def __init__( - self, - d_model: int, - nhead: int, - dim_feedforward: int = 2048, - dropout: float = 0.1, - activation: str = "relu", - normalize_before: bool = True, - ) -> None: - super(TransformerEncoderLayer, self).__init__() - self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=0.0) - # Implementation of Feedforward model - self.linear1 = nn.Linear(d_model, dim_feedforward) - self.dropout = nn.Dropout(dropout) - self.linear2 = nn.Linear(dim_feedforward, d_model) - - self.norm1 = nn.LayerNorm(d_model) - self.norm2 = nn.LayerNorm(d_model) - self.dropout1 = nn.Dropout(dropout) - self.dropout2 = nn.Dropout(dropout) - - self.activation = _get_activation_fn(activation) - - self.normalize_before = normalize_before - - def __setstate__(self, state): - if "activation" not in state: - state["activation"] = nn.functional.relu - super(TransformerEncoderLayer, self).__setstate__(state) - - def forward( - self, - src: torch.Tensor, - src_mask: Optional[torch.Tensor] = None, - src_key_padding_mask: Optional[torch.Tensor] = None, - ) -> torch.Tensor: - """ - Pass the input through the encoder layer. - - Args: - src: the sequence to the encoder layer (required). - src_mask: the mask for the src sequence (optional). - src_key_padding_mask: the mask for the src keys per batch (optional) - - Shape: - src: (S, N, E). - src_mask: (S, S). - src_key_padding_mask: (N, S). - S is the source sequence length, T is the target sequence length, - N is the batch size, E is the feature number - """ - residual = src - if self.normalize_before: - src = self.norm1(src) - src2 = self.self_attn( - src, - src, - src, - attn_mask=src_mask, - key_padding_mask=src_key_padding_mask, - )[0] - src = residual + self.dropout1(src2) - if not self.normalize_before: - src = self.norm1(src) - - residual = src - if self.normalize_before: - src = self.norm2(src) - src2 = self.linear2(self.dropout(self.activation(self.linear1(src)))) - src = residual + self.dropout2(src2) - if not self.normalize_before: - src = self.norm2(src) - return src - - -def _get_activation_fn(activation: str): - if activation == "relu": - return nn.functional.relu - elif activation == "gelu": - return nn.functional.gelu - - raise RuntimeError( - "activation should be relu/gelu, not {}".format(activation) - ) - - -class PositionalEncoding(nn.Module): - """This class implements the positional encoding - proposed in the following paper: - - - Attention Is All You Need: https://arxiv.org/pdf/1706.03762.pdf - - PE(pos, 2i) = sin(pos / (10000^(2i/d_modle)) - PE(pos, 2i+1) = cos(pos / (10000^(2i/d_modle)) - - Note:: - - 1 / (10000^(2i/d_model)) = exp(-log(10000^(2i/d_model))) - = exp(-1* 2i / d_model * log(100000)) - = exp(2i * -(log(10000) / d_model)) - """ - - def __init__(self, d_model: int, dropout: float = 0.1) -> None: - """ - Args: - d_model: - Embedding dimension. - dropout: - Dropout probability to be applied to the output of this module. - """ - super().__init__() - self.d_model = d_model - self.xscale = math.sqrt(self.d_model) - self.dropout = nn.Dropout(p=dropout) - # not doing: self.pe = None because of errors thrown by torchscript - self.pe = torch.zeros(1, 0, self.d_model, dtype=torch.float32) - - def extend_pe(self, x: torch.Tensor) -> None: - """Extend the time t in the positional encoding if required. - - The shape of `self.pe` is (1, T1, d_model). 
The shape of the input x - is (N, T, d_model). If T > T1, then we change the shape of self.pe - to (N, T, d_model). Otherwise, nothing is done. - - Args: - x: - It is a tensor of shape (N, T, C). - Returns: - Return None. - """ - if self.pe is not None: - if self.pe.size(1) >= x.size(1): - self.pe = self.pe.to(dtype=x.dtype, device=x.device) - return - pe = torch.zeros(x.size(1), self.d_model, dtype=torch.float32) - position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1) - div_term = torch.exp( - torch.arange(0, self.d_model, 2, dtype=torch.float32) - * -(math.log(10000.0) / self.d_model) - ) - pe[:, 0::2] = torch.sin(position * div_term) - pe[:, 1::2] = torch.cos(position * div_term) - pe = pe.unsqueeze(0) - # Now pe is of shape (1, T, d_model), where T is x.size(1) - self.pe = pe.to(device=x.device, dtype=x.dtype) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - """ - Add positional encoding. - - Args: - x: - Its shape is (N, T, C) - - Returns: - Return a tensor of shape (N, T, C) - """ - self.extend_pe(x) - x = x * self.xscale + self.pe[:, : x.size(1), :] - return self.dropout(x) - - -class Noam(object): - """ - Implements Noam optimizer. - - Proposed in - "Attention Is All You Need", https://arxiv.org/pdf/1706.03762.pdf - - Modified from - https://github.com/espnet/espnet/blob/master/espnet/nets/pytorch_backend/transformer/optimizer.py # noqa - - Args: - params: - iterable of parameters to optimize or dicts defining parameter groups - model_size: - attention dimension of the transformer model - factor: - learning rate factor - warm_step: - warmup steps - """ - - def __init__( - self, - params, - model_size: int = 256, - factor: float = 10.0, - warm_step: int = 25000, - weight_decay=0, - ) -> None: - """Construct an Noam object.""" - self.optimizer = torch.optim.Adam( - params, lr=0, betas=(0.9, 0.98), eps=1e-9, weight_decay=weight_decay - ) - self._step = 0 - self.warmup = warm_step - self.factor = factor - self.model_size = model_size - self._rate = 0 - - @property - def param_groups(self): - """Return param_groups.""" - return self.optimizer.param_groups - - def step(self): - """Update parameters and rate.""" - self._step += 1 - rate = self.rate() - for p in self.optimizer.param_groups: - p["lr"] = rate - self._rate = rate - self.optimizer.step() - - def rate(self, step=None): - """Implement `lrate` above.""" - if step is None: - step = self._step - return ( - self.factor - * self.model_size ** (-0.5) - * min(step ** (-0.5), step * self.warmup ** (-1.5)) - ) - - def zero_grad(self): - """Reset gradient.""" - self.optimizer.zero_grad() - - def state_dict(self): - """Return state_dict.""" - return { - "_step": self._step, - "warmup": self.warmup, - "factor": self.factor, - "model_size": self.model_size, - "_rate": self._rate, - "optimizer": self.optimizer.state_dict(), - } - - def load_state_dict(self, state_dict): - """Load state_dict.""" - for key, value in state_dict.items(): - if key == "optimizer": - self.optimizer.load_state_dict(state_dict["optimizer"]) - else: - setattr(self, key, value) diff --git a/egs/librispeech/ASR/transducer_stateless_multi_datasets/transformer.py b/egs/librispeech/ASR/transducer_stateless_multi_datasets/transformer.py new file mode 120000 index 000000000..e43f520f9 --- /dev/null +++ b/egs/librispeech/ASR/transducer_stateless_multi_datasets/transformer.py @@ -0,0 +1 @@ +../transducer_stateless/transformer.py \ No newline at end of file From 3ec219dfa0062a4a41765fbde7a3e2a6041f2ec0 Mon Sep 17 00:00:00 2001 From: Fangjun Kuang Date: 
Subject: [PATCH 17/25] Add stateless transducer tutorial. (#235)

* WIP: Add stateless transducer tutorial.
* Add more doc.
* Minor fixes.
---
 docs/source/conf.py | 3 +
 docs/source/installation/images/README.md | 4 +
 .../images/k2-gt-v1.9-blueviolet.svg | 1 +
 .../images/k2-v1.9-blueviolet.svg | 1 -
 .../images/python-3.6_3.7_3.8_3.9-blue.svg | 1 -
 .../images/python-gt-v3.6-blue.svg | 1 +
 ....0_1.7.0_1.7.1_1.8.0_1.8.1_1.9.0-green.svg | 1 -
 .../images/torch-gt-v1.6.0-green.svg | 1 +
 docs/source/installation/index.rst | 6 +-
 docs/source/recipes/aishell.rst | 10 -
 ...cer_stateless_modified-tensorboard-log.png | Bin 0 -> 451920 bytes
 docs/source/recipes/aishell/index.rst | 22 +
 .../recipes/aishell/stateless_transducer.rst | 714 ++++++++++++++++++
 docs/source/recipes/index.rst | 14 +-
 docs/source/recipes/librispeech.rst | 10 -
 docs/source/recipes/librispeech/index.rst | 8 +
 docs/source/recipes/timit.rst | 10 -
 docs/source/recipes/timit/index.rst | 9 +
 docs/source/recipes/timit/tdnn_ligru_ctc.rst | 2 +-
 .../images/tdnn-tensorboard-log.png} | Bin
 docs/source/recipes/yesno/index.rst | 7 +
 .../recipes/{yesno.rst => yesno/tdnn.rst} | 6 +-
 egs/aishell/ASR/README.md | 2 +-
 egs/librispeech/ASR/README.md | 2 +-
 egs/timit/ASR/README.md | 4 +-
 egs/yesno/ASR/README.md | 2 +-
 26 files changed, 788 insertions(+), 53 deletions(-)
 create mode 100644 docs/source/installation/images/README.md
 create mode 100644 docs/source/installation/images/k2-gt-v1.9-blueviolet.svg
 delete mode 100644 docs/source/installation/images/k2-v1.9-blueviolet.svg
 delete mode 100644 docs/source/installation/images/python-3.6_3.7_3.8_3.9-blue.svg
 create mode 100644 docs/source/installation/images/python-gt-v3.6-blue.svg
 delete mode 100644 docs/source/installation/images/torch-1.6.0_1.7.0_1.7.1_1.8.0_1.8.1_1.9.0-green.svg
 create mode 100644 docs/source/installation/images/torch-gt-v1.6.0-green.svg
 delete mode 100644 docs/source/recipes/aishell.rst
 create mode 100644 docs/source/recipes/aishell/images/aishell-transducer_stateless_modified-tensorboard-log.png
 create mode 100644 docs/source/recipes/aishell/index.rst
 create mode 100644 docs/source/recipes/aishell/stateless_transducer.rst
 delete mode 100644 docs/source/recipes/librispeech.rst
 create mode 100644 docs/source/recipes/librispeech/index.rst
 delete mode 100644 docs/source/recipes/timit.rst
 create mode 100644 docs/source/recipes/timit/index.rst
 rename docs/source/recipes/{images/yesno-tdnn-tensorboard-log.png => yesno/images/tdnn-tensorboard-log.png} (100%)
 create mode 100644 docs/source/recipes/yesno/index.rst
 rename docs/source/recipes/{yesno.rst => yesno/tdnn.rst} (99%)

diff --git a/docs/source/conf.py b/docs/source/conf.py
index 599df8b3e..88522ff27 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -33,6 +33,7 @@ release = "0.1"
 # ones.
 extensions = [
     "sphinx_rtd_theme",
+    "sphinx.ext.todo",
 ]
 
 # Add any paths that contain templates here, relative to this directory.
@@ -74,3 +75,5 @@ html_context = {
     "github_version": "master",
     "conf_py_path": "/icefall/docs/source/",
 }
+
+todo_include_todos = True
diff --git a/docs/source/installation/images/README.md b/docs/source/installation/images/README.md
new file mode 100644
index 000000000..97c1e993c
--- /dev/null
+++ b/docs/source/installation/images/README.md
@@ -0,0 +1,4 @@
+
+# Introduction
+
+ is used to generate files in this directory.
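The two docs/source/conf.py hunks above enable Sphinx's built-in todo extension for the new tutorial pages. As a quick reference, a minimal sketch of the resulting settings is shown below; only the lines this patch touches are listed, and the rest of conf.py is assumed to stay exactly as in the diff.

    # docs/source/conf.py (only the settings this patch touches)
    # "sphinx.ext.todo" is bundled with Sphinx, so it adds no new dependency.
    extensions = [
        "sphinx_rtd_theme",
        "sphinx.ext.todo",
    ]

    # Sphinx omits ".. todo::" directives from the rendered output by default;
    # setting this to True makes them appear as admonitions in the built docs.
    todo_include_todos = True

With these settings, a ".. todo::" note placed in the new stateless_transducer.rst tutorial (or any other .rst page) shows up in the generated HTML instead of being dropped.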
diff --git a/docs/source/installation/images/k2-gt-v1.9-blueviolet.svg b/docs/source/installation/images/k2-gt-v1.9-blueviolet.svg
new file mode 100644
index 000000000..534b2e534
--- /dev/null
+++ b/docs/source/installation/images/k2-gt-v1.9-blueviolet.svg
@@ -0,0 +1 @@
+k2: >= v1.9k2>= v1.9
\ No newline at end of file
diff --git a/docs/source/installation/images/k2-v1.9-blueviolet.svg b/docs/source/installation/images/k2-v1.9-blueviolet.svg
deleted file mode 100644
index 5a207b370..000000000
--- a/docs/source/installation/images/k2-v1.9-blueviolet.svg
+++ /dev/null
@@ -1 +0,0 @@
-k2: v1.9k2v1.9
\ No newline at end of file
diff --git a/docs/source/installation/images/python-3.6_3.7_3.8_3.9-blue.svg b/docs/source/installation/images/python-3.6_3.7_3.8_3.9-blue.svg
deleted file mode 100644
index befc1e19e..000000000
--- a/docs/source/installation/images/python-3.6_3.7_3.8_3.9-blue.svg
+++ /dev/null
@@ -1 +0,0 @@
-python: 3.6 | 3.7 | 3.8 | 3.9python3.6 | 3.7 | 3.8 | 3.9
diff --git a/docs/source/installation/images/python-gt-v3.6-blue.svg b/docs/source/installation/images/python-gt-v3.6-blue.svg
new file mode 100644
index 000000000..4254dc58a
--- /dev/null
+++ b/docs/source/installation/images/python-gt-v3.6-blue.svg
@@ -0,0 +1 @@
+python: >= 3.6python>= 3.6
\ No newline at end of file
diff --git a/docs/source/installation/images/torch-1.6.0_1.7.0_1.7.1_1.8.0_1.8.1_1.9.0-green.svg b/docs/source/installation/images/torch-1.6.0_1.7.0_1.7.1_1.8.0_1.8.1_1.9.0-green.svg
deleted file mode 100644
index 496e5a9ef..000000000
--- a/docs/source/installation/images/torch-1.6.0_1.7.0_1.7.1_1.8.0_1.8.1_1.9.0-green.svg
+++ /dev/null
@@ -1 +0,0 @@
-torch: 1.6.0 | 1.7.0 | 1.7.1 | 1.8.0 | 1.8.1 | 1.9.0torch1.6.0 | 1.7.0 | 1.7.1 | 1.8.0 | 1.8.1 | 1.9.0
diff --git a/docs/source/installation/images/torch-gt-v1.6.0-green.svg b/docs/source/installation/images/torch-gt-v1.6.0-green.svg
new file mode 100644
index 000000000..d3ece9a17
--- /dev/null
+++ b/docs/source/installation/images/torch-gt-v1.6.0-green.svg
@@ -0,0 +1 @@
+torch: >= 1.6.0torch>= 1.6.0
\ No newline at end of file
diff --git a/docs/source/installation/index.rst b/docs/source/installation/index.rst
index 0f846c77c..a8c3b6865 100644
--- a/docs/source/installation/index.rst
+++ b/docs/source/installation/index.rst
@@ -15,13 +15,13 @@ Installation
 .. |device| image:: ./images/device-CPU_CUDA-orange.svg
   :alt: Supported devices
 
-.. |python_versions| image:: ./images/python-3.6_3.7_3.8_3.9-blue.svg
+.. |python_versions| image:: ./images/python-gt-v3.6-blue.svg
   :alt: Supported python versions
 
-.. |torch_versions| image:: ./images/torch-1.6.0_1.7.0_1.7.1_1.8.0_1.8.1_1.9.0-green.svg
+.. |torch_versions| image:: ./images/torch-gt-v1.6.0-green.svg
   :alt: Supported PyTorch versions
 
-.. |k2_versions| image:: ./images/k2-v1.9-blueviolet.svg
+.. |k2_versions| image:: ./images/k2-gt-v1.9-blueviolet.svg
   :alt: Supported k2 versions
 
 ``icefall`` depends on `k2 `_ and
diff --git a/docs/source/recipes/aishell.rst b/docs/source/recipes/aishell.rst
deleted file mode 100644
index 71ccaa1fc..000000000
--- a/docs/source/recipes/aishell.rst
+++ /dev/null
@@ -1,10 +0,0 @@
-Aishell
-=======
-
-We provide the following models for the Aishell dataset:
-
-.. toctree::
-   :maxdepth: 2
-
-   aishell/conformer_ctc
-   aishell/tdnn_lstm_ctc
diff --git a/docs/source/recipes/aishell/images/aishell-transducer_stateless_modified-tensorboard-log.png b/docs/source/recipes/aishell/images/aishell-transducer_stateless_modified-tensorboard-log.png
new file mode 100644
index 0000000000000000000000000000000000000000..6c84b28f2f667b24a4968c8992ab24c831df2bac
GIT binary patch
literal 451920
[base85-encoded payload of the new PNG (451920 bytes; a TensorBoard training-log screenshot for the Aishell transducer_stateless_modified recipe) omitted]
zEXFn&VKBkFrYo3}m2}4x?m>)jDSU~hwB2uLJ6F>z;~1+5RVyy3cpOMV?%}LH)2BKF zZ+j`Kg&pCuieKZWqlCY>9<-uXo(Rh{9FpBgJ+#A)PKs8%G3FRDX@^Yfs_J*V)4bE0 zbs|*S*x5e9jWoXRvK!B)AtiQHKsx!X;QPCNWr+)oN=yDE&Ak#qYcAufFp8@-z?gUq z^{>R?!crwdmSk46*`PZ8y-#a%iuP~mTJ9sMj=!0myw%jN3YwwH`UD&IX+oD|k+}4K zl!bU&2L3rL{vIa3ebBV7?&jLChxW$bReb5>+GsSnzY<1oFL7BBj5^uQI_4qotjdni zkzCR`ui5Sa3KQ%RxX;(SdpO#(Qmc48BpH_E+I+a~jf^FN^9IfR zepI(g#H+HoZZE@sjrx*$vpAoZSobP8r&bTqIgQeN@jL35C-P7!t@ zfFYvrnrhwtDNc>KB<{n^=BMHHfEd>}< zxEUnvif()sVRIicmASS|+9C;SY6cB;t^~Lp1iilhjDS>p$F{R8-;j~CUXPb_$it7R z3X^_Tb;iEq&g>Pa1sMfS{OH^j<&}xEeil|ohMRwpn8G#^JZppndH(<-H9RWW4uuY7FAG}%JxZz6QY8kyt8wVgQIhve!9mKxT8RK z*ooV5O6G1QPdU><8-?=J1-h-7X;SSG3a>Hl^Tlkbe2virr~-`nNla!4bvF zzC3KeAEJTJpG^C=au-)l(VO69yx|}#mxiSUUy($lq%~{P2aS#Xi#+MT+#7iSggb1_ zDSFWJ?38Y4^$o-nGFof~rexufa*EUA{>@Z*1b8$OfWox8dL=(CAJ|0vrc(ghpK%~o z7Gvm>XRez#>A|f&8Rax|l8HS1(dLze(vu)f>_mm1_g!;oxdfGXT=Z4)yX&_A|v!#dt_APsO` zTiYTOx0{j+UTPd$2BO(&OZlQ7)&iZKt0uAscuYy?M(32`Js0t7a>fVR^jyU$`^6PK zpY;oZ+{ivgjw{kD;ym&^r8U1Ab1ZJV|MI4n4@Ks5X2gX97A#Czy@<=+j?eH*#jQTH zKFsAwQ+WPLKj^wIGZPu#L@dbb+`HdfR_83UI>!RW{SvDX*b)bt(73&>u!H$?E8`&p zYJO;M%(z=dny-R=)lNzUBva~mX3luHR`W!gwc&)vW%HxYNH`bg$-;R^xLLqnJg}c) z5<-u^n8&3NfZWvBpeABHmv0)e=!(-V`p#Q;UBuGJ%Z$0|J$$Lr$$(jukNWc!W#y-ppVgi!oxk(^ z*`rI1ScUc*j)9N{yLZn#FeeAlAH~HQ8IVIC@T^~xN~LT_-1R z`<{@A#*s7=x*fLrf41aJETQ4q%h$}_roVtgghtmU2MW?_Ee9EuF{aE+41n9q69ilCWs5?J`!FeWRvFB`&3H|m^!$@2z}^E zyq^TlP4QU?ssxu9${=?y5srUc_P1&@FL>mG?*x0nh|NJ?V*~BJoj_L`Ni{`wx(ew| zPDy2&hT7ibRr3<3nA$ac79E!WC2J=?)UmWRgwLCvNH5Ur?*;=GnoYU}5&j^=zNdLv z3EX^rhJyNUULM7@ob>(Kme#KhxOHCaV)VV94f%}IPXIm$a;XLt>4ef8dUuVFP0@N? zKCfJR#0R_>9AzX^M|YkChOFOk$=W>}lYtjC(90HE{y#4}@jhX&Lf>_NoRw=H!lF>| zBhb?FO|E&!QIij1>4~Ixuy%vnpNS+e#2}NOeQ)g*+g2Zo9{jOK19qfp%UKilSgF>S zKBV2P`6rkLJp#Uj&YWERfh#dCt}O-C&p}yjdECn7GqMJ3jBnWT@g7=4DZrK+62}%% zUrIaI!|;lezy7-D`$KVL=i`qyzI_3)Yx}q~Y`ywIz({ymDqno2C+?Q-@{ORyofRD9 z1Zq+@wnr#$5SR1RpUO!I8jB0+N;)o=vC;BAETr6<{_zAwPPOr)!@ZJC9DiV^Cb)du z)S#}Zo00;_t+(HQ^Y@p!#rM`D<)MDXCqMuYPa+^{fsn%>fR_|zlXB1Q|0do`b#iLu zPv^p6v%+BUipS$X0p#>;z1e;!SFhgFMi$`d;{gSD8{;L@5UK8p?&8S`9xjtP-k*0N zqH5JT_5p6*1Oaj4$|38!!^|6X>4p_1gqR^{RNWn4v@HdwqXPW={4(DaU9%KIj^Mx? 
zx4vi=;q~OOqfM0{K-wg4)gaAqq|00i(?Ho}U94XQTOBQ)0-$TofD@puEI8HacfUF4 zhS$;lW@r{LH0_t;D8vTfsDH=9anQY^?Lm^+kacJ5m9Z80$^JaO?Cu}k5M^Y}lc+;4 zqYA6YQK;z@tC+oo%Vc#0hwpf+g!8zCjD9LE_~ln#OCZP;g9r_|=QL8FNhGb*{&uSZ z&etR_*NMs>;G)mLeD)d0a!aTq5@|j~uXr>=_R-kVTi$M`SKPt+0|)da_Q0LxAqm%M zsbWykXP@=yi86hJFyd&~TRnzFcn^6s)H>F0>lY6xXyyY?<^~x!MjK3d4a$3UUZQ=O zEL20Uu-}reo|d$2{HxKl!S_Y+mupL`ZJW7Szy)m%0nGJ#KpS~-ZK8^NPhGZ*1bG(A zI##61gK%wodlA5=X9XYu`bPgwUodbXkAua*Asb@>A%`mk5?)JcC+Q`|wF7P=KD91W zW2#94H{=^_=9)8Pd>{z=owa5EzJ^soTzLNF zM_dsx>SLOO%W9TrNW2pcSw=(>0kmBTMH9rJ82fn8whog^iu8+Fx8u+y2xLqF&UFjB zbdP7%PR&Wk5<_dCkjU&$dC;C!n>h%4)CYd(Q+oGKh|ceAIv(8=?5#C6^2ZCxnh2~2 z;Qzc~%`!7)Pk@leE2wV77I>lcEsGfC@5f)33PF z$*@?9yMYhgapDsuSy$A-Sde)Vuy}#(;1C(Ax;+ZrdQ~H{SQ0=&;b%sO-bBlR*`uA_ zlcxO;L~j2g$*{?GwAOjLQS1=!AYn7kUcB15?B8`udP>n#H;q1U)ytfj5J#_3t7%T^ zY34YH==US4uh1Og`KE@jl9jBNX*eH`fPU*LrdV4CY7&X#ox2F5n#o0%+NIIb#SZmp zLWS-?z*82c!*5c{vTR5{xS;Py84e-nZ9e7z8jWjqH}B<3C7I@hP$|fuPxQ7X3X}y& zGxfhD{I;$di`hkjQ8nqK4s&}=*`E@NlgJulWEtxn3Xn=L^qXCTVI`67C6khIilgtqk&o)M4i4Sqdh9WA7Oyn)Xb_ zk~QMw(O9ybVf>WJ{>{a9^K>jR9$y!*7%!@CfS~G}sFfjf&ID9>Fml4U2H31Xo+wk(700(0aiXJ=Br# z*bkBHJ*e-4^?!v8DQUeWo@Z*E+}^~(a{My*-BqgJvKFu6-PWcCL(7?CLSe(}C~!zE zjc-a2T8Gjqx!>4@tHI7=sV)*WY3jEtvsk)hmRK@+SG^=SR!2JOeqjy-7Po#-8bUm{ zO`1*1TC-yrE;Xqt5tp*JlU>T~3|QY)JFnbM%H2bQ*CuPHu;~rgF33x?o(M(N`6-i^ z#Mj7n_a+*PUb<{Hg4e&HfIh#tlUIN)(HI+=1RCvu?_W8c+H zW%sKEPhx9pl!jgrtS8W+{`5rjQU5?A@wwA5464Dc*-)S^Y~1V~%MoO#>x4%~xKn}K+{xV;B(adB|B7ew!ky`4*y^VHyk{L}DpFn-^f#-?o$i&Isl$*3-#v2Eu zc~}!Y?Yl%8FJ6NpUx1Fj|Hxq67f9ailbM%T#T`Kdb(ReZ%ohOm>69m>7V`jYYdi47 z`=gpXKsUxqy$58O4^e}1+iJSt7m+7d(vRSzyZ6$di$3T^UDqE zcQYECfss5qn8h|);T_8+`!Gs+W;IZuhF~Cv3>Ur%sH{}{Hd`m#OrAiMB*FrK>f2>) zyuu?bczi2A&2?*vX$LF?QZgiK=CuA^hvS5}{6RMl0F-w3M813(LtmyXPd$ z)Q26J0ZTU&zpgt(mOdG2cs#o@lMm@SG%wc{*$3vj*k#@<&17qeXxqNhq##)9hCwqu zii2&qYKwpdGbvUaffGtqjBvBmq&U8kVkdBEce+qHzKj4LENeboH6iep3iky#A=>2V zCxxMr8%FKzzBRF~s%s1kF?-+^=*z4xC38YH&J*-bYv}nvWl5}&9%b8yb9Pg2TdO=v z>CIQyj7OmX+viWj&o}$}DhWmo*@k@L(4Y<3HJk>k$`Lt{LHOhm8f0ZKST=ps^;4+|ziX^ek?#tb_2QJ4 zf&UUNm{JH>W)B*n++h+^cV_kaMmq%c3NPR0lI>)zfqWUc`#|J}*4&h-%969DiPpYb zSB7slGZT2+Vf>|TaAf1p_Da zEmKp%I0d<0OpL@p)s$Iid@Se6OgDroR+?9ieh)RXn$N*N7%Ii}{1{&(!#9X6-^~KO zNe)78f$;%@$2g|~#{2Bja!7&2k2WhuuxZO)dQ?2jbPvi7Ck@&Od(WdeI7k zwLrJDDIfcUIkE1a(-=4r+aMe}eT)qzu@$ekWk~K4uG1y&cn$pQ*!xo-@!1uz-oK~k zgjowS$+egX6S{7NsPlVg^v4K8lq$?OJu-3P(itWG?raEx6GQU6GB2YLG>@Xj<{Crr zZ*lb0=GKY?KQKpf9VD){&MEqW{;2fkjcB^;gyng=OB=JkrSWL}!%&N;3u7zj_L`Jp zLC>?3JhJqamz^XJcZbMKp7Pk%JWsSHkhSCtL^hcF_*)**1Vny8jD!h(~dx6UH5uNaY?mdM+J#s z&zz&*Oqrp3pL33JP27VpowW82;DN&>-vK6ipHSc2l+@Tu>vunynVE>e^m2}Xe+n+j z%^AUo9@7ezs6l>-aSoNejg#&fIU-Tbn#D?ebc>M*u)4%M_o#T`Z~7@_aC#{^1XEV3 z?CekO$<^hXZUs+)qF%B6Z8-*15J$f#KuoAhoNsauA<Bk-DSYkzPt z8d5f34A#JnmE!mnAyx&C184&8j~6_8We>Rnd7brbKna0k@Yf`)r z4zQ0_q8jr%-BGnjxhGn>%(~1evUBgbgUfYCuQGQA0i%GNMZAdnTM#pLWGi!nt*dT5!eUrjFG?a@j`GxGvQRh&+7uVmwe|w@EkHMe!lxM z*v9~T2r}3KK&ozw(} zBB7=@{<&4wQMmVt6|nQAnU8VZ;)N_iz{Q6ixIabKl^^y-_CV@g`UXZhh8+5xkn<(0 z_3&%p#nGA~&th1l-uSlA+BQR6+Ww6}2-!N+SLhyX zG!2p64(C<}kByb~&{Y@9VDIR=RJNX#HOBUbtAcT$jhA3FQ(s`UuJ7*n%Pc3S2C!e%@xGU{u(jfe~D(&RV!1rCzy09%%z3Zuyj6foeRo5p`8Rfw0b%iKg< zrzT2vuLcbL=9Csg+#Y>aBXqvtapa>?-6>-Cr0?(hmrIh0zZacB+PQ_y^EC)d=5X@)kCKqmapDNHjjBCL&{c@uvQ4Rb1Ik z8NVK@pn|mGSIciA;lOxrPe-WNMJ4TwUTBbBWfF_HMeE$?l#}ahMov9y*~2 z|D_)aehqlTS__uWZ{)TvF_tO*j+1lCG9mpq%zeFfj%Q~N8FDmABCUQcI&?~V{GP@F z7Y2_S9SEt5#D`j3MwF;RZ`}#d#F@c7e+{D73?z?TN&2-vpwYOWx2jzBoy-HP#s;>= zi_;E5cDqV86(>hn=r%F&o?wHt3@wZ=Ixn~1W7JTk=^!>NnU^6*ndsXb=sXbjDUx84 za)iNNhQVVY6QlAlxv89&mA@^;44|stXw;D2h}yQzydaILTbi0X$g&{ph8OH8j}fdD 
zI@cF|Z00)VD@mLZuc8s}O+nucL6KhMxS{m@8pGf=^;$upmwlQwvt-Aqwu|dmzn2QT z%t89rSJ7;UsXvxpczHiWiD@(17lU|bdc5T^yLGgWl>9~Kmqje3m4KnIIN;W=lB;Jd-A?hXnMCSPp;T$+heh*@MTnm2gJDfmpUJm0qK%8S0 z!%PSTIrJ(v?*mTfB6OFu`-^LjM^RU8Dodui>Q&Zt{L?3q;~3x~l>;1+H0HRhYN>Pq}zo>8$$NOV9eCCgiNg`HGrZed#{P0UK* zW8G!l@Fz9)`yg!L%?yaZD!dHP^i4=BCYd37h9f&@N6J!Oh)B+xs39+HpU0-=h24k^7|hzuJ=x3Smdp-skQ zpZ-dnFJ*5chpt_Y?woK4n^+CpEwZq#zW=;tc83&)Fz5aWDI)0+ zb*qc`Hh{G0+8~cnzdo}BI2J4@^jr=QYs?RH&(5e(DtE=QVIy(_FI;^xDMQbpd&^in z(%{Mapx@a_3Kqq{+$8PD@jN`BrtB!~{@ie(j^^_&C2H@WuA&rCo;w-0%%xS*9>ZyH z&x+R^gDeLdLlBJx}}#eDCmB7nk3*laJn?3!V-<9*zrc7aJl6iwV^dQ^vy zXW~;vw@lDTEfLOtr)ULs3S}_<~}6|H_WY*xWQvFw>};M^4p8y^=pnn0Obu zH3;8gI7by>2)IPD=-_e=U%hq{Y_;R23`xJitl4v+!WFVQmh6%`EUn+w%@XMhF4eo$ zHPXPj)CR3DepP&kiQs9*@jrf6d`pAm+)O4`63Nd!60Mk>L(AJ6TzfWrd>2xKrasiN zfA#vqz$9o~|J@W*6_Hr_jYQu3PU}8Eb}S;4@uem=Co~*l`YLGgsWFwsocD@*R?UdM zZc}Ekxh`>ET#Ih0CtmP`BQJcOFFFn{j|%i9^>NiE(wcCxTr@dMbU5CxBZ1iVf1WoF z-yI9*@QSn^SzCRRix@ABYMyN_!5q9(IyHU$obBN%WL6=L?yCUrBu+zL zxr*Se>rb-v)1cXVW}G5q!t6=l^spVsw;SNH5>-4In#;eUpFj$q51OjvLN2AkoZ70+Gf*pbWHfA7jqkH)l z*>j)th&i<^fP9|cJP!m_;KuBCqC?z2n)}7l<Z5gKH3cysYeBn##z(?jAfr44n%U zKB^v=G#%a%AqQUZ6e{}li*!JEh3E0V{7*ddUg)rvI-zx%-W(Jup zRuB5oz|jQ~N3&fN>7J7$={mj4PsewYmBQ7C>l?&pZ*5{kY_z761~~f-c@0Q+=^tMr zy&YUfHIN`7kTZqR;W>*1xrGoT$Y%>HT`-7pnW+aPr7OsKp)-v{%)@N6F$tPCH`0$hBaT+`MZH`3H6O|db_7%FsGrpU!@)5ZlxaON zj^^sy&=vHPN3w@~jt!a2(#S&XcBc3GBOxUuL*@u(rdX#F0PN`u3}?TQ7N~HZ4{S)x zjS-5L{Z5|%(J4SvoeCBNM!po^weg{>mveDdw*6Zp!%j_5el;JiH@T2U5Y^^Lmm%yj zAp8Zp=U-7L$znTp#|I1A$VIz(^aDQbgUapt& z#1(z@NS6xSulriF)U0Y%=uYhy^MOIGCnxel=&(>7=QChZ!Bpt>*|LM4FJ0-#?l?qq zK<1NU_^cmp_=p)1YJ6VVjqR)C`@#s+=$xSWCo!X;hrl_EvIicTBKv4fc!0b)V4COb zo#l{U1aV_c2wn983k2iyXZ4_}FB;|}Mnb?tB6|BEYV8qQ0)a0mbF+;5X!R_8JP|DL z1)K^h&@?}pJX8=Hi$7?o8<3tYWdK^-PbJlN<@m|V)EYWf|8{qlo zv&RXu_$cDW$Y+)IKA;Y?i}SJ^4r{dAMK$?%@Z)pSjy$cjK&@z`>+w&0SGl$7rjmJ% z2?AVB_MTx*PsX5`+@C0#!H{KpCr{_SFqS(jnRY(#gglDr`1G+sg5T^HK^HFlaH&bA zeN(BBdU}3>&(EZ=OBJnPs^j*^#uyv?b>sTYM5pPmD6YZS%QlX|U?f;vjm$BI{~{XP zF~00klPMs6vEk*C)b}xaae|#*495tYVe># zt)|f{kF(~nT*nGhGjCD&kEF^h;Zl0MpMK53`j~u3-9uLRaq*kn6?E5ZBctN3UpK9g z2{gK#M(kt+%sZxN@Ei$L!KM;!HK?g|4n+_NM^~OFJT8eWXFPnEb1k)?$A9k(Re;!(+W~G-N>Wg8_vhvP}I_Mx^=kR)t7G@PsxqFY^Vk z@}o#)<`f-&!P6qxr;*_jEons*^hd2^EIBK?kS|IP0=h4gZccX=ninV2Gaxm82_d8= zTZRMs>+0t7z+A2{fh}}%x&TvG$7u`h=aX;1YQEnCUi2wzsyxTgsyq@|tm9CbFOnO| zZCU`;7d*em#14Kf1|=IR4NX+pk&615*KvR#2(|E(5>}T#>11=;SB2%wEsU8wxh}ab ziU8_c{hWs`(^8fhnU@c6hT88zhytiG9{9oP!(AL{(O#?iCb86)Q{3@u*-r5yFg3U( z4#=r<@oJRp7vpn^REsH6@l&<6zWA*RX@7)Al(bEgDNccO@EZG}+v%!3noV|0cl2KC z+)|h4`QSo6+|73{1teIHN+U_sS5z@;V8T2U8ow01B#aASjy+&G+ z+#Ol^Qcadi-yYcJ&}P`^tErU?9M}OuHE7{m-0|@P?1G7^6bw>RM3K|xF78{FP}L+t z3jskG{_siu*uxVtT$!Dc2E^6ic&FsjT3M3>b@fq0Hugp@D0$b$m`a&&{Ts2-XS(i; z>Ag3(MUR`!FMV@yo8J0BdtrU%NvBLaNbtM@8;9xCKp4aZ0}A6fnPL5_;F=y=8aGA%uk;Qoqyi(eEtG1|R=SeHQx2AgA=^n;@)@kG zPWAb*?=E|aYR_3bLCk$Y(_WQULq=SyE+HO_Q&VKyMaUww^PB(2Wg;f@0EIuPD)qK5 zF|Qets6Q#VKV$dN-2O71`^Yyx%)`=vabb|5eEE}eIBz$!R@q@>(Co$t+u_P$jmD^| zoBV<}5tFk{7)A3OtgP24n5Kz3RLGA8B;Y~8D!94z)Z5CYMhZFVT;iBOlxUBtKCdgW zZnY!lhv+488}?h)7%957IJu+>GrhBo(>tE)8r>^4mq)Kk^h2lX#g&GBV9H=5plIep z3Ru@RVk?%l{ConLD06R4v;Cl2kKf8qGQmFmE zsC(FyX{ zh~LA0_j~r-+tA%^&D^4WD_Q^{i*Dd)@0^8A*#X!Qz&^>2f&`gWI%#XYlBYmos%HfmUeG6^u`6YmY`CZ6q z-5gX7jEBNjxI-lsX`g$@dmEnP`asd*#FmzQIMSH6%fKyVCAI$@HwAhBK@}G{Top3Z z4LXQbH%4>RxwBO`glEahDE@Y`yk?r+EH@;3OgKO6O+KGQXXM?D@{erIBSIpRt*T8j zSqNi8?bKtM%L@r+c`TI%mseFe_;AB}1>flXopca-x&MbnB6{v6?6?C9J_~S*64UDN zF1xTZdEkA>t`7rLc9n4brcFfb<=VG&!HbBDE?V>I|Y`Nk4;^okGTdtXfyFiRq~UU#;Zt6m%)$U_?SXpJX| 
z*d={Gv3;87aAez->pPTeWN;=OdhA8RJLiOE_dR@Pe#1QSa(IqSw$Q*gQFrJ{^OcQ~ z$(FolD+gHIwt3vE^kURJPLe&5+m|7Uwu5{|%b&Eqwid`A1P~yQ^~>jwgZ3wHw}#W6 zF4BCs;R)SMO6_pl!>?Nl^T&0E5BhXl$gX@Zn&NUdA$Y?nFSHNM!_Q^AooykfV82P0 zIO;o|F=P|TU#t?z>nEyHg(5EL{n;`O9QT8hi9xhrPwg|5X;CS}4$Rr3zJ(}~W+x)d-pitRYcPDOa{jnqrn%o2if{v<@c z;DLIS^zHN0P(G7qZ{%ijO{0<#fZn9eq#0z{E}v+8);MPma{6p5kkh;G_Q(uWJ6Y*6 zDfK@LEwz_a^sY^KJB`c z1a|EvXg+jbT>I_oAk;9FCWNvevBviQy$)4b5HB_l~bhLif&k0iSjP!Sd%9}PEp^`m7 zO{nP##39Z&ihI4H$3ZFMMJc${M)F_sf-9@4AUHjP80y4*6C5*_v#jZ&fDGB_*8s*Q z@GVUZ78?P4M4q!)GeeMI7j#K?`ZeDBOmO+6p8}GK^|@NRJ5P4*T4mnr^;sr9has)l z?I|r#1(oY8JCe&Dtqi6Q<%^3dey{m~}{eM}*I)EUqNh_{I;c6%5&d zRH0weV<9{bnX7>`+ot6^OEwulTVwM>7MTEY#A;DwaA16l8=$PEFEx;9`V|RNu+CgIFO zu)35*Y*j@bX4a~=`_EuM8^u&gRRpUIH}49zJk|O@fOwo;FUB)fGi0(s*3yGkm{OsD zWDf)p*F8p8k9li85Br|*SPUd>lEHEX!8!@c^)(Pz0XHYrr%=7+oR>|j3lyjd%$qQJ zF%Zy?pPj=AYkT)1@)OYGpWmlpFcNM)B8QcPFdGEaj+vjh?UW;BV+kgcV39Uv6*EL9 znwI!Iv93cSt~@{9`UFcbb_ULEA-m@kT$TZ&&z65pjb6VBt6q76p2VMjKPNZZbfujP zO>mxyLEXJw_RziMw->6rnpARiaI)Pc2+xRbCfIC1yr+eacPZ`s#1_{V`MP z!bSJM3ltFk>I?!z9jMyE*$%)_Iy5P!Kn9fA%rp-JHve&XoD2M%`!ns&6~o5GOwely zyz$@C*Kp(wXHd|7N5Cngeel|= z7O3*3!g3D?6D_%>J z)O1z;+Phk@jmdU$Xt(j$wcLR^sO{e2qW4|C#jGxl{@ppUqs|ikjpII^reo>E#Dz;@ zm6!co6{F0Tuh>}}tmsyCk2GsZravhkYI4Yi+l13kZufp>52;GcUcF(lRF<8e9$dGx zxSF+)?>2*-JzimIkBd4!yUQwM^37>~M6SV&WPOUOvt4QYA?d;%wa;iH8nvU8mw3{x z=ocFb-*DO0EIZ0@7}x>>nG1@kn)fviXSxP*_eSBPxtHWyqfK2v6iP3ii~5e)X92}x zSoSz8`h@-k4;A@%tvVsbHyiut2eiTbsKQ(22M)2-+l9t322C8kGuW0w?3`z1|8iJ) zwvL%m+L-d9x5XizAahXet!WX}EPE-GI$!#Ry?E|E11-8lX=CK&$D>MrpOM`{f}FbS zAm=SRiydq}*u;Y$&HJ&WnmtZGZ_ljFsG>^9@=Po_PJ39lHU%eVqfpsc-P{@f_@=g&~iq3kcy;-}*yRCJYQc@I=LZP8}g zs~BD~r7m~w=_|0QHe&O9X~#nUvskn?Em6Yy#-%;j%7DDZ@t5eb7D?WP&;2)LAJd5l8{ zX#~0f@evHA9(v}Vt&$61U+)o+gb^u09+!T7e`WL?)}@@6Y0v6hOtr$B#o1Yf+>Y!4 zGp^;>S@huNdzCuZiLAJjW-sxwd8%tZZF>$Nl?63TL<(qRFLqcP;T;@0`m~g^2$C!d z&|7sX;IK0AJ(Uv*T{jL8GFiOeDTLp!2~; z_9{Y(kQSyfj=Z%>jw5$rY+Ha3Iiz~~1r$dW{T`XOtdpxzicCbFf*5kQU!JRGQ%d;f zI}T`@ZM=C$BtS~%*p0lSrMvlDD$aQuyX)-QRVAXJVd7N%_yj$fdw)TDXo0~+%&&dP zQ_|f5jBL52Z}BKN-wt|BdC_Wax|Fl8GPQp!`{gv#aD`Z&>6rHIlOs;I7iCn0Y_m(D z6|ZMkbVsX+14|oTjJ69+*gTnj^SoSJ*tlv6aF+`w$NHu+#^IT?u15*Z&BF;G3ZA5n ztzBi8O>ph~>Uc2h{9fsVp2jG#W-C1ZrL#rXzTjYJVu)3yh+5WTlj@WAy>|O@Wt$U9 zx#h(2|9Ymcc`Pwy=1;+u$+5C{Gkoy znOoZxyVdX&aYrRFqhZwJ_;@mbi3rIa@7~DKhK$|P<-N91xf9{$gh-Xg?^DXJ3N*=& z);SKlz$)~`HEYe|`z@6F^X#X(j2$}f52cIq4QJ}^NcnlVX))}vaZs0?%8ubCKv!iSKkwhZARu= z>u0^(Qi46!=9w!u=dAd7@tlO53c#=%N?!%M1<7Ki>|!jn*Fb06_h=Q(sZ!W_C5z!Z z&VbyzT@>rR%8RC8zU2G6T1dyGw8#-SX6{rxvt4R1V>v1&Pph9rdSw!YBE0$V4j=Ae zj|{zfzD{k;b6j_y6LRIg!oCy_6d^U=TXd<};0U-IP2+`~h^l8@0h~HFo~f7`C$S2^ zmCi2@2wFYkl2^KurE{$&>%g{4jef3+Qutf-Ksw#Kh%@Q@cv7f(6w66J8&+vLxIm!PoYBjPF&X&NT;gY@PL)fapw1jrKaaOlH`bU` zZ{|g1A30BE+HEVJ_3@0A(`;9XcR8cN3$M!U+!CWcB&nJ&AE*sq@Sj+;CdTHE~r z;GVVhWA@6Wq5e1WY6e|eJdIaq$TC*;X~*b)Z&vVqm=7+a|+^HFXXS>&6>tnfesL8;IonB|2ieFD0c zIN$K>Ei+m)#*75bH0+j#$nI^vS0G9RWPlvdX0KyU00p$`@IB~NxTh+*E9=f>p@zvG zhnaI>04G>DQ~Hwd$;2uWe0z$rQ#`PIP7IsOlgyhT}>WhAIBs%`X4TRy{B=4osbH5Hv{at&hfyW_dj_gQI-``)0~4lLWy>K}2h zcJkEVv9EGj64L#sz9L`qRWx?tjd<))UfJs@4cKfSyv%Zs>t?iz7-~bG7S`&-zLwSb ze$_~o_h|D4;o9sd(ZI4)X)YmRdHC&NB7Jmymug*Kr=wW{rdB;O^fk+$#Q@(jB|f%> zvRJ)L6TpciPbZ+b-jER#VJ=w3A?}uPLKfNU795mcmz<`VAELA|Q7e~RV9<*+%dwa2 zp%ZmEco%hauKzRuz!dScQYn9=HBbP1WU(zLS)f4sQc@g;xnJ2sp*PJpg0mz1UQ{N+ z2j4L3Zce%>+!TyY#ox{{ee=4Pd;wn)`EdEBk$JQwkJrMibKBjFp$9EAGwx`>ry2oi z=JF+Ly&RMfD{|+&g9CHt_{BwgUk3_V3cDWQJ&m zD?*=uaUP%|zhnwxlI<>Ofeppf$HSoYcpjDDy4`vz_@i2hK}M8?0XLfnDMfQJStz}} z8-E5gV|3ehv6&;&ojyz`g7$ES1d^go87Q3M)r1omeo)OKvZ0Y-B!K~Y(yOb4`zhU} 
zd~u?uHjl*owZcfE!YuJ{o0{ApY)H>{&a094F?xO6&AY#mt;kz+qh5^oqf%$-4Nu7{ zaRt@!Hj|{4WlPVI;M@P5?tS4?{R?%@4`D;=CcW(uTw3}z&@sT-!mL&qG4aznP_t`i zdC^Phbwz3~+BP3}J70W|M#jXbaVXKMr;-!K}u z$Rk4fQU26<3|mw=(P9)Cyzc`3;}mL3H9(EX75)5ZD#0=d%G5?w~2VdUv*ZRrp#X;e?=DhGE_P%jMKC??;>~f{X z35Q;r1-No-s_E9!BUOeD8(s*a9f)2TFsLgiYm2#B77@aa2+*B;N{i|!d1Jj;cs09@ zP^V)co`7#1&Y>*8!{6b_ zs?(Q;fa=zAo(m^ba5rj)O1*kzAC0c8W50705xcLmk<}pYBw0UNj*ZuX5^k7S7W#pq zx0BQt9>CF(HdGbdt8@x*+nO*J-tDVh4HGU`W_)2Ja9F#s+Rxc=!bD>;KQXZUiTP9% z2)R>y^cC5Gbk4=wohc6N9*iHO#oa;c@WUDzZ3hMPRqyL;P`1w#OfS=RLnBco zr@?{zvlwMX5Yaa&(RMdAZ90aj!N#7UcuN;t1Is> zw>9NWJH3DAcqHmO%1Ow6_-b=noqd%;bx#FVtKn~lE+w-ZdU+x|Y!$J(k>FVqJa^cr ze(6MzU)YgfxV?5}_Z~$V1rICIski9R%vH^yL%8?EEk~bz(X%D~wP%aBDA&I9pD~6y z@Dvp7iw&zl5@TU1Wa^#TM}C1_F~y7c>k=%Gir2Q@5D5q2-*E5tskUbde$m~b^j9D% z%Y7EkCf1lP5rgDpQ_Cn5#fDI%C?|2n<#$4R9l8?5CwbOtVmvJ@M$EJF=2ff2BkrkB zzb^x%%AIWTYkhlcG?|mx8l|S`Nu!CR?e7ZHMr)1Ws0@b^3Z>G`G>wDSm=#j#|$Z`BKKC0Man+f1RV~93+G--l&qF<8@lsqMzuiIn6lu))IwlBDYbHZU#*CYd3&mM2*UoC3|A6>V|a})Q#c|WoSO~i$rsO(Cj`D zeD&pm?o-v3z1V?V%|UsY{CC+gose*Mr%?{!)fuo_zZc`l#HH_N-k0o2R5dRU4sz-B z&39z0OC=xJh@~?j4cB@fgRZ~Sd6QVej4Mi&3B^?h0VGFy&1WRSU@F}})eX`Zm z$c&?WMDgUCYY^i3c;Du>Xhixni4;H_G8lY zq`aR;6H6Bpd(zw9cj@leao9pHP<+c)EU!ttd|Z9k|HLfzkp7(OC#tL4fJ8-hx0G{Wa#p}6V;QHdX>tdHDybY-9<5Hx9oDE&MJSgXP znk=GF1;k^!!=?}?*4GaES*9!6bCV?deS%p(%*Rj4ust$@HC~<7nr9Ldn~IOOxHRu! zj04(u+mDvZG~mYd&UM>qY*0tDy)uH7>rUbP!iU`z`*H)HyovhHX4aJ~x|I?!m`vrv z>9TgZ^^G)jc8k3t2e<(Ap6J5IRN1ei-S)Z2xSWu(nDs4ft;~~!IcoFd>MjbN{+--S zM|;yFdmnrfG1ym;dzJ>E454G#tk)MOzB}yaO<(FF(Eh2^GRXZlxjMs1+;~AtT5gfQccNyd zH_pL@C10azq81I`Ld9yiL3(npBCgN6(ReNhWdZKvn6vqzRa_id;5~cmeg(9Zu6PkM z`ZZAYcZnQ_X>sY23Ix2Y*?r--Y~eeL?cU%b_Gf&eR~XV;tW}5QPe3-#?|lVovmI+y z^j=a!6;QHuacn`GA|FmcpOC}yEIXm8`qvDUG(<5zTgp1d)CKg{2r>xcOexbAfxIjI znU^VOtbO}}_YsP*m3|aJsn!C+rq8UPw#r|)c%Uz#rH!dt{e1><$%Cpo&Q0m^iR%rH z%NbuLO)}!=rDI}VA9vv*e0E}aOg<&XXrZI>p38;9p=-OK^EPmgvxC=bh6&TR1$9=H zROZEDt#(+MkL<;P^gm^IfIFjm zWh#m)^BBuocCDh9E;?+0uYAf|X2HmmmU}7Lld( z8qis&%sW3fr6bZ}JjMQo9%0=1%KM)WjJ{)OW+6nFvY`QzRX<^i$v(?wRGXpLB`|wd9id-!|;jp~Qwv6;Tk99o5ta@Gr*85PBcqCd)IQf!6w4Vs1v3k1RdcM`B;mBF!!krQy z-!)TkAur3rg=n4wN^PlPt zZY!%TjP|<@Kk&Q^K_yo8LHlTavhk^wxz`n^F48i^qXhO`!;~ZoJWC8tpg0O1LO3<`|tieFO~L7FVA3${H^)bNFLB>W94yf@fK*eGCD}A z5T|lrldjt>b>Z_ZxvnTzd$=FPBc(txI5l*u4_2KdCke$FanORg_wQ(hq|dB4_mHEj z-qhaS?!|{qcstW=+lk>U6?Z}tFeVeG>@(?ss-+E!_Kj3BU%S%mR1Zn?=0*(d*!Cgs zQ<955drJ5lRWD^}UwEjI%r$;^&#pyxBV8oFZYNX$rA`=%hg?OHWk6}(&wwgxwKE`Ujb`dTN1=Aln_Uv~2V?T+q%TdVEy7x~z zq_>sg6Z%Zv6JJOR8R%l?SM#Py<6aM2KREinVL93hb>q;})~4w2jB~0gEN@xE=K_># zK_}~QE%v^9Yww4aI(sM0@{Y{=D-osd+HqdCY$;7OEx$<1q!7lTT>fW_5P2xi7t|4}9 zCs69`s^F@QDU-?tz~T{ufW(v&;Ik?sNVb+h$hjr+ZzBaUX+ta$)Wg{W$+U%H1eE6lz-OYpFqU>3InP z1j`E`IZZ*T>Wpik9-O|6;wTG*Ir*Gy z_RBkdd0MTR>$Y>*zty?)Hl?=zAm6j@0{yU_~3z}nq z-)nG~@Q$H2`cIsV^6oETx@3!faF&eiBh%d{JkETBcoqxs%D&<(j%rt<2)Z-QWC&~| zd)E9??{CbkEgcI|{u$CVbc%zJO%QPoO|M8#F`l4jCu8W64fu%ec!&b@h6iw}^&nbd z4UnSP(Y^u&+ksk`lQ@&hLTCI5&vd2GCJy-SFpo+1vvFT(Xxa{#nzcuvT#gCloW(Oe z+W2VYuJo<8+_Jm{RntvO0$_B@$#qTs1Kkp*igbFToPrD?;u3%a&Se-+JTUW(i3k&;n;NsK?1tt01Ny(b~$N(j8p?7O)NOWex6AlXCyC>q!K&FcX2jHp66rF7W`P{Mxuk(ifN@RK9B#xm%S8$K7Pu%Y5J zHy+folWPuYmB78Vxq#mh&D{cU8-0-Y9oZ2dv(N`78Nc}|-TkchCCa}4zUJ`>8@oDR z`!ZkNe7ZTOnKT^DaoGNGn1d&r>b>gyRMw7B)UJ$%HO8gkU}* zYXTd>&D1LJCv^z(T8WQOLzyj*FVpZTAo};XNw+&1Z#>8-lT$gDC(&`+gT@Lw<(&Xr zOt6JjB`1m&l=ueH<)}x2{<7Ub@!wQzNVrG5eZnJU{yGf`czoE|8RTb!&%s)=MRGJldRgN>2(V9fh8%M1-MSU#*-{cV|R zjLIwqjZK$<0?0%Jm+pE-MxIV>6-vBpP4OlzXo)|VBvli059F{3=LZB{uYLj{utgqosXtxT zf3a#0l*fM<0m9JGP`xJwVuv(-%&y0*Ik4w#F~wcQspVc|(X)p$%oRXgE3LF{GT4g{I&;UD2x1rGLZ5~PYA~9{M*ws#!+Se%PRd3 
zw$!8KA_R2lmSh2}1{)=%)*mBQ8N9;!1h0t_V_n*i9cU% zlD}jw=jW(ZmRG?u4XgECdmen@^h~I@=>)1&HQo_TL$`16UBN?0coX9rbQLB8WoADPu^FVHj>1!tmkilJIYdP;Fn$P5|ERLkz)NV zh?^$=WgEf0Lvah7XlCwy2|Pslg*U%yBj!r~I90$;>-{A^4K`o$FKs?~w21R|IuO)R zQsnXZZBa<=Omlwm>cztTac)L`@ll+&nkf0hSrzLM6zp&Cd7E*lQ&&>jsd%?wV^zk9 zi?-A1OqS1&6Nbds)sp4M>TFh|&Mfv0RcY)rwFzbzf9=ZsGWk5|+jFkklDq8e{z;2` zN8^CTa$C&@@{2J?$we_%jheSM=W%2%IPE6vUd)&2$xtgF{8p}^e2brHC9cDUxQ?PL zn zd9%ztdv}MPw(k*TpnV~wN*+ic#hm94moKAn zsy|$!UL&KuJ$z)GXx~2Ceq*UK^nVcrN_;#$!C0|Dk;-j|l&p)PdYT7)k}2(elVs%( zc{Ur(U6{RKKiH$9Q#}}5tUU6;wq4%tFE86Q9K_X|4-J&g%lHZRAd*_G0qRVrC zQssXcX-Ithcw5F2sgNv{46OXV;Xxl(D%h*k(Uf)@7OHVN?Vx>*Z$}1hsWXsBf)Vj5 zE49wX8+!Ntw3hI1-^LS{{HCZ&2{8i1faey@t5!6(Sa!VoRHSfGtfx>4wYa}%I|S-K zL)Qnzfq$S!t9PG{ng&$ZsQdo%Mv<{Tv>dCD1N^dbi<{(u36FSOZ)&R~Dgztzen9Yl z=>9|m00B^?CIkgH6}g;Ce<;{|m*^r!23k3;Ji99M z7{#JFaM}ZeF2G97P#Cxs)#>Fl0t1sD)13r%JXVqOXC?7}`2y#m?GLsdeWffS*h6YjdQT$FWR(%B9FJiUj=yN-XJ}$k;`Fq z{A#{1lztJ?97J241t|NnqVx@Mf3v#}g1jofoCufzSU2jvp3>p=93G-s!H{t_+V8>Z z7lF`!x0XNl!x9=Ej5E!FqsoP^A)YA@lz%&mQA+w0zZ|2eoadh@e=YwUD%9r(L5LqB z<-y(M$s~VE)sT{}rfZ?)?Q&H4O4=_cf9qFOF3fJ1qT3Q@|Ack01s2h;y(kUqNSb zDG8-^T`;QYFpn{2l?2Il1o$I>X%@PtCwkHSibvY>$PrMW%YE}b#@)Afj^?)r{oC!$ zepOv{AP;Vy5%1Vm97yrsnBacu~_<-^AG?hY>YHEi&QbP0mJL>o4dEs^PQOAZ(Nv;T)% zk0o3vNf?==LcA0xK3|sh$9@#TC|7xg=@6fs61Hf++!u8uP9 zjXJV<&_}^Mpz@NG1na+jEgoXevT7)_KtG6Ne@jjJpmFCSu*O4C_8cnf~u9&vZ33G^7UHmVng$HK6Ah zrq&0VHMwZO;4ST}A+~m&Tbn@e(igC+`MMvgXgl;5oF1*nzt43%kvpEIMB#$&k;nHI zL2nD~8z8=$r)wNuFnaBqu#D3v?aQx_q)@!$RU>f%Kw#1tYkSHPPcBZ1(6MQv(3@`b z>sfLGL~cCY!!}i+$~$d3*pDRs+gBAamLTGKA5_K2MRL0X0-gOBUHAs^4)??FFCMuI z$t;#BVkCY+RJ_RD&8@4NsFl3xGE)C(wvi{s%jKiSRLKosLnm3)W9KeiA<7}c1m5Ik z2AKpO(cPe=Jurp*r3Eb>#EjKtt`Q&*bev$8*e1}uhw%4sl#e}BQW&G2dcrzFZRU2g zYS$y^auoJNlfolU{?>EWXIYgAHu7lD`|5*r>9Ut7dW^e6U?xD@-B(%azuic%$5EHD zj{>Oq^U{F6eX=eJo(%eXN`oZ1tUF@UUE$H;{P7E@1v#TRXc(WBCS=DA9+9_LJiM#! 
z4E-phQWwMW`?iDq?-uc}|91EWl2X=muaK3ohs`(XtX=o?S{~}uWz=A)3qq;QgRicR z6)8j`Eq=1wTXEVcyg}o0_2$d==TbdX2p#?{pl{6|=YD!P1*{Mc972SJ&w^yr8Ci&e zGKZc9iLrf9!ga%LzDG&I*S?Ve+$8R_`@k~ZmdO_x+>>74;MA3&-rj5*DWg` zvK`oI)>}iO{r3GegyU4(k{*OzNCsV?-M?-xeX&OQm2~yar$aC~VvK1&5$J?iDb5`Ah;J_>!?kMAQ=!y8i` zKT7V~t;Jm_iAj+A}s_?)rH4s>!Sr^&9T;;wSbw zv49B3eh&<&6D><9(f`N#{l4D+_!k8qiA2!hUV7hE87T2ihsZJqmB>GAjeQY0eA(@Y zCs21jqt|{3&0{f9hL{g+`9OO6S*2h4Xt`_laU;|o^vgisWYzq$iQ3}cH?MNnTRd9H z59V5*y~D*Dd;D$ClhWf`Ibsn{O3^D5^d(wG0R_03oyV}s8n7W#=gua|ZGZT=QP0QZ z1=8l@72PxZ;_rN#QD8*$Gpk+}1MExtv@EJ^6B7pqGr^aLlEMsuU0Yc3mF|fFqqhc; zfDd}oU7VI8^Q3J)~Na(@r2#=n|X9&MB$7$M59_w^#p6flo7FF=N` zUXLz$72bBeREG7LHt0x(=>-YSN6oK^nze|7UgK}U3nzVY0_<)^-&1K4;YTJpE&G z2%+v4!Y8cw<&0KUUg-`mAom9i$9L~oykEJrfC`EW&Hi?!Ty})LL$8-~-?>Fi#w+FY zt?7odNWNB}PeR6PFdpg98ffJ=3Z35;4M048#NN#e=MQc1DX_)VMKJqBP1U`H-raTF z=`8KGc#)=<2^~wEwI>H6aS2`JeZ=@r=ThOOM_Qy#6=!}%f(vpT21D7a)IsqeGJ#YS z6RtboD`4WMTfwQ;UHiOUio0NdH9pXGHjwrC;xB%Yf#fyg>u{(FUUqJoM%?}xNuJ35 zkp2|tVG1RV_DO%+%>V`x6^O*|#n0iykzfVYFQ|NfOI_LoV_ z1UltHcYY}EcewagBbi=X*j=oTSEY*2Z|++!zuN4B(TW7_n9<2;~U^)nQ9TulI^iFX< z!%Q~Z4i>ql1BURi(iMx4@P@LqJc$U&N9i5A;O#nPsC$jAgJbNCwq9pz5$T>RtIoKbd-d5bkzgJXSNb6s`A4GT zJQvPtY#^4R7bM<^b?zgJN4&a>fFia3+%5V8TUj`m?PfZIJw9OE>DcR_)pn!j2nGakf zb26k?;ljNy$gBfDoRh%Bfy-9p%BP6PTQ>$=8wmM3=AclqI&&aDu4S;%Rjfrn502XA zcPluzj+v{Axc2;wnK&@ClX5^Mb&WJm{-mAEc|mPt=Mu39$+$@6b+WMF9UO=4Kr7eV zo$N<4nhR9MgD=azJI_>4o>+0e!quk{}HSG;|B?O zrd!FGUMZrb7G|h_IMt*e2_rlqVfIO+R|>?lTqJoBL}1q-5)Kzaxzrh@8y}-E;~Oa# z?mpGQAV#Op=ZL<+RnxuxNFH}oHpi5m{~E&OP1%bhziw+V1SvB%@X1vcHL~aZ9C1fD zSme`CuNZZY8^&az~;hLn;{=|NR}Cf3MC|*HosmO9b`p7;&?&EI8tMO@hhh% zQaQ|IiDacnuF*x^<0on15T<=*Wb4TtLHssEm*N>m;C*qc|Hh@6Kve-0HjK0yQFI4=xvG94U!{H{= zDHzd8$UoLSuchCSSEP*u|?n-Lq#pHZ`uyHeHArHC@O+g1aSH1bcObUUI9NKM!5%;-DpW2K zM@*V)gm;h&AAT&eootP3pT&>$y8s&^i4Vi9MV*CO<(1c84%8fU zI@OVN(W`szKE!;u5fO>EN@MrqDa)J%Tix4DAg$Ou_hFRu8T&u1?BBDEr1EPZ*p=$h z7;Pz>0TaLcCv8MDOwz(l+j+Kdu!O{NQay-{a45Al{k?0=mxF}YGx2Em&&6wk4iY|i zcM7gtodmZ=s@)5xad{z^C6V;d(&)~yrPd3iMAKJO%IST_OUJXu81IYXnwR#gU{P}V zciwQJn<7g|Wvln6Bi&Ot-S_fSRzokQ26N7_A~x|PZ%Yb-v?^MlcDs#TMn0- zI?i;E?&)?U?-og(?PnHR>6z)Ph#3}-SC{$a#5ig~TIaX?3dn6(I-NgUE%xGypi5>5 zO2WQKKemnQrC8y7twNK?)Pb+`=LC@=>1#o+fIbHI574{(^)hDsIj}ESR}6^K)|*Ph zC_a*pO5$>x{P-MoWp3vqK7_RP=kyyQWEchnD=uor$=)fqE@Oo~c*IjVYFXW6DAx73 zw~uts-|pw;RKxAT9ETc(zKCL7fqKPq+<6i;jpI`^inv1Unsxmev5x>2m%Lj zjXIct{>pZ1(N32*YM$}g=+9{E2du9p;$*X~Y}^wN^-ig)Bh_Z?NEz?eA?^FI-@$av zhS`(vKwY_Kf`d!!rchiJlkP<8Zzv;&GxLFVF z?|d4Dvn66N>6@=RU(QcIFjv>4^Ej!qqS@9h=>_SI9Z{JX|DaA#Z;6Ou&;uIHtmmFH z0c+Q2D5)(<6yxUUJCjb|ZlXENDX~s}etoYm`ch0QZp}MDM6wDM@g>^qF8adiZ4j$T zxNvY>Lrhx-S%-hQOR-fT^+(yA3%iF{uK09$_f&mw$1$#%`1T0PtL^k&!^=Gx^1TO! 
za3Q%!xP0%tMYge+^R7?w!XZN>D{hU|0vSabMkKFBv*ha2Z zfbqZeOBsoDl(z~R2vf^1$5&tNdD1Ny?I^WpzzJyHCITxt#nYTWKKg|uS6{Y>dSN+M zZ#-$3N_t6%jTG7&9ko!W;;3m-1rQOD02;xEou~cza0=i1vb~Gd=itF-Y?8;(A%TVf z{jiN?2I{PblNj&*xy?$NiTce0id0MBrvDtIg{m?1IT2(|9i3S~REYM>OV8xzM$Hlsx z63cTw=$zHw5FW9Kr`HLBa%yk6F*7R={VNJA?o4Tb_E_<=!>*B4W!7hyM|Y&H-S$0c zG+VhJ_4%}$3`^8_zo+XW5TPd@-^h?stXKu}5*e0LJ`m+=9 zcC;}Ccf*>pG;=y?PpV$IBUtU)$4tE2Qy%(eM;Kf#YX%)=(RxE{|J6{43M^GF9kbFE zi(3~gw9MMh2xokf&OuUR;G=2jk|1-VvsTDZa`U@b+Fanx*<l&gcI~o;7X5kl#f@+-H)pq*AVcp5Gh4G$WCLCJvrlr6eX$$xa-AG@HX^^ zvJPW64EMlH0H*Do$xV@Xjt_Vw%VpzokwfnJ*{P&K$-QK@eUVV8yQgqDdH2c zCp)$)t{#!lVv^97Z#ebV6*Th00*N1uO(W@+H*W-{=+?+sX}^aJNMXuMdlPjw!&t~6 zC9mtLu_eIBYr}cRw?IV=-vjqLX&fj{`O5JP?VcVSz;n@>gjycBfy`YBFoNi@>cA&wFEzlY@O=x1ZhXK zL_DALXe<7~k5iHnhTTU8^S3GD7|9`OO>SeLK>7Md&Hh@G5w5VRuA4)mgGG$~&p5kI zKf|8_GTnE**4F|*(iEEnuc+2@cZqPniEnBsTf)h@y4HEvn1awRTcnZG1Q92k zlD|ce>Va9p;t$~m>C$9*zE21@i$o9!;xZkqy^BO}ycO)m9$(_SN0=DLC1`c6 z&t@QG)kd#beB?@J@+!;1H{}m)Y&vlX38BC$s|Iyob3u2e^ZQp>2E}z{>fpkkh!-r$ zuI_kRu{H9Am6+YIt%YR5zA^YVQgWF)59@dQh@?OqMczQf3ESvFfB_zYc$@|Etunw-jvNnSqdb2kb!)ZdyVf6~QMKXkKnxC0EZM*O=>_`u>-N__(*hi9pqt{w+mBC;1Q0)ZU?7HVd1zj<)m#~W+uJ+tF#!{K zxB`zn>k+7jwCJ6X1WaFS2&wyLE4{yZ*D^$mMAdmKLU+KJibb%|QVe;cb7FgEnk;s3 zk=vpAb}eO=eS|_^Bt?s`_#J1BdC)sAN+j3ex>2;~E$f`p&y%0B0Z={23dXPkFD6xl z)8(qe>iZRjivsJ#q<9p5DHUDZk*&(CZ6R#{Knha79U{pZN>MOfmmqGW^UkkUruco!Q&vlNnS%2ppY;2l{rl4~?^EW2ZQ;!e`p_np9(_fVd z+HCV(fB|Wf+4%;ARUqABq{ZuS$KXwm%}lu}&zi4UOFyxbxIfz{2F!q>IzNU{ywa)M zB91z@@}?_ElFOW=%XV?Ghn=un14N1dYzJY{YD{y@fY&Oe!9(zGs z*D9WYTrNM}ra|e<#{5>t?ARk3kd*gq0YCX;;Zuq$iSy1UoUhLbMSNiokQVJ?wZ;M# z{D#2~+Ou;M!nPG(wBh8}n2`k~f!jr+BVplDBT#<_StvuyVqHs zv(LRB?l*iBlV9FakfL3A)Pi$1?$(r5fVbc~YJO9B^DP409*>5GB_e6)K za!*TpW80~7kC_~mrN@2~K@4Uueg{E1+==l>8AuU7;xdn4M@Mx6`YXdk_|o-nr#=jS zX8~|Ck#&8(bcLU#Sn7CqM9m$vSjyJ|(VxoR1WAP8q`ktX0=>?4z9qRhxjynmHO+wWb)5cv-_7Mv{qa0W z3ddJ>;om*;cVW;MjQVqtlAUmrpTRZmY?ff@5a$qYq@YoP#?kjgZv=k_Id#Mhn$jN> z4c=kf7Y)UEX4nw|0W;N+SMvF*pNlh~bdx16Qr*HH4jx<+d`~|?J|HLDQD@KGa~7-^ z&e|Vu&deng-nz-Z((*hS(P**OT)SyVk7E;eTS6!#F`D1tN4Z8bTHDL;3ZzLn7}ndw zTp77OGTBY=qRfP+7sG1d(^fPC;TiA&C)cw*iA1nEprT5#lca4E&sQ8vb1Q8xKKUN_ z-G9@fuR~utQvfULMCWai@st!BG+sV~SqTkyv&7a3phqwsrKF4B0xSX#Fv&DY+az_Q zNf+!S*So1tu$M#o2!!kxBNiN1iPV%4y%lkzWkSyW%5EP%^+9Yw0JZ;_xG+2IPG@uJ zd6XgVW3!wt)QZeRW_7@R^fVCsQ0*s`lIByj{sxcy3Pt^&8C!sfe`MOZ-y?~I zbGvf1bao%-s&EP|m|VM{6^eaio+=_pe^F6n+^jVc!w9)YctZErnKYUpcI_;^cXWp2 zO`=!%fl{&E{Kna6q37!Mw59{cGH9xl>>xJ>rR$YdZ>3_BHg2HaH+3pTvVMG4TCp^Z zLR{fbK^CklkRt2ug5eDoCAB_LlWRLhw5Xds*JV*E54fJs6OI<#meS%Ot3y}B*>3Ks z8JNwvo^#ZrkG$Ry5#9ko)|@q$wE0<>Y}3)NJ=qP;Sc0(U>f_;JBBF*+3U!`YUQKJt z(2NBm9{P*I>hhAuA~USyVF4TpKCY}H>`>DTqtu+9qo0H|7+iyKU4i~!6xnuJ>gB1= zP$eKn260tKVsK%`lP^OskW`uurJud(eK`ZH^q1^d(l5~~9?Zp&k2#r~KeCt`YU{;; zHkWTNMq_N~c86@MB5j-PPQT3|U+Kt9lqMLB1iXbW78x{8M<8W4glUZ3g*>|YbW2!; zg@rTO6m7S$ec+6T?JIN@NA4$|)=qaeNCiLi4%G(kPgfj!2xsLpx|<~j-nM?<9s!!QmwJZp zBb%^{@{@3L3%i7=a5Nkx;k4K#*)#64`9xGxF1jsgE3OV%qGlkMOk>pBp2&V7Ve#kb z>t__%qxtXe_$}vrveYXXmnzvkYk9U+CBeh!=3#-ar>)VKk7zju_EPB3j&tlbNudgb zs@9Rng>}nb{s(AAIZ~V+U!cWxVfvi28Qb(!V~^l{)$5M6kO-AMUNQH%wtC-Yl6wXl zQ7n||F}?9SIuTaBuF5TkH#EFyQ4;jx>a;Q!-W68ezYB54Fmo9tultCPk_i^jNRK&{ zbbYNLHv5m%?WcJ&zDf&jUYuWu>vFWrWJIS?c0LCx`C&7;8M5yRe@E2^kawg%XTVz+ ztyGaXokCe|`@z^kCDa4vg7!7I$u9?bnb-dq>;6JWE<^(&iOh-=j#_y~CZA02TR=n9 zYpHq8*omvX`dPKDvqpsK;rJ14`bGL3m`Kl?E-7`T#hSZqC0vBp$Tcw2z9OP2&`g~S z1=-`R#JXq3Tmh4o;cWLu61wwH9%$$I*(1=?n;LXfcb!h=qht+~8Fy_eYJ)|+j=Dc3 z@7}dQN_rw2>u;qTAK={7P894IGE6jlF{Ci0N3cgxj@RjA7{o<#8Eo)fw@cC_=BAlt zF?Z<+T>E-)2}5wb@e#e`YJ2yg~3wLufgyl|okQ)^}(d_`{3&q^qE|v55G`&@~ 
zp3rRWzd6!bq*Q+eeWULQnis@iZ^Li(8a%V74Ps6gr*OC}Ma0WC3fO7h^~>jC&#v~z zMsJfeU1bN!U4!Q@6XF7vX)v>kZ{HPm(9=m|1}s_ zix%OB^qXhTf1%OJaE^IfVD*E-<4KdO**4 z*3a-4s8ja2*iCf)o z8tykP2U2AicvRd%p9T$$TMAQ29*42KMl)m6{qhE0Qc0x+*KS5X7|hYDE27#E8icaF zFI7YkP^@0lw6Y^bI;2pHxF?uyU(JAtY&{a2Fi1l{ej+wOo_c4KZDv zpuwHZzy3^GRZ~0&ZV!AsZ#%s=}vBD?6dgmdp z)q70|peUExgN04j=1-_;r6r(4M4`hW$P&WE65H?(Jsqt*tGa3igdf)A=>A;Af80%90SwIO7a|N1)`M9)zXy3+N`5 zGV+K$v;^QgWsQum@{*yHd)UJ|0|m!(QDBJ<<%Bdf0`_?@$QdCU7bV ziB!tmWi3IfsnJQeSN~WW;HRl+N}RSxu#>@nO^Z3(zRUA|t-@@>Gmk**8Y=(H`DZCqurJOm;PcN8I)1$t9c;Nqu=3E*OYgvimMOi#v3=s`>cPHF*wo@arN~J-cJZCVZ=>jT5Ji(cH*r1p{FVLK{lb~nS*e)oOev2Z zn4;W0XsWo*G_@=MA)dn1UFL!XKZ=f4d3Rtll>x%FbiEuMm`R*H3l!W}z5P9V1ErNv zZVe){*8#nicblS%IgF`f;wf=NL0NI=5h;-3ue<#)H<{i>bIqX|Ki9oL>s}cbD-beXg|)q=8}q(y zlhc`^nM?(reqy3&l|sBp!6sLF0S4SPBn-Yut~}jOze6te0441V9Y@3=%>-wB4%L7a zDdUCaB2QFF=IhNF(=AM#KS;?&qGcddOz!9(&TQHIYKG(YKf*q|PXxjTarS#JvAhHB zjNJD?SQK&IjVUeAR?i&i%vsw-CdcBS4!QgGA-xU10hYuw zKUMzO%OKx~2seo)w}?Q*(sN>GicKBA0C3E1ClOazqzT@h1-P^u-t5!+4K{D`oh7V! zU5X<@>A`BO7;|Qkpn2D~v1fNfJXkm+_XYxNxPG{+v09eUMB5N;6j9@PO#K&1^rbocY4MF;z-~`Y20>al6yDt?H+@ zhPrO-NByzwqtwbV zCkwtImUZhEX^HbGzR(X{;*LA5WzeUGP}T~~g=Ew;^JjyJL#{Gk;vxgu6xljyZu;nR z9>18FrzictM~gm|NKGX6*%S;+@OUS6xqX{WG-1Z}{>BHqgDCxpw;u$lC|SsTx|f7G z)bUmlylngSF$ zb>+gyO4*34f`LR`7X+CxVFzZs+6UW@3nd|OE z4o@+C7leJpbl1s1UVG~4vfg7B;OI%INTO0<&0cy^{&5QAk`46jR`0mCU-WWe_avGL zX)l9{&YVH1KwC9g$)Sz?eOEZ2t0S&VSJ}_p3Uh9f4Xgh|f+gCyFH-p9DIMl|FxH`h zv-Z_?8v0g0yZ<|(=6HMfM-o4=^y3NY2AxaXXk zT!!8J;-<;T`*S_Xw_-A7o9QUjC)tV?$6Y}4q`wf7c4rSHR)EF9?UBlJ9nRgHj1f&V zo0$#G4em2YS3j_OmARcqu62q(NlLiO(#Nw7aTD^gUq{bJWZ^8NS;YB4bbne(-kjwb z(7)VSY#Z!6s&tP+xw`NHM?|)9Pq5V_Yp5(Q(YATA!}dw!WBaKw-s-WrsCM8$C<68Sk!e~^695SM6YBsKBu`5S%?)0-On zE9a2Qt_=y)nP6%rc<8m#%$f~X$Ou=t%#k81IizHih8CHH1!?A7sT=t)iJ(<>%TZK8 z>9PXCY}fGS%CnT*9l~Cira`71v*uS%`GCE`g6NK~SNK=LnRJofkH#jmMI9@SU~Y_i zL*rV|aiiC|m@+oep%o!bgR+-=A|>up+wGDZJwgi9oDztT_CUeCxQJ7lYkpj><|m%* zKRc?{WcTGMOsNw6BkrYgPMnRHKK(Z3_$Ars7gvC_=b6S#zPEC=QKKo|3JW7ii^bK` z>?w=|K4lHvaURefc7){Ot*H#AlZLw9))Ka!nWB$&AbBLTc+h5%CM2iP@8uiWWZ^o? 
zn&d5V@xdc4PM@II#H+$g3?Z?akTe-z;tp~Tr*2xB^IxNSVoq8}<8~2ZD`7P>9eV;> zn~TO7(fzTs-_X<@f@V-{IC?a+qv_^%b{*i z%OW1=oU1Z*o32zw|2V4sT{Hj36_p3TLSRqL&0U6~rz!&LG~o=V`5UUN|pH2OYA#ZB1CtL^A3 zp_q!;emCvjh7>Z*CD)viug$&?kw8A$)xPCtKSJu)CEn3N`qC&B+uSc`C>BcXaIo=t z_Cg~VX_{VACZ#~=2-tjcHZFUHarKsf&+@D?CSkjW11egX+l%agIi31Ys*B`lIZhD+ zc*~GWg+jl+(w#j|iK8-A7Exk%sFMNUKjqcxmzR zrLRZW2HFYwa#G~X##XmCaX8L}4$!sI#%@`$E5L0bXdAQxho=j-ev4Op5(s6=I>mZe zMI9Cu)(^q)o9n7mXNFsbe!K@&qdsAf+<3nSjT$>vl<1jr#WdV%McK@5iHDD7(AB~a zJer?ogyX54bJHc-u9GvzoqP>i7af3XwvJLWu(*yphmop~L1eXhMXjBBF(&=;s1ReBZ~Ry^d5@{>2cy3T`h zXl1zN9J8*HsL%PlLP(+Bscc2^_PfOwWJn!~D-)XvDSUIYp;8&?q@(2Px!SJm!m~qB zKCy`kvfi(Cu?lt#@pnULGvYo5+XaJMx^mf(hMK{5Y|27QRL1T{wwas#jjfHt5B#?+ zl`FsWZ$tv5_GPix0XJW_GWGPIvKZ_#l~jXUzX?Bj&S1vv@PSCQb4&iZh3oOQP{*hG zR?teCl^rmP;8BSXd?KZ;LOR_-Yha4tKB+0+-7h<#S0--EYX4quvp{6&tx|6aR~)n( zlsdmZ;@BG#5tiZWJ{zv`hu${g5H|D0Jc46s`ASii3o|Aa`=D9(&2&${?Mf#S2hYAdaCiu42LAYH*` zb_-9Z!q|!%DNhm=6jFD~yd3T363LK(7c3z^3$UI;uh?~n2t5$U^A!Ursr(`sUX73Q zyOB+dtgfszK&y6oNN-XfhE|k&X}LObTWy7m*ppq4I-9?VOah@WoGn%TLxI7}h7|dR zgpS=qclvf`MCU|_+h$V= zsbDiLYpqGExl6#YI~7*FJC9?UeH1EXW?JLZUhci&T-js&D?l6Q=n}8r)7&{49c~2i z=XxAbhMo^|)DMhOW`rF@v*CH5x_$@l!KDrCq3_s_rt-K#?ncESQL<609v5ENY;uhM z=1*g^Cu`n#c}dUmNe<&4*Ap3;)9WfID`jrp{U0-uX#b5*@Qr|?z>W41rPzydI|txQ zU|jcVkyZ5C9CL`ok)2+d>i(fdfWtW0foq5)PCXEgol2o(??@gCQNEtEm@}v&Ex}2Kv&~z9%%}p^JAtYX`t@9*s9FE;LTFD z;tvXAaU??D9v!Bh>CsDz={^@PEg!J|qe|ky4|n+FiVVY-nVAkrl11Lea_Y{4w*mQ9 z*eN2fPFab-<#;>gmA2+BX^AUl4l8xGXmQh?v|GLzbMx5vv&$7!hTaPAJlK!{>yPt_ zt9?%A90NhR0>2tMUp)S#7g1H_E!*C{duSU z`<0)>_F{SEs z&rlAjz3<@57?^T57JHs3+&6uJ!oj&|WI$E5)mKurl>)1W5O`r@!JN&%%yKT>LYm%R zKuySmk%ac0(NJq8k0?_ar8p%n6f+qcA2?5FUoChTYPnUHICBIzj=@i|Qy zCAFcE?rv0)n#M%nz05ITpxvGYuyhdEthW!3k8iFK} zQx{qZ^)k@izgiBgx#dFGAlEpN^vKxB7z9IfZ78p)`Neq3n6473emhh+R^yRM3xE5xM zNwHXdhpjYtjrv6=pEsfFoY!YEULH}?ZN*Z98yN6-CX#QahH`IUe9tvnqbOXTnSkNI zH4ve#K1cd#rZMfY0{0IIh9DZTPH3V{Hw1aLXD99OB8@p$fyNoc$Q|H*_Ic23PXw{W zJ>~C-a_|+YpXdoO9oKXdZS_i*wmEWHk6O0|Q!aCn=AGn*BlLv+*Oq-w?>k3axObzVf+&Z8DtELZWVE7yGbs>Z3&8P$lClr}3j+G&Cl8nnqlvN12y* zM;&YQSi#Dd>qAeb)sy-0SEfFj%-irn<_$k!iZGC(pbl%yWhX7`JV{p$7n@of07*h% zlHUuCXzy-M@L&+tAJOs=d!^&i1&x@+OB!-+77VRUeX=R*Fuzvusg;C9v(tQ2?78^Z zSYmh>B&2bdmC3I3)G@clSKNMuzeDy}B3pY*mO1b_)-EbsJL$*5H6YE6X^ZdETaqbH zeQ#YIj0y3(}T5+A13*5i6WH}Zq@;Ic4sp+)XO$A^X4@aPcCYs

zXFS}G&-eBm-;5{w_UkeA&Yj=d#z=r|Ed8=Dyy{02T)BFbdViNRta{lqnQMD_K)D>LvRugs-AGJU2{9uigjza)XL-3zN|m3@ZK0yAR?LZo zO)%o>@)g@954pU;+QXHKOQKYWrIdNrm$!K3wn0hI#J5;&5^an96H!{!i($M|U4HkQ z-pW&~qMVBp%QB1MSY)+n7x;I&LZuyZ-P@q%&=OJQHH2UJ66t;0T3l znli)cq4-6-G+gEDy}m*i{d{e(DaA2AW)Mn`Bi$oAwHV1;Wa+*fd?dH=VIS+o!!X5Za&M};y?fDe;He4H1MY`yb|Mu-n_f(oFUt@9Ww&zPi-@h-G^yh4UtF7q`Zu&?Oy{CwtR4w1ni#sS+LbJ+3V5Lk|!H+71YWv%SV1DVVKd3UU3;7Slwwm?`+RFH;cb~@?uJz$?U7Uj`y zIlg%wYxGRIKJc*tCyh>63|u|HXCY{-DVKawdR(HCEtA+tF-?fMtP@>tvx)z zmw1kON@M3rxy@AN7sCu8zG2%K@vS-I3!Ba?aFZj(jS4i(BzI#A{dg-Ya4VHsG$6M@ z$zAxMgyd62BjqA$iUfo5>L4Vnla;6 zDYhpI&#sQ5QHxB3lXACc>hVQ(wGiiA?+LHrZ#dmNlvkDsjOZ!F`;7_W<&gR}FLZUv ztmbp#2yp!A5*Dmi!tcoOVCFL)H4*_;j9^uEF)a=x%^vX3=akSA2r8iDa@&fy(U=G* zKRL|Qqu~vMM6$gG2-2wt%?Y7IY}&rDeDp0Nv1sdHw2;SoM@Kjn57Enj+_e!+x#u+O zNu9#!ik^`ET;`0I*o)0-n-ngO>rG2zqa;sRqwhu}hQNn&(PQ0^Q!kQ<_t91zUGQt83u-dtuKMlY#Qel^3eS6_=4fdRvSvK2df^x z0)W$B1IO;%80_VmsqRXuPADVqZ_wR8!lx!c^lDtwq&~%;*MbIjn#>pzu?s;O#~evo z3~mO?BA$;Q!7vN5;YWcXNrN&yk`->W>3x&!jD4w}=C6x&Y}2+1KaOv0q?WxicZO8U z?CH91KecKms>hW=qqIXym$xyu#_)0V+RHsdy%fEKtZyC8nd^#+-ws*JOO3H~X{&3Y zLJ0%5a68<6<3SgY!S&;^1j59}`iYexv**cr=I-d3mezvo+N%TF{*J&np^P5rd!?zl zsE_t~XO+N(X{Ai6Lyv3Wt6(>ga-+|c(Ido1onF@Z@22HN;UwGI=%x>KR9sGTBbjoc zii^L%F?uC5V1J@vTx>*S@p4D~b{V7gp+wHk*knLJMw*@Pmz=dzh0cS}*$cbNofKt{ z{L$hoiGhpW7kVTZP@~fMl4VVK$}s~sp{A)?1vz@kBg8K6(^^kH4X&UaIYl+x(_vd7 z7^kU>Puqx=<+2^~-|m(YV7H`tC|vPovkA^7Gh}I294dyB8~5sRp@+Ob-#Nd}u^YQUjc_pXj6mumP1kgXY4U+5akos8sxL^!X= z0}Xx@e?!`ZW!DjWT9@7U7x?`ZUuOu_o;i4#cDPFvA1}c{h2kQem&(auy?LZU0xCvG3Q^9Fy|}E7~$oE*GDuS7 z9wlL77rs&}fmIdl<=1y9?D_cyK80@IcMQIa(>D={StD!Y4r%d3 z^sA!8Cb7;gx5l(*vZ9_ul&R&trdYilg3hP>~X%sdE?lZ*cL;-kk@eT`v;Pvk0OuYEj|)4o&dR<42*UAT5g z`kFj}dRiYYD55v5H)>0r^~kNUd`n|cQ9%MxZ?&`oIdc<55;%3Thq73}7mQdrd@ z`zI&Yy3RvLGYx#ShFU$#gId@1@omo!*Vs#Mtz<~(Ge(a(jcgrwj(Z#3>}U&oZ0FSU z;KQ2KyxNs7k__w7by$?bCUYyT6P)o!SHJcJdp%=$KP-;v)5A_#D%%$6mH*Njl=-d6g1sl_?a$kzq zq}NTZP4tzekb0=*R)@2?U&+fjn|dj+|DQ|y|Me(Y{}a}m_w`e8D2L-gtf^Yx;r)D! 
z2Y2^uh%OL;v4$QF{O**cHR|=@&tp{Jonk)`nk^m_l9BHy#66dK;m>@IUu;uba@YCe8&vI-c z+`|H2pN-&Jax=(|la6u&!+WfY4%N>b_Tz;!w@dEH`E&Oyn%{>odcy9$W?wp1CkjfE zddE$rDKy^GpS{Rj_LZn-kYrs>wCb}yeINBYVRl~nm@C~E=W=FoLbf)@z_Nb8J}#VF zq?v&A0ptQ+0)q8Y1Z4=EcMc2cFT42UOmrdwV%F>pSP^BBxecrvqOO-0N5el~*wdZ3 zSfMfXaLl>50{+CKE+^@f2u$FCI%|O-NNJ2pMt#Z1_}h-Wk@el(pU3R(lfH{Bj8gl~ zPZ+N-{MMN=M*5Rk#>I>rD?p1Xb0&N9qtF@k1-P)})cH|QXbISGp|-v`+rhdB3@>~w z--qfJoyWWrk9M;$#F96C(6>?hQQKos;Lud~UVTuqbOP8}*upR2V`=AM*Tv>!dT}Ny zjOTgoUMu*uko<0XmbPHMmJaP03M~KMXf{JNs44y2aaNld768z09h~1dR*b zk%x)^+e?IF(m7})})bcL0Md*9fG(7(;7k`e}*uo;^;?IUGTLv&imj2*4cK&|a13LsD3m%J`- zsb0kR5{>I{tL3TQwqux)4Rg)}UR8QAn6>sfzndbWjezoh`y0TTIvcFh1;Je6!Q}Aq z0cpco8aummubcEwO<-ZfujANcc($@iF7)vn6noywmpzKNg!;_ui7%Bk@ zyO?iMf;46YRV#1ZDjFXIu9|+rt(5fVZljoIL?4?m(L~4VBj};zTA~WZ`IH{9F|!iiAOe~DpTD>&&e?ETxWjx-XF{p` zRjvJAW*qSpmPbfdVz*{Gexmp?d|;F09|Ok+LFb0v22J#D+H>D z2}qHjH5Bvdze4k`O&i}(`nqo1(-R4m@o7k$it>^58?i}TKahW~NdG%eFD;SO&*MR^ zZzZ>LU_Av=(F-}go?-g&*`H-b?O!cn+iAKW(30kd@eG@EFa*LdgyTgO*=Cye3&eQ% zU94Q&EV1dY8c@_8gzKMrbhOTocx}{2ekzpionp<-wu}c-tlnl((uogOiN_uY8KmfC z%0$;Qcx%YfEnT_G@91-=@+};`^(saQRZ^2h?|nTDVH{>pO~8WARAL%BtUpUl5wQ*F zaA~SGtzxwB;gU1hOvio)o*C@}biQy2Hg3bCn>R&7aEQIXoety-c4qUXCm^Ongx zct3m@;;J)$j@qnIxWXGL)p2r0_xrdYEuLiJ*=*Ogx6C#kx7Bz~G+hXoATQ^gE*pqw zuuh+b+Yyml9H%^}J-}Egi(p)ND{5yEQ)^L{2_KZ+@qv`^j`qvhpBMFC)|b2u$Q6y= zDDisqVCLWmw7?~0aj!iZOvQ6V{_KzZ+Z8cq;F26Y8-kHyU`4u}zsMBEw#hYPKoo_C zbgUh>#0xDE=Jb~|Vl&U%mHK?Ud$-P8 z@OLOE(;pdX2{W*XxH%#!!iwk^dB!;U?Orm76Q~CbKy_Q;!L(ay^{6BIAbDanZNebV zooL6IqwjSb0F(($`PlI&oIyob!W0p7rN*pqMpGy8&cY2k@r(Bq$r1jipSzX8 zka$%;Tzd!DU1QS&%X2G1Um*}x*eq;$;>Sn=+aRdXVfeT~WX zJ}omqK#kHfM{&`X*A0pnMKJ~ClYT53@KDM0IxT*=bU~HW4{0hl2z%acy$K-kzZ({z z?%N)p=zTzmO0*bYO!S|2aZLPeow*bNrQ(!&2xbsY6$P2^TK|~OAT}#3@I1w3-D$#z3PDOlG=yu)|9edZBz6w(eB>3}VatPs z`RBzc3%Z7>RbL~K2Ibn!596_3-$UlMRrAAC_CT?ELuTBYcv{&YqA(NHI&ZZ@|$oLuX#RMgr#%*hn%CieTXSNSU2NhBOYB4z;=%EKkmq z@=5?tKXBK`Ph^a^3LQ^bngMRWn8I5Qep6v?Z`dSlwRIrt1Bad&@AR5V8Rv4nCV-7P zS(e>x{qmlm+#4Er(krI1yLBVK25S;*JO$2^20$f^>VQ#uGLp zTxB_EKL0V<2-iHOpOj$L0k@$!=tet_6@ zlX~SQ+mZ=>sCI_~@Jti1M>D%(c|HR5HP1=;Xru>YA9pOYWIuNIDdjzXA<3TtBA7vE z?mj^(qtZauF;}Z@6n>e=&Ua?yGY)(H7=T7rn)?S&0Dhf*LMTeEh=ySx<9Jh{mDK~G zDzS46utf@0cRnhbU3KHrnFWP~)Ol&{*L+S779E{mDBP6-2fCu8jnA!RLjxh0QpM(P zAiy<5sO8~C9Y2^i;agoHV@0q9yiP@-MfE|Sx_mo^`u<*v3m)ruxqDsW&ek>WNzs>o zQ(dz5Xzp6afqI0Ry|tp<>%UWCQaCv^``CHucf4YXD7ff@8v)~@S(zwmK#s&4%(+9XXEhT2mPYZ1M#xf*e%DUyya43sm74P7j}T@jT) z>G2gKJUus>F@3xFQi8(thyCC)t+I=kh)UD){EWtqgM)K_w-)`Gf}uj>2LrUd-MiWL zdQL^7e1AQ#e-np+mx8wbMz;&aRUhrFGBTZhW8GMbE5H*0%8WCWbY>F|mZi)O6?Ia} z-(wEu8eB(|(24qkv%SB~9{;Nv@b|1T{<;b^tamjmgG2ltVt=iq=h_X+3^*vozDO%h ze*`SeTCcCu{!f1l*hVr`KG2~oz;=fN<3kwvwj>mzB~*;~2Es(Gm&nL}_a^`2{aMC@ zHM;zAzFTu*_XqP03o)QLt3}5XOi^+GDs(Et#{ZU$_>T|z&z|Kq=#nHLvL^uaWjN@f z7Frv4L`q3WkU@T369pfGha246^#1b4faeRr_mG~-49 zjD`(VoB`_4Wggo1g!x;)!BhVJrM8Q`tE@z`i@%FvsB9jE(t>?$d8)phXk8?`xOB>PTIzsx^>GK`QGb-u4p;-9Yl^N0NT z@1P7_6}3Lz`xRudb_~8!2G$oD{`{u@=9M6pAf?&Xm=oLjU5w{8d<7NyJ0n zjWmkB0ID6&J^6Iycgy4NMrD@K-;^G$U#BORe+CdQ2p}>5($heyoA$qXNB{h`pfI9C zK-12)!OAYi>r0)z!2s(K*lOcXvHhJ&@z1NrpZ``-Ck*@&!!acRVqGo%l%ThNj0J0Y zos!@Ey8mnz{^R{wT;P~XPIH56s{j@g47(ruJFCp^7P}{qIokg{wL1-@&Kmqb`o(xI zTLN&N11>1V^*@FU_bdC9i+_Ds`maV2CFvD@lPCFq+&p@EgQ$PpjNjj?KflVEsFDd# zwxwM_A#tbgR=FP9R)SKHK;;_$)sX$KulGKG!utR9197Jp{*NC@hRR3&*FWg59>;U7 ztey~3I+KMo4_#T5E$7=m7RrBFg8w|RetX6KN>&emw3{wu_`8rICH4O{te;^V68`7I zS_)L-0&a)!$|ugK0}h(Ae&U=n14`z9Wa z>D)6oi_!aUZ%hC3G=e%vf68SqT^=^0!yniTIpvK2i#7oeSc?GcFi{|i_Wxq_Dwtg;8gtIIG=Gf7=XkTV3HWvBJ3j-i zW_Zw9l831u_ZEU{cZN)HpoT9Baubw87xiBg+_7byxHsih;1zY>kn7*OGg-rb4$p?d 
zO!r{z)eiOtHX5S;Cjq+sfuAf2E8fbA1CZWwh7Y^OUwgNK1ZE2=o%rt6s)tdKMu}9} zJ6}jynjpdhtX%;$772N!5lt3BCHx0DcFegvRA$nL8&9AM@=(=_!lS`SI0D3+O8Yb{ z0laTj`tx3S-HEUhfMFI_gVWUmINAM4!sN;aAr^NEE> zCqjjhx3?cO<<`o>{2Q}CLQf1QQVMN%<|aX!*aT5Q44qNHK@^GG6% z7D11P8^FTHBPV_}uQmi}<`Qeg>@bHhOW&nM$L`5rvJw+Zl(nQ5LqI$6C# z4Q{njoMM z5Fo#;x+NV|yY=!z2)^t%>B=CjPcaD8G?1C=TtD93IT1($)Ec+1f`t_@q4IHe=Z}|Y zKg8|>bUP)Ggh>J6{b3S74UQE|P1sd~LTonT} ziTra1255fTljiJ~+&BJ>88?yi#DVkZUy0}0Wm$TeqZ=a%w)@Kk-;KAA<$MRa3xE=x znb|!n>8P}w!fYZhNL?3=aX?@wWbmw)Dr-H3O+*ENB06sK(7(^_MaV}F(? zHV-N;zlWP4@`aBd84{077HguA^!$dTg?<6fLpHDtZtzRHH1nCCf&%j}X1g@;F1gi$ zWlaCnF92r>#S?CeDUgB&Sg_G31{`$W5xF*0XajI>j|tV&n=DOQe$t?70nxeGya!Y> ztdDko^xElpd@9)`(J8d~!TiEX{WUh{0c2`!bTyvd3{tD_<&|Hr0>8O3z__U>mgc=L zeo$m%j{ddV3`n%}`K_gt@eVQFE-~x+78dsV{fV`MYb(WRY{nczX{O(5l{4)aUW3gD zn3o@0`8(*D-||BDjNa;|3o1l#GCY+&oP60)`0-x`-S7UXa)ZdwIqH{4wmpFj^;=Hk z379CYP0)PoMOvE!z#lUB?$!}N#SFl7-!q# z08X1)vRle1ZD9)B2|AjH_W+8E!)up#;6MeazHGvienPchn+I&RG(k04V`Yi_gkKGj z4pri*fuIhb&?b0bxOVqb(La~sEBQM6f#w4^kj6w$fw;b>sF6%C1j?-u0_18Td`jGH z?DN`3En!7EJeFX8=fh*R4Su`+Xf5cx+|b@d0xpn1U?v|umG@BjWwA*s9`9N z%6fM44J>F)p#0?at@B605jJr;(vf#=0feaQxLfN8rb@3l7)aXIX|0b8NWgdN!NEhD zRX~&-LqWyPCaEo$h?7 z2mlEd$khH9nR|g0EK>R1a_ev2;oS>no_)w5_dXE_(aWg=tal?mlYHLHwLyB!LCOjs zh$--7@7sXD(+B`lCW+M@(&mY7Tj#Q2x{Y92&`Of9xn_ofJOun<%E47Uq}V!|#xQ#c zP&7DVbXIq!ndA7kk+u>Q+%g4fHQOnP2CVX6NUnfh}OReb)q3McVQfw!tyU;!z#KVD zZh7#%cp-)X`v3@}E(NP#{}LTyTrW?qa``T++%L@Q7k!cGVzTE=qGi+v6FX8`^DTN9VCW_$S31U{QQ6+KWcv&H*R-DT?woYgfjIcz-J`7HiefuV-!s~)H{}OSa@`!=` zXlX5}d;{;%LS|)fJK(}xV{hY$Bmi4WlQ-}fau;j@i||xuE<^PK-!Adi*H>3-r(ByT z4N5z^V4I_OsB{XPo@)W9TDKCZ@6a%w0_=4mso?<8+87XEIix`vwF=oi^tH&c38P;r zLbrErcmyQXD!U(=z36IZxMe0TSJVH<68Gw@^!m1To@JL?c&f8Sz~1L6$_W1&v9ax= zzL-b!@JG;o+jAn7kk8OZxj~TFSyD?M3n7qp!U0|gnMaqnra5hM1KzTk7A}_hJ%U&G zc6dOlLrj^=B%kgZP=N*}eKilh=K)F$+whd6{OZ%^%##2{e`i#vgqd{@Bv&mJrbZNw z5N93(oF{Q}sc(bqbC26B>xx4G=G^r2N$TG+7?55QeI5ZryHa{YRT2b`sqYQ!w%0(0 zDbpg7e@hJd7pXu+kM9XP^;7Aein-mAC+3-(v#sS~ubqL?b5GOHS@m6gU*p2&gq~es zPs-wOd#a1AV!z4$u@ify@piH|h};gXy#^o`pl6^Fg?MmW)jaYJ<_ggN8lT*3mFK`S z=RIkEZXf?Fr9~NT!p(F+`em?#XODUuwGT0@~D0`CfF zkvAYrI@TzfRXi9lH5>C7JcO@iY?fhS&|CTl)*`j)&?8vTH%DM?-$A~mLyZ!q(Yukf z#M0w99ut9buG$EoX(JXp0V7|oDs!d-{`RZ*^OL*G=qx1FQ*pXIYD>l$MJk~VbiNnx z-B53L%iIy-K59Rusj!?`SbD_`jHuzfhfl>Z`oV$7dM(a?%Wn7mTO8-M`?W-oj7F?1 zQ1O}jR|C0gar^Z)FDwTwT?cez&WVn1&&X`SwpPd(O^H4ed4by^nV`!CZ%@M~X zjRU`9(=b?}E0mn$mC?$hG$J43r-HZZf$Nr`&Rr?!QuSKJmv%;+a8Q(>NVU?af5Aaj zS=l!y^ILA{y(5eRbZLc1o93FVbBf++g~eqLo$o70Z@2!P2)2ZXsbP0EMj7qLA*Jk2 zpMI@33PE|_xR^h>L%V)s4hq`A%imW+9wn-n%_ub+ObMoT3XyAVChjk&W z!;BKC$A8f5LLuVNWn(d?orTl~>_|rl@yPjmq8t?SwZOiH#4)QY;(cIV>i7Ls8CG%b z5vKQ8B6OHX*}T;d?;stnGusVf(@o*jF(s}gh~`_oG_S<1Wz`M2SJZ?ele$PqSk%AF z>Ry>vvWHpMa$7rmc*VbJdc1P0jtq}KIralCnUMJeQ@l4~Owi)hGq`*|`i&jJ>&>=+ z8sfsJA_GgpdqFbP6R3eeD^2ihsWXf!gikeDyNl6IYX25+il1M17@(A9YiWO|kOuIM z24_}>gT!Y{=`C5hx~E{fP<4-jAjq~+4qgt`GWF6yBZ5!8X4 zBCjV>k9etSCT|RqBcW$!13C}5MLSActG$wAE-c~8#N))OKSze2$VHrd038+JQC2aQ zqrxI}$6I*d*1i|Mr$DRB{4Opg9E3dzYk2%i3x#8hAU^%g9J&BDFFj{wI@g8j;2(^e zc$(J#V(-1fn%uT;VP%6QpokJsPys6hK~#!@bkVKSlhCV5?;SxvKt!V;7EnQ2kdQ)^ z-USN^A~p1?A`q$qBB9;+x{v$ZbAIPs_j%6!ATw{(o=9mVBJ;U({ ztArkZ8hH%!{)?Af%HchYVH;g1^Rn#}7F178PH80gepz;Xk=W0{DZgm;X!di>U{GRE zPUcYZe?ldUWU^Y1bRXo1bsnWSZ&AOE*@ptl6Nf zdpFni%qgOy18kWe;-+gg3WF*qgt=_H0zclq*>M|20)5B~S3(`Z^C8ZgFDj2ET2^yi zHxjfx_4{cF+~wNtW}({RD~HOGP)*&4QlR&cQ*nS9_*vv9CnK@@I{0GNBTASD z1Jd6Co2kh(L%BD%Al)`ZXqbz<^|<+-HSY*;gB=v~{%E0^PKc^I<=|LKgO#}v>l-69 z6~1{_Y<^tRtF3o2%FA&UC61;-Wy@`a73f|;E{6}hW7mb8W+fI8_UOrvKE2<(ueiT% 
zgIaet$wQ{AWX-UCkA#SvmD2G?=ndPrgtwX%jVuVgUwU;okV~t}ZZ}eS)QAd1nvXXr z-dSWIqd`X2+`1trTDDyheV=J%xVh3Ua2HrNK9Ue3Oupq^D;8uY*jSJ}c_LhJLw@#5 zPkQcqKD92&@h6@;{mZsr72c(TG1=H9bm1~fJC3!{aBcXJi?2^9JJa_1#FL9r$+rGu zUEj+04u4Nyrk~^*A+R2g+_?JIld+F^zwW?w(bwvugwmv=|4Cd(+tT5dk##+B@?=qr zPrYFtezvW-GAIbN2!sRZTYHLN5)*M3BscS56SF7tV*eH$!OT+}F}y_{vFz5Mu}}WM z6=fCXQ-40p3~_Gq1+hiS^I7Ps72CMKnwwFgG;0 zb{hF63wIlW(Z8e4XPYxZvQ#j<({(5bQ2BGz|S&Z6-PwW3+AJB?4Q+E0C9|iOw);2Ty_pYOs{p6uV5U*T=9sp;tUdj^p)X1CaA3P%NCytj7kwK=^A2X z5GV%}Pc_W>;7_Y-jUM`_77g~F7d@|-IAr7#Z6)f8+uB5DI(3q5&ElN%oO(oH9EEw! zC3rGEqk=!frmpVmM}wpK#!oy!+-X%uoSXsvDdK|d&eLu!pIDUSu1w^&6a6J_a!I?C zjFR%_Ncq|iHgSS)=Bv|7+LBqBG})SV?^D};gz&ySO9xx}G;o?k z(;^nU|Q)Re9X zraykfqoSkuU=dmf_4vC^(4K&SVmwTP`3`xvfQ+G}Non+G2)p>ZU5AdR@UUg3A23i|B6u7KCZ|HgbrK#UE}wrm0nTw! zK21lxhT3g9X2B*vTGpZK$|>s3I}$cUFySCB0?fOu%n`3SgB~Osdj}C+16_Wo^5mB! zCyu*&ofe_HgK~41lOgj&`7Sk)25@cL4Mn^sBkxD+WaIHybf!*M&7e3tjx|x6nMR?PwZzJ z`FdY(NmsX!|e}C1p#Z-I;$#39!aGm*f=vsYQT1cb_`3eWCT zX!({<*uW8ofVEeztQeU3VHh+O8S)w~nroFyLx-EtZOwu#vLA0BtG1_QSm&M#&^(+x zf~0w_=k~*tWB%SF=h43Q#Ai!iUxr?q0SX{oV0q+B6aLW@(*BJkmML#`6l$70b$j({ z`Q8XhrO6c8GSgfhME-t(;=PEas zfr#{>4(&r7Tsl>KITt$fCrXGPy5vOPV5GIV`D4)hnVa~pvtHsd(I%9HY+@H?VTlb& z$gT8Pz5n*zT;C;@vy;PRW}n_ad3?g>v%E)E=g^>kx>I)nWd-(im3Z-|t%VAZ6yqf1 z^HtZeA4NC?r_N_qnY!6om#9~tB`7X`3G-t*Tj^)N zUnQHNCFbdAFX*SxTLZ?M#Wvo^q|4RB?Y;Ux#|#VprJ4j`}3NXvfwElM0?sCVeZNoY>T)Cd%I9HG-So$IC5ZYe%H8m7dB?$jbk%-~E56M7>YP^yfXrAGmeR8>C4_6}{|9<Gxvj;rY20^=Z8I7? zneVDrHNDsaDU-C{tZjQn`)+x^r{0~`aZV=Rb@1#`QocBBe$6(mlTA(~10n3X^s~zq zvd%Dc(0LB}j0z@uOBXdL?K&x^A7c(tDeg~0?pM@5I`RUB_-!xrgP8e9CA&hz;JEj= zK_L@rCY?Y7jmucg#*UWPUdpy>Ar+sygSs`D+o0fV|5ZJ*MZrN-n7So-@^S2B6XJ~$ zb8Pu8#k>Tzr0ptvo+D&o1xU#Ln&F_TE$REAYIp%Mca1Pr-os{3sM55C+bYjGCGipy z?OBO(`wW(4PM+4_ARL(6B~YQ)#znScsk%yzSx&Uj&TFxo>ddNKKCDfhNtAN8q26rE zio`W?GoMGhfWuLKT!G;jq#9{+40@dlW}c7Z@O9PT$Jd8(I@(wr z8s(_P0c`C7^^>*NN{8i0#g|~@n4iMsjJD7{H-pkI)A;@D8Pfc@wWl?PGrU0p^-`~IcFPM?SKgvA2DyPYZ*mb&zU|gfG&qrl}01|?ej2{(=FMW z&4ORjHfa(y@c^rt0|4w%byrDX{=l&8_?Df5+euj>K$-=fy;8G2>%JAMJy8j#-Kx(m zCj8x;-dMj*6XU~3yxo=GBP;rEMmPUM*le{5E~=C%)@92g3mbj;wwOdNn4RiZ zL~AyeP-hohquWHg+i=>F$Jv;`pA3;yIn5H^uY%TIl=r}SSz)S3Jh}+qc)rDzRx$*{ zi+-f0w}N>(1iYwy=fu-=%xmWY|>;WgHFp(4d46?uxT-cx_o>EZ+WCQR7myu zgVM@C|5loc2uh4$!u>Sh)7>+B(#UOjA5j!4$buWiq3^)`#R8>QDxL{ihYT2BI8@+J zGV!BpL)>J9iPmm8Qbi&pQkN*Cb7{!zS=U_S&3YE^NJKJ~8)NYu6rQDo%#!!wrX{%T z=JWWqioM+fp8b#eXckIS9T)H91A(dU%h`J-j0Mfo13j_5xki?0k0SFS%#41g^n2YI zO7q|>9N?ZqaflvIkD-{GXyfSm__MfREBqJywC>t>7A>BWs5Mz8#m!!#?*gETk>;%< zxCF?`ZOkw7iqf%sRa>F^!$&-SFUAM(AW2huTEiVu2xpqv@>a93NNGP0Kjmd#agS`akbR#%1$QI~V;gGv z@4H4dhpXL3-DJe0Z=WMl&XGf|*$;}mq^D^=P`ReyNfvxS+PMm|94)9UR8$0t3*$Oy z+^nn<;SU{RrPRAddJ?+I8Q>P(h2M-bIA?iA<3tu|hVGYb#Yb*FQk7&~7bB+8f)kbO zNH;vjTa6uRg~{0^QBPQ0sg4vS^8;eX_m@=@Q8Lu8>6qC1c)4~BjjVx~*pN%eyxFj* z1SYpOeyo*p-xMs-#cwPC%q(tTB`mIrJWU_gr{A3KxX1}bxruQYpyk;R5|YonJ+f$M<~JG8GXEqb6Lp^|B1}CZ z-Rp_v)rOI+!%iL9mI(7$(m-YDVuo+Ech??j2Wk5I3|eeX+YqUt6vHy+FWmu?NZks1 z=keciV_uePevs=kU!S$1^;EE@vX1;}SXkI(-p9bfjGOH*PyB^VBN&yZ)YBiAg~j1; zaFM4GZPmWIYUBKc4^c&m@7p_IgsC(5?Puw~;iHdG?fhmx*Te8{m!y~c!UAhi zuOF1|=v#jWneta+*Ag0BO&%+QOCa5AtZgOxw&(@M)v3zWKDK!Do0$x=azF8+ogC}! z2;ZPY`KZ9EtIb*)H%z{o%+hp=_poNX`^VQ2b!?J&F?>rrt-nw`aSmP~K9%kh!>P#3 zS$0@v&Y7m_$=BIaAJ9DJ#JZDg4>Y6*$W7vv-PrDhHsq0_^lguKHp#G1_wlOK(F6xSkoR)BS^5-WS*XHnXz5oFS)MrNc(1!^V*2J>Vs3rHENH zJpaLLY$uhuYbq;LAGsK&E7^?N1OoKt&S~cz=sWU$<672Vfiw%J4KLTJQH1$|-_?(7$<`E**-QDf9@+i? 
zkJrS%@3BjZ*Hz9VCy`%cqj1rJp;aaOLWzENwCbn<_5}a(hn%k&E0Bd$mH13ePUcc6 z>x>KL&&_{^x`nIIN&GbqKU>D^j~jhA-c};6pdgees~?s-*QxuRnU)yno|ctOOHd9w zC586;zIXt^;5w^))Xc?d_QOvx>&D7UJx4!Oup{<|-QZL9q);J$z6wlsTe~;xL+$7(-(6a&NhH)uHnru}860;8 z#V2w`Wz8?w+TcD*j&jqzd-rt1x{!QgW6b&^sgJ>sMqs;hubild6r7?joyLh{t_to}kU^<}2fQ#(Ro$t@w|t%M`F2 zV&^&ZyGB*T+XHcrs8ZHl+>R}tUFRQ)OX20zdBr36uR5IrE{LU!95^Xak`I36C<=Xgt$@pD92y;lz5&Sn+QB;bTqY=Pq~ROUn$O} zhR$xB{Y1rGhTdB>z0%g%R+u_o8QR$TE|w8@9RYLNGi=`@vlixW97~T~qufMYaFhHC zALWc3^jo)Xou8SR@i-vt2Tjf0R6Oly$rybXH@$Tj<&=3-+OBz@98eQI#uLzr$2bcLx6h z{{2@72)_Tswi}E7GhXgu!c0i1J7D&5ln%a$!9k$Lj=VE{>dt3u@3Oml6rd(!OnMcR(P;QF#2|B*_ zm(h4laIDZ4e(w5w%CMIlH%|b}K5*(QyKMJc3UYNSny9< z{q1|}MV|e6gOF!fSk=as#6GidDg83kY~2F8`tt>~CUyDy8!jI>a+)+;7x?qYDVcJ^ zx8j|`v|s!6$^gu8)P6u9erY{>?Ie7q6^_o3pMR@00M41$>TZv)pO60eU;f1+<9Yvf z(0F)wj6j@u=)}%yuKJR=2oY3F|}J}vb=e2ea*~b zs(-!rDmmxgfB8!Dii(OP6%`YcY_B5MVaa^k8&lP%ttkAK-Sa!9)utFgaMUR#_ zs>XFr3U~$mS6>WyQu*JW6#8F133iR=*H3R#+1S|BqXIA`2xhE?(cza8Q^p_`iM}>>!SxJ1Fu0(hhn`{I_dqv|tQMqIiE>K zXvFpzTL0nqpKs#dFL5y6VdQsum>6kmD``u9P}s-yRsF*48`u=~ zpXdI~j^HjH?f@XlCIC?8rJjG-^*{uxUg?k(aa!Bci9D0=%aHcGYa4`7>qIBpW8KRs z&MsRoo{Bgx@Dm3_iL&I23HyPzrxOZq6V44?n+axcM@*h9=(Oe^>n?Q3JbBdQ;Lrd4 z-?0SgY{6{oJRL7@ZfmLV$TBNWH!bzHo433;#C9Q+sHUct3V9=rJ0e?S&!@*Pw{xYP z=KcA^sACY>JU;7rB>(D{Xy0NZX(c*C0v6aM+5wvAbYOez<|&NVCppB zIMu7B=1%~#k#M@Fd7nzc+1p%h_MU$xtS9!u3%7BW;wAhau7V$5DBoZJ#7Tuf629|> z!J=c0$*P~CC2bp@fBpJ(PvQP1O4JBsVIQwEf$oU%u+#}5S`{k%5isS+@TpSb0Y873 zQs#!%n4}RXOscQGNPGJ9rS-c?ZKPKU0^P6Cj0S@Hv+d~^e_-m6B;Omfvsqs*Na_gb1i2S; z^F(YS9Gxo*+KH(+@Z-D^Y)=0G_Q+0(lKb)Vpjv_*`3m(sX|W$UYunSa+w_HC#3QFy zfP!!d=%%{D1p;kLDwdY67?*fbAU}4|!{IK$DUtU7=BO`*L#sg$Gey`QCot|T0N0yt z*x?hT^N6sc_YpHWX&3-^oKb_Ma0&eF(iy0$W$c3fj5aD>bP)v9jv%Bi@cnW@CgJ@8 zJY6fIp5^Bn{JTRexP`m1-_riTt4%UJjhhv|TPz}lhPU_jXI@RE-jcR&6)y58){UwV zS0dc^AL4^>xp-?tcUdjZ`4rsd_I}?io*!{Gyiw(}TdIfG;_-z3 z!@UlGMk1nLZrAzP9RBmveIh71Z6q)k-F^ljy)^a5x|ZkI1i0y&VLYTHe$ND*_AqYJ zG+GtigDVBKvLp1LkqF5!YR{l}3%NUaJiNd`QHo6mzoWHKJ(SqBI{kvvr=@lMYR%)7 zng^J>nCDcFN0N5;*t3~mF8}(HFMA%eG9wUo^71ow0%HmT5_03px8>dwU34hat*4n+ z1{SYSGf#z`nw!tI7uC-^By&al8nW|o++y>G9vGkf5#fe33Fx2OOt6NP;jWEqsRi4f zPV^+X`f+UL3PK&=)mrc~IICu>x){Ux$=(Uzc{C%U?X+>L#3@il=33Xsrbf=OgGNsa zzjc?C{R5<)AGj^+XFU1&68{H^RXR5i@6&fAOq$Lo<2-JCw6$M35G=3?TnAO!vnu02 zilTMN?OdU1jN%6sBFNH{_`y)athPAY)=@j}>C>lqYPWbGTKm4a_XCu_q%?*7{H#y6 zl}tAi>>0kxw0ttjZbV8xbR|E|dd&?7C`M&LF|lFx@U$7&6}37)UZ;i(g)ar?F5Mb7 zUNAC>LOt8eFa3`BZGyk&-O*oe#{pLE+9fd9Fj2|_qSd)gMGH#z<{h9#zrd7hgaG-7 zFo?@fWxr`p^_L36d5HbklcMuRnC+y5tnVZH|JCvIk`ku&Z#<3LJ;X#3p)vB}CG1OEV4K zKn^;348PAui|U=)jqOYtfx#e-zpdlAu@h0%*f@t9k^|}N57&Q&4KX%GbTulzbuw?y_cDbHAwC87dIdM>1Ka?2j zZU!ZTeArQ)Zx1GpW5Tb`>%8#+VGk6tZ;JC?#TU)usO1nF+ zOXv3XD{5D@?|Z!=vgXgz5aqy9yqz1L+N2!1uGlyLbY7jH=D(0;rft;(@XshsGrngv z`)fn^kIPT*ZEK%QABUq+I9(3M=&K zrhXmFcxR@m+*og^PuN6dJJ)7{v|~r(W-y)*6&C(pu3G1WRI1_nA?*+vQ@Mp)hgX&+ z{(yF0Y~Jwv+*V1G?5zcVY{I=~B0XhD0C`aC>~+0Q5qM(#{>kV{`01Y38PM`YMC_LD z+BFYz{yJd(nu#+<;C)!O+z+tx~zxS|ni*;K;4l|J`so!Vo+ zzWx=$AA33F!{esVKf_+6k`p^&vw%X6bLFzPPv`;7CnGCrCezMVTe%TSx(R@49^spI zpX>m;oVlF9(0%Xt0Z1+!{LBKTpWN-B|QP+3rK<_!R@o zzzA8(*>7R}XI75#~@eOr#xzNk_w#0K(H}#l~0q&LGvy6Rwf!Hv$N{GR-*t4zX z=0CWQ1o8)m9|zbNw?u}^oQm`BX0$dG%^CozwK2J%H$M{>8qkGS_TQONk`@&=b2 zM!q-7#a{M1ou;4WtO=B`I#!GfD?pdx1e~Z*Ik4Uif-16&r5)N1Ayv7t$^Od93@9D^ zC@p(wv^N$w85gQ+x7L06CGvOB9RSFc50aOr<#H0~u}fo8RqU0;CxKZVNwzvVI+Sxc zfQjBj`^;6HUd7}W4NXfQ-r1vArsZnvdZNtSSH8@p$3J!K4*v^*ZGpgKOn))bx^SE9 zHmdq_>$C#A7KF#hN6G62k&3Jp5HhZo63bcfEf-vu6%tnIW5wl+=`oTR2^@beW-=cni z^bZYDVn6qzQZFw#LRlwBAFikM$cW|)JWVh2YuZ%9=Q&US7C@P`6GkJ*^7rTb^PkcYg3oGR@NYcT=l@GA!~f%{fRp@xJk=JZ|By5NJJb1p;HmyP 
z=Lr1XPf*UPw%7F!a`+!HIV{qXV!Da&FCq0+B{VBAGEgrT7ZVm1{Qn`p_n*DDpAglS z_5VZE@Bg--kYzRgH=GzC3H;vzd71qmVAGNxXaD{$YK6=mj`$4orbKW1M**crkP`VX1r6`m)H5jpH5&|w;0fX3v9KUJ}vvWrX_w{(ZIWg5e{&u5;}85Q((kYXnhq%K;vF*Gsi` zgs`#ey0nlq_~RrjRqV@2hkCBe)Jt2x+wLQaxBxf-f0QAT_WH75R0~2*GQW9z`R6O= z&sY5S|F8oo1pl4RWjXo-!<>4;XA1w1QYu^9CGA;oG|d5%2V!-x_gGtf0>E|DvO{TT z(nwduZ)J)sP4ICkrX#;*DIg{Z|2@p?6Xt7WRu26G%^A&OfK_gKb&J;+gh~jBGikyJ zl+GC7p`qd7ZbWeh1^4KR>kQebBwMeyN5xt22vIDh7RDOY$r%I%B_JqBYsX(jK^W0&QV6j@akXq2dilN5I+In`1M0nErFA(1 z^byKU(W3fYpwt!N++Qp5gOEw~!pjXH3V-Tx0@?;AU~HjBoz%j5sOVnt zIbLYO17)TxH#j)8j~}SSq4dly4@EyN!FNoB@tS3mG~)C|?u?Od zDmOv8)_$E+z&T#dN1APl`f=ySllJ~WhHPdCY&YF9jTFiYoVwe-ou_^o*3y<-FwTP) z9lD-V6VHWM%Odh&&6NvVci&C2txne`+! zXYU{A1cm3rFUfSSYIS{`Szsda06h1`_rug*3lLgSTr-j4H>iD(tv4K!OFeXMG4U$t zZd+^fM0a5!$fxx!b^ynO@Ft1&IJ7KluLrFThXvU!UseDED`+uVE^-5B30u*dg*sIl)NJK4VC%~!kJ%0 z;)Swmw$|s7z#exvgM`?}8u~drtDXhrW%meIMfcAq-gKxhAMeSx}@GdxaIxEkoZZb7b!i#Rv(gs9r<|?Ov+(hdde>s^2GuHP8i|*A39k z`T`)>xW{_~R5Ytd{#9MAGEa)v^lu1U&XDwC$vr*k7{UKJwaxHf9i4yvcS)J3bc2OB zFA%AFU7q{emQy@G@u98#Z9Z@$VK;sh1_D=*9tTI-2oA#OoiPQp1MAdU5?mu) zmdcZCBWG}RL)*ADV(^37Y#5MwFTM$8N1@dFy$+X6hjj1e$`CW( z9d>RvtbkT~5Ky(np#zeQMjhYyi_Ugy?pB zcCWTtFzDNuU<*;`uG|!`PAm7axtLaYcloeaPC$X!?-{wjWE_wy$`tME)iv+U+?9)c z*q&px&3}&R)XS7D*3*$p-IAjnokY&E9TNS#FnDYe;q|VmnvE5D(BkKsuKp0;dH9Oc zzBJ~{O0ovkAmLVpg%Oi_{6S|%i+B{WHhH^Ztey` zrvTUyAMtouuV)GYOoQR~@rd|E?;GcD>S#r;C~XC7m?X`tM!2QA<67GFQk zu#*t7KoSjc{}?`CE?NzkQ+}+4uExAKqm|;k08QU+H4do|&4#KIViU}wHNDKbKu2>} zU)dAPSv;o@U1%(|o+>gz#iUjEC5;_g)QMMa0=~5flkRu86Pj;=++0eb#p@|Gjd~FX z|FfS2wii`-jMRXQm&Gxg_v)VrbMGAC3*5Z@N>J*&;P>)WtJO2BVl4szGGx;u)(P?_ zzl^oMb;-3m<_|4R6in<^%eDXW{{0{OTpUG?9chwzmp7UyreWIm#oyVn{Yght#vj7l z=Mk8E5sA;C5i5BAS7PCX#ayb3;qz~;(>)hom+P0T4EP3C4mf})5ibw`o)_atVG(ZS z9(*37BPYcRiXWOMhPYeYOXg>5XehJq-@$RM1Q2w)5>2^wogzBQbGpCcz3}3)~@q>trTCkmi{s>--H^O>?~!6CA!tUjj#e0ixZa z!p7M)S)YESy67W-SnJ9`6WRoFdqS59T_Yu9@!2RTu4GH%q?3s)ZdlYKK5;TdAr3W) zFs%h}bi3gHo$AxX20AdN)VsjQWuV#{#jzVqQr@pZ8`m+7s*HxRCFrf?W@FL1fkRzhK%mQtkO)+U`w_NP{j;I(#vC#RS2jZlYk#6E%aQfjh;sIVX2{_ z*l+ajp!I_zURp8TI)mER=hbdH*(15DgtQWKSDpLPZHMUPvV||6ij&|rK3{=4PsNZF zIKKukC~1Iy5L|2e<_olT3Cjc&3&^}}N7%n_Q28hkLKonyKEcxpdyG$lc{?)2B#jA? 
zK&D0sO;i$1RFA3*H$010SOHJ3FY$~-SAqYH2#(6tWa82i^Hw_mcU+q>SesQGm8gjI zOIBfY1bk4NQbBfR?1UYS(UPRXl_pZQjB_*go+%9nHVv9>Fm+Utl0px>%OATBOO3Nz zbZ!n3eg`}o@G+1a42t8v}H|)H4htAvVnAsdlMoF zV*E3E^JnI62TU1d(|cF{G3@d*1?f2UJ50HRprj_wAKac|f7V|&?7~Gy(8^TW+R(2c zk#Iohiv}N2FqXyO8i`E&9F=@s|w0>-Sr9At&Rq`|8TKMFBsfP@m(8r z`4K7(8N#K4-Q&zgbc5QCBmj*Hy2FZwV-T;l@VyG zo_`YqS9UcGAvi9j-8OdiMeQKkSJ@T;nnOWp?=}f@%ar{v@iP6=O+UV9%c6GcCTD{lbNaMW4g|rK!8Vp zLe_(|2-mNyw6r@UqSGts;awL1$>Q-DSDFI`(#YLPMoTtq73b>@xnoU-bqumyf81Gm z;8@^b_2$Ou07*lrL`^=;Te#JhiI8O%{kT{!SPQoVj#@1E{;uQU00K^^@9a3}qi3~%Nt#T#dki>Ox8unE_R6<( znH^=Zdmq$=wLQk&_+0zLXG|ZB3-on&3*DxYPF80yj$4u1F9h{{79XknY;K9(od_zj z@FI@{&emJg{YZ@{#SkBOj!)Ri5*|c-ecF*^xfKCkC8WvS-WAZAp7Uj(>Ob~qzTWbR zMMKq}d>Ohdpl!yMY=%$D@9EdX?8RV?>Rs8 zVi!X7r=M&mIBElZ9)^fM;ZugZTwe0*b=5}f+6w-SIs!@-4VgU?wQjsM zVDtltSeB~7z0i(3oWL|k$==@H90h|CnnL%yK^1iw$WIOu!Pz_=z+1G+q%0)N5t!A_ zIqs9LV$dZ(eX}V#;zGRtIZme5{QDc%(A~Qz0_(Ty{4&Tdg}Mr z=uFA<1=a5bz%YkbQmGiwweL{MawG$3`?nlJPg?)?OqB~{ONrzG4vvmWVA5o6(m{C+ z)W)8?`3x7rBLHMuHp1I1B>tbu@^l}$Ft^ubanDfLj@^qAR!FQQ7F?s61GLt*Nf&?3 zwQ8mn^M2H9VvI8L)@_mSZpuCY8jY(B7~(%*Z6s6xUFy3q^!w-l=8^^DN5a*LJNA$= z9SF<10Leod+U$<&%d@dJp-OQysN)j2T6gB2lb6>oPy)KwOJ9TK*(bUsNzNRp?dx`5 z+et~%H_2rn+3FZ4sB-?qI+cLG9PH&&wT8MikFz6)$KDN?T1F9t@+CPrej_6z4Bd+ygHOU( zVzYU?Cqc))ivnKS0?cB|Otg7rms^4F+e8~UoWEf98;g2$HkYg2wY#$8_Q|XAb~m5B z?u+AUI-I=d2N4te$I`s;ELcj`WW`lnznceNtbJ1>H~ULpIyNZQ7DT%e7v58;>9JJK z=~m({GT&L2V!W**{xx^^WGr{)!I2N@h(?nWs9$`4%PBfwi&7$Hc~L`IjIfQ7Z7S@2P2_pUjHlt`bnza1?)fJ z%e229kBAhgc}M#CaI7sXL`42GN)nK|->;^d4yhF&`+x_M2HCDha$5!Rxr127J8}5}oD)=DJB8r*_LjRldc~1feL9J zPd*Pb-fM3Hae!A-lQOJM*7i3|* za_6{4BbF*17ezKb77X&=k9?7sRTxV{ttkiHHsX|HpE6^Mr@!dejmxeh6MIMe!l$2kVwg##FA8g7yLyxQ zXd)*2q!T5f+@W?W5vDlV#I@}5t!0drzq3b2vD1c!*SFEOzrQ~`@3C$|aXZIEQN)PH zp-PNj#DMD_7wwHO!u?OQC5{2Xw4*XVNa}$$$DpSXP;xrg?C}<3?KT`A0?t?6LMPuc5nK9@wP0(r>E{bU>(R}u*Weu1|fTktxv<>brS5rLg)|jp>x1Ij%}nqERNh$Xz3O_Sa2$fTSNk8 zXG0+HiztmP%3|x~+8V2{T}%s;Ejl$U>fEug1+GW#v4CV+W9Wd8h4ibWeyeG!C8!cr(PZ zI6qs;uFh+;Y?HCVMLI*U|p;sTkXq&VZtttYBhXZR7jb|aSi zP8-gu69Rkh9C%HPf84IiXpQvX_Ww}b&S+Yqwc5NC>YwB`CttKff}Gf<;wZ5qCFJ;$ zV@2W;twL>il7Z}uZZDl1_vreFk53U3TAB9s6_@)2%U5l8V#T#b5dHZrA$o`v7Z0*Wxy!i+qoC{i z95J~@r40mMWT%`{EpvcVOC^&496ZEL;%&SYY!Quy2QUsG=8w_9Dm*)VTo{ec%Ci2T7Q?lq!BFJXkdRODTtaxTrUEw~r4*2zFR z$B!AGCFWlOTAlJBhe>T@;!!>9^dVq5U4sCCRSW?p{%Id_h7{9`HQiJHHTwKduZQv{ zmw=yc#?I1UZ+UG5abst$AfPShY0j!xMit*r%g*-AkV~?PiV4y4uox}th(!YT-p4M> zh3~HSmd&*Y!@(J7Vb|@B*+yMpYyHM0;6$xbTlXbVc+aP>WTl~P#l^%srdZvsH8wMQ zx85;cMK$qgbGF0;_s51{-n)+Egm>-9tsZ%qO_{5$GcApRePy?qQdjw#yORg;mByd> z7wbHit4{=IO>m`l9P|EK_;k_03j1;Q8FP9(`b#UKrWGg%Qw3TJsC!!W4)FkhL_%Y8s&TIU`ZBiz-qE#b-o@;f;8m)9qa2SbS5TGDe z-$Fq&JdtHk>)s`yYBwU`aV+tEk<~%#zLj-+y|@*>?)D3}jkVnbUdJ9l1`fOp z+V;Nud?rHR^kM@dR${MPfOCFQ#FHTpFi2el76Oz4ngYG{-Jr(I)*tsu(bVzJAd)gwazUyM+Zs zN%Y0KP??;~?#VgN_lDt`koxb8YBCU%WGn)jh=_=QIAcRCL&hqKiB^=yBJGaopR3rQ zO+>FC4mE%G5H4}xCTOy$1;raD9Y!rx*{j=_Mkz�%-_>_JO;`TIryUq|Bz}w8-U3 z;PJYUU$qKHG;B1@&dv@b{ZRepNKA`NbXiT&Jwd8{!_}CG`AH$}20IV)`a9#f!@v(w zl)lII^Qpdo6lS?(s*g7-;#P_{6WE~&B8bZ6vnhEA+V5FT+6b9A1?JNr$w@c1>>%q; z))MxNJJsD5gEj=Z2$tnmVncnl&Xl15+w7mj#!BLV`_E+tK(iTqBW1O}jiI)JLIHr8 zv~#@p%*hCoyN_w-Uju67$JRH8@kw&?#os#RDp^2P@`6N{aap7AE#O6!`c<8_9*7wB z1Q+ypPc?WPPK-X>KycEQ^=RUe%Is2)Swkem+Bb8j;jI8Z4iemRZq9O3Mj(&wnZ0TV zJa)~V9UjXyc%j@+iUN}@)T)a&jC5O$5_DW1t@d<4REZtM51Y8facyD^sNBje{KRJK zUbp2kUXnV`Ey*QrYc!Q?Cf-?mADw7J1UOvt_V@L50rP#D;0%+)Kx@}}zpp%&v9$)h z5U&2%k+Qapy7WW(b;hM^5i}CB*>D0#OGf9r=>HXYzkm`w6pk05LEwp5^ffb`%{BR| zfvVW|Hy_r`7H_~zzeGo1LttyMun*!_BbFQp@do~(C_^_VA(JEFe3Ic!_B&jL3E(p4 zR|sWyFcIO&G2oJNY0!ylfZ-wbR|8jL; 
zpIBv6O7~E4L;zS3eZuYEu9}nyx)xOy7A$#A_S~6zk|d)Xs#*qCUY$@;USFZ3l1+ap zuJ!&-L5cgFo$}>d-3Rd*YsRuHF?4=gmRB&ysgu#}I0-Mlg-5{r>8oyE>bMtm6F4=# zcj5X_bfuL6bwy)?A$wZB&i2J^eLtB(g^~O1x59Ed%lVEL8y3uA*in9#J8t)8->b?B z1eqfe>C4TO=B9BRHhDmkaOH=^LlUJZVC_`Thr*R%MxB^aG#_Zx=Wy2HK<{KCOk+;| z?b$V=jjHEogB~aNB*3nq7M?rtawahHrQYuHaII_q#6)7@_~hBCqrNVVaN2f2n7x^N zp)rA*bK4Rv$GA%A_Q1YDR=Rsqc%?A?{z7W-2U?7~e4n~9=AH=viTv5Ruk~?r1G<3E z#Iy6a{$jO%5oNU7nf}I?iuuGi{u{r&?H93HV*hCDiu=#8I2Z4%-!p+c077$a+0@m6 z$Ko5TbB0u_Lr&aNXvM}k@IinLB43LGl|MYG=(!a>0TechKVlP~f4#naGH(|yWFXrvv7yPMi9OT9?M1MQGa$gS;tLR(N}imY z5R7her5CF4$@X$_b>G!ytcncrNvyHz64=(Pr_L%U85=UvoBqdp)r}R+W+Kae*+BWX z*T|Csyhc>dui#o8LC9-&_dAf*w78};W8@CW4QS-OQiJFG*7_k(4l_KfcL3@DV#leD>X3)Rb9$AnbFRT4dPg=zA|G$NAJRMwG{IC=bMyvi&aIq z`RY9OTb%GbH)orad2%c&#w_`B9w;B$((R_+@&QPP+`LE@ZAI#mMnA~V%MY#f+? zid$M*ZiCi}d**FL2QbMKdPm}*-c&aBs_qfQ?(H*8G1d3vs3EcIsbW?zF}unLb3nE9 z#po!p==)6)O%L*)c40EsuLrOx3)*ln3a)WWIiJM*+a7P)txmrYZMsFW?y$eWba|-I zp?+XM^{_kGT?7{w7i?(UfH_ncs$)5~(q?w0M^TB>8cb2c_|MJ}EAF{FU>j~}<@WCG zEf}os>6c5>KB=G=()R-?l>4yr>{xEv;E~`~Oy|&Sywhxi+5%V+n+mOigpe)j{>4wg z1_Gt=DW{(X4c%Af2p>8{+uL#3fDd04SP+!(E#IfM=XyQs@I$xVE3f@_ieN0f=#_~? zh4saHUkuFZpHE_oJ~P&pWVkh^G5(N2^Fu*FW_PN%7P?ZZL#ug$u!71*TFdzC(awA% zJ$73w%Z`qq)aF)B&=Mt1#4h6C)9h@EQ?@N@1gnHSn&{cVoS-Q%#Ai1@yN>5VKt7n= zI><~tUkx$h?z5eNWmqvfFQ=|9xv=!2?H@&?BB4qmCiXCYe(f8y_6=BOy6(}YdCLhL zq;Vwh^OX07DEP&cX zkDoX7=7~Ip@$KJ#33Gc6IPBM!-VXfZT?9XHlir(b2H?-*BHh=i^IWf)fpq}`P03@yo5T@|pJswm0m#_HL01P7L?9GX zw;^Tu$o-@THe~h=4w{_9oam38E-Tl7fsqV&T`s8vS!seLrcl-04}|uCToaE$gI+Kq z+6HFln)k1WfaNY>1qmd ze(SMikl3i0JiotL7{3Plj52)da}|x&>)}0SJXrMM(E}IEufMI4z`+Caa2hf{XI{E- zT_)!}$^33T@?o;o->1Vp5I>HDWQ`~2u)=xATq-=9{yhfzLtq8%iq5a0T~=Vl>Dv-3*0%5+z9p`(RBL`qzr59~Si7UhCwI=bN_#2Z3-@-W;>y9Gq5vRkm&}lN zX>8-JSvbmGKe#CNDhb6MZ-3vez<>19by4n1W*#1b-9ebum6efOow6W%Jy~MK9q6cF zAsr!p#PBr26h0#&zaEe(((OqhBEuC(>2H_qY9LI51~YsF$oCcti;L*6g{kO3-2Hn*&d!HLz!#*y z{+8;mv%jYWkbMOaoE@>;zYd|VU6}u1NzZnFpUr7$al$Dzm*L#l)7ZMCQd%u%t}M@7 z7)Dp*S8gF8@}OFVBWcI2vZC&&RX^vXPS=q5(G}&Kk8}pjAsFgUxlC6(mnUMbymokG z=ofk6jJQIy$&LwNfJlqm6rG)JKwN%h@qJ&U$L<3-Qe3c-jMUDm7%B3QVB6$duzdK{ zLI&x1sR`!U8LA3DlRPolnYepxdV65;7Jo)9EIo-W!rtm=WAH<1l*fumzOaImAPWqy zgok-P8u6mQwbDsNCN9DU%O$(N6jX`6K85zzCz$XlXqmOSvh93hcK&$V=6k#omDmwMX0`=j&C=7mnx2em(eyZ>i zID9!dUEnS9{4MUUdZJ6{7iL^Sb`l;LIU+7~PAW`{E|#Ur`CpI|7Jtl_?5 zD_r}6m#P^)%9k$&@eS`NlD#(~%m+*nVu?Y59j*n|{dsTZ%pv7OOAh#Rs?j`yo9UaK zCxU+ChqEOzqECiFNeBEf_(^>}$Pjr&1ab!#Fz3M{pH@?tA;!d(U+B0AzKC(&-~qt2 z6m0xOV}6c*LFVDcyEGrw*^_W4KU#6amBVM7owxUt3hcI~Tk?-@uwZ6BA2fS3VXJrz z9Q1ubqc1anFR|!NWcBe%TM^uG!s*@lq?)VlK!)dVAvIlH3mW)d0M8J(a^y@#cXv=6 zw_urgnPlctyC(Y^zT{mOdg)N<{1v70v3GgBz(bMl_2VwE0^-6S|C}pFfzyh0`}t(+ zWhd5e?ea&=PrhhmG4bn2hj^ZCWvuEyu_@cDt!yk2oo;Rm!Be7 zOACsbv=guT5i=GDl13^Ax8jjTYI%eKo96tp_qkXl$39_MtG2eTzJp_T78q`0-xIg} zVd@o+_xhE}JYi~4W2X(EvpU_iM{M;l;>?nF4}0h_2h9!d-9?XftdsgdAVF1(INcD zdjj2LQZ~UgS{!?qy>HQ0GZG25K~^jAJIld2w9Uh|F9smZU&LdZ@ky}%y2P}b400p| z6vr;$i6!>P`$h^nb9D$z+*Sr&d0K*n`oD$^VP}_7FleZnOWv8;<^OiDKEc?c%=BF(-~)k8!*e!BmM9W|i(x%E`+OQ0y+)kayn0#X8%j(Zf38B23tqS3V?Q!GNAL4&o zcliEhec5j3Ev8-Q4eNKnAj9;f`@(N4%Xxk?c7Rq70&k+S=pNeV+pSwy66{&^O52^* zA{>h&^3Of_5_Z$HE7<{GR_kvZ-3>AvTp{I1tmibsNsSu5X8A*ZsTIuJYSvH) zuKJTDD~blQdaG%I(y!-9^+@(CV&`!JA>&8eGul2Cejjf+!8bX3#OW$Dt?sDE%ZZtz z)hfpAFFNj`^!PN^r3?*}8#!sDx5qGNPUc-YM!ACK^Gc~9cBMq!2Z@ui*?CtN%Wsv; zKX6~07=Bs+k)|=u4McL>4aB&@*2y2mcgK6bVL@Xv}*-^ zr=3NV0ZoHV_9czdnj{x&Kby3#s|_1lkS{}Dp78)>U)dtChCJZi z3=2+erR`+{jP27E|JBe%GQgOVM}6S6gX9%6)9flA@?pNQ2OY`w|Mw8=#|Pv=8mJ4D zvOhQuacOe+j?qLDrn3{dA{9nYxN+zh?7;B~y_!F3K<$L^pa0UWh2r5Nu=44YufPoB(~F?QHrJVimCIHu>K}PM 
zi)!{VbL2XcTk8d-02Po;INRf=Q6B04BKgRrEEE~FO@oyJV0R)@$^VGA+8E(PE6=r| zVv_glphsH^`Hf-93hTY53!YpH*1SqhWL(vC7Zif zXA=|7Z8@jMGYHM5{T9?b&x{+D|swHif0)M z)+Q9pyV78624rdG;^TH`#5J3ILM92)>WsD%F0Cfm5oYE1sJxzJi$g!T}6|`b|AZ{8uwT_IWaT z$oM05m>!VV;&PV^=FGR1s%*%}muw;{W@L%12atOq&fG_`;6h+EN|N2P z=WzwaA8dzA{Vd%^H4F`F^{16{_|G(-t*&5ppfkpIl)Mc3L5D4+aid)=1GO;50xsZj zrHt7A<_hl&J76}vtIiUGWDfCQZU_2+|GT6GX^IJy_aVw_z1=@iUe~1niMTGU%PJZB zr*gY(iWmXIN`XtKKI_w3PC*b^5!v?k{;`U=;rMup9U zw?}HGHI4+7#I!S8J!7`+fP!zvToTuyl2a0AlPU%P>eDmQfPBU!uD275-}wkS(}Iee zS>Brhk=`F%Wf=$DJGX!tD3`=B635ZGL-%c(6ldAXy^vx!O9b~m_A7{4#h9`HDiIVy@)?R7R`Q&S zj|sQatqDsnSv25#S4o2jlr!XOwQmuX)6zvK{nu&b548D0UprNlRiAx0j5pP=R^mvc>QEtFZ2i_Fx!)jzbN@czVX_a@&R{tFKdIFIQJoc_4T-#rSTTH|H&`Iw zw??RJ6N7rkXeH;qtcaT;jADl#$9_bapi(r#u57P0*C8!oa%Y`N!>Fxp;gy1ml$w<3 z&{h+xpJR!_qzh4&ML{7XzNt!NqtngCLFjBvJLuRdxsh?!DAoxp5Xikv)Y+|WO|NS& zfTFGbrd?IVpr*!iEub%U;QjXe#}oI71DJ^Im2tbM!T6YBI!@yan&80%A2B-c!dynr z9S&x2*r_zM)Y-9`J#OD*>nXJh)v{M4=!^nutSMUY1$8a=Cgc0WYC@Cj@eFn;$*z?% zSBhKSTBt3sVWu11)kzjuB-}c#jACkc)>&+No}0_RrloAtWipn7R&)c(jKEI%f0r2{ zL80+aLE+8bzj6Ih3(l{2*ZyBc0h~FY%-!lPZW?-9bp)nCm14BGN9L`zPpilzXPpZkXCkYbpJ^jl z;@#94Dgjj)fqdwcxKw5I#^~*{Be14i0YNM9p%NT^Z4rGEF z>tNpG;MH>hI?wC>dY(6rI&Qm*)PPdsms>rGKvx@EK9SBPj-ZJkuOp9_zdlwbAEH zJCCqh3f)L2*<*(Bev9?*vq6@s7MRfK3U|k9M9I&V3=b%4M1Q-{tL@$phd8wY8}M+~ zq($K4)!oD4eU5LF3dasVDhQO-0)@<=!6ZF3Onlb2CEa{mp{#N3j<&GE-ORb=iUGp$ zW9H5qT=Gf0cr<(E7Nv*Y2f@7sdWZ8>VCm8U)v5sSWDP3#;b5JC2PJTbr5!n!H!yUt~={F8Q4N!0Qjt;1etJ)q{}+lhCNGR*HwV zWjn1vkf&idTW#m7tpXiwCwG}zZOB`A&L_k(gyLO?R7J2G1Awb9_-X=D{4OPs@tD=q z`ky)97Jr{hR%hV_e{rQ6X*d+fZbLdAKRE*g2BP3H-L#E*`Ky{G{St! zt3u4z;dqoqq)#qjj=4-Y`KvhT800c$QoFMoM;&F0rcWe*DooGyf2e!&c&OX&f4mfl zggZ=;tyP4I$i9Wrf*52CWgp7Ejx8jSSY?_b1eK#lA2il&MAXEA_Zyc52ly+A}OHQu-=$GOXI=0FzXwPq!KrbWv?sVqm zZ=H)Ls!)ikL*CskSN3_FT1oEhSZNAKx4ik`vtdC?A^!WJ9NYfqhOBsY$Jrt63+`6g z7)|E@4N*FLjya`pDDbNU7QYsDrt2M?c;#od)-sO`Bh%Y`-~A6#xkILqu}KWx2#b2g znt!Pof@|B3LYbn$CO$qUCxv#Wm@EG#5H#kw?r0&u!bCxqS%4#| z+<5(A@mhCQrk1jzA!H)>79YC>rbX6NoFClnlTO@XIiJ~C`VFdyCH44SduKXhF zWiFV5LO5moExT-L;u)xXd?Y2%FbBW>^q`)EI~gbRw7V^mdo5g>H|;*F>D^mdXC1U% z?QXLAHZa+nnUtUtYMjODO5+`0=x!W`sCv7EkQ+!XZrAJH4dmd$ET$|n-l@~oOnf5Y zJ+;qg6LD{0;hYg`QJy_DLP{$)8xKk+(inZ`9GRt0;T8Kz|CatTC9#nCMe}p=!$~>^t_mYj-sG zzx>RDUcc@fn!*tirGwuX?q)p+`I(Y^wO5`cIP=iQ9E=vV8;VCARzRm1NT&?-a%`n) zz$}S<5{%aFz19DzaIxlYSIPVXyyJ`Mj*_&(OC@ zndrwpc~bSv;qw`m61o>Z7wp%hN`~i+GDL~6@fe4ccpt`3gRlJb?;OXROEd*V);2I3 z7nF)NE$ml`gmK<{?M+IETl$_o{zJ)4fP+iePKy>kUuha zWZ)i{btY)oeCf;OJy6jj+@*{c4V@AY`T4L*4Hn`d0<47=n1`t@+?eN=_~Fp6*{fF< z*R&OKD=n<`Bvvk=B)swmAKk1nAG(xiXO!0vq-&nq?WPm<=ke8#2n_Fjs(LEr{e*=Wv7_;LYNW$T0)#kiw*^bzm z>ip=Sq@>of$d;uMUJH}LR(RL)ia=3vU}dTIe&xp>`1yw0Cp@mp2sho}17-YN*^5b_ z*ugy<9EqceR}a=p5nvQ3kpXKw+q(3>l$)RM#p4^jUw{!2B5eTRQ}SAv7xO&V z?CY2OdgL{q{0>Ey8`)nKp7P=Ew)OX41^UiMA{8~oxo(P8G)=?nvoS3(U_@+}S@@}{ zF)}gg{)8DEsK?>qpBQ?1biQnUQ~GOflNt5?L_ zuP35xku~O$5aCx6F&7W?x_+yMbybTooWt2Z8yE|Dbm= zlJz%)r0pcHPVV3E^R|T~HcKP1)lb1o7;`;)p+NT_?X}Cs(({SiKtUITFnXu0{g6Y=@GeQ>_ox0}MRx{pL3}HfO;M z%AS68a3=u4DXkhx08;$V1JDhiHR7~t=YCs!Qz9=$)yW{-?YjMnCq;%mArCgc}sWwmjau9F_2ScnWQo59J1C*)@5P4T@{(lqW15mc4*eK%W-koTrbKAKr z?V_3j0t)fr2l0t}-OYVT_-@e$UoTrec#G>TFi{s26zq`($gXP)3=GtAmT^4vZLTwY z3Wa*1l$oe?D(Qed($&+WQc_X^h^>|%67NMdCH9LrSvYi*P)0A$&Sn51PtfKSx5l5S zjQ|n_rd`!>;!c*sO+)GQgjlO;istp;3a1mm$Xyn>q*4Nw#Rf=S?RjQ)cXHPM%N`Ep zyGNFJE-Wm*`R@N)8jJiBQ6uFaV~hDIw*h;|Gf-3U^aMYx0@JmejDW~vg#{@kI?Rf& zlAaEJn4oeuhM>&+0(%wq^H{EU$K}- z6tVPxw>HU$;A9XEu5~th0#a`uoa6D^MMX>b;~pz5jq;SI?4XeKL+MUE6?gi$fZso& zIA3*SGS{m^+35OQl z+{+}tYd81<$xnSr4&7j3p2qz9FKDO(6g2sR9~nr6MOv~q(mL>J{rQr2YLDC@r4DEE%a* zk*Q!#T-Xw`<@ 
zId3|)w=mxv6Hha2E?HaR9x>(ZgnqUZ`=fkXCKKCY+Hf3-Ezz0ws?o}sAaay37W+Tv z8xe$Q$adyKLaBs}=*m8A>%gHcC!S^z!;RZ8n^SJ2;?2X?A zV;*eo^5!4#-2tg^$j4IF?4A(Xy~+cn_bfwmCU~fM^II+{V)Fx+VoOhDa4BgNcfH#f zCvuIsogwK7eTdO7e>JWP-eI81Mz;3BsyxS~r$2@|XvT}&Y%>`WFR$2J2ma`py_J)rw z`itHFW4tPZp}~^SbNw%m*G;omxe?bI|Jw$4`DgD^Xb)8>Olda5MFdo>TAzI{7-Fzx zv%CD)fEf}w&eu!-dZuD&)%zFsouPdYjf?#vlSv%fAMA>$==tPz-D9wV$%JcZSwELl zGd!z$5iwHkgZ#^i6f3OE48>dTIjxB?DA}#h{$r)cV(Y_r_`y4;Y1&y8^{u3h&cWFm z?>ByB+uDD5o?7svYbs6W)sgzb$Cy|ITbT`;73dTV$0aN%w&oU)n&gmw@duX{mmAoq zdKpZQjZcBwD$r0Dx97In{75X)d(po6FNUWBlLuq2{}*HHKX*@b^l)VX>*cHp){phd z+>hs0Uq*{MSQ%X%OKWr@o*ZJ6s&%*x7O>3lBkz_+ptx)kP3@`t)-&v6dAyzn|1UUOy`}~b&ja02p&FtM>7JQd&Kf&*dv$hx@;|@Iwg!e?3_v?2DJlN{7q1DzKN#@b zh0Rju4oFR&_&29!%YpvmqJg-q5B4k5BZL74JOhDN&HM;v{KCQQ|L_JFSQp`)-hZ(E zST^ur2?+@XV1Ot-KXo$BGTA!WXR%)loqxx&z_{l6&yDqFzukM9070=C(1C|6Fp8?c z#G*vQu0+_OK>Bb2@yq4+FEhE*C1aUQ^=-%fYTl~kbq6L5fc8ayPWzq)= z6}Kr)ERRLBT358n{T!@b8upsNtj>uDS-tAfw~YfD{9=|EMXt-ew?C~l1#Vk@+xgvY z8)EL?N5ci5SZ=$&fBZyR9t~x1)T}a57Pd-CN<%s_?V3BNrD`A``HhghKUC19qeGFnx{m)I*jp5y9xq-6z?twY;pTJS4C)|P`POi z9Bx|?unYq)u1mEB6ZCcJq#1VzOcxxL=DV8Zl-W*`G?rM*hUEci$JVOJ`|RIIwX|7C zM;V2_Nx07F5jR!|3F9#)!-<~woPLRJpPGR5Ty$xC|GgNa*JEjJEfTn+#E%>fu^BPO zlBe&I2c25ua`P)2TF5cAYjsWW7aIqG_`w7a*gj9dtmaR3=f?Q7IQXs_6n*&P+GR+b zlbjZF&mdsKPXI{;QH1YC#Z-IRq+fRgb|xJ#=??2}x2&QIr8X6=89-9CP^RE%@i9WUPb;o2NuCP{L4d&%e^qEIAA2DfVbk;ON7;yV6XmVtC z;JPTuFF*hcP<9IugF}}Mzixe2CF@mCF%iG$ZUR(+Zmdc8F2Cn)QsIEzg|9T4c*Z~( zj{_!`mlK$L91HAB*1_tA$sg;}CObxj`Nb=UmrFK ztpQT%p`Iz9#l69RqX{rt_M%qi_48nIvOnx}1p&|Yd*bNSXV&dZawEbr1P0vUM*>l% z{JxNDf+a6Xx=QYjjJBL7tt>u0o%{)&l~!BZc(y2%aK`m0j49oeYReP($p^@v<5N>^ zzu!v~x+`Hnc!^`U>*~gB%cSUgI!}mTT*vF7_QyiBfJCM_0nAzqjExsLjqr9{tRUZ? zR?PC81B?kX;g-9W%U{CzA8r6rmk7Xng{fblNCB+>NNaw`t&&&%HCV+|2NnA0;o*qi zUa5o+WRq+L(LParvXIGCY>$BhtjEOHhX}M>;sAOvmcp1_Blp&dNzIiX*O+B$qh15F zzdo8HJ`l@q)j%!{doEu{8gE7f_pfH~{&HTtwAG0k>3IR!zh)rCWBxVe#VVn8gvQ;i&vc&%kmNT=fkI+{z1;L-o&zdA zUM-Br_Xo3c+{6Ia*9=sBOp4yo349Z~{H=X?er$GWKUqkzzDO|@{EzSoFr%QUW@*@V zPkM+@rPa41A2B1}B|q^NGEOwbnUDCoNKfy3d-Xk#5waUv>bK-xJRYA3c-dCd?Z_Q~b!;xqVx%+?gL zUAno5^)jG!S+%aZ)Leh-%qkrx>4K5~Z*9lSx;}Xd&e@o>Z%S2zEZqhYg4N>}N^%s# zJj2Ru-Sx|48M_QLietsxfjVH*R2IU1p3QW*dy>AxDG5HPA5WP z62nmLlV%a{OXXnBOaeIbWnuB258H5pM&3Hqd<=V)o#M1pZmHns--%Dg_E4*>e`;sI zODI7+0bfqVOQA2!tqi4?PNx}P&p}GO7i8pbDMN z;@>6j|M5}h*j(tCj_uh+f}lb>_rO!HZeHYQVgMiA<$}VG>@YEXQ&#F;((!O=h~cYd zXBC!HNWx@yLab~87nuGG3rL-F$o1qW`ndfBr5>zJo&KX~SSZq}&$vzov-YDg(dqr< zAo={`{OLiUv((>Stnf0`$ZgOgGB_8+LYb#H1-g`6e2uI~4&KcB(nPHJOvK03ZpR^y zNVr}mabx*DFeNO1VLR@ZW!=Z6qU;K$pUf*jO}X7icXP10bb@Pa`=XAol1p=6Z^)B( z2s-iU)i^5`gCs((<>3)GN%Da#?hP*YQGZH`(a+jJbDM6bx3K(Znthm4Y5C!pAI_2a2tH^c%5Sn%(3olQ3Q$SJ-at*z5+CiTt^eH z`s$9$+dp_Sb79W;xtY=|Ryw~J=xQEG@v z%nZlEo^v&pRRmf(iF)tV*|KK)s#%NL&IfV_Ga0-q|8(=?qg1q-*8*~&rtdu&g7J`0 z)GlUD=hOoG-mZ)!xOJ9kQL~t>6YohhGFveT!}uA+MH02h~jldLHW{3I9Qqx#s9t(%I!J_{&9grN0DR@1q|YUV$3yrq-4sfti%ao0Zl zl(*K8zjxAU7_g^pUHk7r95fb20~y5z?25u>-|`k;c5%tcU)4UN8eR5-TG>UD2#CJL zP_RJu`w$?MWen`)XLqQxk8my^lvap(0hr@$5r}vrDqZ) zKS6Ju(~#>4(1mgr4pdSp}fqD_fX%TZcwooD8B&B=^P0L39Zm2#zXI@$Wo}bgce59#&|9tsuoRd z52@@t$iQ!wlKjDWINSE{NF$v~pzjJuPEF-;sKu#z1JH)=lJY~5Dn@x>OuA=h z9u(pfB*VL9$)-jfNbccmPb+ZUdkehOMZEMiSPrj=hBj+7ltpEzv!1@-1k51ghCnF; z%Lgc}=txEOuq_;@Da5BJJ?@~{AC_;tcVTHBOcAARAptSn0m5Nzm~J45(C`8RPt+rD zRjuEMbwQW(q%uSyV;^lr8Xk>EkfTX@{P?EhvyAFRT16#e-t@-MSAI_qc|6Yxj0Vm+ z11B9es(*1ttBFE8RGrd|Z=dt)LMK*=bt35Kz4Q4JqC$EvyOYtji(EXYBJ(LVd0S2QB(#QT+Mj#PDqp~p6J+tT-2P@PVq}e zPEtbb`)b?#3X8=6iw;LyQ|v*hnLAZ+2aL)QAVom;c~VBn4M9&{qrPppOY|;MSVB7k z?9SZlq7&50B?qDH5`bHHNU1i(1=_jnt)R)-Nts3Sb!ng1Ttya|TnuzL2hIr<1g@#d 
z-_p<~hY(EzMyBvtn`2^Rhhag!7#H*Qckkg&3sOd0Z^&Xue->W`ceEVHRkR>r&wP z_0h~>nwnnz*xXtOM!{5>)P}6BubfcgSbl+(6Pl8~f}5dJKTX|ATDp~D9dxx@HYj;t zjxGcBv|)kwRfTq46Q6FHAW|SdwX!Yjip`#tK_DSiD*i}uDM5_(+LTueqwF%zJP}A# z$T`T3HE1fOfM*azACu=7y3Fx#01O`7ZnFeYE|{cYQuXpe$lxSW_~61lf7utw_Z98k zi0vzCoGj4ZALLOKFBeNUq@DZ<*^tHVxj193W8?x&Gl`!{HTOo$T>n$P;t%o_Nvm&? z1Ik9mB*j)!>!xF zopW!F@3EC)kS&Vz!Zi3`Go1t_K$hKU_54kEB+pBP}U_Crv zBz$0Tt%iYGxpwWxHK)mx$Jho#x=)t}#N7z)59M+D2f>}{V@(0hAQv8*LclWD4`)vGTLe9NJb;ibxyABy$Y7`YqNUZO2WK{aVh0TbzTzemP0B@tlC3 z%LiuFSRf_WwE+v8E8|Dwee|OLAid{hC|1zU9$R|e5(uJ#k;66e*<935T_cz1^=c&g z%!KqLq;@$-bKE{jHwc_2ho3;L<2>0E3v`|P<~j4+3>?isB?P{-tV}2I_~95w$2(aH-p8IuwJy#QT#PdpQlKA^_00?4M$kOFyU*KK7Kk7 zAJp|R8kdCmBE))=@k2ecrzZ!k?6+z-{*_E1zCfA{b)|B+1Mk5{W84rf5A>sVbtmx! zn#e==U6_rAZsH%i+JC23Mo?0L$x5({Lw0Wmzzy!bG*Gc#d{-mPhq{*&pMm1+HXl`# z3(&p@rowB-;3~_-U|3Ou9B8x>>r1D=fN=cnv`CGt%0lfDk@fo>5Ef_&1bR=CW05d2 zf=^IB%bXYS$cB z$3y0(&4yo@zIPV4U@S=>5%m7^UxkIg;Nd^M+T+ z^TUE=gJ!v19|BV=Kc$7()2Z>Fxa|;FeJ?vdM)&qTRt`F-hBm?g%6biQkyHVF%gTT% zMTJBM8GeG6Eus#b=(Perex@!pD=Okcb{?p+&mLISx1%@G&~80JcZ^NslE^wkbPk6* z=cT9L2GT=_=JnwhQLY&TTkYGux;|G%6wc75GX(-$`8;fI6JL*pYveO27y3GkDQH3i zZq-Rr4~v7^Sy|5DF%4!aUr=?LbsGcusohGT-V?o9vU4TtcG0?AHl%*B%-lalSkW4soA?;-G`;!@88tW#?gYJ!^mvJ zgRUZZv@aJ3CvHMypux)yg^-d$@J_FX9kf@B_rM~Zzdg#DlGcz@lL5I`QSXb8m7A)B zd*o*-PEH~)CJPI?opO&qrLtS&Cc{k^j90<~8#d2%#Y3_s3nO$+L|4%V87pRwJ`io|T&lF(Dc(DPl zxTLDB049H9KS(fgCh}I3+JmTEsCBO_YavWahg=Of?DM=XG5QH%v&(3q3n6Ykli*_& z<4~QG@60e?K>f4~k_l}-!ww>l7&RN~AL#mEL;Oc?kRumGle$V=*F;D__tjkKX{F!K z0^jwq-x9WI}_dQ4{jp&rM2P))p zWzz(hD4!%ig%thVSWN(z>lU4#wpHDs3dm}zx`%SmOA2z58uttz-Stm*x^4nwt~ah5 zzb?dyEDgCK7~Uxdrjj+oHY^1AP?1J8jC2-fXx-oKKi&8B$b$$V1JHZ33X~&fN6fH1 zLS2RRz+I%A%$+7GG0JpJJ~jdRl)5c;?cXh1_O=FEA={xW-8f4huC|Dc1rKB-H`0U6 z2$~j!mB*z@npA*rd#F{pKJ9c$`VH0<^@aj-65M=JW;oQCS-K@j(|Qc# z?>)J?1!B1vQ23gm*0@1StvMk5lzDsi8EC=o`>WiAIPUFI^5a+g#zBz);#j;@{)|^w z-v2VBx`T1|SfDT;sDu=-Q_GRwS9u3E0YwBEuR<-zJgax2lv_UfYza_U#0pKhnCf?j z;;l90b%t=9;Qsft3e9cGf2H#Csf51BZUDG|>3|y12b@)^VoHSck005OwP*o|QHfDFS|GV)yYX269 z#+`ML)rs#rdS8AxI+rg%F%_h`_QM|29yCej_@gRcct$9x2I|ffD9Kv+%15@&O(Zy! 
zLi@Y9BTcP5+Ip$tBejy71$9jfy%MBZxX`fx96k5AjYu=?7`)ggr ziA_oKVIC1`e-390E{w^rTCRYS78jxpqdG-=3zl@)Lf`wLVhY*659FE-0>PE7dr){Z z_t#0M;^9@Bb)x(&j!8ppIsWbniw-0zHD3L#?RpZDA*vXjcuJ(EpqkWUFp=ogGEeiI zUZ!5VfP+qBx&|S(+#c(-wZ*%rW)FdeTMi%l!C?HmVbF)NtvWR`Qd^6rN@Gm4Z!2(x z9w~Ms46B$dtLEBUaTy)e_R}_!&j-ceMf&4QgW=jBaaABkW-4%FozDx(W(WIYkR3=9 zwqCC#kq6vdzZ+HZvvk0chDy=BGFs6*M#Z0X_m8)$a+mN*7$x5M#M)+=mYJD3gUcSw z`N%g)te4w3@8xeYG@qlN#9xexSH&m{u>-05R26?MqYu4|G)8ySAt$@*Ok^IhK{>CO zmRTvYA~N2U(_y)M7TOn&4WM%Kk>!WXs)7& znJ1I>DI?HCpAvt#bQaeQupqaMSHR^zn`V z9-rq$V_nZfgCSG??c?@Q2_ljJ}20);ff zIHh{dmX8LMUMmC(@Hsm{!AGw<(n3SKv2&lz`VSWhtc(0AS2(>pDDE8^q-6sc!nv$w z-NL8RR`YQp+qbe3fWIl$F(`#EU4vA1F_bAjxf%#hF9n574x>KHPplec*v>5VR?Z_H z>8PiHGz_m11q*R4q_Zjc2fA-egW?TmT-B@*^&#Qv?}|mBeKruXOy@oIILKbZO4C%^ z<&008!oG_3i@vL%?S(bo%LN*9UorgU$3%Q?Qj~MiDMA{j7|7E3zLo9;3Xn%G%z8s^ zRbN1#<^m=2O*(C&*DaHUb)erLjB3cGswQ(Our^e>&c>GyR3H4qW99?MjL0`tSVvht z-C$?g+2^fm#aaO?hk*=`8X>*+8A-MWh=b$Kw`^O?7( zL48N7sE*Cx@OnAlRFzrKUOIDqd$|DGlzlu& z@GFJD4^_1f8M6@3cLCtUi+tRBR%McMA2|D#a&Pdx;X_q!WkY5QlkM-LY~%SoF+lci zv2dbSy2!G%G{VJSlZgMSjRjdHYqzp!?<@Tj{sY!Vs5>YHh}Xhf#c7M}#w=bL+|g=^ zo!4h@lyZPDp3m+N_Ac}=1)cya8?x{{%N8Fo58rLlON=Le4G}Iy?C%zIv(kV#kqxsW zV;))c36PKaJaY1va*@$x43v$?FYZwD5T==ahgM|4S3-w!@OnBLLc_MIUDqLTtjQXz zybP7+^0@DV(L=SNEfxad{0DOA5p>L5{c=Rq!ct%u!_N8Z}2J!=4MrNrUUQ)kvd zX`vt%1aC?uj_%NR7nuS})HKZJF~T`K$M#@+zSCg_5cT?mhJ`A*kWSavc~Xqwhs_n> z%j%{I)Tqr{!=%_!TK69ppNUy1bQtCV22F49Pvf=boK<33d4>yY|~#h@pk|QdpjJQHM3kWyH&;0-Re| z(BQX3tP}p00%|*H6B9kBTark6p;E^LDO_fw+FAL z5eT2n7Va8Enxx`&&GCQ8Lo6?5d-;QEU;GHrGn)V-DPm@mWoMDuJ!!%X-A4ZWTHscA zyb5mQ-WH=+`6<(cW5=|#SVxnXKs#cyM+5^!p?lq9jC+em4nzwg^KqFS=MGrJ%T(3 zpcQkr0>zcJ48{KH4Snwv=!S=ZzGbFmYw|{%v^f#z?w)>5(VreLf5fK1;m&g;Xr~(c z3KY@~KyQuHAnu+g<~F*mH|RoYwfEN<1*r!w2^6NNw2QX22d zZi8dk0oGB+Jh!lUWv}*`E0FjIpn+O6>^Xf;Zh$X4n|{5L&l~8R#@i27jvKIa%20tt zvw-%#^<5j|ZeGD55DXV=)<97pwDOEjNdy>arkQqimGH zJ1o!mY=D^Y9bBM1jWP%T1<-jEC|1|msX?0(x@fSw-GopLByx)Rf}u8n!?U2B(P`oK zTVN#5Kh<>4=Z#&J+a+kBA-sp2z%vc(MSN8jO>ov$UI6HY5;;3<^=Ny5=G|NNr+kL_ zHzv)cPzA!`@~pu}{(hPM?Mb+I{J^WpwRw-!&eWwbVjXuqjsY!T?QL~rPGWulpp4~A zB0B#bKv+tq?y9R0as)T-T#LR}rU1Mmu~=_L*LmiW?)8fy_}(jSj8Orp`kep{W&v6N zLa#orl6p4bA^F9n@RMXXXUB4$AeH9J#(#T`GuUjn$bne5<(rrd&0GYXU%^UXRr0 zD9+uwCKz3GU1rqfoUm2@?RvDWpV`Ln1bwhg9EynqKpkDpjC&K0mjU;3E`_QI1dQ4o zGRFWYXr?(GTW7d)MU@9w?ZrulqIdR^^@jJNp))J(5EE}x0TcdW4!QaT7a2lnaZliV zdpV^tptiAOz}boFsBBZD@5g(C2~S1!CE>HwYAK6U81w?#}GiAc-2D@v5!*67SFBe#`$Y#Xp5AqjnI*meG{J-Me+k- zZW{X-MO><<(WIgCR7Q+zk%fbG;92FxaL=Qf)H=GK#iZUksUiZ?|E|nUGu;MTwJlcx|8R>ETUZ0Bg( z70#VS`t=oZ5kNTsrmC;aqL8?HFai7~1EI04Zt3p56wLm*Q0NYn`G5X`?cbaYqX5N$ zdqew>7tRk=x)d3eSKLXfV!cK=#>YcjxDlr%1wG|tP1YWG54TlDeSG}#OY)&wLD!9C zBI#1tz2%pUm$uwLAUN()(5$65->R$sn>VqcU?YDcOphyQs;~aHaAyjVw7Tw*SV+={ z+un@A10#+{w(%@HqYxf2xzP0N_F`{Gy#I-umWR=UYSC|+34q%a`?B-4g7xheRoUr4 z>;=nDbLVi_KmL~OA82$^YLeHkT?1&-J>WHo|Ltp1P~GvOIVI`{*}cj#%?OoGB4PL3 z#lkdq{PJ#KN4pQJ{0Cszu3`}GuM^oQ2zdAi^NWucyc_RrBYFM&O#G19BSD2bzxp4) zDxtv5fh=H|*u{&Qnq8e6oAjwimYs)pk_#r#WMTI+75xu3;RaDjdU`bL{`d$bO#)WF|;UEaJ<49hY$Ae|cQC;O4=p$&3FjJ{%kZ zKnJ>C^~)^22;TiuIol9iq5cI4_K&Mi;`x`=cVbEZ@3{lFG(`vJ{+0v>b`kRB{C}{E zGPGb90l)A!a_uPf|AkSUx?wZhnZ@{jbe4XbEHev9^#9%qaWT{Yy<34*eudN97+!2QhM2`$?EOw-&S2J8s2ae-sn=JjaBm#Lfm7N9lpT zI4Ctr!xEgY(0|<9XIM`|2guj5nziS_1*h=g=UkM0N%>)OyxZxwybUu(d{ug$D!jjbBsN#i4_gA)8ctBz$aRe$@dwj)&Y#K1O5Vp4B% z+&s^e3KP`gwv2*bp9cs{Sq4G{zfpe+=TievOxp-R(1u9Z}m?Kkqsie{GGMmhAFiJ!6DC@vGW{IbP$@JF#cL2y5GJvQQTL%(r9S`IxQWN)btmMzgc ze%Cub`%=)2 zZpP1h92KY4ZM{+?!RpO< zXOjfxqzN$rSt0Tz=aI6!bPuIzHFj>Jt-geYh+cU+nPwYzV#DPUW~!C*;iw!wPle3aR)6zCYCdoc{5T}{$Vjpo z0Q$%x@jkSnZN!I)XPl2>#HhR9=zo~29c|I 
z%ZuM`&aSC)R-7~b)bnwvtY%uu{GFXsYgffEG~75pLBj4XBca&)cqbO)J#D&WgW3`D zIjxLpHEU)WV*^?<#Lnc}8eB+_y{fnGKou|tdREKr^32YXBQJpna-`rFozFJv?-C+g zDL%Xd3Co$#6SRD5Ka{)}ZBu)D>pt_4`q#QVUJ5<)N4E1%wGr9yW83a|+u zzq0b)Cx6F`{ey+XeWY8Wv_sfgm%IqI%q{Bv_&=|SE*{2aW#I%#l-6EQ&^dR@$^M0UNH<?aEY9apHv=Q+H0q-Jdj#i=3Rf zuBWZEK;?5L9$T^W>EY$pPY;!28Xl@#ET6Ye${0Ra#{UJ`7^!Xsqt=s%CBDCR_vp}Z zMQMe0eTkR-jK3E)n_+j0x`Z4$?08(wUyWaXtKbJnsM+9FHnnrkke$?^Vah=S(BA}r zmuXm-8yXM=6P!3Pa|>Y>L7UI$at3q(N)rHA`dL2i=kqDgjp>_no^b!`y+ngfk~gfj zp-<}fbMUG75w%*>LnMr2A%9(CzLMXV;U1to0$93iSB`(X{pkXQja2DInCquf;njfq zQ+SbsppZEMCQ*r?(Zm2+EGk*XD;|&)d+bWObzoF!tujlAN95quL(2fp)wTczEv!ks zW83sFU14AiE3NUBeeLewd9cxwi52LH?*nooowDSX%k?o-Dg)=!3EmS6XwQVXA$XLL za>wF5%dZEw&X}KkGwq3_pCCqvG&hC+)oHPUoTXVbSup`jev@0uH4j}nY0&~gVi^ybZX+0bZ;tfdj{xpIXz_Yg!h^4Tz-Mb*Dh3X}-Iw9fJp7?p*4((oI_Jc(5th zj*bqLPj}#Vv!Tq5j-@u!C^KW-ZI+8gdx0nlViJ`0?JJ z_Lz-f?ahEh7JT=|?_w7Z;&;6AUezUZ3JleDlobp|a4-JVN2m#d^uNcvmxyAC?{^Rx z@g~N}t$kq}-ZcSOeN6xk`~nt7fnepmZs6~)p9`nT5Sqzh8UiHC!4qByly(S(0;-jD z^CE?OdmBJm9g_zJ2kolXdOjVGkiKwOyV>IpMjN$JK&?$!0eG1vcH2DDQh=YhIV4W4 zP2qB<$y1uH_KN^r6wr$R@7l7??|wRVvptNo0oNX}46Li6)s5()$sp+Fo3@W`6Otv* zYT${1w}flo@4ATbQ(Gzkv2^}@;Lwg7z;D;#PsaqJrHJVV9T#pRbyj5)Fd@zfG>--> z`T-lNIqI5Ff96vz0Gt*vefMx?Ia){m^9&$fQ{I{XIZ83-5ugDN<^fWzeN;^|grzx7 zVi9Dq2htC6zZB8P2`;4^8z?uJ!V6H>E_C+5tf{rmAE%r^`Td|m*W;=o3jb2J_pyd% zNVTtFW8`|C zLYDM?IXZTWT3M=tjngCFwK%Pn(X8%Nv6~Fw`k|+=CAN0f(i99WUJI;g<=Z;7sy6nK zWJm7t49q!@r{5|l&}#*YWVZBp<+p_+$EY%7<@iWHLRgFdYG$bulpNyzn;Y%!Mi5Z&xt#K>$X%ioX<;nG?9< znp*ITae!s5VtKJ_&@n`l&z=HdQRQ^e7Moe_GmnU)z+4t)`a3sCw19Siq>N8&0>g6d zf^L;eQM(F9|G1&skc-OO?u?iE4SdC@RrV~($kqwqW{Q^f9XNa^(6`2u^t!R=`0=0C z6!(hFpD76iw7<4V2_S(KaN8DLa?FVhuDgnmkZl4yT$rBDw8ouMlUlgN^7k*w)cZd0n^sDv)0#F z#Qpmn3L}>Y1~Df9nn_P~RDOY4UQYo|1~5h@z}UcZCh(PvcH7IzU}#b)OrR{RviTvp zS)^6kTov}}EsZh6P1@>I$vObsz;H|Jr=L@Z88_q4n?GXkX)qc{7; zEp?MsHAc^Dzn95R&^({*7v>D~g4M zZ)x~?)m&xcB*T45+pdJG%&FC>OmY|i`QWP>1x7Nuur=Ut`bU>3pvEl9WOCOy{bcM1 zaSD+{#q?y!dw{TrPC8i`KC(6;J^?7v=NM%cB4y_8Qb-E}xK9_mTkNCS5O&%{aDm^bHiUD zXe3;gJZbkvtCQurR-YZ9@W1NRhHCPjr(R?E5qgkXlRvd^)TTuaf=HPtqW|{IHKB?R zk9r(Wt$mk|<^g^DRlBxUadq#+09$|@E{YR_>3iQ=oVUqETa@-&B+QI?yRmd>?b3bU z3SSiAuz?ph}&NjX!ev`+LTL`vnH2RnR-?rjpd>S+fVgc;KP*88lT5 zXqnb5t)M(nOvA#yD~&U2bdxzDuGEwoXETKsC-QTRXqhDt={;A0!v?<> zeSnYI1b8D>QT*W)R>EA2*eK}apQlWdb~9K=E&*{7d(PPzjKZVeYT zw)tjQzEVMR6#+BwNV!O4?Y=6jqD%Dwc2PddD8*iV*Z+^bHxGnzf8WPXN(-q@nG{*t zHCZZK6r!}rZpfY_`zVZMY*AE%%0bAItj{p^eJdrhZ(|>n>|3($-}|ZPyg#S&`M&@8 z{q_6)bIz17^E}V%UatGPuG^~9;&$m-`4{%U!x^tVi~7+FO5O{H=C0~&jkMz@VYajV zrSl)ClW~F5wKl?M&(BvG@a*9m+jK>E7oEe?-f2wM)!RSQq6-}p=hU1;T!0Rjxr4oj zqf_gI%@>E{r)G~)FrsA6X#eNG*_y1CRg5TBcpE#Fj&-ViLH+`_EB2(;T|JITYX%vEocuk7bs}n^Dl#{-SX4-;kH@3gC074o0;#l9OVi~Y+ti);|=+F^aI&tzQoVpsnPGO=^t;Dq9iPW>kJ+W|IbGZKzmX< z{GfuBkePr_6^PZaCQ#Ve8I1wH63vmkvx&_%oZ+ifU*HK{*O5APiEFq66oBD-t8FW2*Sq9%DAHdSd=5N zw(`nt5Jmd-(jGp)5LFgJJbCOZsV2sih?oC*{gbAeNl=k9-69pFa@Mz%6W@8MNw_Lk;rAWl@q$kr;HmY6TZ<4SdWWMf#!v` z94I)|7LIsi452f(l7EWxv%$n_Syl>|J*CL_Y?Q)~3F|b3L_0xFflUR~Lt^FKNc~6* zBSXS@7lK=gzy;C>_Wfd0;WDK)Hid-EySOQRuEikCtn-~_v#3bO0-D7{VX!%sX|amN z?(sDje${ufz>x;P;@Fi*p8|n}j>$mk3(pUeg3?nXtpyAVpVW-Zbsh@3A6+GuOKpB% zye=Zn@7oURoaxn|?~9px@WT5Sva|Z?d!%VJX`1-_o%`QZ3)Vfz zNRfIsBiCsN^Ms~-*3G(t*W(bU!BbkV(om5er$f{ycmViz9hFUcn~OFv!V>bJJTv_I z=%9C!-{ukDn=6Dvob%AB8Dt7{ux4v`_aOKH7L=jkzFGo1~5)DWkBA< zB3|QfX^-HFzd|kL=#VmVm!Fe)aCh#(1%A-bv3N9;`A$>J?9Xx{#;*ryG(X9W`0^^B z0nL=9ezat)d$L|htFhm-M6WpQB%`9T%@^cr>>pu8rF5($Y$Q^9@qW~(-IdHME@iND zt?{Kr*|Cde@l%je@EuZ8>q8KS;|r4R{SzRnVx1YyzEXdNnK2(EAWvMIkzVAFv}zeY zC+IJA?H$;FdfQ^B3F_!`V0aQXY@11)!@riOMLkDU{HH{Zf^NB0W`v1dn>cWI0-Ofg 
zyj89fz_TEIhTvdCWkxH;2#U!_X@xW0g`xawe)^DT9fg8W(MmWTT9^4?kxtt7Sz4VH z#mye>npXwH&=Z{ebR-K9qBCtr$}35VI;ED96Mx-YkQ};s_9yC~{P5i3B1al!uF9lN zUDj~viFw$$Jgsl(diza2EvD@_-1}>0vWD)^?E+`FyB-fK8xk^du&O;JsKvom>}=d0 zhTth?hPzy14+S1GZKiVm9q8RMU*q(ZDHn0YC)w&OAs?}ef~b>vv$fqC?lU+8?Zs`S zII)7>;Oz;-HIeRyJEEA;5=(Qj-DzQ@7rOPdA?3>%Pxmu3vi|JbTF)jNTwqLiLj=E| zHYCDK<>}s6;W0E}>o;xV@BZLbUiFzKay+BD>S^}tq@elMEo@__O;ljvpLn|$05W&X2dh2#nRohPgwfx=v=M{G-SIY%;JPYqfsm{l5 zV>grN;*s}*ER2V6;mYUJK9k#;fZN)7BXcbKN_Pavwb?~v9nPyu)Ygbhc31$#sTEhU zo`mc{%CeDdq!~8aXFt?~PT)*1|5|EN`^Ryu?YlVEry*;lJ!+!~QP;0mXP~WVeD{ql zKf6Zxc=QfUF8!FlZ`IjXp^uI?sde?vGDHrT4A zD?#Wf0Dfg4bPdh2_0C9547nIA1od*qR9?vViBK3OQ46ZR>Z+I7^?M~02(QQ^LFNT> z6_D!Lv>dA(-7ZjYnaKT7u-p)?f6iveP9e;L(*8+KaFaMLu^}8wn$H9-Ss?%Y2-cRP zxb--kJi%loMtmtw=0x*|2`ZYdFaU=sz}@h<{x%aO<^+e>V0-lHeSU}XkWrJm;r^S5 z?d`D1cPQP`yAzenR>g53*hu${s;`uA$8aGjPxl|&jPxCq8M_RJ%B=eA#I@YVba!V4 zDdoA@bR$)uH&xU`;VeX?n|^?FTS#hUwSg9i+w z{o0uh5X5g>$!^a0nR4~ zJ|43bN2^3|PWU?DO3!2HE!_KJwm#p7K8~~XnPbjCbg*d>?yV$_k%8e=c>k57A-C=q zzUnJ3d5CCX9YH>*(ThB}(3+^|xTltz8TAR^8bshivc) zKm=x)*7;hG#6N|q-Tu5%xsa?9l{x(NE&YpK7iZ!pD{n2Vc1kYF;BqYE$47Cy>lB1N zO8-w(2#dnVL)j6rh$KpJMfxW4#UC%&~oYS=Ycp?u_Q5SdC$DO(==g zxa&6gdBXal{3NK$1fh4zOL&ko*Bg9Eh+?m^UjoNkA;h_X_{oha2=K#TkP^4NX#tZF zi|Jb0?e%2zQd(f7P;e3iMa^A!ch?A1gzDv*jQWbH>3IpbR@>}S9;s6;o)BM|^RXL@ zi)lB#XqE~Fg!=4EmPdh!X`F(*K|VpC6Y%o-G$=!kFf7<%X4UrGH9UydhNLZq3cA9bG`hqPO@wE1^qE)~nW@o23S2QFL&Lk$qx`P;^iYw_2&3I*o z&v}Vjw9D&vi@2InAUMUtC8#KokJ$+esYawnSsA6!edw z8Rg?O-_VBmyU!pB3O`{zBgn5&H4BgL?Q7qoH@E9*dlU}lbHb(>*586 zMdYINR^VA)5#-IEe4cQ@7Nk~oOufkMXf1`bgA0*}2iTt^P_^B<8J|x;42f;!QqpZI z*u72o%MR2%p|k#ecW+gQ30Mhez9#qKLR8EmY9g>{+t|)F0~S+gWOaPJp(w3o(*%uD%}|6VPs)BZ z+5vvl^)F*#&Nk~RW*eqrVSXr4rirX&HZwu@D30`P_UT2`R`#&QUU`>FTc*0!ZG_qc zj)>?u+Q%PRSDY-l zaCFmHbs%2}YMN<1-(UN>Qe-H;{&t7=uy1Q$d}>vSL?vMU4I8)i0fzgMz&UJ>HXs>F znd7G{?-$v?lbp<;ygY0m&ew*|O$h$Wasd#~7M{L`TlYTa>u@u`WU0K7$J4cAZAqxt zQmR&5-`Qi;_v>ZPRHBAM@gSUnh3hBHdaZ|)bHPT;rDL=YN`Tv_QA?Yi=15DTPN87= zxYuPmSL5Z#0Ra5wriWmTm$?p~V30eK!H4z%qrF>2mPY}|Ght3>Y*mB8Zv)Oy$>k=l z!>e!_CDoI&dzm%%;`|=Yz%*)jnrY`n1{@>geuGN13J4&8zORj^H!?ugd=@c9eucEZ@q~ zUdCL8e7rF1bOhJ(1fQEjR%!ai+@L>=dP@L=$=T6c!3ifch-0~^<24yfo? 
zp@A2bKxbsT27<3&5amMdOH~dVz?rP`mX%^vD6(z!wE3PkSu z5o1K#gl@y}1`y=Wv?2u}9heG=1GC6+&iMu7K&7PY>Dg#Hm63v7<|>fLfIsj)@&R8$%aF z7n~zu7XBub#{XM=H%xU}knmcMh#;L_;xc4zYQ5PW%pKUSweYcUsI0* zif-x4Rd#PozaM=(E9R^5Nv#?ZG0wFBFQ4nZPxw^KsW?fb^|sAR!xiNo#S%Z3$$hkR zo_nEcP?xQnbdac(oR-Gj!Ff%A$So~6K~LWAy!gT}pGn*^B$9*_SlFC5pp}{;o`%OE zr7dQLKHY~VhuLate=<#*(MG9s)BIPR;H=w1_;~S^KC21&lm#qp*b9?l(cP2#Hlleg z6rG1DnWQHEqgwdq8Iy%p!wiqdn;9FYd=hr^(mY*9HD32>VcZC>Upv=5aUn)(tfbFV z+`F1ZUA1nu<`kp45>q3ya-h855rNs>6_{&OiZ73E2hW#s8NrtK9X>1Q@hex}{1q_n zGIvG{4GvR38;(bCq81?g%cu*FJ{}u=VhBc+_*@s{hnevp1W$nE8oGcP+60IeykV{}Ple$;LDmM79d`e3DNtJ6%lXL+cFpua3 z1h15*e8zzwm#8PZ(1VR0+(I;4`OH*!;=3!@oLk2gh?sk?)PrFUJv9-96x`j@SLkMp zYlakE6x?{H=9Oj!fyn`SrI@03s9HDhl>XJ})3wQhhBX`uA}IKkFc@qtuRPS49M_bG zkboql`qbV_pfSnsp8aJ1D}}9ayUtNjvyt2*1`q#PU;NL5wc!+6`W?B}gp^q#lGhWV zef{$@y@z`*gO{A%Ub^zQHcQ+It5+R0^ZY)&lyH39nH(ovzp|es?IoO48kkVO!UdQ* zT$;WDgMgzT=B8o-Mp?PLxm)GA=Il`9@lusT`%;*s=kKiOQz9;J$sh7PW0D z$5*`oR9BvR`^ZvHsGvmFztDaouuJWiHf9y0`k$%&-Z?dt+CS^bP=Ps9nCSLHRz^?H zxrdoefXa= zuYZ1|akz(n+XcW@zecm6+l&%gqO-GG++81TV>R~Iw7AlS$;#n!h7Vc^?15pvhuhil z7rmG<*Xh=_%ml$DQn4IL9$EcV|9UmS_5$61^NZhhFnCR0vV|s4uNC!j&aj`GHAN2P zKn)b^CD>YOHPR{|7Px9yE59DDm`V7bEwuk5=Kto8E)nR>EA{wpkDrM9q4tvAEl*nu zldO8w!}_A`Gi}(#37uRZSl`r6j&+L9eZ2Nl*W`l0QUY~KWyy#CLh6!+Zp5jys;9O# zG+dBU*lzl-w5-qJZ{6AUG+GD%AEakrl!p7?VzmfG2loTfAoadF^j>wdz-oW1e z>B{CpxBE00Y#z5As_W!;jTi_{xv+L2|BaU z$tl4-|N0YteE~wt+M1E*oGl)^4?+pT@1@U}T3LaDE#ISVL+&3KsiCFKH&*_yH`Qex zHE;P97%esbm1pW+`|JGt>#OhUSsFE(CW_N6zDFX33PC>J)r2HJh8ENR?HfZkv&{V5 z%#!vmlG*=uGmAn_=yoB8@9*e$S{U0OTgX-~;6HK~`UUyzkiR&!svidbkU;%ztJ(Ka z`s#iEb=UsmRqQ6zp~NHN)6XR=zf}PJUvC_smlEh{gk5MqR^a@07n0JZOJnhlg;+xD)F&5&JSZ)4jBItcT?io z_GBlzI{O=i?TJ-RslFk%gjyI{Dp&G(S_XQAEG>=%+K2sy#=3^?Bd+HR<RHxf8l+Ku0X=iF2s)=`77VXR}P33$6ySWiXhBbQz?SZ)$3? z#b*3zo_p;V&iV-5TU0==^j} zhdd2VOxvSOt>|Ck+xGwzFn7Ae{p;EK^@SGh9n6f}oScuo^z~_IYlrrH`EudiU(5IO znv+TwC(agi@vqo6-^nxcOZ$FRM>|6^HLrwyH*)INtem3&qR%YPYVGmA zU6N9~OU?HJ?tmAwzyVI?zWJB~UjpHZ2=gEL^)Vix@>LhzQ>$nGen7q#aZd=slY&cn{y!{!}#D6k+oe%YRY5?b@uja+t z+=pYw0XQ!kqIb%K0)>v%szze;k?iSD8**Xu3bwWuS8#p~kg^egJ(B@3K+$v6xouAm z{uCm+#P6&LAOfLH)J9g6IGLo9pdPijh*HtPD$Ayg6|NQq!I9b*;GxD#3jXdcKc$6e z@5ysB8Yk<0B2Z2hJyfo7(-gETy&G(ygp@OGc zdlK_fqGf{p?b%^3piJq;UuX8$tCVvPCe-EScYLg_j)^dQ>G|?sFQfa%y~p8l>hZE} z`oS?ccM&}9Doe{hNc^@+@7f^wT`T1cu2oON4PUYx{t<|(UY*>s_WW>M;Ap!aG1Qol z$}C{~;HBe7-||$vuI?)wY`*%!%27k!lBQ-3;B6j zeVtTGKYl!B#D~{byC0!Hg`K*0eHD^QQ4f!osD<@Q>XktPyHet~FjBt4yd$Vg?*{3(rY#n5{b9z14iY-#~55uYcp7pJ~Vi zdpx}Fo%r(XJC#Ijvrgxgd`m-WPP%|$%^L=Bs6bCv9}haMU&d$;2(CO&9_8vvu{%UDaC$ejEC)z;WTGkrtc{9R4;=zVgrH{?tsO7RF$mV5iHk z8pUV(0l63#-UH0#9WQ%mqCoo)>cuWz(0tas2;Hv$Z-4qCHAXVXAk;3JrLlYoHNC@a zp=`*1-{eV1haS6{cJsE=U4N)zCSak;-TM10oKNOKNAUhyqcnlHiwI=)7I0T$4Ov0A zV&!Kw(c6!#0R8ulOIbh7%2s~!j`k1UtR{q^Mz6rAfKj>^5DW8byQg`>yClJXhAK^@ zrRlKkW>^`Hd?@-5WysL7F|vJaMTd-ZR!Bau`*_-OEPAe*TUQtWCHvYZ3QP>@WSY=T zXZup0IDdh0duA{`=cD+?GGgL;(+7Y43~jL~H1precncSI5|`oN1P<)2Fd^a_0<?1bMX=)u;ziW#JN^zS80Nf2PT^)t; za<%^8&#FPG)%36m9Lu-68oauHc1?wjJ* z;&1yO?z@Sa_@FSVEa)~=Mbq@l{dumLs8&*$Gq!K||6WsTCWqU*%7CHuKLBGtm9~V_NeF8qAVbXCcx~oN($4RKUz1X8Erf` zn4?uKjh0cy|FDDPb5_6nC9dJY@D2vx_Kv>?dCp{EdA??f$ zZ4uB8otW|VlUqyWYZ>hi7vy_wI{#Yxg7c&D@^VYRdwDlr&m7B0`qXvSr@7GK&fb-? 
zD|hZ>ciG-)Npke#7d2(+5P4gDD_}c0aU3f34(q^QqW>K_9I#Yt&^y(6WqGtXndSu| zyVsQNne5BcwYXR4{ub&)oNu3F0>XnixJFq5CI`_4C$xlRymC>s&yjwAfJAZa&lY{`(BtX;RF-a`y zZir7+d8e`f8b<|a_bLnUHYQQ!;d?>DFItT-8`x>U!U_E^AzxPJjrKti#9ODQckrN z6s0dm^9)mZ@`JE}q2T(p+_LqzrOZ>*P3J($t@FXqL5v~4DdSppGE(SgAg_0bz8j=X zZPJffiHAI^#DWeWBAL3uf64OQ;HgRY7uN6S9yB7j^1>h%mCPV?7h+slEY^%z3HNF0 zD(;k5Q(kyKWSueO;RlV2F0wd7#T$@e*}Zt41BFU=z1v1*VZP?PfLTaJCkZ_R>Eqoc zx$?*dOu<>8ZRe|<4AXui2q(Fv5EhZ#b?&RVo1m8ZK|Re-7b2!iMY7>7*d82B{zAd$ zPpK%m9d|y?&8Z%0UtGxUzSdW6`D-^L?uO#qD*`KgvB6yneJ}L9@8W8w&I;+5{q=%( zrN8_fW-n~FRH|;kz6h@d&C!DQ}ku(W@c!HvY{xxK)Vj3OJyhKl0`L}#BM zof4=bEOS4ubw)Cp?j;fPHOy(=NHuO{EPrXG5S^lscoO>t0bgHU+3NnQM*Z6>&iiOT z-5+%;61Xkk;q(eWJL1fB>J!Fuv6{;>USlZOckxF0A=NmQ*U>#`ZH>s(*-{HZli)9{ zT)%j+Pq`%&cuV)j2MrV!A)h13VwBG~`)|A~`&5b#z(%1?S}=}e4jBPLJzcT6dUdegS*sg`h|_lixYA)FJrDQ)A@8jG_~R#o6z0yD`u^|;+Ud& zB>_;8PqE5nt0kqT?0be-#N`MJkizP*{i z&2BvpRSQoO#W9-3Z8;j>iK*EKPyQR#V#dMjEwEr3^wAv_@UnWC5M&>!4uhJ17 zTj=2@&WQkRlN=ZvYyyt7f&>=kM6>wt{`PE38NWy?)12LO+xnSf6SNj)m>SABlRoDWpZ*k3hweq#elB2!xZl5i7!p$lt?ei9xdG;Rx?WR1-sU z+0u7@8 z!s`la*)NNZ_I};I`jh4DX!CAF7Mhfo*{Ag)J1WD(J2!6IXNYddnUuGIy<+v{Q&j7= zN|c+~Kbf>H|JF2i=MqrcVf*k6)qyxbJ*+<-4rqoZ+lbDSm~bE`G_gNCR#icHf5l$D z+qN<4!>Mo|5Do&+m2%!^wvw%P976 zk|5y>dHi9HBW!3z=f^@TP}+OD3kbYAa6dwPWigf16}JRb;Dl&$_q(9Nh<@16ESdW_ zjL*P3e_=Ft8dc^OSoPo@i%(SWjVw;QE1WI7@s>u_Pr0p;DOiQ9Ij~L2@5)r}J=M4T z!?V8=2*4=X2+n>DMUNK3%ua;Qr42{zTsWue|Jc=-2eZ(*Zf>*~)L zNhpk!pBp4L&^)=w56PQ=}UMJ#%QqmFU zUqODUk1T7eEA7CwO%=rQgKunoK>Aga<0|oYs>xu6?y8|&olGRZ{so!+4LFx)L(#(b zk9Qp$bbTJ)-E%4R75U9g0Uyw$rFFy?NX-4O zeTLuflC$cI@VJK5F(<}Jl4#QCw#M4meoI5U!UEV@5vXGma z<2<8U9$y+ByG^#*4TSGrD1tyZ#-4cY9^yI*bysv3+QHmF#i?f!qoe~a9$(qM=fs-~ zkU8G$(~y_%LOz&$SgrXqE^1ME)zeB*#SwOSld2|k80hlI=<1}tk6j{9ep{uxW?Wx^4=&G$-SL)N* z>~3C|RQjCurtk zmcc~U0*8BjRF(e(U`9%qcmS$IGKD&9--(|Y_*CZ_0YHyUNhE_w3*6=tx7kH{0SXjl z?#~B@@)2mpeALiA7k6D60pL@wWm>w~YZQ5Q=4!In$yL`YB<~F*Pc1`*9pi}R;9N8; zlN+@gg!VrTB~83&0X(Lwfa~?w9+cV$C4o)FDIn)Rp;tDJ!1`pEcZ%J1QoD$XoO8Qe z>D!UFNGM%A(eJ{$g1py*vGb?=T0xj#cxm6=2P}=^#19R2+ksCCk{WWqm3dcU;c`ld z_56bL6r7-R`;LY@XhEB4WdW?xhnB8@607h|9VYQpSYom(;aE;cum*I{IT+lg-9elf-!l_Oe=)5G&CuUz{pPOoRV?VUN* z{o(zFiPa9wqp+8t{{avdg?C|-`-?czRl?&q zreSW446AZ%-CV_IAzO{s_-&ezD3QaV}b{ ziP#TBnOE{ledcsqj_-({hMUz4n9IoMy0IW5qS_cHL3(W#v`905>RG67!{pYv&Xwh+ z$r(M-`$WIfOVvjQd9PJ`sFsTtsj&qxF!JrCtjm8cN5nR!fFU5IMsYrm~t z_a(^Ac@~kuN%dv4pZTPKQ}y!+jlpFrSL%#A0brAivc^Tjj-QzC_7TsaTj{V2^)>>9 zG+uQ1H##LCvBm$aIuBF#fJpIzknkv-hK_Q!We+Ktii^f_GRGeZs=s@QBsR;RnkSO{ zN0e8Bnvq-LZdzFbxyL+#xqWZ_+lHR@pI*NjLEn5}ER7ED|JGAZm7ah{{lrz|M66^e zofr~FFGsK=pNMFhEK=tUq|in;F)B7#r9%tT1{!sJzS|Xdx(D5m{# zfQV^X&dJe|Lom@uy5W*=J-J8*MZB&J*;WVQKZ~#Q+o@p2F-LGDr5JDfi=L{YOJTUH zOQ;dLAMVddAdaR1$uZmPSbW^ouplya%vj`z<6h}%;n{ zRRhIoIclo)^kabd))J!Tpb5+6sTNrBJ~R4g7B)ReyV_q1x4=W?kXEB42c6_03SNNvVB_iqVn(fG;Q$nai9*MEl`vlb z+mcuS(q%i7LtY?V?V#FMh$U8{PImQkzqafBZQlFq049`%8=g2c$og2EOMNulP=U6F zuw4iLtk^lHGWTl4`KlRf9J#uPY4nN8?u5pqILhgvQ42F4zqOmEy!n>8Jb$32%B+%B zm5@>0djKAu)gXv>C)n-G#_qCRdv|FdXb{(-rR3&!=)UZk`ZNVqRf=o_`|6*PT; zwmMPVDy9Wh($)IJ3>AtKt>!K>x|dk4AYb2ZQ{^Aud`vkPdaumTX_>Vsymrq42juYk z8N~F-vEhc-FI!Qc$e9*f+Faa@z#eGlfC&mSe=>7S$6pTM0`_{#R?Y_^i>Exf)Vglh z&^o?(sPGd?Z04kC*~dub4BwljM0*~LtJr5sF`Jvg+e&lhd#g842!-zlQO+Z4pL_EK zS}ep!>i)IB^7mG1^Q~u47XjTOSvC8BYs6#s3xr!xC6n`~4y6i^+OMdvJ0Lw)BN+Rl zo_%d6Eb}l0B8y`QUnt2zOyY`F+$pPmgdFl>Ky~byD4$HHnC0ed%?E(?8Z~vHZGLJ9 zx{hiaEidFaksycQB?YuFs=h|ZbH8hpxLX;ODXUTW9B#KfhJfq1mPUzJt2d?~e*K@h zetCKSt%E+NLjR)yw5QgyGRhn|gd99BC(r+_TPR~RZiu=i$2h{EbC-R}F=maKPRj_K zl*vnr*Eo~LBT^c-`PP2jHyE$_3YA9-kn^Y2Sx(-3fFr3?&QFh+pv!QqM_OCt#FuWt 
zn&AvIRMv~aK_3R|mC9(PKFz=1+5d=ktM#x4`UU}UIWRXj=N}MYAS^6A>W0Km-lUl5 zzI)sAnVVhnNZ^0)55A^C(>LO{EtA1>IZZ%Gd@GsRW^n~m`JCag@%^4H9%S}~w1lu( z?=$3fwk592v6`%s>m83V>0#g4jt>2-c+XMLeWn6r%PR)L;54IjBP}T@sre>yhzMh_ zHXt?|B4Sd%eLVl;L!?OkV230JWPxx~GQv)3B`$$DUhtqOLR(&We{D|5QXAlAEUE7Z ze6S+7n!1;-Qd}`K+)!6Rpq4=jCq+TF3s+l+jDG2~DsvaAST3T|$tpMFjj3!@acssM zYZGKyv-5tAfiZskb!#4R=VgcE^I9cPW!y%$4gnmk23*&8K(-s>lwyX@8dcH#KEwRF z83>BRMwIJ>dPFQgjkP&?c;{}M{N&ixh?R`?L+2w}H%WW|f*KJZx_ z^T`r~&7K}#W-=%Lrf5jIDP=tr$Z#E^RB^lxs z4%fVM3zluVG1}{I?eB&-D+$H;9$q3^PvGSceDf53jvx0ir zqVsC_&R6);lojRoPAF;P`(P1Lb_VEJW9Au~*?v9>x1PZp;mUd8qBIBEv;?!Z43%8# zkvA_Jp6~z*$1D~w<7yu7d76D^rW9yV>nTpr18$!RFKp1Urv9PcZ{8pP} zzdC{9NTtQj&!wf9guwJ<8ntslh0NeWWvwr+>HQnVPU+QEc*2WCD0*nlna3W~@!V{? zcTaUn(0TX^92%UytKXt?`O@;tC5@S{J)?IJ0aIgvs+=}w!(%Mk5O=v!Zn?B+dnw`5 zj4N9sP1{;)o*$NuS54H;a9Sd=oV87&f_CK?RgC3lL~iDI+LiVU(l;s9*~$g$tVQX5qX2L<{uV zpz_QOqbPLxAhkF3BqtFC9zj}&X4p)aUh`3&>`l&xkyokH@|=O{ z00Jv%_TH`7cj(8%g*oA>jK+9Z)Zr0wgB@Bcu#10abY6Vk=0n1&E05QI8~iJ$^TF=x zsdYFmFRtZ!?$Dnd*aH>ra>?VW!sg?8K&(79=l`)R=cse|xL zP#!PA9~(i7ck5A#amy;Gl?x211jr_81pLdbKdwjIj;=ZWyTu+(Vw7i5-D*d0O)tX@ zKhJEF{#87!A$9{hQOq7k;{lvPAV0IurEHK&Hx9YKqAy@GxGUd@#iPhq+FX=^NX7hR z77jI?(hJL3{aW%TOOerAl!q@>GwyGR$ITXnWav&a3Z<*xKcdn-Y)>P7!eg}@Q_h2v z?{Ttok3_9iNDDMu!$>yjI1(N3MZ3H)Jl4huH^s;L$pbO|X!ku--ikSgNZ!XJC}Be7 z^n+IKC&sZURebf_|H|v(S&%ubV7T|poq=U1<5Ed{yo5r+sGC&&)7WUU$#BEklcq10 z+%N@q0=AW;zr^Rq&##LizSf`TT7^+^=)$lMuCA32f*I$cH%Re<_n=0eZ88c^PXh|2 z25c=exM|kaJt%CnwHrlA6Z5BoP-+vod>FNM1broHWdKevS1M9EO2HCP7gc@91PfOB zb8Fm2Ra~gryfMQ}10C&}fx6Xmixe_MSgN}NKPA_$!`d}$&_AW$W;5Ckn1PlIzk+;- zi?*z@yB5U%5r(x}l+tTaf|Zq6P-D7@D#%&eB7z;@=v>=hiF(#S>u&~(rzF@--V6q7 zrcj^N_VAy;x1Y9Op6g41Mcd@^JLb{a#w5w;>3lp@#2!2WOu_js+IX_HP+MiFUD~*V zegxI4XstJI0Z2}K@#}rm$Pe!fP##n)M~A?26rH$t9Jwi$UI8pF6V$#_s~!?&*3hea z#xtW-J%26ee{y#ReV5pyTE^}YpG<&1TieVFN5lvT%1e_F7Vb`1`Sb@hkhR*>!XOMp z^U}_@f#K&(umVj-S6o3fc%53-%{c~KpCdVAs5l9h4c-cdZNs^@0m_L%cM8@z9u|38 zws3jHK5GymwUdv6&>xWMB*-Uf6TOuo6v!!Tme)UOx4rP&`{P`4oTs%F6ClM`$9xnk z@87!sDI-pFvS($DMXweHiD3us-A9)(J&ZkO(&PQqhzq9 zP+Xyt<5113`ynuTZ-i@dvz%`gz5H7wFBM$PQ-cKx&W~!#uP3rx8dKT#E zsb#z6PhA#o$Gg5)M8synJi??}DC{p(y>8Vbz1^@Ty=(^~#bq`(yeW&sC6dL-6=W(j zDTTR8ufsZ86w~Uho|F4>#h!O@O+Rqx$kSsIwdhR9D*oB!L&TdzZQNI-w^tH@_ghAL z2ICs9ou3n2cV>9hWy*&r0B1&Xdb`qZv2MA=k{}m95+bR&y-hI5_ky$It$;ikIzd%Cgi)k zi30F+=J0%pBX?3JcP;?t@n`gPI2A3U8GZF09a&>-h& zbo7ADiIFzTJXrHe6`l=f5{-yP6i_EOp>jLyKGIxHH1SX8CM}z6v$<-=dKn>t?n6Mw zq?#D)?Q~R1P$(QV;;W~(6ByGr0n`E>R%@QkC`qslByLQi?iV&G+OZF?wZJHbN*1;B z>UXvbB7b#hdY+%rLHyaxKOLYs8>Z6HSAQ#1e^M%%#XPT;>OCJ^7`lDxFg6|a|3Jm( z9TU0{P3;LU--zp02dU$9{6%_S=(Ug05Z!V2lyqJyDcw-k@m374@0`?+L@nk-`;1vd zE$#av-tN)Lpl`?7XP7`NL9Idw`Imt!*299pw0*mYZL6r;g{I(+P!N?e?lD%uC^^b@u z8uL8IPPwN9h;j|0B^_X0)(GyQD3Ip}p@}RK!S)S@xx8N@@z{nKrZ+@k#3SfCJL35q zm_O%oL}~9M4IGYVVx(jtPR8~`JLC^zql8GZX#Tyhj8`81$TVui5L|R0$(chLj~x0y zG?VeWhGWI;|IRODe4)&#(q|90OqaGF0Q6cyQ021}~>(ls|*^Xeg^M z#NJA;U>)u<4@xb`TV=5^BqV|$+ zM}AacE~Y!X=F+~2g$;@wA5gofbD(}A%*mi;q95GMhC=zKULpfTr+Pl5cTVF?s-!{#pK!eK4XE(HYlUpnchsEaShE*_eS37 z0A55^kL8-=8c9b@I~`BT6mFuT6WXgYg^S-jMWI-Y?sF1V9kTJOBkuc}QSzH9_tVE{ zgXQY#`baxRL1A*NT)o31;%)HjQ?TdI^le#0Q1lza@AImm1W)(i>*=@6_*)LFEa)k4 zTDeH|I_t3%EFCz)fy^oI^rKm=K(WE+1Zkz%iT(Rk4_}hFgzK6V${%jvcjNXXZh2Jo zjpik>ph>#nMpHN4G31oo3BZibxvuYUn%vo_Gonbcmr%(6LMZ^}o|qR2NU(W67it8l zl~pgUG?6As=p^${@^|lUouu(5ZXpM}C(aEC;CA9>h6g>gJ{8-_tShmb=vAv7lj12I z)Hz}rrrpnH`$T;PM!6Z_f6=0zVLIUYd_E-z%fhht)O(y{4g?j=drfUV*f1nwpvaA0 zD{BG<=N74p@IJo!`xVah4edictyfH$Hpsg*bVyOurWD&vba}_hKljtpP10!lNiFGA zpcLpc2H62ZoI#NNwjFjm4v{4Ec-W8b^Oo5s!t6rWSM?W&UR*rVt6QO!UkD2`b7U-DN!avo#kj=)&Vu` 
zxAIG5i=r(@vs&~%JV{0iae}}K(6frqnh+-%SneZl?F-k#8pQHRT46`E(eV4*JVq~8 zx6_3fv(Mjb2SUfrkdoDUaIN&*%<w&)bA$z;seS zrScr$x}fk15vFj^nKCUl5Y;0Sh&IDB&8aE#Z*GTF>PE!?2nprqi-Lj*+Yn-HzAYHQObku=@w16#jh6UjqE469b)0DO}aK4sQ9@ zi8z0Hdw&Iryrb|HKjBWuD=%(DWiA2jwnQ{+Nc!^pspm~)#LE1s{A%0kASm9*DKRv! z<_~D(_31fgU)!%BY5=FU9B(;)AfVtv6fZ;E(FdqGRhyT6VA@+@&@Htm1qd17rVWOFkO^fp>hYUcxD!1Gtky>NtfK%AgY7Bp#$@26M9nJYI< zE*qa+HcqlXS>#C=A+~tV4vijPqpP86W~+3#A*{G8`55GO;v))ruXPOmah)z zW?*Rpm;o$YF4?`CT$uGo&Z-vx=UDnwRq#*J>?tjdXEhwvG2?~GktSiomVPTvpY_&8cXeauc05vVtS5HH>-|gjiUehLcN%QGz<=EQsORLPARge0VgA&gB_Y0$02a zl|Vp;XjvHZUPG&TMPw9!={E59rXHXpAh|@`R@@$|H}}>>d%XIFJ6vl!0y$35O*j9J zac6;}88i+V(3gvJeEyr>ZY^)8D57c*WwoPqNxJ8;9?O<3O;ih4sm26?-s26@(zzOC zu;wD2Ss>v^1>OI`D0&L z>2C(MQ#GgqZXj0?GVjEs^vKr06WwgoO*~4ai9A@(P08sT2^3bUoXcK8D|m|9nhBJk35Z>pxBWXYzYSw zRn&K+rOgd&^WeFj^v1fw&cG&q@M0R^gZSnZA115Is`6=EGtazvxil3TO@_n`ev(N` zH@ehG-ai|*?C94mdfZr6Bup-SDajxZDwJx+TI_XhvML_@BT@+3{mcW$Pk($ZWC~YI9kZ59!+gIm2uAUdL zLYEl|3!i}%eB4Inb3&htQgJ0ppLcV>Ng09TIyvb1bl;eWQq%P&5nn7AJ^Nz$jwQu= zL?N2Yrf^IAd&KHay1N}OvoO2gSMif*-NGTSX!x|ReiD}__hn9bMzuNhiIWbrT1T&m zZZ$luJrQbdKQn3u9fYmbv*%+m*&<4EU=^{tqg7hcRsn*Pu_|f5^Qadj z^w1|g2LhD}yZg)K=f?oRu+quOfgRW~)QS=lkoG(r%0H(=SMLlEfu$}kB-5$zx3x4D z6>kpCpZH0RBrm#mE|)Zl6GBMX!TflhY8iz_j|k*Qko~YTmqBVqPmAc#8eW5wMRVPG z_pNuSe?jIMu?GLC63z0Y@K`JX`RrZvq%TZoQTh_g8BPQ_N9TeyoP~P!dbvNhUKJ?< z%1#%u@p+=rzOx(0?&UEJfU7+T?dfEuZRE)J@LvySWRh2us2wCEy67)BF{E*GEk9%ForqGx*8%fQ4z*UnF ze~a$+`RgsP=8HzGwmUOF?`l0z>L{~UkXyb8XkDKG?W2gI|c;{@qDCNB?Yau9>mT;6ETCsY~EDQJ-*s6$0W$q z8JkRmKR215XC^|32>X~w?M*;d`>-!u$go{ z*m{mG9nsxG$qm%D2Kz2kmJY0Q$&So+ZD4n$&;%_X>FHKnTbk6xp5h!3z+O^8i&F;i zrr?vG=Rw&DG_SG zfum;_ih8qkx<_mG|A?~!FhqLDg(&AXGjbGdJM1M=hcr+iy|Bv|W)aU7_D{D&yiPkU zeH(KvzcSP$vfQH@tjj(Y=~Q=m?if5Yw>N9aD#SexmmKdvxviM{PRd*|1PqB5E&Pd`f4;F9cKZflM*wA^ER2EncqM#vbJ>Xj&htX5aX91-Pxwu6eapc z)a?CLCi=65@VF2W)&#R$q7gK|jA=ScU7UNHu}U-HQ@on_i%-%Ke}423TY#xV6GGMD zDqpvn0Nygv>%%i*i@?+%`fC1&*6t;OxR)-p5@8x=?3!g*3dQl28=>bk(zS;4b|Bhn ziPRZLo74*|ci!ih*Wbyv9re@|V>2hPB>??NiT_Ko#iKlk?K+VW4+6K^ZMcn~b%s_q zH~xOiQg4DBb%)W)K9;C!NH<}33{h~S=vtaCOwpk7k?N__D_vNOq|RT^SZ*zW0I$wg zWCeJ^+A*RoB^A9R(0;G;MeP(}ytBl?T4)0VLixRjrwd2uw#OCZ-rIUGX$|Foj3dh6 z5fL(RqIhP9TKXuM{T!r;5n#Y!=k>VHfMvV}pFcv?QRi41qCtrjuzltLNM+IWt-9N` z<;R?xbi15wYf`?X5S|il-OH?Sa>#Try4!jeX{yTTR*Eami58&Y%BnGNoJ#k) zED6hENrreoLf}6sQEJ%ctC)8U!%%vnY%oYpzL%E`oebvVb@G;!sj^@Ltwz4`adQ%aS)2+Q`z|q&WO@+w@gIfR7q7YWj99PGlnN6ZTJ`gdSOxqfrbD z(cbU@fIywRUCC9>sE=e4QpV#Xy02VSD?eTec}_(`pLWbY>0Dztj&y8exG$XoN&~b! z>kO$qxS*Q#`0S&~(|-b?L+gfA&m%x$5s z*i^@zGYk5W(HQL=Nk!2ecBP98bGD$w&&5xeI`OK@jJ?C6APjr;E>LvxkTzfJcjXzL zz(ndSQc{9!a-`nWxRDL8puA?)d{o;6D7CPu(8@rjPhh`8>)Qo@;8%|%nuExGqp$_4 zq3XaSFhhok3DS1GT}q)`$kv{x9LJ(n6gs5ibiAB4bR%y)*SnJXHj!|eHI0xMKHylG zKAuTnW8a09Qeo%ZtJSrRBgKth1?Qqit**YvEI>l8mLCLp4c=tj@P`trCV%g`&Dte&ib>B%lL8#0+&GR?eUFL z>6S$%PV6HM+qG*ge4l}GJ6iTWf%{R)7@4HjQptP+xV~+}K@X>1*C7n5_!DY3mp1Fdx?Rs^4TF-_` zo59fIZ487@)@`0@8!EkV?Dq(k0}vTCjcSorvCgE3oAN41C~aiNamRpH;7Gx(lKK&{ z1D{7=dy^QSM`LS7A+55~^CKm*25fH}p&5%$N0JPUlV)CfP+>D{9lMt! 
z>hlg{)cuTkBeMk*zdFdnoEh7aTK33LyI^m``u;3Kc}l(?Y0R zrTbC+VD225d<1WW8VTF?e%-HwpG#p5B|+)J?1PCVRjdwW2xodqSNhxfUy2dcbYM%~ zWf~a)h$WyZH08sOowxuMDGJRHqrX&4ey6R?2xz_cZ0ASg?7FJrhySR2B90};WdOC* zRwk2F7{Qn);Y=mUZVNIXJps1MdP$13s^LonJ=u#u5=6#V(L!?-V(yD!a)=IfY*)Jn z37HNi1!NL5M;f9PbgL)m;(vyd;%z7ZW6kvJ8)$y7-Jg2pwE?6!5Bb-+NBp)>$OHP5 z)_>lH2N6~T&F0_q;!-!XubV=!A>btm22+z;N+{##!(`Etc_ypPt(XDoxzci+@mF|zUd@D|jI{c5J*M0FJ zq<;%$qJJlm!&LMA;|2pAos$=iyAxC^p%=?^31YtD@i@ZNJ?J9Au#4@UJbEdXgNUHG z2DQge;H4%Z&w2@zGfgN8P9p4FsF{Qs1*iSeA&0An|oO zkO=rEfy5h&G_AZ^q{9wCs8^Ry_ovfr?B}A74ty`pmLK?j>MH8z(`H{4Z=@_NEJRcn zQo%4%#h9)r!l=}h^{dDK;SK#cEjySN_AV`m_GOUJ)L`kK!l3c?nW;YN?rBR(s|3%Gtp(5aRI* zYd3X&8j5PYonBJNF)U@dTFG|`ttEX{{dZ0MU;mYA6NLB_t`w(WD^%{hGWUSXm`(CI z+*!#zb&@|f`wA1ucze2>(1JU{hTQQM{ZG#BAAn=~m3ZhOgw}vUA*uzO+}w>o9KvJy zH+ELNyk4_1;t=%4baVFQ*L&S;rrX`^pR3&`%YaRg>l3`ca#u+|G7XU zbb2v0ihPUThkID_dyLz^UNLcoXTU3?($ytlh5pvY2btDC7VPbwdXGN~7726Hm>SRj z{om?=?{xOHw*2v%)O2BYvO0izmbS;>&(-^vudo*ZF%-Vr|Nb)MEU7RER}Ga;BXr@W z&yP*P0uhSFDQ_RH((%lnX8%7K-G6&^MPOS@hgku~Z`SG!VmfOKl)aS@kKaA`SmxI= zh^qmgC*(CMnn^Y1W>lLh&u|EMIT3>%_MDg;m4?)e9(`GbT z&z>Iv(f`I0l3GCFHfEpx?2m}~0r^vspO+?97F1szmwqG07>FGGH>^N1&CjafWmi{G z%Kx>eggyGhf;+n`@#CUIT{JlI%YqyNnl4Skon3$9K|$LpBS-St_W8N@`}~|n&X3vO zJ3k=5c*6A4%acFeaPscR|E8(-AMJI*#LCQZhJW;PPeCrMlx#0@_0$WA83Vl)qJ;(G zJdqwPX!kxJXCyQ%iOg~X2V%b$;cS2ym5TwLV#?YkFlfwy!O{+0IjPDoq*Ll{*OLIF z&j_iPv;lhunjNxu$eH3?CPt=#fCRz|#t??|K4|Z}N|vr){(1Prvzy@B@?ivqdD^?n z_sXFiFq{(_9iixF2sBK^d!>)_V$F>>w2x^FCpxJwXH04K7KfIO-Slu397R5<4d(+_ z35jbETpN;OYy^25?K8)dS|F96K-mfE-VZ?*PTl<$j+d%b1p-XB#J-44%R7nA&^s}U2(9{S zzxzB*vAQL=;ikhZu;KPd3m`hav6sEmx2+mOZjy>|i5aZZPzu#IKE>rMP5X?fmhb)z zw!f~V{NsEeei>Q)*B3cfY#JzL*+4`gnBIj6m`Ip1V^Yw=u<*`&3U%?0ePh^4JvoAV91BfGY;ZZ=!L z*eVeCwjtUC^Uh?qij`(QEpE}8!Oze`@|zE%Z*JD}dG$rd@VGw49Us0aU!l3S{C++~ z-}CyfB<0$`58Ri=_4+>v6)7>E%V33L_YOp$17uEu1syAX2h{Awuvn8-^E@N?dY%a# zDGJ?@V>w23K4gZ?e6d?;2xpwl@@cFI^7o{!v}P*vyaUQ@<+%cCSK!{V8DD5|t2ER) zRC*(W&FOcCADbTH0v+ZCy$Q^NLqnR-Bk?630q;3`E$63Z7vADh_ zZ{xxljQUA40q{l^dJTv%EjQ(^@Vn%9@xs?DAPo@% zE#(WH*+cJ1c*l-?bWYs@e_7-$l9x|$%PtPb;ot-Tj`}} zMdI)&LGsS$*ZpLA*h1yz_?SZ@eQx{W@IKmEKiRcVrr%b8M0&d6wlc$kCjHk3dfPMo z?MJ0*h&HODFXSzHHg54Mt0+l_o>r-C$H@#e8Rl2Ck~%dG31+4q|MW?L*s9L)Bffwj zBOVa|{i*E7@?Remd(s`cpSqy5UuRQ^a52Gj6@My1s`|)urOnG((1X}*7_5*6Hm*I( z;8H5|&14M?D0oXTCT3is;<$PZUf2@0Lir~)JF=`wLX4A zqz?%dtF6Or_w0<3bo%uu(3>izdoxB5)`g7D-epiX$N0k+MoVXCmCJ|P$SqKXvw%Tb zcf*^cIA0jG*_3U@KHCg;Xyq()Spn^MGQ&fEs3mZFD=6AoLnSMR=C_e(i@YwJ1h z#!dOe4d>ZzzF(p*dkuqs1mBeTm zY6C=zZy?QSbF%b{G?)g<^WR>2!hBdo_2$x4aYU!>CZaxchr&`vOer@SV8pWblYlwU zCyJeBFQCq3wJ8Efxf{HYvU$Fw8%!O#(*nD73$SIQETevfnM~^2u6MK`SG54r)Tr-xj_3@8dlk#SVzw7BRmY}s;MO>?Qd1wETtpePvT__#kEbTrvK!3KOv{SPi0F=hjN zmb`arf7*sq?ZTmO*eMSk_^$o6iUad}i9fy_!xOPIUvazf%&#CRKmnpNL`k{4o{41W zZ3iwo7$OzRz$^lDZ0uLyqvN9U+3q=BFGGM}LhKu_1+oZ`3AtW~jEh%GQC5h<1OK^* zfJ6NL1`O~@R1&~aO@bK1@q?Q$#^aC@Q7S_uh~Q{jZxK>Nsid+5L5ut6=uc;s3oX+X;NNp`wm_FU!Js;)4F}h0oFH`5qqU9p7{mDCZV~P zdmNyXx0LLSBOfvzJpoI}c?*7O!l8<4eZd1?WG`$#bs{>QEt`~Uf?R4Ha3JFZHPAkw4cZbEL`sAa6sT{c@SE&0CwJmKa%8Qi!3w<%9 zwIHV`s#T#Zp-dv}If-Z_#;K58q;%bH$C+{FD?-oDe=$i9va!gx?Fw`0uN{b70Jd8V z^vj-5n=DePQO|aT6P_SyUZ5RwRAc4Svlv|LC{(3yLj8P3Frrz>%M^PTTa_VZ$oe38 zo+BkxU`Dfj@$HieAUHTVU=t1q_OfI2dA;q9)2Ho@9FI+2%H-;|jai%4+`8)ph2g3+ z9)rpP3?ja%G1gxG{2?XU3C61FTvhAsNyTv(#iyH*=v7MK2u{)hYSVz-SIU4L_-p;mxzZ1UE@w1Vrd zm1^7H-sEF$aV`5kdF*$`ShfuK71@sYui@u70#Ierv*!S!m`&#;*=Zd&A_Xcq^(j*m zi@a)-jL5vGD{m1^3t*n!nTKkf?-)+|%_S0sHZ=^dUieI862n&z;M8raxu@&S{*v&8 zz$OF%UfR{mYTZ{#?0ebMTSyvLe}x%Q7@^w)RbIlG7P1T-!SK6q*;1F=CeAV7`xS=rzx} zGf%{gKq8#R$@cFbXEs_O1iQ@|?OCxmIBU2(!jjpcg 
zGv~>Z->nhew)H{{TatTmmM50ww~hsyis=hBV2yuIQ4(K8a6~oKfhulRV(K#WZ&9Gk zXy@y9a^{sdFBsTdBrM4@X?kl*q8}MYxXz`WU!UJ=rw#J68a8)rsmpD~R61N<70q(V zwbWU9?Q8XrXf@2&0GVRaw}ePSR0&t-&HATbB}1?47CMdSw!glq*)hL#d|e(i7_SY1 zC(%is19$6Z$=n7tFbkvz)k04UfTpSE9&#=!m4jp;v|vixBe92ea8u}z$d%oPsu>aE zOC9`w)A;^aqw@M6P|DX!)lmeF0QoDeWJf?vaQ29@5LKZRm2u-|7RIF3j+|{jG^LCipIabuGtd|PdT}YK z5$dX&-pLL{`Zk=XJMCKsTN6YM0ACzAaRS>&-2XiT`ZF-M-$_Gqb6Xws0iq5{UjL8k zpt{{x3b>l6@*AZ(I;_Cd@~#CMytLc_z>K3;|m@8@{A3;6v8L)$fgsoA+?ULDtDRK!xlCj%HwsiCj$!-wZW8>FZVA}Oe*p{H31F}NM z2zlvOK~G|n1*fo;fO#ac8m3JR0#zK<=Ij*=*|UX4pxOYW9QjL=-Ngp#)uAyhDse_d zNbtsf`R^xm|MTE2WiNTcjpM(M>jG%K*{h99>-E-zw#Df3K@+E`jsi#-~9L*(D~nizit6Baz!Hhm2q<0L`JlhIg9%~ zFqSd4qaRP65pY(!-ox_z-rI%db4L>qaXAGyevNA7P$YcUGr#y_fp6@YX(R@wk@ygj zx#-I8RQ94=m@}^lV#;7k@~B#EEH2YJs|zZcR+i ziWWI+^rH^QjtmgBA=u5L9VtT5O2oVhfDeD8gxR7KZ>}1X8me4|uUDsde#ZtD5%9G$ z5;bhY0i9O;L=g-7N5xWYNHF0#*0~t#oJ4Sd zzgX4|S?i6Q7D`mxlM4n1Zfy}(3F=x`S~=Ag{E1!=!cuGVNtv7!IuO;E#5KQ%;>_C^ zu-P`WS|{%=PYQG26#7J9ZjDPm5=x|UQb26M{VT}e)K=7C2!6hE<7Q|EXc#@peED{3 zg{>&Hh(-TqTeq#xlcXE4b8M*KzK4!_K`ze(EqeN5gZtiAY9m2KP+cwqHE0`Hmm}@? zQxQKxIP<8~dcEI;oCRd=1TRE}o5vrEs72iuD{z1)`6_e?UbIKL`cz-4l*b~1YjJfB ze60%0h?3aeD{U9s7J{@+?#KhTU4=kz4@=!zDGT)o_Y~YOV58OM_h2ZSRhPe+A=PfE zZx&vkrnlvu%9JOF9W}c^3*GnZ4;`zHF0BBKHu9|ulFR|eF6Yw?y&V8Hh1tQTQ$5q) zc7^PAJGSI!o~Tk{uolq1rJ~Yd2;H?XkHsgmyuX=923G(ZK07X42JKSqFvV3xX#>e+ zwzOxtGpC9ucc@*-1nzD=a3aGCXtCz&5TNz&Zh|Lt%`BP2J@eW+(?kA1eXVX^Zd*HhbzZax4-=(KJI^Y z^H(MRP&G5sAsR0e%s8$ECTzBjmP(Pc%Jefik_$>zrQ z)Yjdp#ZMqkFV%q-nmAfu&uq5!%)nSb^S2N7S>D_3O2{9Vb$w=$pM{V>##VRrudmGa zzMRs;UOsSZweQiUy~@VcCnxf=thkb`t-9>jyLn0RL^uz$xA$QTi4^M4F6R23O0)H2 zexn9w2lN}$Zi@0;Bc?oa>UlER_vA-thD{MK)oBT0!|T*`noB}4&1PnfMJ#s9@{_LP ziymVcQyJr~8VZy0jm70y9S*RcMf?_Bt+zFXgT*C|#KKq}4isj<;m!v|B0(7<6z1K8 z0emu$Q|7;rkB-2b+;e8dJ**0Y7v`E)tHkE3`%UyHG0MTxHgzEdcms=e!{YZ|x3Uxl z@o2N9*P~(QDy6Er(rT7?D)VP63>%G(Fzo#KVRD%mjjPlOK8!i85xOmy+nf1SE768* z$|xc4L+%v&RG0_l`H;Rx*^m6Enj^ax=Rn|M5EQ~xQ=dxGk#mlQ$f7WY+EKd{YzU zVn~%??$D`0v6X>%^It=RlL2&(ggjCS@V96k9n@OmptS((jV&wWCt^8=$GIMR0 zeT+Q;j~PFyG2cfAg?TaFJG*$t&u8RtTjZpHpT&Tj+GMTc<@>}x zKN!r2%J3%UMV}qz>FZNQD4xAn(vjG2I+9u|#!jz~&Q?rK_e8s}S`=!@W9a0&|A?XYCpiztZeNJ9P0#dUEm= z$&_$nZ%?FMi&B%K4ur14!CSK^>ZPZoC})2KN8Wa>_=T#ZF$SfCh^Wf|BevCwuvI_M z(-NH|+ z-wCKd)QyiZ%!*OyRCQn|nI5*aLG(QGU--KK=)f;!;ODVvC*15$x;T{uI9|gPJ+xt5jgxc&(X)Ab&x3jR!Wlik`z@1`Nu;S0%hgoj0UR%*@#}$ z&U`07EDGSO3roQyuvHv<{$F7sjA_t0{+|o{2F=dnj z+YOSsq{}igK3xiAF;``570$9m9aqV(O7Rrgl)HqgxQ_Hp1c@|l0;*ZkpX&QgAs+a` zY??dEIGoqMsih1NI^oi#h&uwe47<8eUy^h*()#c=1k5AN(5dq6SiMJ zOQ0|;2f+~u=nkN!?o_>PU(P5whIkGrL|Hvh{7^{6z-D_b+QOT(H zXh z#w}qDBO*LZNADW!>PVwlrNL)19%0ohRVl)wpHw(~1-)RE3#ARa)?zn7pJVSJ&*cQi zBFBY1$nIsgHa936)ix-Ncml;wZu@GszwKK7V_)qIMcIXqivd|jmHD#@Jp(RyqEF4% zKfOp)NFjb&dVR*v`{^O{VI!X7G6#pm%cO{}Ncpuk@$iXUT)*ObIoNy38(kH!FM{Ua z&YpTCc8V`3xSDrc(1jCCGn!Ku&aH7+_pl~vM8J_X`5J^uDPL!9PhM~>l#2ij2t z+^Z>0&c=c!P4XMpLAZBZ!tTQ5o^2E{jxfNaZggK;rvk>Am@)igG@UAEe|tq z>s`vuZI}7}%?RnffE_v)CbfHE>oDi^9;N%-oM?T<}kBHxDm(T_YIii#Jg#JzqxmglF~wUMpwY zg82}1HOc=xpp?RE4OFziEp^BuCRuTgxt9+G-8^J;FN!ATk6(IyQe3%JB97|+_O1N@ z!8p^T-2utD{dm7zrKjhEey^)6tl7Y`XuQb<@{CTI*UU2aA+B`sLxM{r%A{ga1#{2+ z+wRLL#@ttk+4&(AV#{W4%ZMWyHfbLsVVUl5ZhPhrbVqx6tDbIK|Ltvj`mIp@mlY8w zI^c6Z6RoAJtlSrOagQ`=(d5TI&YTr64*c~<0rxN7$0vJAa)QMfc_VOs->1_ME*;49 zS4JEU@+5dea6GfyjtB8TMm1v;FUR;|9boGeXqq@x`*GW5>7DSm7kPfWzz}(e&&z%X>Kx}>hy-1B^JlVgDCfFP(yUAtQA80U zLqg=9-HNgMOCZ^tZ=pQ~<@`e!Y4zPq8P!*+ut@ywyB)vNbda3V{(6++*mwcXoZi6v zxPJc^Yp=gJ&O`7NS(OiRYf?szU&$O!_ZKZmEMxk7EWq z3S-Umw0AbZ`VE~G*J<5nL6b+QuXZzADWg6#o_~NhaV_ZH-s-`mP)+tfH{yX3awA|` 
z$cdwT7a_06F}y_`{%0b7wt72bXF!8j0bB0(QVCd@kJe?e3MWY67}-2nOQRt#d2U*1 zNa;Ec=j`cgz*4Wi?M?I$bPM##WISr@8h8Fm_{n4RkSxNw#KhFqR17g!cqp45wiKn+ zPxwD4A^7}All#3@-rfD`)kg<-(^xFq^c;}npl9e*j{nP**fv-r%{ZW(Olvau_f=N) zk`KmHp3gnN^5R*N4u1XVBla_$X4}B?nm<-A1PwjZAq}Cr$<^ZL^ZzeNE33~sbn@>a zO|NWxk?iUtsPw~Q37y9L#qxdpm!F^5{G*bV~T}(r4Y>;jfxm&8eI}M<#X}-qb+*NE?d_>p#*>ei9c-U+OED=JtdkBMsgRPUDou-uUKXOJDd z_aW%s_kuZjPgw&Jf!*BP`ir+)uiA)#JKQpo)+J@`!g{CGx%+k8KA-mkVc)_pn?i?_ ziEv1uIPW<-f`5{jjAI>CO%N#9f5mCAoDs4<=dVvMj7Dfje^VxMz9s^$e`8}~`dBPh zpC?(G<+1~4`0h$Oqgb2cvLqS&`tzke5=TJ_ zUs;-Vl)9QR3X^FG>%eGKLhbv1zBucH?Gap$oS?fk#ho0)GjxUgADWR`GY6BtPtTrp zO#oSF*%+v&4R`<_SpkDZF6cs+Kco~LVix}(O*V~wsjZnx#pLPAxF=^8UUaW`(eCQx zjY>7dP*ggVG^0A76Qo<63$}cge#Yep-ZmHVG4g#OI~mGm#BM(nbCk?cch;%spNj z8MMo?2&5N)X}D{vqrwqVCI_kO8ll2@>7L?Xc?qL-?y%PrX7y6zoKV-1ZYG8CX3YG7 z@&o0oquGqNV}i&P=0v-yN1fjaz7^~WB=lZcxQk}Zp%&LXT1*_^UU6;F=7J|I$`KB_yT#RP)0I4^H00000 literal 0 HcmV?d00001 diff --git a/docs/source/recipes/aishell/index.rst b/docs/source/recipes/aishell/index.rst new file mode 100644 index 000000000..d072d6e9c --- /dev/null +++ b/docs/source/recipes/aishell/index.rst @@ -0,0 +1,22 @@ +aishell +======= + +Aishell is an open-source Chinese Mandarin speech corpus published by Beijing +Shell Shell Technology Co.,Ltd. + +400 people from different accent areas in China are invited to participate in +the recording, which is conducted in a quiet indoor environment using high +fidelity microphone and downsampled to 16kHz. The manual transcription accuracy +is above 95%, through professional speech annotation and strict quality +inspection. The data is free for academic use. We hope to provide moderate +amount of data for new researchers in the field of speech recognition. + +It can be downloaded from ``_ + +.. toctree:: + :maxdepth: 1 + + tdnn_lstm_ctc + conformer_ctc + stateless_transducer + diff --git a/docs/source/recipes/aishell/stateless_transducer.rst b/docs/source/recipes/aishell/stateless_transducer.rst new file mode 100644 index 000000000..e8137b8c1 --- /dev/null +++ b/docs/source/recipes/aishell/stateless_transducer.rst @@ -0,0 +1,714 @@ +Stateless Transducer +==================== + +This tutorial shows you how to do transducer training in ``icefall``. + +.. HINT:: + + Instead of using RNN-T or RNN transducer, we only use transducer + here. As you will see, there are no RNNs in the model. + +.. HINT:: + + We assume you have read the page :ref:`install icefall` and have setup + the environment for ``icefall``. + +.. HINT:: + + We recommend you to use a GPU or several GPUs to run this recipe. + +In this tutorial, you will learn: + + - (1) What does the transducer model look like + - (2) How to prepare data for training and decoding + - (3) How to start the training, either with a single GPU or with multiple GPUs + - (4) How to do decoding after training, with greedy search, beam search and, **modified beam search** + - (5) How to use a pre-trained model provided by us to transcribe sound files + + +The Model +--------- + +The transducer model consists of 3 parts: + +- **Encoder**: It is a conformer encoder with the following parameters + + - Number of heads: 8 + - Attention dim: 512 + - Number of layers: 12 + - Feedforward dim: 2048 + +- **Decoder**: We use a stateless model consisting of: + + - An embedding layer with embedding dim 512 + - A Conv1d layer with a default kernel size 2 (i.e. it sees 2 + symbols of left-context by default) + +- **Joiner**: It consists of a ``nn.tanh()`` and a ``nn.Linear()``. + +.. 
Caution:: + + The decoder is stateless and very simple. It is borrowed from + ``_ + (Rnn-Transducer with Stateless Prediction Network) + + We make one modification to it: Place a Conv1d layer right after + the embedding layer. + +When using Chinese characters as modelling unit, whose vocabulary size +is 4336 in this specific dataset, +the number of parameters of the model is ``87939824``, i.e., about ``88 M``. + +The Loss +-------- + +We are using ``_ +to compute the transducer loss, which removes extra paddings +in loss computation to save memory. + +.. Hint:: + + ``optimized_transducer`` implements the technqiues proposed + in `Improving RNN Transducer Modeling for End-to-End Speech Recognition `_ to save memory. + + Furthermore, it supports ``modified transducer``, limiting the maximum + number of symbols that can be emitted per frame to 1, which simplifies + the decoding process significantly. Also, the experiment results + show that it does not degrade the performance. + + See ``_ + for what exactly modified transducer is. + + ``_ shows that + in the unpruned case ``optimized_transducer`` has the advantage about minimizing + memory usage. + +.. todo:: + + Add tutorial about ``pruned_transducer_stateless`` that uses k2 + pruned transducer loss. + +.. hint:: + + You can use:: + + pip install optimized_transducer + + to install ``optimized_transducer``. Refer to + ``_ for other + alternatives. + +Data Preparation +---------------- + +To prepare the data for training, please use the following commands: + +.. code-block:: bash + + cd egs/aishell/ASR + ./prepare.sh --stop-stage 4 + ./prepare.sh --stage 6 --stop-stage 6 + +.. note:: + + You can use ``./prepare.sh``, though it will generate FSTs that + are not used in transducer training. + +When you finish running the script, you will get the following two folders: + + - ``data/fbank``: It saves the pre-computed features + - ``data/lang_char``: It contains tokens that will be used in the training + +Training +-------- + +.. code-block:: bash + + cd egs/aishell/ASR + ./transducer_stateless_modified/train.py --help + +shows you the training options that can be passed from the commandline. +The following options are used quite often: + + - ``--exp-dir`` + + The experiment folder to save logs and model checkpoints, + defaults to ``./transducer_stateless_modified/exp``. + + - ``--num-epochs`` + + It is the number of epochs to train. For instance, + ``./transducer_stateless_modified/train.py --num-epochs 30`` trains for 30 + epochs and generates ``epoch-0.pt``, ``epoch-1.pt``, ..., ``epoch-29.pt`` + in the folder set by ``--exp-dir``. + + - ``--start-epoch`` + + It's used to resume training. + ``./transducer_stateless_modified/train.py --start-epoch 10`` loads the + checkpoint from ``exp_dir/epoch-9.pt`` and starts + training from epoch 10, based on the state from epoch 9. + + - ``--world-size`` + + It is used for single-machine multi-GPU DDP training. + + - (a) If it is 1, then no DDP training is used. + + - (b) If it is 2, then GPU 0 and GPU 1 are used for DDP training. + + The following shows some use cases with it. + + **Use case 1**: You have 4 GPUs, but you only want to use GPU 0 and + GPU 2 for training. You can do the following: + + .. code-block:: bash + + $ cd egs/aishell/ASR + $ export CUDA_VISIBLE_DEVICES="0,2" + $ ./transducer_stateless_modified/train.py --world-size 2 + + **Use case 2**: You have 4 GPUs and you want to use all of them + for training. You can do the following: + + .. 
code-block:: bash + + $ cd egs/aishell/ASR + $ ./transducer_stateless_modified/train.py --world-size 4 + + **Use case 3**: You have 4 GPUs but you only want to use GPU 3 + for training. You can do the following: + + .. code-block:: bash + + $ cd egs/aishell/ASR + $ export CUDA_VISIBLE_DEVICES="3" + $ ./transducer_stateless_modified/train.py --world-size 1 + + .. CAUTION:: + + Only single-machine multi-GPU DDP training is implemented at present. + There is an on-going PR ``_ + that adds support for multi-machine multi-GPU DDP training. + + - ``--max-duration`` + + It specifies the number of seconds over all utterances in a + batch **before padding**. + If you encounter CUDA OOM, please reduce it. For instance, if + your are using V100 NVIDIA GPU with 32 GB RAM, we recommend you + to set it to ``300`` when the vocabulary size is 500. + + .. HINT:: + + Due to padding, the number of seconds of all utterances in a + batch will usually be larger than ``--max-duration``. + + A larger value for ``--max-duration`` may cause OOM during training, + while a smaller value may increase the training time. You have to + tune it. + + - ``--lr-factor`` + + It controls the learning rate. If you use a single GPU for training, you + may want to use a small value for it. If you use multiple GPUs for training, + you may increase it. + + - ``--context-size`` + + It specifies the kernel size in the decoder. The default value 2 means it + functions as a tri-gram LM. + + - ``--modified-transducer-prob`` + + It specifies the probability to use modified transducer loss. + If it is 0, then no modified transducer is used; if it is 1, + then it uses modified transducer loss for all batches. If it is + ``p``, it applies modified transducer with probability ``p``. + +There are some training options, e.g., +number of warmup steps, +that are not passed from the commandline. +They are pre-configured by the function ``get_params()`` in +`transducer_stateless_modified/train.py `_ + +If you need to change them, please modify ``./transducer_stateless_modified/train.py`` directly. + +.. CAUTION:: + + The training set is perturbed by speed with two factors: 0.9 and 1.1. + Each epoch actually processes ``3x150 == 450`` hours of data. + +Training logs +~~~~~~~~~~~~~ + +Training logs and checkpoints are saved in the folder set by ``--exp-dir`` +(defaults to ``transducer_stateless_modified/exp``). You will find the following files in that directory: + + - ``epoch-0.pt``, ``epoch-1.pt``, ... + + These are checkpoint files, containing model ``state_dict`` and optimizer ``state_dict``. + To resume training from some checkpoint, say ``epoch-10.pt``, you can use: + + .. code-block:: bash + + $ ./transducer_stateless_modified/train.py --start-epoch 11 + + - ``tensorboard/`` + + This folder contains TensorBoard logs. Training loss, validation loss, learning + rate, etc, are recorded in these logs. You can visualize them by: + + .. code-block:: bash + + $ cd transducer_stateless_modified/exp/tensorboard + $ tensorboard dev upload --logdir . --name "Aishell transducer training with icefall" --description "Training modified transducer, see https://github.com/k2-fsa/icefall/pull/219" + + It will print something like below: + + .. code-block:: + + TensorFlow installation not found - running with reduced feature set. + Upload started and will continue reading any new data as it's added to the logdir. + + To stop uploading, press Ctrl-C. + + New experiment created. 
View your TensorBoard at: https://tensorboard.dev/experiment/laGZ6HrcQxOigbFD5E0Y3Q/ + + [2022-03-03T14:29:45] Started scanning logdir. + [2022-03-03T14:29:48] Total uploaded: 8477 scalars, 0 tensors, 0 binary objects + Listening for new data in logdir... + + Note there is a `URL `_ in the + above output, click it and you will see the following screenshot: + + .. figure:: images/aishell-transducer_stateless_modified-tensorboard-log.png + :width: 600 + :alt: TensorBoard screenshot + :align: center + :target: https://tensorboard.dev/experiment/laGZ6HrcQxOigbFD5E0Y3Q + + TensorBoard screenshot. + + - ``log/log-train-xxxx`` + + It is the detailed training log in text format, same as the one + you saw printed to the console during training. + +Usage examples +~~~~~~~~~~~~~~ + +The following shows typical use cases: + +**Case 1** +^^^^^^^^^^ + +.. code-block:: bash + + $ cd egs/aishell/ASR + $ ./transducer_stateless_modified/train.py --max-duration 250 + +It uses ``--max-duration`` of 250 to avoid OOM. + + +**Case 2** +^^^^^^^^^^ + +.. code-block:: bash + + $ cd egs/aishell/ASR + $ export CUDA_VISIBLE_DEVICES="0,3" + $ ./transducer_stateless_modified/train.py --world-size 2 + +It uses GPU 0 and GPU 3 for DDP training. + +**Case 3** +^^^^^^^^^^ + +.. code-block:: bash + + $ cd egs/aishell/ASR + $ ./transducer_stateless_modified/train.py --num-epochs 10 --start-epoch 3 + +It loads checkpoint ``./transducer_stateless_modified/exp/epoch-2.pt`` and starts +training from epoch 3. Also, it trains for 10 epochs. + +Decoding +-------- + +The decoding part uses checkpoints saved by the training part, so you have +to run the training part first. + +.. code-block:: bash + + $ cd egs/aishell/ASR + $ ./transducer_stateless_modified/decode.py --help + +shows the options for decoding. + +The commonly used options are: + + - ``--method`` + + This specifies the decoding method. Currently, it supports: + + - **greedy_search**. You can provide the commandline option ``--max-sym-per-frame`` + to limit the maximum number of symbols that can be emitted per frame. + + - **beam_search**. You can provide the commandline option ``--beam-size``. + + - **modified_beam_search**. You can also provide the commandline option ``--beam-size``. + To use this method, we assume that you have trained your model with modified transducer, + i.e., used the option ``--modified-transducer-prob`` in the training. + + The following command uses greedy search for decoding + + .. code-block:: + + $ cd egs/aishell/ASR + $ ./transducer_stateless_modified/decode.py \ + --epoch 64 \ + --avg 33 \ + --exp-dir ./transducer_stateless_modified/exp \ + --max-duration 100 \ + --decoding-method greedy_search \ + --max-sym-per-frame 1 + + The following command uses beam search for decoding + + .. code-block:: + + $ cd egs/aishell/ASR + $ ./transducer_stateless_modified/decode.py \ + --epoch 64 \ + --avg 33 \ + --exp-dir ./transducer_stateless_modified/exp \ + --max-duration 100 \ + --decoding-method beam_search \ + --beam-size 4 + + The following command uses ``modified`` beam search for decoding + + .. code-block:: + + $ cd egs/aishell/ASR + $ ./transducer_stateless_modified/decode.py \ + --epoch 64 \ + --avg 33 \ + --exp-dir ./transducer_stateless_modified/exp \ + --max-duration 100 \ + --decoding-method modified_beam_search \ + --beam-size 4 + + - ``--max-duration`` + + It has the same meaning as the one used in training. A larger + value may cause OOM. + + - ``--epoch`` + + It specifies the checkpoint from which epoch that should be used for decoding. 
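+
+    For instance, ``--epoch 64``, as used in the example commands above,
+    refers to the checkpoint ``epoch-64.pt`` saved in ``--exp-dir`` during
+    training.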
+ + - ``--avg`` + + It specifies the number of models to average. For instance, if it is 3 and if + ``--epoch=10``, then it averages the checkpoints ``epoch-8.pt``, ``epoch-9.pt``, + and ``epoch-10.pt`` and the averaged checkpoint is used for decoding. + +After decoding, you can find the decoding logs and results in `exp_dir/log/`, e.g., +``exp_dir/log/greedy_search``. + +Pre-trained Model +----------------- + +We have uploaded a pre-trained model to +``_ + +We describe how to use the pre-trained model to transcribe a sound file or +multiple sound files in the following. + +Install kaldifeat +~~~~~~~~~~~~~~~~~ + +`kaldifeat `_ is used to +extract features for a single sound file or multiple sound files +at the same time. + +Please refer to ``_ for installation. + +Download the pre-trained model +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The following commands describe how to download the pre-trained model: + +.. code-block:: + + $ cd egs/aishell/ASR + $ mkdir tmp + $ cd tmp + $ git lfs install + $ git clone https://huggingface.co/csukuangfj/icefall-aishell-transducer-stateless-modified-2022-03-01 + + +.. CAUTION:: + + You have to use ``git lfs`` to download the pre-trained model. + +After downloading, you will have the following files: + +.. code-block:: bash + + $ cd egs/aishell/ASR + $ tree tmp/icefall-aishell-transducer-stateless-modified-2022-03-01 + + +.. code-block:: bash + + tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/ + |-- README.md + |-- data + | `-- lang_char + | |-- L.pt + | |-- lexicon.txt + | |-- tokens.txt + | `-- words.txt + |-- exp + | `-- pretrained.pt + |-- log + | |-- errs-test-beam_4-epoch-64-avg-33-beam-4.txt + | |-- errs-test-greedy_search-epoch-64-avg-33-context-2-max-sym-per-frame-1.txt + | |-- log-decode-epoch-64-avg-33-beam-4-2022-03-02-12-05-03 + | |-- log-decode-epoch-64-avg-33-context-2-max-sym-per-frame-1-2022-02-28-18-13-07 + | |-- recogs-test-beam_4-epoch-64-avg-33-beam-4.txt + | `-- recogs-test-greedy_search-epoch-64-avg-33-context-2-max-sym-per-frame-1.txt + `-- test_wavs + |-- BAC009S0764W0121.wav + |-- BAC009S0764W0122.wav + |-- BAC009S0764W0123.wav + `-- transcript.txt + + 5 directories, 16 files + + +**File descriptions**: + + - ``data/lang_char`` + + It contains language related files. You can find the vocabulary size in ``tokens.txt``. + + - ``exp/pretrained.pt`` + + It contains pre-trained model parameters, obtained by averaging + checkpoints from ``epoch-32.pt`` to ``epoch-64.pt``. + Note: We have removed optimizer ``state_dict`` to reduce file size. + + - ``log`` + + It contains decoding logs and decoded results. + + - ``test_wavs`` + + It contains some test sound files from Aishell ``test`` dataset. + +The information of the test sound files is listed below: + +.. 
code-block:: bash + + $ soxi tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/test_wavs/*.wav + + Input File : 'tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/test_wavs/BAC009S0764W0121.wav' + Channels : 1 + Sample Rate : 16000 + Precision : 16-bit + Duration : 00:00:04.20 = 67263 samples ~ 315.295 CDDA sectors + File Size : 135k + Bit Rate : 256k + Sample Encoding: 16-bit Signed Integer PCM + + + Input File : 'tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/test_wavs/BAC009S0764W0122.wav' + Channels : 1 + Sample Rate : 16000 + Precision : 16-bit + Duration : 00:00:04.12 = 65840 samples ~ 308.625 CDDA sectors + File Size : 132k + Bit Rate : 256k + Sample Encoding: 16-bit Signed Integer PCM + + + Input File : 'tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/test_wavs/BAC009S0764W0123.wav' + Channels : 1 + Sample Rate : 16000 + Precision : 16-bit + Duration : 00:00:04.00 = 64000 samples ~ 300 CDDA sectors + File Size : 128k + Bit Rate : 256k + Sample Encoding: 16-bit Signed Integer PCM + + Total Duration of 3 files: 00:00:12.32 + +Usage +~~~~~ + +.. code-block:: + + $ cd egs/aishell/ASR + $ ./transducer_stateless_modified/pretrained.py --help + +displays the help information. + +It supports three decoding methods: + + - greedy search + - beam search + - modified beam search + +.. note:: + + In modified beam search, it limits the maximum number of symbols that can be + emitted per frame to 1. To use this method, you have to ensure that your model + has been trained with the option ``--modified-transducer-prob``. Otherwise, + it may give you poor results. + +Greedy search +^^^^^^^^^^^^^ + +The command to run greedy search is given below: + +.. code-block:: bash + + + $ cd egs/aishell/ASR + $ ./transducer_stateless_modified/pretrained.py \ + --checkpoint ./tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/exp/pretrained.pt \ + --lang-dir ./tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/data/lang_char \ + --method greedy_search \ + ./tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/test_wavs/BAC009S0764W0121.wav \ + ./tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/test_wavs/BAC009S0764W0122.wav \ + ./tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/test_wavs/BAC009S0764W0123.wav + +The output is as follows: + +.. 
code-block:: + + 2022-03-03 15:35:26,531 INFO [pretrained.py:239] device: cuda:0 + 2022-03-03 15:35:26,994 INFO [lexicon.py:176] Loading pre-compiled tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/data/lang_char/Linv.pt + 2022-03-03 15:35:27,027 INFO [pretrained.py:246] {'feature_dim': 80, 'encoder_out_dim': 512, 'subsampling_factor': 4, 'attention_dim': 512, 'nhead': 8, 'dim_feedforward': 2048, 'num_encoder_layers': 12, 'vgg_frontend': False, 'env_info': {'k2-version': '1.13', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'f4fefe4882bc0ae59af951da3f47335d5495ef71', 'k2-git-date': 'Thu Feb 10 15:16:02 2022', 'lhotse-version': '1.0.0.dev+missing.version.file', 'torch-cuda-available': True, 'torch-cuda-version': '10.2', 'python-version': '3.8', 'icefall-git-branch': 'master', 'icefall-git-sha1': '50d2281-clean', 'icefall-git-date': 'Wed Mar 2 16:02:38 2022', 'icefall-path': '/ceph-fj/fangjun/open-source-2/icefall-aishell', 'k2-path': '/ceph-fj/fangjun/open-source-2/k2-multi-datasets/k2/python/k2/__init__.py', 'lhotse-path': '/ceph-fj/fangjun/open-source-2/lhotse-aishell/lhotse/__init__.py', 'hostname': 'de-74279-k2-train-2-0815224919-75d558775b-mmnv8', 'IP address': '10.177.72.138'}, 'sample_rate': 16000, 'checkpoint': './tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/exp/pretrained.pt', 'lang_dir': PosixPath('tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/data/lang_char'), 'method': 'greedy_search', 'sound_files': ['./tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/test_wavs/BAC009S0764W0121.wav', './tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/test_wavs/BAC009S0764W0122.wav', './tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/test_wavs/BAC009S0764W0123.wav'], 'beam_size': 4, 'context_size': 2, 'max_sym_per_frame': 3, 'blank_id': 0, 'vocab_size': 4336} + 2022-03-03 15:35:27,027 INFO [pretrained.py:248] About to create model + 2022-03-03 15:35:36,878 INFO [pretrained.py:257] Constructing Fbank computer + 2022-03-03 15:35:36,880 INFO [pretrained.py:267] Reading sound files: ['./tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/test_wavs/BAC009S0764W0121.wav', './tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/test_wavs/BAC009S0764W0122.wav', './tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/test_wavs/BAC009S0764W0123.wav'] + 2022-03-03 15:35:36,891 INFO [pretrained.py:273] Decoding started + /ceph-fj/fangjun/open-source-2/icefall-aishell/egs/aishell/ASR/transducer_stateless_modified/conformer.py:113: UserWarning: __floordiv__ is deprecated, and its behavior will change in a future version of pytorch. It currently rounds toward 0 (like the 'trunc' function NOT 'floor'). This results in incorrect rounding for negative values. To keep the current behavior, use torch.div(a, b, rounding_mode='trunc'), or for actual floor division, use torch.div(a, b, rounding_mode='floor'). 
+ lengths = ((x_lens - 1) // 2 - 1) // 2 + 2022-03-03 15:35:37,163 INFO [pretrained.py:320] + ./tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/test_wavs/BAC009S0764W0121.wav: + 甚 至 出 现 交 易 几 乎 停 滞 的 情 况 + + ./tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/test_wavs/BAC009S0764W0122.wav: + 一 二 线 城 市 虽 然 也 处 于 调 整 中 + + ./tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/test_wavs/BAC009S0764W0123.wav: + 但 因 为 聚 集 了 过 多 公 共 资 源 + + 2022-03-03 15:35:37,163 INFO [pretrained.py:322] Decoding Done + +Beam search +^^^^^^^^^^^ + +The command to run beam search is given below: + +.. code-block:: bash + + + $ cd egs/aishell/ASR + + $ ./transducer_stateless_modified/pretrained.py \ + --checkpoint ./tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/exp/pretrained.pt \ + --lang-dir ./tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/data/lang_char \ + --method beam_search \ + --beam-size 4 \ + ./tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/test_wavs/BAC009S0764W0121.wav \ + ./tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/test_wavs/BAC009S0764W0122.wav \ + ./tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/test_wavs/BAC009S0764W0123.wav + +The output is as follows: + +.. code-block:: + + 2022-03-03 15:39:09,285 INFO [pretrained.py:239] device: cuda:0 + 2022-03-03 15:39:09,708 INFO [lexicon.py:176] Loading pre-compiled tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/data/lang_char/Linv.pt + 2022-03-03 15:39:09,759 INFO [pretrained.py:246] {'feature_dim': 80, 'encoder_out_dim': 512, 'subsampling_factor': 4, 'attention_dim': 512, 'nhead': 8, 'dim_feedforward': 2048, 'num_encoder_layers': 12, 'vgg_frontend': False, 'env_info': {'k2-version': '1.13', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'f4fefe4882bc0ae59af951da3f47335d5495ef71', 'k2-git-date': 'Thu Feb 10 15:16:02 2022', 'lhotse-version': '1.0.0.dev+missing.version.file', 'torch-cuda-available': True, 'torch-cuda-version': '10.2', 'python-version': '3.8', 'icefall-git-branch': 'master', 'icefall-git-sha1': '50d2281-clean', 'icefall-git-date': 'Wed Mar 2 16:02:38 2022', 'icefall-path': '/ceph-fj/fangjun/open-source-2/icefall-aishell', 'k2-path': '/ceph-fj/fangjun/open-source-2/k2-multi-datasets/k2/python/k2/__init__.py', 'lhotse-path': '/ceph-fj/fangjun/open-source-2/lhotse-aishell/lhotse/__init__.py', 'hostname': 'de-74279-k2-train-2-0815224919-75d558775b-mmnv8', 'IP address': '10.177.72.138'}, 'sample_rate': 16000, 'checkpoint': './tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/exp/pretrained.pt', 'lang_dir': PosixPath('tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/data/lang_char'), 'method': 'beam_search', 'sound_files': ['./tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/test_wavs/BAC009S0764W0121.wav', './tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/test_wavs/BAC009S0764W0122.wav', './tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/test_wavs/BAC009S0764W0123.wav'], 'beam_size': 4, 'context_size': 2, 'max_sym_per_frame': 3, 'blank_id': 0, 'vocab_size': 4336} + 2022-03-03 15:39:09,760 INFO [pretrained.py:248] About to create model + 2022-03-03 15:39:18,919 INFO [pretrained.py:257] Constructing Fbank computer + 2022-03-03 15:39:18,922 INFO [pretrained.py:267] Reading sound files: ['./tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/test_wavs/BAC009S0764W0121.wav', 
'./tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/test_wavs/BAC009S0764W0122.wav', './tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/test_wavs/BAC009S0764W0123.wav'] + 2022-03-03 15:39:18,929 INFO [pretrained.py:273] Decoding started + /ceph-fj/fangjun/open-source-2/icefall-aishell/egs/aishell/ASR/transducer_stateless_modified/conformer.py:113: UserWarning: __floordiv__ is deprecated, and its behavior will change in a future version of pytorch. It currently rounds toward 0 (like the 'trunc' function NOT 'floor'). This results in incorrect rounding for negative values. To keep the current behavior, use torch.div(a, b, rounding_mode='trunc'), or for actual floor division, use torch.div(a, b, rounding_mode='floor'). + lengths = ((x_lens - 1) // 2 - 1) // 2 + 2022-03-03 15:39:21,046 INFO [pretrained.py:320] + ./tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/test_wavs/BAC009S0764W0121.wav: + 甚 至 出 现 交 易 几 乎 停 滞 的 情 况 + + ./tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/test_wavs/BAC009S0764W0122.wav: + 一 二 线 城 市 虽 然 也 处 于 调 整 中 + + ./tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/test_wavs/BAC009S0764W0123.wav: + 但 因 为 聚 集 了 过 多 公 共 资 源 + + 2022-03-03 15:39:21,047 INFO [pretrained.py:322] Decoding Done + +Modified Beam search +^^^^^^^^^^^^^^^^^^^^ + +The command to run modified beam search is given below: + +.. code-block:: bash + + + $ cd egs/aishell/ASR + + $ ./transducer_stateless_modified/pretrained.py \ + --checkpoint ./tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/exp/pretrained.pt \ + --lang-dir ./tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/data/lang_char \ + --method modified_beam_search \ + --beam-size 4 \ + ./tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/test_wavs/BAC009S0764W0121.wav \ + ./tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/test_wavs/BAC009S0764W0122.wav \ + ./tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/test_wavs/BAC009S0764W0123.wav + +The output is as follows: + +.. 
code-block:: + + 2022-03-03 15:41:23,319 INFO [pretrained.py:239] device: cuda:0 + 2022-03-03 15:41:23,798 INFO [lexicon.py:176] Loading pre-compiled tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/data/lang_char/Linv.pt + 2022-03-03 15:41:23,831 INFO [pretrained.py:246] {'feature_dim': 80, 'encoder_out_dim': 512, 'subsampling_factor': 4, 'attention_dim': 512, 'nhead': 8, 'dim_feedforward': 2048, 'num_encoder_layers': 12, 'vgg_frontend': False, 'env_info': {'k2-version': '1.13', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'f4fefe4882bc0ae59af951da3f47335d5495ef71', 'k2-git-date': 'Thu Feb 10 15:16:02 2022', 'lhotse-version': '1.0.0.dev+missing.version.file', 'torch-cuda-available': True, 'torch-cuda-version': '10.2', 'python-version': '3.8', 'icefall-git-branch': 'master', 'icefall-git-sha1': '50d2281-clean', 'icefall-git-date': 'Wed Mar 2 16:02:38 2022', 'icefall-path': '/ceph-fj/fangjun/open-source-2/icefall-aishell', 'k2-path': '/ceph-fj/fangjun/open-source-2/k2-multi-datasets/k2/python/k2/__init__.py', 'lhotse-path': '/ceph-fj/fangjun/open-source-2/lhotse-aishell/lhotse/__init__.py', 'hostname': 'de-74279-k2-train-2-0815224919-75d558775b-mmnv8', 'IP address': '10.177.72.138'}, 'sample_rate': 16000, 'checkpoint': './tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/exp/pretrained.pt', 'lang_dir': PosixPath('tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/data/lang_char'), 'method': 'modified_beam_search', 'sound_files': ['./tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/test_wavs/BAC009S0764W0121.wav', './tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/test_wavs/BAC009S0764W0122.wav', './tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/test_wavs/BAC009S0764W0123.wav'], 'beam_size': 4, 'context_size': 2, 'max_sym_per_frame': 3, 'blank_id': 0, 'vocab_size': 4336} + 2022-03-03 15:41:23,831 INFO [pretrained.py:248] About to create model + 2022-03-03 15:41:32,214 INFO [pretrained.py:257] Constructing Fbank computer + 2022-03-03 15:41:32,215 INFO [pretrained.py:267] Reading sound files: ['./tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/test_wavs/BAC009S0764W0121.wav', './tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/test_wavs/BAC009S0764W0122.wav', './tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/test_wavs/BAC009S0764W0123.wav'] + 2022-03-03 15:41:32,220 INFO [pretrained.py:273] Decoding started + /ceph-fj/fangjun/open-source-2/icefall-aishell/egs/aishell/ASR/transducer_stateless_modified/conformer.py:113: UserWarning: __floordiv__ is deprecated, and its behavior will change in a future version of pytorch. It currently rounds toward 0 (like the 'trunc' function NOT 'floor'). This results in incorrect rounding for negative values. To keep the current behavior, use torch.div(a, b, rounding_mode='trunc'), or for actual floor division, use torch.div(a, b, rounding_mode='floor'). + lengths = ((x_lens - 1) // 2 - 1) // 2 + /ceph-fj/fangjun/open-source-2/icefall-aishell/egs/aishell/ASR/transducer_stateless_modified/beam_search.py:402: UserWarning: __floordiv__ is deprecated, and its behavior will change in a future version of pytorch. It currently rounds toward 0 (like the 'trunc' function NOT 'floor'). This results in incorrect rounding for negative values. To keep the current behavior, use torch.div(a, b, rounding_mode='trunc'), or for actual floor division, use torch.div(a, b, rounding_mode='floor'). 
+ topk_hyp_indexes = topk_indexes // logits.size(-1) + 2022-03-03 15:41:32,583 INFO [pretrained.py:320] + ./tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/test_wavs/BAC009S0764W0121.wav: + 甚 至 出 现 交 易 几 乎 停 滞 的 情 况 + + ./tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/test_wavs/BAC009S0764W0122.wav: + 一 二 线 城 市 虽 然 也 处 于 调 整 中 + + ./tmp/icefall-aishell-transducer-stateless-modified-2022-03-01/test_wavs/BAC009S0764W0123.wav: + 但 因 为 聚 集 了 过 多 公 共 资 源 + + 2022-03-03 15:41:32,583 INFO [pretrained.py:322] Decoding Done + +Colab notebook +-------------- + +We provide a colab notebook for this recipe showing how to use a pre-trained model to +transcribe sound files. + +|aishell asr stateless modified transducer colab notebook| + +.. |aishell asr stateless modified transducer colab notebook| image:: https://colab.research.google.com/assets/colab-badge.svg + :target: https://colab.research.google.com/drive/12jpTxJB44vzwtcmJl2DTdznW0OawPb9H?usp=sharing diff --git a/docs/source/recipes/index.rst b/docs/source/recipes/index.rst index 78e9ea569..9d1d83d29 100644 --- a/docs/source/recipes/index.rst +++ b/docs/source/recipes/index.rst @@ -10,12 +10,10 @@ We may add recipes for other tasks as well in the future. .. Other recipes are listed in a alphabetical order. .. toctree:: - :maxdepth: 3 + :maxdepth: 2 + :caption: Table of Contents - yesno - - librispeech - - aishell - - timit + aishell/index + librispeech/index + timit/index + yesno/index diff --git a/docs/source/recipes/librispeech.rst b/docs/source/recipes/librispeech.rst deleted file mode 100644 index 946b23407..000000000 --- a/docs/source/recipes/librispeech.rst +++ /dev/null @@ -1,10 +0,0 @@ -LibriSpeech -=========== - -We provide the following models for the LibriSpeech dataset: - -.. toctree:: - :maxdepth: 2 - - librispeech/tdnn_lstm_ctc - librispeech/conformer_ctc diff --git a/docs/source/recipes/librispeech/index.rst b/docs/source/recipes/librispeech/index.rst new file mode 100644 index 000000000..5fa08ab6b --- /dev/null +++ b/docs/source/recipes/librispeech/index.rst @@ -0,0 +1,8 @@ +LibriSpeech +=========== + +.. toctree:: + :maxdepth: 1 + + tdnn_lstm_ctc + conformer_ctc diff --git a/docs/source/recipes/timit.rst b/docs/source/recipes/timit.rst deleted file mode 100644 index b630e2ce4..000000000 --- a/docs/source/recipes/timit.rst +++ /dev/null @@ -1,10 +0,0 @@ -TIMIT -=========== - -We provide the following models for the TIMIT dataset: - -.. toctree:: - :maxdepth: 2 - - timit/tdnn_lstm_ctc - timit/tdnn_ligru_ctc \ No newline at end of file diff --git a/docs/source/recipes/timit/index.rst b/docs/source/recipes/timit/index.rst new file mode 100644 index 000000000..17f40cdb7 --- /dev/null +++ b/docs/source/recipes/timit/index.rst @@ -0,0 +1,9 @@ +TIMIT +===== + +.. toctree:: + :maxdepth: 1 + + tdnn_ligru_ctc + tdnn_lstm_ctc + diff --git a/docs/source/recipes/timit/tdnn_ligru_ctc.rst b/docs/source/recipes/timit/tdnn_ligru_ctc.rst index 30877505f..186420ee7 100644 --- a/docs/source/recipes/timit/tdnn_ligru_ctc.rst +++ b/docs/source/recipes/timit/tdnn_ligru_ctc.rst @@ -1,5 +1,5 @@ TDNN-LiGRU-CTC -============= +============== This tutorial shows you how to run a TDNN-LiGRU-CTC model with the `TIMIT `_ dataset. 
diff --git a/docs/source/recipes/images/yesno-tdnn-tensorboard-log.png b/docs/source/recipes/yesno/images/tdnn-tensorboard-log.png similarity index 100% rename from docs/source/recipes/images/yesno-tdnn-tensorboard-log.png rename to docs/source/recipes/yesno/images/tdnn-tensorboard-log.png diff --git a/docs/source/recipes/yesno/index.rst b/docs/source/recipes/yesno/index.rst new file mode 100644 index 000000000..d68523a97 --- /dev/null +++ b/docs/source/recipes/yesno/index.rst @@ -0,0 +1,7 @@ +YesNo +===== + +.. toctree:: + :maxdepth: 1 + + tdnn diff --git a/docs/source/recipes/yesno.rst b/docs/source/recipes/yesno/tdnn.rst similarity index 99% rename from docs/source/recipes/yesno.rst rename to docs/source/recipes/yesno/tdnn.rst index cb425ad1d..e8b748e6b 100644 --- a/docs/source/recipes/yesno.rst +++ b/docs/source/recipes/yesno/tdnn.rst @@ -1,5 +1,5 @@ -yesno -===== +TDNN-CTC +======== This page shows you how to run the `yesno `_ recipe. It contains: @@ -145,7 +145,7 @@ In ``tdnn/exp``, you will find the following files: Note there is a URL in the above output, click it and you will see the following screenshot: - .. figure:: images/yesno-tdnn-tensorboard-log.png + .. figure:: images/tdnn-tensorboard-log.png :width: 600 :alt: TensorBoard screenshot :align: center diff --git a/egs/aishell/ASR/README.md b/egs/aishell/ASR/README.md index 1b3c5a2e3..d0a0c1829 100644 --- a/egs/aishell/ASR/README.md +++ b/egs/aishell/ASR/README.md @@ -1,7 +1,7 @@ # Introduction -Please refer to +Please refer to for how to run models in this recipe. # Transducers diff --git a/egs/librispeech/ASR/README.md b/egs/librispeech/ASR/README.md index 211a7d120..30b5c5c6f 100644 --- a/egs/librispeech/ASR/README.md +++ b/egs/librispeech/ASR/README.md @@ -1,7 +1,7 @@ # Introduction -Please refer to +Please refer to for how to run models in this recipe. # Transducers diff --git a/egs/timit/ASR/README.md b/egs/timit/ASR/README.md index 47103bc45..f10bfccfd 100644 --- a/egs/timit/ASR/README.md +++ b/egs/timit/ASR/README.md @@ -1,3 +1,3 @@ -Please refer to -for how to run models in this recipe. \ No newline at end of file +Please refer to +for how to run models in this recipe. diff --git a/egs/yesno/ASR/README.md b/egs/yesno/ASR/README.md index 6f57412c0..7257bad9a 100644 --- a/egs/yesno/ASR/README.md +++ b/egs/yesno/ASR/README.md @@ -10,5 +10,5 @@ get the following WER: ``` Please refer to - + for detailed instructions. From 2f0fbf430c34779829079e66fcbbfec074c9dc45 Mon Sep 17 00:00:00 2001 From: Fangjun Kuang Date: Fri, 4 Mar 2022 11:56:31 +0800 Subject: [PATCH 18/25] Remove duplicate files. 
(#236) --- docs/source/recipes/aishell/conformer_ctc.rst | 2 +- egs/aishell/ASR/local/compile_hlg.py | 157 +----------------- egs/aishell/ASR/local/compute_fbank_musan.py | 111 +------------ .../convert_transcript_words_to_tokens.py | 108 +----------- .../ASR/local/generate_unique_lexicon.py | 101 +---------- egs/timit/ASR/local/compute_fbank_musan.py | 98 +---------- egs/timit/ASR/shared | 2 +- 7 files changed, 7 insertions(+), 572 deletions(-) mode change 100755 => 120000 egs/aishell/ASR/local/compile_hlg.py mode change 100755 => 120000 egs/aishell/ASR/local/compute_fbank_musan.py mode change 100755 => 120000 egs/aishell/ASR/local/convert_transcript_words_to_tokens.py mode change 100755 => 120000 egs/aishell/ASR/local/generate_unique_lexicon.py mode change 100644 => 120000 egs/timit/ASR/local/compute_fbank_musan.py mode change 100644 => 120000 egs/timit/ASR/shared diff --git a/docs/source/recipes/aishell/conformer_ctc.rst b/docs/source/recipes/aishell/conformer_ctc.rst index 2dcf0c728..75a2a8eca 100644 --- a/docs/source/recipes/aishell/conformer_ctc.rst +++ b/docs/source/recipes/aishell/conformer_ctc.rst @@ -1,4 +1,4 @@ -Confromer CTC +Conformer CTC ============= This tutorial shows you how to run a conformer ctc model diff --git a/egs/aishell/ASR/local/compile_hlg.py b/egs/aishell/ASR/local/compile_hlg.py deleted file mode 100755 index 098d5d6a3..000000000 --- a/egs/aishell/ASR/local/compile_hlg.py +++ /dev/null @@ -1,156 +0,0 @@ -#!/usr/bin/env python3 -# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang) -# -# See ../../../../LICENSE for clarification regarding multiple authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -""" -This script takes as input lang_dir and generates HLG from - - - H, the ctc topology, built from tokens contained in lang_dir/lexicon.txt - - L, the lexicon, built from lang_dir/L_disambig.pt - - Caution: We use a lexicon that contains disambiguation symbols - - - G, the LM, built from data/lm/G_3_gram.fst.txt - -The generated HLG is saved in $lang_dir/HLG.pt -""" -import argparse -import logging -from pathlib import Path - -import k2 -import torch - -from icefall.lexicon import Lexicon - - -def get_args(): - parser = argparse.ArgumentParser() - parser.add_argument( - "--lang-dir", - type=str, - help="""Input and output directory. - """, - ) - - return parser.parse_args() - - -def compile_HLG(lang_dir: str) -> k2.Fsa: - """ - Args: - lang_dir: - The language directory, e.g., data/lang_phone or data/lang_bpe_5000. - - Return: - An FSA representing HLG. - """ - lexicon = Lexicon(lang_dir) - max_token_id = max(lexicon.tokens) - logging.info(f"Building ctc_topo. 
max_token_id: {max_token_id}") - H = k2.ctc_topo(max_token_id) - L = k2.Fsa.from_dict(torch.load(f"{lang_dir}/L_disambig.pt")) - - if Path("data/lm/G_3_gram.pt").is_file(): - logging.info("Loading pre-compiled G_3_gram") - d = torch.load("data/lm/G_3_gram.pt") - G = k2.Fsa.from_dict(d) - else: - logging.info("Loading G_3_gram.fst.txt") - with open("data/lm/G_3_gram.fst.txt") as f: - G = k2.Fsa.from_openfst(f.read(), acceptor=False) - torch.save(G.as_dict(), "data/lm/G_3_gram.pt") - - first_token_disambig_id = lexicon.token_table["#0"] - first_word_disambig_id = lexicon.word_table["#0"] - - L = k2.arc_sort(L) - G = k2.arc_sort(G) - - logging.info("Intersecting L and G") - LG = k2.compose(L, G) - logging.info(f"LG shape: {LG.shape}") - - logging.info("Connecting LG") - LG = k2.connect(LG) - logging.info(f"LG shape after k2.connect: {LG.shape}") - - logging.info(type(LG.aux_labels)) - logging.info("Determinizing LG") - - LG = k2.determinize(LG) - logging.info(type(LG.aux_labels)) - - logging.info("Connecting LG after k2.determinize") - LG = k2.connect(LG) - - logging.info("Removing disambiguation symbols on LG") - - LG.labels[LG.labels >= first_token_disambig_id] = 0 - - assert isinstance(LG.aux_labels, k2.RaggedTensor) - LG.aux_labels.values[LG.aux_labels.values >= first_word_disambig_id] = 0 - - LG = k2.remove_epsilon(LG) - logging.info(f"LG shape after k2.remove_epsilon: {LG.shape}") - - LG = k2.connect(LG) - LG.aux_labels = LG.aux_labels.remove_values_eq(0) - - logging.info("Arc sorting LG") - LG = k2.arc_sort(LG) - - logging.info("Composing H and LG") - # CAUTION: The name of the inner_labels is fixed - # to `tokens`. If you want to change it, please - # also change other places in icefall that are using - # it. - HLG = k2.compose(H, LG, inner_labels="tokens") - - logging.info("Connecting LG") - HLG = k2.connect(HLG) - - logging.info("Arc sorting LG") - HLG = k2.arc_sort(HLG) - logging.info(f"HLG.shape: {HLG.shape}") - - return HLG - - -def main(): - args = get_args() - lang_dir = Path(args.lang_dir) - - if (lang_dir / "HLG.pt").is_file(): - logging.info(f"{lang_dir}/HLG.pt already exists - skipping") - return - - logging.info(f"Processing {lang_dir}") - - HLG = compile_HLG(lang_dir) - logging.info(f"Saving HLG.pt to {lang_dir}") - torch.save(HLG.as_dict(), f"{lang_dir}/HLG.pt") - - -if __name__ == "__main__": - formatter = ( - "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s" - ) - - logging.basicConfig(format=formatter, level=logging.INFO) - - main() diff --git a/egs/aishell/ASR/local/compile_hlg.py b/egs/aishell/ASR/local/compile_hlg.py new file mode 120000 index 000000000..471aa7fb4 --- /dev/null +++ b/egs/aishell/ASR/local/compile_hlg.py @@ -0,0 +1 @@ +../../../librispeech/ASR/local/compile_hlg.py \ No newline at end of file diff --git a/egs/aishell/ASR/local/compute_fbank_musan.py b/egs/aishell/ASR/local/compute_fbank_musan.py deleted file mode 100755 index e79bdafb1..000000000 --- a/egs/aishell/ASR/local/compute_fbank_musan.py +++ /dev/null @@ -1,110 +0,0 @@ -#!/usr/bin/env python3 -# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang) -# -# See ../../../../LICENSE for clarification regarding multiple authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -""" -This file computes fbank features of the musan dataset. -It looks for manifests in the directory data/manifests. - -The generated fbank features are saved in data/fbank. -""" - -import argparse -import logging -import os -from pathlib import Path - -import torch -from lhotse import CutSet, Fbank, FbankConfig, LilcomHdf5Writer, combine -from lhotse.recipes.utils import read_manifests_if_cached - -from icefall.utils import get_executor - -# Torch's multithreaded behavior needs to be disabled or -# it wastes a lot of CPU and slow things down. -# Do this outside of main() in case it needs to take effect -# even when we are not invoking the main (e.g. when spawning subprocesses). -torch.set_num_threads(1) -torch.set_num_interop_threads(1) - - -def compute_fbank_musan(num_mel_bins: int = 80): - src_dir = Path("data/manifests") - output_dir = Path("data/fbank") - num_jobs = min(15, os.cpu_count()) - - dataset_parts = ( - "music", - "speech", - "noise", - ) - manifests = read_manifests_if_cached( - dataset_parts=dataset_parts, output_dir=src_dir - ) - assert manifests is not None - - musan_cuts_path = output_dir / "cuts_musan.json.gz" - - if musan_cuts_path.is_file(): - logging.info(f"{musan_cuts_path} already exists - skipping") - return - - logging.info("Extracting features for Musan") - - extractor = Fbank(FbankConfig(num_mel_bins=num_mel_bins)) - - with get_executor() as ex: # Initialize the executor only once. 
- # create chunks of Musan with duration 5 - 10 seconds - musan_cuts = ( - CutSet.from_manifests( - recordings=combine( - part["recordings"] for part in manifests.values() - ) - ) - .cut_into_windows(10.0) - .filter(lambda c: c.duration > 5) - .compute_and_store_features( - extractor=extractor, - storage_path=f"{output_dir}/feats_musan", - num_jobs=num_jobs if ex is None else 80, - executor=ex, - storage_type=LilcomHdf5Writer, - ) - ) - musan_cuts.to_json(musan_cuts_path) - - -def get_args(): - parser = argparse.ArgumentParser() - parser.add_argument( - "--num-mel-bins", - type=int, - default=80, - help="""The number of mel bins for Fbank""", - ) - - return parser.parse_args() - - -if __name__ == "__main__": - formatter = ( - "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s" - ) - - logging.basicConfig(format=formatter, level=logging.INFO) - args = get_args() - compute_fbank_musan(num_mel_bins=args.num_mel_bins) diff --git a/egs/aishell/ASR/local/compute_fbank_musan.py b/egs/aishell/ASR/local/compute_fbank_musan.py new file mode 120000 index 000000000..5833f2484 --- /dev/null +++ b/egs/aishell/ASR/local/compute_fbank_musan.py @@ -0,0 +1 @@ +../../../librispeech/ASR/local/compute_fbank_musan.py \ No newline at end of file diff --git a/egs/aishell/ASR/local/convert_transcript_words_to_tokens.py b/egs/aishell/ASR/local/convert_transcript_words_to_tokens.py deleted file mode 100755 index 133499c8b..000000000 --- a/egs/aishell/ASR/local/convert_transcript_words_to_tokens.py +++ /dev/null @@ -1,107 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright 2021 Xiaomi Corporation (Author: Fangjun Kuang) -""" -Convert a transcript file containing words to a corpus file containing tokens -for LM training with the help of a lexicon. - -If the lexicon contains phones, the resulting LM will be a phone LM; If the -lexicon contains word pieces, the resulting LM will be a word piece LM. - -If a word has multiple pronunciations, the one that appears first in the lexicon -is kept; others are removed. - -If the input transcript is: - - hello zoo world hello - world zoo - foo zoo world hellO - -and if the lexicon is - - SPN - hello h e l l o 2 - hello h e l l o - world w o r l d - zoo z o o - -Then the output is - - h e l l o 2 z o o w o r l d h e l l o 2 - w o r l d z o o - SPN z o o w o r l d SPN -""" - -import argparse -from pathlib import Path -from typing import Dict, List - -from generate_unique_lexicon import filter_multiple_pronunications - -from icefall.lexicon import read_lexicon - - -def get_args(): - parser = argparse.ArgumentParser() - parser.add_argument( - "--transcript", - type=str, - help="The input transcript file." - "We assume that the transcript file consists of " - "lines. Each line consists of space separated words.", - ) - parser.add_argument("--lexicon", type=str, help="The input lexicon file.") - parser.add_argument( - "--oov", type=str, default="", help="The OOV word." - ) - - return parser.parse_args() - - -def process_line( - lexicon: Dict[str, List[str]], line: str, oov_token: str -) -> None: - """ - Args: - lexicon: - A dict containing pronunciations. Its keys are words and values - are pronunciations (i.e., tokens). - line: - A line of transcript consisting of space(s) separated words. - oov_token: - The pronunciation of the oov word if a word in `line` is not present - in the lexicon. - Returns: - Return None. 
- """ - s = "" - words = line.strip().split() - for i, w in enumerate(words): - tokens = lexicon.get(w, oov_token) - s += " ".join(tokens) - s += " " - print(s.strip()) - - -def main(): - args = get_args() - assert Path(args.lexicon).is_file() - assert Path(args.transcript).is_file() - assert len(args.oov) > 0 - - # Only the first pronunciation of a word is kept - lexicon = filter_multiple_pronunications(read_lexicon(args.lexicon)) - - lexicon = dict(lexicon) - - assert args.oov in lexicon - - oov_token = lexicon[args.oov] - - with open(args.transcript) as f: - for line in f: - process_line(lexicon=lexicon, line=line, oov_token=oov_token) - - -if __name__ == "__main__": - main() diff --git a/egs/aishell/ASR/local/convert_transcript_words_to_tokens.py b/egs/aishell/ASR/local/convert_transcript_words_to_tokens.py new file mode 120000 index 000000000..2ce13fd69 --- /dev/null +++ b/egs/aishell/ASR/local/convert_transcript_words_to_tokens.py @@ -0,0 +1 @@ +../../../librispeech/ASR/local/convert_transcript_words_to_tokens.py \ No newline at end of file diff --git a/egs/aishell/ASR/local/generate_unique_lexicon.py b/egs/aishell/ASR/local/generate_unique_lexicon.py deleted file mode 100755 index 566c0743d..000000000 --- a/egs/aishell/ASR/local/generate_unique_lexicon.py +++ /dev/null @@ -1,100 +0,0 @@ -#!/usr/bin/env python3 -# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang) -# -# See ../../../../LICENSE for clarification regarding multiple authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -This file takes as input a lexicon.txt and output a new lexicon, -in which each word has a unique pronunciation. - -The way to do this is to keep only the first pronunciation of a word -in lexicon.txt. -""" - - -import argparse -import logging -from pathlib import Path -from typing import List, Tuple - -from icefall.lexicon import read_lexicon, write_lexicon - - -def get_args(): - parser = argparse.ArgumentParser() - parser.add_argument( - "--lang-dir", - type=str, - help="""Input and output directory. - It should contain a file lexicon.txt. - This file will generate a new file uniq_lexicon.txt - in it. - """, - ) - - return parser.parse_args() - - -def filter_multiple_pronunications( - lexicon: List[Tuple[str, List[str]]] -) -> List[Tuple[str, List[str]]]: - """Remove multiple pronunciations of words from a lexicon. - - If a word has more than one pronunciation in the lexicon, only - the first one is kept, while other pronunciations are removed - from the lexicon. - - Args: - lexicon: - The input lexicon, containing a list of (word, [p1, p2, ..., pn]), - where "p1, p2, ..., pn" are the pronunciations of the "word". - Returns: - Return a new lexicon where each word has a unique pronunciation. 
- """ - seen = set() - ans = [] - - for word, tokens in lexicon: - if word in seen: - continue - seen.add(word) - ans.append((word, tokens)) - return ans - - -def main(): - args = get_args() - lang_dir = Path(args.lang_dir) - - lexicon_filename = lang_dir / "lexicon.txt" - - in_lexicon = read_lexicon(lexicon_filename) - - out_lexicon = filter_multiple_pronunications(in_lexicon) - - write_lexicon(lang_dir / "uniq_lexicon.txt", out_lexicon) - - logging.info(f"Number of entries in lexicon.txt: {len(in_lexicon)}") - logging.info(f"Number of entries in uniq_lexicon.txt: {len(out_lexicon)}") - - -if __name__ == "__main__": - formatter = ( - "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s" - ) - - logging.basicConfig(format=formatter, level=logging.INFO) - - main() diff --git a/egs/aishell/ASR/local/generate_unique_lexicon.py b/egs/aishell/ASR/local/generate_unique_lexicon.py new file mode 120000 index 000000000..c0aea1403 --- /dev/null +++ b/egs/aishell/ASR/local/generate_unique_lexicon.py @@ -0,0 +1 @@ +../../../librispeech/ASR/local/generate_unique_lexicon.py \ No newline at end of file diff --git a/egs/timit/ASR/local/compute_fbank_musan.py b/egs/timit/ASR/local/compute_fbank_musan.py deleted file mode 100644 index d44524e70..000000000 --- a/egs/timit/ASR/local/compute_fbank_musan.py +++ /dev/null @@ -1,97 +0,0 @@ -#!/usr/bin/env python3 -# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang) -# -# See ../../../../LICENSE for clarification regarding multiple authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -""" -This file computes fbank features of the musan dataset. -It looks for manifests in the directory data/manifests. - -The generated fbank features are saved in data/fbank. -""" - -import logging -import os -from pathlib import Path - -import torch -from lhotse import CutSet, Fbank, FbankConfig, LilcomHdf5Writer, combine -from lhotse.recipes.utils import read_manifests_if_cached - -from icefall.utils import get_executor - -# Torch's multithreaded behavior needs to be disabled or -# it wastes a lot of CPU and slow things down. -# Do this outside of main() in case it needs to take effect -# even when we are not invoking the main (e.g. when spawning subprocesses). -torch.set_num_threads(1) -torch.set_num_interop_threads(1) - - -def compute_fbank_musan(): - src_dir = Path("data/manifests") - output_dir = Path("data/fbank") - num_jobs = min(15, os.cpu_count()) - num_mel_bins = 80 - - dataset_parts = ( - "music", - "speech", - "noise", - ) - manifests = read_manifests_if_cached( - dataset_parts=dataset_parts, output_dir=src_dir - ) - assert manifests is not None - - musan_cuts_path = output_dir / "cuts_musan.json.gz" - - if musan_cuts_path.is_file(): - logging.info(f"{musan_cuts_path} already exists - skipping") - return - - logging.info("Extracting features for Musan") - - extractor = Fbank(FbankConfig(num_mel_bins=num_mel_bins)) - - with get_executor() as ex: # Initialize the executor only once. 
- # create chunks of Musan with duration 5 - 10 seconds - musan_cuts = ( - CutSet.from_manifests( - recordings=combine( - part["recordings"] for part in manifests.values() - ) - ) - .cut_into_windows(10.0) - .filter(lambda c: c.duration > 5) - .compute_and_store_features( - extractor=extractor, - storage_path=f"{output_dir}/feats_musan", - num_jobs=num_jobs if ex is None else 80, - executor=ex, - storage_type=LilcomHdf5Writer, - ) - ) - musan_cuts.to_json(musan_cuts_path) - - -if __name__ == "__main__": - formatter = ( - "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s" - ) - - logging.basicConfig(format=formatter, level=logging.INFO) - compute_fbank_musan() diff --git a/egs/timit/ASR/local/compute_fbank_musan.py b/egs/timit/ASR/local/compute_fbank_musan.py new file mode 120000 index 000000000..5833f2484 --- /dev/null +++ b/egs/timit/ASR/local/compute_fbank_musan.py @@ -0,0 +1 @@ +../../../librispeech/ASR/local/compute_fbank_musan.py \ No newline at end of file diff --git a/egs/timit/ASR/shared b/egs/timit/ASR/shared deleted file mode 100644 index 4c5e91438..000000000 --- a/egs/timit/ASR/shared +++ /dev/null @@ -1 +0,0 @@ -../../../icefall/shared/ \ No newline at end of file diff --git a/egs/timit/ASR/shared b/egs/timit/ASR/shared new file mode 120000 index 000000000..4cbd91a7e --- /dev/null +++ b/egs/timit/ASR/shared @@ -0,0 +1 @@ +../../../icefall/shared \ No newline at end of file From ad629817659cba719018a5911287a40aca5548a7 Mon Sep 17 00:00:00 2001 From: yaozengwei Date: Fri, 4 Mar 2022 15:38:23 +0800 Subject: [PATCH 19/25] Add diagnostics (#230) * Adding diagnostics code... * Move diagnostics code from local dir to the shared icefall dir * Remove the diagnostics code in the local dir * Update docs of arguments, and remove stats_types() function in TensorDiagnosticOptions object. * Update docs of arguments. * Add copyright information. * Corrected the time in copyright information. 
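For reference, the module added below is used roughly as follows (a minimal
sketch mirroring `_test_tensor_diagnostic()` in `icefall/diagnostics.py` and
the hook-up in `train.py`; the toy model and tensor shapes are arbitrary):

```python
import torch
from torch import nn

from icefall import diagnostics

# Any nn.Module works; this is just a toy model.
model = nn.Sequential(nn.Linear(100, 50), nn.Linear(50, 80))

# Allow roughly 4 MB of cached tensors per tracked quantity.
opts = diagnostics.TensorDiagnosticOptions(2 ** 22)
diagnostic = diagnostics.attach_diagnostics(model, opts)

# Run a few forward/backward passes so the registered hooks can
# accumulate module outputs, gradients and parameter values.
for _ in range(10):
    x = torch.randn(8, 100)
    model(x).sum().backward()

# Print per-dimension "mean-abs" and "pos-ratio" statistics
# for every tracked tensor.
diagnostic.print_diagnostics()
```

During training the same flow is driven by the new `--print-diagnostics`
option, which accumulates statistics for a few batches, prints them and
exits instead of continuing the run.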
Co-authored-by: Daniel Povey --- .../ASR/transducer_stateless/train.py | 38 +- icefall/diagnostics.py | 346 ++++++++++++++++++ 2 files changed, 374 insertions(+), 10 deletions(-) create mode 100644 icefall/diagnostics.py diff --git a/egs/librispeech/ASR/transducer_stateless/train.py b/egs/librispeech/ASR/transducer_stateless/train.py index 4f5379e53..2cc6480d5 100755 --- a/egs/librispeech/ASR/transducer_stateless/train.py +++ b/egs/librispeech/ASR/transducer_stateless/train.py @@ -56,6 +56,7 @@ from torch.nn.utils import clip_grad_norm_ from torch.utils.tensorboard import SummaryWriter from transformer import Noam +from icefall import diagnostics from icefall.checkpoint import load_checkpoint from icefall.checkpoint import save_checkpoint as save_checkpoint_impl from icefall.dist import cleanup_dist, setup_dist @@ -156,6 +157,13 @@ def get_parser(): help="The seed for random generators intended for reproducibility", ) + parser.add_argument( + "--print-diagnostics", + type=str2bool, + default=False, + help="Accumulate stats on activations, print them and exit.", + ) + return parser @@ -510,6 +518,8 @@ def train_one_epoch( loss.backward() clip_grad_norm_(model.parameters(), 5.0, 2.0) optimizer.step() + if params.print_diagnostics and batch_idx == 5: + return if batch_idx % params.log_interval == 0: logging.info( @@ -517,9 +527,6 @@ def train_one_epoch( f"batch {batch_idx}, loss[{loss_info}], " f"tot_loss[{tot_loss}], batch size: {batch_size}" ) - - if batch_idx % params.log_interval == 0: - if tb_writer is not None: loss_info.write_summary( tb_writer, "train/current_", params.batch_idx_train @@ -622,6 +629,12 @@ def run(rank, world_size, args): librispeech = LibriSpeechAsrDataModule(args) + if params.print_diagnostics: + opts = diagnostics.TensorDiagnosticOptions( + 2 ** 22 + ) # allow 4 megabytes per sub-module + diagnostic = diagnostics.attach_diagnostics(model, opts) + train_cuts = librispeech.train_clean_100_cuts() if params.full_libri: train_cuts += librispeech.train_clean_360_cuts() @@ -649,13 +662,14 @@ def run(rank, world_size, args): valid_cuts += librispeech.dev_other_cuts() valid_dl = librispeech.valid_dataloaders(valid_cuts) - scan_pessimistic_batches_for_oom( - model=model, - train_dl=train_dl, - optimizer=optimizer, - sp=sp, - params=params, - ) + if not params.print_diagnostics: + scan_pessimistic_batches_for_oom( + model=model, + train_dl=train_dl, + optimizer=optimizer, + sp=sp, + params=params, + ) for epoch in range(params.start_epoch, params.num_epochs): fix_random_seed(params.seed + epoch) @@ -684,6 +698,10 @@ def run(rank, world_size, args): world_size=world_size, ) + if params.print_diagnostics: + diagnostic.print_diagnostics() + break + save_checkpoint( params=params, model=model, diff --git a/icefall/diagnostics.py b/icefall/diagnostics.py new file mode 100644 index 000000000..944f11f64 --- /dev/null +++ b/icefall/diagnostics.py @@ -0,0 +1,346 @@ +# Copyright 2022 Xiaomi Corp. (authors: Daniel Povey +# Zengwei Yao) +# +# See ../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +import random +from typing import List, Tuple + +import torch +from torch import Tensor, nn + + +class TensorDiagnosticOptions(object): + """Options object for tensor diagnostics: + + Args: + memory_limit: + The maximum number of bytes per tensor (limits how many copies + of the tensor we cache). + """ + + def __init__(self, memory_limit: int): + self.memory_limit = memory_limit + + def dim_is_summarized(self, size: int): + return size > 10 and size != 31 + + +def get_sum_abs_stats( + x: Tensor, dim: int, stats_type: str +) -> Tuple[Tensor, int]: + """Returns the sum-of-absolute-value of this Tensor, for each index into + the specified axis/dim of the tensor. + + Args: + x: + Tensor, tensor to be analyzed + dim: + Dimension with 0 <= dim < x.ndim + stats_type: + Either "mean-abs" in which case the stats represent the mean absolute + value, or "pos-ratio" in which case the stats represent the proportion + of positive values (actually: the tensor is count of positive values, + count is the count of all values). + + Returns: + (sum_abs, count) where sum_abs is a Tensor of shape (x.shape[dim],), + and the count is an integer saying how many items were counted in + each element of sum_abs. + """ + if stats_type == "mean-abs": + x = x.abs() + else: + assert stats_type == "pos-ratio" + x = (x > 0).to(dtype=torch.float) + + orig_numel = x.numel() + sum_dims = [d for d in range(x.ndim) if d != dim] + x = torch.sum(x, dim=sum_dims) + count = orig_numel // x.numel() + x = x.flatten() + + return x, count + + +def get_diagnostics_for_dim( + dim: int, + tensors: List[Tensor], + options: TensorDiagnosticOptions, + sizes_same: bool, + stats_type: str, +) -> str: + """This function gets diagnostics for a dimension of a module. + + Args: + dim: + The dimension to analyze, with 0 <= dim < tensors[0].ndim + tensors: + List of cached tensors to get the stats + options: + Options object + sizes_same: + True if all the tensor sizes are the same on this dimension + stats_type: either "mean-abs" or "pos-ratio", dictates the type of + stats we accumulate, mean-abs is mean absolute value, "pos-ratio" is + proportion of positive to nonnegative values. + + Returns: + Diagnostic as a string, either percentiles or the actual values, + see the code. + """ + + # stats_and_counts is a list of pair (Tensor, int) + stats_and_counts = [get_sum_abs_stats(x, dim, stats_type) for x in tensors] + stats = [x[0] for x in stats_and_counts] + counts = [x[1] for x in stats_and_counts] + if sizes_same: + stats = torch.stack(stats).sum(dim=0) + count = sum(counts) + stats = stats / count + else: + stats = [x[0] / x[1] for x in stats_and_counts] + stats = torch.cat(stats, dim=0) + + # If `summarize` we print percentiles of the stats; + # else, we print out individual elements. + summarize = (not sizes_same) or options.dim_is_summarized(stats.numel()) + if summarize: + # Print out percentiles. 
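+        # The summary consists of 11 evenly spaced order statistics
+        # (the minimum, every 10th percentile, and the maximum), so the
+        # printout stays short even for large dimensions.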
+ stats = stats.sort()[0] + num_percentiles = 10 + size = stats.numel() + percentiles = [] + for i in range(num_percentiles + 1): + index = (i * (size - 1)) // num_percentiles + percentiles.append(stats[index].item()) + percentiles = ["%.2g" % x for x in percentiles] + percentiles = " ".join(percentiles) + return f"percentiles: [{percentiles}]" + else: + stats = stats.tolist() + stats = ["%.2g" % x for x in stats] + stats = "[" + " ".join(stats) + "]" + return stats + + +def print_diagnostics_for_dim( + name: str, dim: int, tensors: List[Tensor], options: TensorDiagnosticOptions +): + """This function prints diagnostics for a dimension of a tensor. + + Args: + name: + The tensor name. + dim: + The dimension to analyze, with 0 <= dim < tensors[0].ndim. + tensors: + List of cached tensors to get the stats. + options: + Options object. + """ + + for stats_type in ["mean-abs", "pos-ratio"]: + # stats_type will be "mean-abs" or "pos-ratio". + sizes = [x.shape[dim] for x in tensors] + sizes_same = all([x == sizes[0] for x in sizes]) + s = get_diagnostics_for_dim( + dim, tensors, options, sizes_same, stats_type + ) + + min_size = min(sizes) + max_size = max(sizes) + size_str = f"{min_size}" if sizes_same else f"{min_size}..{max_size}" + print(f"module={name}, dim={dim}, size={size_str}, {stats_type} {s}") + + +class TensorDiagnostic(object): + """This class is not directly used by the user, it is responsible for + collecting diagnostics for a single parameter tensor of a torch.nn.Module. + + Args: + opts: + Options object. + name: + The tensor name. + """ + + def __init__(self, opts: TensorDiagnosticOptions, name: str): + self.name = name + self.opts = opts + # A list to cache the tensors. + self.saved_tensors = [] + + def accumulate(self, x): + """Accumulate tensors.""" + if isinstance(x, Tuple): + x = x[0] + if not isinstance(x, Tensor): + return + if x.device == torch.device("cpu"): + x = x.detach().clone() + else: + x = x.detach().to("cpu", non_blocking=True) + self.saved_tensors.append(x) + num = len(self.saved_tensors) + if num & (num - 1) == 0: # power of 2.. + self._limit_memory() + + def _limit_memory(self): + """Only keep the newly cached tensors to limit memory.""" + if len(self.saved_tensors) > 1024: + self.saved_tensors = self.saved_tensors[-1024:] + return + + tot_mem = 0.0 + for i in reversed(range(len(self.saved_tensors))): + tot_mem += ( + self.saved_tensors[i].numel() + * self.saved_tensors[i].element_size() + ) + if tot_mem > self.opts.memory_limit: + self.saved_tensors = self.saved_tensors[i:] + return + + def print_diagnostics(self): + """Print diagnostics for each dimension of the tensor.""" + if len(self.saved_tensors) == 0: + print("{name}: no stats".format(name=self.name)) + return + + if self.saved_tensors[0].ndim == 0: + # Ensure there is at least one dim. + self.saved_tensors = [x.unsqueeze(0) for x in self.saved_tensors] + + ndim = self.saved_tensors[0].ndim + for dim in range(ndim): + print_diagnostics_for_dim( + self.name, dim, self.saved_tensors, self.opts + ) + + +class ModelDiagnostic(object): + """This class stores diagnostics for all tensors in the torch.nn.Module. + + Args: + opts: + Options object. + """ + + def __init__(self, opts: TensorDiagnosticOptions): + # In this dictionary, the keys are tensors names and the values + # are corresponding TensorDiagnostic objects. 
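+        # Entries are created lazily in __getitem__, keyed by names such as
+        # "<module>.output", "<module>.grad", "<param>.param_value" and
+        # "<param>.param_grad".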
+ self.diagnostics = dict() + self.opts = opts + + def __getitem__(self, name: str): + if name not in self.diagnostics: + self.diagnostics[name] = TensorDiagnostic(self.opts, name) + return self.diagnostics[name] + + def print_diagnostics(self): + """Print diagnostics for each tensor.""" + for k in sorted(self.diagnostics.keys()): + self.diagnostics[k].print_diagnostics() + + +def attach_diagnostics( + model: nn.Module, opts: TensorDiagnosticOptions +) -> ModelDiagnostic: + """Attach a ModelDiagnostic object to the model by + 1) registering forward hook and backward hook on each module, to accumulate + its output tensors and gradient tensors, respectively; + 2) registering backward hook on each module parameter, to accumulate its + values and gradients. + + Args: + model: + the model to be analyzed. + opts: + Options object. + + Returns: + The ModelDiagnostic object attached to the model. + """ + + ans = ModelDiagnostic(opts) + for name, module in model.named_modules(): + if name == "": + name = "" + + # Setting model_diagnostic=ans and n=name below, instead of trying to + # capture the variables, ensures that we use the current values. + # (matters for name, since the variable gets overwritten). + # These closures don't really capture by value, only by + # "the final value the variable got in the function" :-( + def forward_hook( + _module, _input, _output, _model_diagnostic=ans, _name=name + ): + if isinstance(_output, Tensor): + _model_diagnostic[f"{_name}.output"].accumulate(_output) + elif isinstance(_output, tuple): + for i, o in enumerate(_output): + _model_diagnostic[f"{_name}.output[{i}]"].accumulate(o) + + def backward_hook( + _module, _input, _output, _model_diagnostic=ans, _name=name + ): + if isinstance(_output, Tensor): + _model_diagnostic[f"{_name}.grad"].accumulate(_output) + elif isinstance(_output, tuple): + for i, o in enumerate(_output): + _model_diagnostic[f"{_name}.grad[{i}]"].accumulate(o) + + module.register_forward_hook(forward_hook) + module.register_backward_hook(backward_hook) + + for name, parameter in model.named_parameters(): + + def param_backward_hook( + grad, _parameter=parameter, _model_diagnostic=ans, _name=name + ): + _model_diagnostic[f"{_name}.param_value"].accumulate(_parameter) + _model_diagnostic[f"{_name}.param_grad"].accumulate(grad) + + parameter.register_hook(param_backward_hook) + + return ans + + +def _test_tensor_diagnostic(): + opts = TensorDiagnosticOptions(2 ** 20) + + diagnostic = TensorDiagnostic(opts, "foo") + + for _ in range(10): + diagnostic.accumulate(torch.randn(50, 100) * 10.0) + + diagnostic.print_diagnostics() + + model = nn.Sequential(nn.Linear(100, 50), nn.Linear(50, 80)) + + diagnostic = attach_diagnostics(model, opts) + for _ in range(10): + T = random.randint(200, 300) + x = torch.randn(T, 100) + y = model(x) + y.sum().backward() + + diagnostic.print_diagnostics() + + +if __name__ == "__main__": + _test_tensor_diagnostic() From 1603744469d167d848e074f2ea98c587153205fa Mon Sep 17 00:00:00 2001 From: Fangjun Kuang Date: Sat, 5 Mar 2022 19:26:06 +0800 Subject: [PATCH 20/25] Refactor conformer. 
(#237) --- .../ASR/transducer_stateless/conformer.py | 19 +++---- .../transducer_stateless/test_conformer.py | 51 +++++++++++++++++++ 2 files changed, 59 insertions(+), 11 deletions(-) create mode 100755 egs/librispeech/ASR/transducer_stateless/test_conformer.py diff --git a/egs/librispeech/ASR/transducer_stateless/conformer.py b/egs/librispeech/ASR/transducer_stateless/conformer.py index 81d7708f9..fc838f75b 100644 --- a/egs/librispeech/ASR/transducer_stateless/conformer.py +++ b/egs/librispeech/ASR/transducer_stateless/conformer.py @@ -15,7 +15,7 @@ # See the License for the specific language governing permissions and # limitations under the License. - +import copy import math import warnings from typing import Optional, Tuple @@ -264,13 +264,12 @@ class ConformerEncoderLayer(nn.Module): return src -class ConformerEncoder(nn.TransformerEncoder): +class ConformerEncoder(nn.Module): r"""ConformerEncoder is a stack of N encoder layers Args: encoder_layer: an instance of the ConformerEncoderLayer() class (required). num_layers: the number of sub-encoder-layers in the encoder (required). - norm: the layer normalization component (optional). Examples:: >>> encoder_layer = ConformerEncoderLayer(d_model=512, nhead=8) @@ -280,12 +279,12 @@ class ConformerEncoder(nn.TransformerEncoder): >>> out = conformer_encoder(src, pos_emb) """ - def __init__( - self, encoder_layer: nn.Module, num_layers: int, norm: nn.Module = None - ) -> None: - super(ConformerEncoder, self).__init__( - encoder_layer=encoder_layer, num_layers=num_layers, norm=norm + def __init__(self, encoder_layer: nn.Module, num_layers: int) -> None: + super().__init__() + self.layers = nn.ModuleList( + [copy.deepcopy(encoder_layer) for i in range(num_layers)] ) + self.num_layers = num_layers def forward( self, @@ -320,9 +319,6 @@ class ConformerEncoder(nn.TransformerEncoder): src_key_padding_mask=src_key_padding_mask, ) - if self.norm is not None: - output = self.norm(output) - return output @@ -643,6 +639,7 @@ class RelPositionMultiheadAttention(nn.Module): if _b is not None: _b = _b[_start:_end] q = nn.functional.linear(query, _w, _b) + # This is inline in_proj function with in_proj_weight and in_proj_bias _b = in_proj_bias _start = embed_dim diff --git a/egs/librispeech/ASR/transducer_stateless/test_conformer.py b/egs/librispeech/ASR/transducer_stateless/test_conformer.py new file mode 100755 index 000000000..d1350c8ab --- /dev/null +++ b/egs/librispeech/ASR/transducer_stateless/test_conformer.py @@ -0,0 +1,51 @@ +#!/usr/bin/env python3 +# Copyright 2022 Xiaomi Corp. (authors: Daniel Povey +# Fangjun Kuang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +""" +To run this file, do: + + cd icefall/egs/librispeech/ASR + python ./transducer_stateless/test_conformer.py +""" + +import torch +from conformer import Conformer + + +def test_conformer(): + feature_dim = 50 + c = Conformer( + num_features=feature_dim, output_dim=256, d_model=128, nhead=4 + ) + batch_size = 5 + seq_len = 20 + # Just make sure the forward pass runs. + logits, lengths = c( + torch.randn(batch_size, seq_len, feature_dim), + torch.full((batch_size,), seq_len, dtype=torch.int64), + ) + print(logits.shape) + print(lengths.shape) + + +def main(): + test_conformer() + + +if __name__ == "__main__": + main() From 2f4e71f433fdb8cf7d07014d75bd9218c6822db8 Mon Sep 17 00:00:00 2001 From: Fangjun Kuang Date: Sat, 12 Mar 2022 16:16:15 +0800 Subject: [PATCH 21/25] Add force alignment for stateless transducer. (#239) * Add force alignment for stateless transducer. * Add more documentation. * Compute word starting time from framewise token alignment. * Update README to include force alignment information. * Fix typos. * Fix more typos. * Fixes after review. --- egs/librispeech/ASR/prepare.sh | 27 +- .../ASR/tdnn_lstm_ctc/asr_datamodule.py | 8 +- .../ASR/transducer_stateless/README.md | 117 +++++++ .../ASR/transducer_stateless/alignment.py | 268 ++++++++++++++ .../ASR/transducer_stateless/compute_ali.py | 326 ++++++++++++++++++ .../transducer_stateless/test_compute_ali.py | 167 +++++++++ 6 files changed, 903 insertions(+), 10 deletions(-) create mode 100644 egs/librispeech/ASR/transducer_stateless/alignment.py create mode 100755 egs/librispeech/ASR/transducer_stateless/compute_ali.py create mode 100755 egs/librispeech/ASR/transducer_stateless/test_compute_ali.py diff --git a/egs/librispeech/ASR/prepare.sh b/egs/librispeech/ASR/prepare.sh index 3b2678ec4..1bbf7bbcf 100755 --- a/egs/librispeech/ASR/prepare.sh +++ b/egs/librispeech/ASR/prepare.sh @@ -60,8 +60,11 @@ log "dl_dir: $dl_dir" if [ $stage -le -1 ] && [ $stop_stage -ge -1 ]; then log "Stage -1: Download LM" - [ ! -e $dl_dir/lm ] && mkdir -p $dl_dir/lm - ./local/download_lm.py --out-dir=$dl_dir/lm + mkdir -p $dl_dir/lm + if [ ! -e $dl_dir/lm/.done ]; then + ./local/download_lm.py --out-dir=$dl_dir/lm + touch $dl_dir/lm/.done + fi fi if [ $stage -le 0 ] && [ $stop_stage -ge 0 ]; then @@ -91,7 +94,10 @@ if [ $stage -le 1 ] && [ $stop_stage -ge 1 ]; then # We assume that you have downloaded the LibriSpeech corpus # to $dl_dir/LibriSpeech mkdir -p data/manifests - lhotse prepare librispeech -j $nj $dl_dir/LibriSpeech data/manifests + if [ ! -e data/manifests/.librispeech.done ]; then + lhotse prepare librispeech -j $nj $dl_dir/LibriSpeech data/manifests + touch data/manifests/.librispeech.done + fi fi if [ $stage -le 2 ] && [ $stop_stage -ge 2 ]; then @@ -99,19 +105,28 @@ if [ $stage -le 2 ] && [ $stop_stage -ge 2 ]; then # We assume that you have downloaded the musan corpus # to data/musan mkdir -p data/manifests - lhotse prepare musan $dl_dir/musan data/manifests + if [ ! -e data/manifests/.musan.done ]; then + lhotse prepare musan $dl_dir/musan data/manifests + touch data/manifests/.musan.done + fi fi if [ $stage -le 3 ] && [ $stop_stage -ge 3 ]; then log "Stage 3: Compute fbank for librispeech" mkdir -p data/fbank - ./local/compute_fbank_librispeech.py + if [ ! 
-e data/fbank/.librispeech.done ]; then + ./local/compute_fbank_librispeech.py + touch data/fbank/.librispeech.done + fi fi if [ $stage -le 4 ] && [ $stop_stage -ge 4 ]; then log "Stage 4: Compute fbank for musan" mkdir -p data/fbank - ./local/compute_fbank_musan.py + if [ ! -e data/fbank/.musan.done ]; then + ./local/compute_fbank_musan.py + touch data/fbank/.musan.done + fi fi if [ $stage -le 5 ] && [ $stop_stage -ge 5 ]; then diff --git a/egs/librispeech/ASR/tdnn_lstm_ctc/asr_datamodule.py b/egs/librispeech/ASR/tdnn_lstm_ctc/asr_datamodule.py index e075a2d03..2af2f5e8a 100644 --- a/egs/librispeech/ASR/tdnn_lstm_ctc/asr_datamodule.py +++ b/egs/librispeech/ASR/tdnn_lstm_ctc/asr_datamodule.py @@ -180,14 +180,14 @@ class LibriSpeechAsrDataModule: ) def train_dataloaders(self, cuts_train: CutSet) -> DataLoader: - logging.info("About to get Musan cuts") - cuts_musan = load_manifest( - self.args.manifest_dir / "cuts_musan.json.gz" - ) transforms = [] if self.args.enable_musan: logging.info("Enable MUSAN") + logging.info("About to get Musan cuts") + cuts_musan = load_manifest( + self.args.manifest_dir / "cuts_musan.json.gz" + ) transforms.append( CutMix( cuts=cuts_musan, prob=0.5, snr=(10, 20), preserve_id=True diff --git a/egs/librispeech/ASR/transducer_stateless/README.md b/egs/librispeech/ASR/transducer_stateless/README.md index 964bddfab..978fa2ada 100644 --- a/egs/librispeech/ASR/transducer_stateless/README.md +++ b/egs/librispeech/ASR/transducer_stateless/README.md @@ -20,3 +20,120 @@ export CUDA_VISIBLE_DEVICES="0,1,2,3" --max-duration 250 \ --lr-factor 2.5 ``` + +## How to get framewise token alignment + +Assume that you already have a trained model. If not, you can either +train one by yourself or download a pre-trained model from hugging face: + + +**Caution**: If you are going to use your own trained model, remember +to set `--modified-transducer-prob` to a nonzero value since the +force alignment code assumes that `--max-sym-per-frame` is 1. + + +The following shows how to get framewise token alignment using the above +pre-trained model. + +```bash +git clone https://github.com/k2-fsa/icefall +cd icefall/egs/librispeech/ASR +mkdir tmp +sudo apt-get install git-lfs +git lfs install +git clone https://huggingface.co/csukuangfj/icefall-asr-librispeech-transducer-stateless-multi-datasets-bpe-500-2022-03-01 ./tmp/ + +ln -s $PWD/tmp/exp/pretrained.pt $PWD/tmp/epoch-999.pt + +./transducer_stateless/compute_ali.py \ + --exp-dir ./tmp/exp \ + --bpe-model ./tmp/data/lang_bpe_500/bpe.model \ + --epoch 999 \ + --avg 1 \ + --max-duration 100 \ + --dataset dev-clean \ + --out-dir data/ali +``` + +After running the above commands, you will find the following two files +in the folder `./data/ali`: + +``` +-rw-r--r-- 1 xxx xxx 412K Mar 7 15:45 cuts_dev-clean.json.gz +-rw-r--r-- 1 xxx xxx 2.9M Mar 7 15:45 token_ali_dev-clean.h5 +``` + +You can find usage examples in `./test_compute_ali.py` about +extracting framewise token alignment information from the above +two files. + +## How to get word starting time from framewise token alignment + +Assume you have run the above commands to get framewise token alignment +using a pre-trained model from `tmp/exp/epoch-999.pt`. You can use the following +commands to obtain word starting time. 
+ +```bash +./transducer_stateless/test_compute_ali.py \ + --bpe-model ./tmp/data/lang_bpe_500/bpe.model \ + --ali-dir data/ali \ + --dataset dev-clean +``` + +**Caution**: Since the frame shift is 10ms and the subsampling factor +of the model is 4, the time resolution is 0.04 second. + +**Note**: The script `test_compute_ali.py` is for illustration only +and it processes only one batch and then exits. + +You will get the following output: + +``` +5694-64029-0022-1998-0 +[('THE', '0.20'), ('LEADEN', '0.36'), ('HAIL', '0.72'), ('STORM', '1.00'), ('SWEPT', '1.48'), ('THEM', '1.88'), ('OFF', '2.00'), ('THE', '2.24'), ('FIELD', '2.36'), ('THEY', '3.20'), ('FELL', '3.36'), ('BACK', '3.64'), ('AND', '3.92'), ('RE', '4.04'), ('FORMED', '4.20')] + +3081-166546-0040-308-0 +[('IN', '0.32'), ('OLDEN', '0.60'), ('DAYS', '1.00'), ('THEY', '1.40'), ('WOULD', '1.56'), ('HAVE', '1.76'), ('SAID', '1.92'), ('STRUCK', '2.60'), ('BY', '3.16'), ('A', '3.36'), ('BOLT', '3.44'), ('FROM', '3.84'), ('HEAVEN', '4.04')] + +2035-147960-0016-1283-0 +[('A', '0.44'), ('SNAKE', '0.52'), ('OF', '0.84'), ('HIS', '0.96'), ('SIZE', '1.12'), ('IN', '1.60'), ('FIGHTING', '1.72'), ('TRIM', '2.12'), ('WOULD', '2.56'), ('BE', '2.76'), ('MORE', '2.88'), ('THAN', '3.08'), ('ANY', '3.28'), ('BOY', '3.56'), ('COULD', '3.88'), ('HANDLE', '4.04')] + +2428-83699-0020-1734-0 +[('WHEN', '0.28'), ('THE', '0.48'), ('TRAP', '0.60'), ('DID', '0.88'), ('APPEAR', '1.08'), ('IT', '1.80'), ('LOOKED', '1.96'), ('TO', +'2.24'), ('ME', '2.36'), ('UNCOMMONLY', '2.52'), ('LIKE', '3.16'), ('AN', '3.40'), ('OPEN', '3.56'), ('SPRING', '3.92'), ('CART', '4.28')] + +8297-275154-0026-2108-0 +[('LET', '0.44'), ('ME', '0.72'), ('REST', '0.92'), ('A', '1.32'), ('LITTLE', '1.40'), ('HE', '1.80'), ('PLEADED', '2.00'), ('IF', '3.04'), ("I'M", '3.28'), ('NOT', '3.52'), ('IN', '3.76'), ('THE', '3.88'), ('WAY', '4.00')] + +652-129742-0007-1002-0 +[('SURROUND', '0.28'), ('WITH', '0.80'), ('A', '0.92'), ('GARNISH', '1.00'), ('OF', '1.44'), ('COOKED', '1.56'), ('AND', '1.88'), ('DICED', '4.16'), ('CARROTS', '4.28'), ('TURNIPS', '4.44'), ('GREEN', '4.60'), ('PEAS', '4.72')] +``` + + +For the row: +``` +5694-64029-0022-1998-0 +[('THE', '0.20'), ('LEADEN', '0.36'), ('HAIL', '0.72'), ('STORM', '1.00'), ('SWEPT', '1.48'), +('THEM', '1.88'), ('OFF', '2.00'), ('THE', '2.24'), ('FIELD', '2.36'), ('THEY', '3.20'), ('FELL', '3.36'), +('BACK', '3.64'), ('AND', '3.92'), ('RE', '4.04'), ('FORMED', '4.20')] +``` + +- `5694-64029-0022-1998-0` is the cut ID. +- `('THE', '0.20')` means the word `THE` starts at 0.20 second. +- `('LEADEN', '0.36')` means the word `LEADEN` starts at 0.36 second. 
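+
+Concretely, a word starting frame is converted to a time stamp by multiplying
+it with the effective frame shift of the encoder output (10 ms x 4 = 0.04 s),
+as done in `test_compute_ali.py`. A minimal sketch, where the frame indexes
+are made-up examples:
+
+```python
+frame_shift = 10  # ms, the fbank frame shift
+subsampling_factor = 4  # the model subsamples the input by a factor of 4
+frame_shift_in_second = frame_shift * subsampling_factor / 1000.0  # 0.04 s
+
+word_starting_frames = [5, 9, 18]  # e.g. from get_word_starting_frames()
+print(["{:.2f}".format(i * frame_shift_in_second) for i in word_starting_frames])
+# ['0.20', '0.36', '0.72']
+```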
+ + +You can compare the above word starting time with the one +from + +``` +5694-64029-0022 ",THE,LEADEN,HAIL,STORM,SWEPT,THEM,OFF,THE,FIELD,,THEY,FELL,BACK,AND,RE,FORMED," "0.230,0.360,0.670,1.010,1.440,1.860,1.990,2.230,2.350,2.870,3.230,3.390,3.660,3.960,4.060,4.160,4.850,4.9" +``` + +We reformat it below for readability: + +``` +5694-64029-0022 ",THE,LEADEN,HAIL,STORM,SWEPT,THEM,OFF,THE,FIELD,,THEY,FELL,BACK,AND,RE,FORMED," +"0.230,0.360,0.670,1.010,1.440,1.860,1.990,2.230,2.350,2.870,3.230,3.390,3.660,3.960,4.060,4.160,4.850,4.9" + the leaden hail storm swept them off the field sil they fell back and re formed sil +``` diff --git a/egs/librispeech/ASR/transducer_stateless/alignment.py b/egs/librispeech/ASR/transducer_stateless/alignment.py new file mode 100644 index 000000000..f143611ea --- /dev/null +++ b/egs/librispeech/ASR/transducer_stateless/alignment.py @@ -0,0 +1,268 @@ +# Copyright 2022 Xiaomi Corp. (authors: Fangjun Kuang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from dataclasses import dataclass +from typing import Iterator, List, Optional + +import sentencepiece as spm +import torch +from model import Transducer + +# The force alignment problem can be formulated as finding +# a path in a rectangular lattice, where the path starts +# from the lower left corner and ends at the upper right +# corner. The horizontal axis of the lattice is `t` (representing +# acoustic frame indexes) and the vertical axis is `u` (representing +# BPE tokens of the transcript). +# +# The notations `t` and `u` are from the paper +# https://arxiv.org/pdf/1211.3711.pdf +# +# Beam search is used to find the path with the +# highest log probabilities. +# +# It assumes the maximum number of symbols that can be +# emitted per frame is 1. You can use `--modified-transducer-prob` +# from `./train.py` to train a model that satisfies this assumption. + + +# AlignItem is the ending node of a path originated from the starting node. +# len(ys) equals to `t` and pos_u is the u coordinate +# in the lattice. +@dataclass +class AlignItem: + # total log prob of the path that ends at this item. + # The path is originated from the starting node. 
+ log_prob: float + + # It contains framewise token alignment + ys: List[int] + + # It equals to the number of non-zero entries in ys + pos_u: int + + +class AlignItemList: + def __init__(self, items: Optional[List[AlignItem]] = None): + """ + Args: + items: + A list of AlignItem + """ + if items is None: + items = [] + self.data = items + + def __iter__(self) -> Iterator: + return iter(self.data) + + def __len__(self) -> int: + """Return the number of AlignItem in this object.""" + return len(self.data) + + def __getitem__(self, i: int) -> AlignItem: + """Return the i-th item in this object.""" + return self.data[i] + + def append(self, item: AlignItem) -> None: + """Append an item to the end of this object.""" + self.data.append(item) + + def get_decoder_input( + self, + ys: List[int], + context_size: int, + blank_id: int, + ) -> List[List[int]]: + """Get input for the decoder for each item in this object. + + Args: + ys: + The transcript of the utterance in BPE tokens. + context_size: + Context size of the NN decoder model. + blank_id: + The ID of the blank symbol. + Returns: + Return a list-of-list int. `ans[i]` contains the decoder + input for the i-th item in this object and its lengths + is `context_size`. + """ + ans: List[List[int]] = [] + buf = [blank_id] * context_size + ys + for item in self: + # fmt: off + ans.append(buf[item.pos_u:(item.pos_u + context_size)]) + # fmt: on + return ans + + def topk(self, k: int) -> "AlignItemList": + """Return the top-k items. + + Items are ordered by their log probs in descending order + and the top-k items are returned. + + Args: + k: + Size of top-k. + Returns: + Return a new AlignItemList that contains the top-k items + in this object. Caution: It uses shallow copy. + """ + items = list(self) + items = sorted(items, key=lambda i: i.log_prob, reverse=True) + return AlignItemList(items[:k]) + + +def force_alignment( + model: Transducer, + encoder_out: torch.Tensor, + ys: List[int], + beam_size: int = 4, +) -> List[int]: + """Compute the force alignment of an utterance given its transcript + in BPE tokens and the corresponding acoustic output from the encoder. + + Caution: + We assume that the maximum number of sybmols per frame is 1. + That is, the model should be trained using a nonzero value + for the option `--modified-transducer-prob` in train.py. + + Args: + model: + The transducer model. + encoder_out: + A tensor of shape (N, T, C). Support only for N==1 at present. + ys: + A list of BPE token IDs. We require that len(ys) <= T. + beam_size: + Size of the beam used in beam search. + Returns: + Return a list of int such that + - len(ans) == T + - After removing blanks from ans, we have ans == ys. 
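+        - ans[i] is the symbol aligned to frame i; it is blank_id for frames
+          where no token of ys is emitted.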
+ """ + assert encoder_out.ndim == 3, encoder_out.ndim + assert encoder_out.size(0) == 1, encoder_out.size(0) + assert 0 < len(ys) <= encoder_out.size(1), (len(ys), encoder_out.size(1)) + + blank_id = model.decoder.blank_id + context_size = model.decoder.context_size + + device = model.device + + T = encoder_out.size(1) + U = len(ys) + assert 0 < U <= T + + encoder_out_len = torch.tensor([1]) + decoder_out_len = encoder_out_len + + start = AlignItem(log_prob=0.0, ys=[], pos_u=0) + B = AlignItemList([start]) + + for t in range(T): + # fmt: off + current_encoder_out = encoder_out[:, t:t+1, :] + # current_encoder_out is of shape (1, 1, encoder_out_dim) + # fmt: on + + A = B # shallow copy + B = AlignItemList() + + decoder_input = A.get_decoder_input( + ys=ys, context_size=context_size, blank_id=blank_id + ) + decoder_input = torch.tensor(decoder_input, device=device) + # decoder_input is of shape (num_active_items, context_size) + + decoder_out = model.decoder(decoder_input, need_pad=False) + # decoder_output is of shape (num_active_items, 1, decoder_output_dim) + + current_encoder_out = current_encoder_out.expand( + decoder_out.size(0), 1, -1 + ) + + logits = model.joiner( + current_encoder_out, + decoder_out, + encoder_out_len.expand(decoder_out.size(0)), + decoder_out_len.expand(decoder_out.size(0)), + ) + + # logits is of shape (num_active_items, vocab_size) + log_probs = logits.log_softmax(dim=-1).tolist() + + for i, item in enumerate(A): + if (T - 1 - t) >= (U - item.pos_u): + # horizontal transition (left -> right) + new_item = AlignItem( + log_prob=item.log_prob + log_probs[i][blank_id], + ys=item.ys + [blank_id], + pos_u=item.pos_u, + ) + B.append(new_item) + + if item.pos_u < U: + # diagonal transition (lower left -> upper right) + u = ys[item.pos_u] + new_item = AlignItem( + log_prob=item.log_prob + log_probs[i][u], + ys=item.ys + [u], + pos_u=item.pos_u + 1, + ) + B.append(new_item) + + if len(B) > beam_size: + B = B.topk(beam_size) + + ans = B.topk(1)[0].ys + + assert len(ans) == T + assert list(filter(lambda i: i != blank_id, ans)) == ys + + return ans + + +def get_word_starting_frames( + ali: List[int], sp: spm.SentencePieceProcessor +) -> List[int]: + """Get the starting frame of each word from the given token alignments. + + When a word is encoded into BPE tokens, the first token starts + with underscore "_", which can be used to identify the starting frame + of a word. + + Args: + ali: + Framewise token alignment. It can be the return value of + :func:`force_alignment`. + sp: + The sentencepiece model. + Returns: + Return a list of int representing the starting frame of each word + in the alignment. + Caution: + You have to take into account the model subsampling factor when + converting the starting frame into time. + """ + underscore = b"\xe2\x96\x81".decode() # '_' + ans = [] + for i in range(len(ali)): + if sp.id_to_piece(ali[i]).startswith(underscore): + ans.append(i) + return ans diff --git a/egs/librispeech/ASR/transducer_stateless/compute_ali.py b/egs/librispeech/ASR/transducer_stateless/compute_ali.py new file mode 100755 index 000000000..48769e9d1 --- /dev/null +++ b/egs/librispeech/ASR/transducer_stateless/compute_ali.py @@ -0,0 +1,326 @@ +#!/usr/bin/env python3 +# Copyright 2022 Xiaomi Corp. (authors: Fangjun Kuang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Usage: + ./transducer_stateless/compute_ali.py \ + --exp-dir ./transducer_stateless/exp \ + --bpe-model ./data/lang_bpe_500/bpe.model \ + --epoch 20 \ + --avg 10 \ + --max-duration 300 \ + --dataset train-clean-100 \ + --out-dir data/ali +""" + +import argparse +import logging +from pathlib import Path +from typing import List + +import numpy as np +import sentencepiece as spm +import torch +from alignment import force_alignment +from asr_datamodule import LibriSpeechAsrDataModule +from lhotse import CutSet +from lhotse.features.io import FeaturesWriter, NumpyHdf5Writer +from train import get_params, get_transducer_model + +from icefall.checkpoint import average_checkpoints, load_checkpoint +from icefall.utils import AttributeDict, setup_logger + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--epoch", + type=int, + default=34, + help="It specifies the checkpoint to use for decoding." + "Note: Epoch counts from 0.", + ) + parser.add_argument( + "--avg", + type=int, + default=20, + help="Number of checkpoints to average. Automatically select " + "consecutive checkpoints before the checkpoint specified by " + "'--epoch'. ", + ) + + parser.add_argument( + "--bpe-model", + type=str, + default="data/lang_bpe_500/bpe.model", + help="Path to the BPE model", + ) + + parser.add_argument( + "--exp-dir", + type=str, + default="transducer_stateless/exp", + help="The experiment dir", + ) + + parser.add_argument( + "--out-dir", + type=str, + required=True, + help="""Output directory. + It contains 2 generated files: + + - token_ali_xxx.h5 + - cuts_xxx.json.gz + + where xxx is the value of `--dataset`. For instance, if + `--dataset` is `train-clean-100`, it will contain 2 files: + + - `token_ali_train-clean-100.h5` + - `cuts_train-clean-100.json.gz` + """, + ) + + parser.add_argument( + "--dataset", + type=str, + required=True, + help="""The name of the dataset to compute alignments for. + Possible values are: + - test-clean. + - test-other + - train-clean-100 + - train-clean-360 + - train-other-500 + - dev-clean + - dev-other + """, + ) + + parser.add_argument( + "--beam-size", + type=int, + default=4, + ) + + parser.add_argument( + "--context-size", + type=int, + default=2, + help="The context size in the decoder. 1 means bigram; " + "2 means tri-gram", + ) + + return parser + + +def compute_alignments( + model: torch.nn.Module, + dl: torch.utils.data, + ali_writer: FeaturesWriter, + params: AttributeDict, + sp: spm.SentencePieceProcessor, +): + try: + num_batches = len(dl) + except TypeError: + num_batches = "?" 
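+    # num_batches is only used in the progress log below; it falls back
+    # to "?" when len(dl) is not available.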
+ num_cuts = 0 + + device = model.device + cuts = [] + + for batch_idx, batch in enumerate(dl): + feature = batch["inputs"] + + # at entry, feature is [N, T, C] + assert feature.ndim == 3 + feature = feature.to(device) + + supervisions = batch["supervisions"] + + cut_list = supervisions["cut"] + for cut in cut_list: + assert len(cut.supervisions) == 1, f"{len(cut.supervisions)}" + + feature_lens = supervisions["num_frames"].to(device) + + encoder_out, encoder_out_lens = model.encoder( + x=feature, x_lens=feature_lens + ) + + batch_size = encoder_out.size(0) + + texts = supervisions["text"] + + ys_list: List[List[int]] = sp.encode(texts, out_type=int) + + ali_list = [] + for i in range(batch_size): + # fmt: off + encoder_out_i = encoder_out[i:i+1, :encoder_out_lens[i]] + # fmt: on + + ali = force_alignment( + model=model, + encoder_out=encoder_out_i, + ys=ys_list[i], + beam_size=params.beam_size, + ) + ali_list.append(ali) + assert len(ali_list) == len(cut_list) + + for cut, ali in zip(cut_list, ali_list): + cut.token_alignment = ali_writer.store_array( + key=cut.id, + value=np.asarray(ali, dtype=np.int32), + # frame shift is 0.01s, subsampling_factor is 4 + frame_shift=0.04, + temporal_dim=0, + start=0, + ) + + cuts += cut_list + + num_cuts += len(cut_list) + + if batch_idx % 2 == 0: + batch_str = f"{batch_idx}/{num_batches}" + + logging.info( + f"batch {batch_str}, cuts processed until now is {num_cuts}" + ) + + return CutSet.from_cuts(cuts) + + +@torch.no_grad() +def main(): + parser = get_parser() + LibriSpeechAsrDataModule.add_arguments(parser) + args = parser.parse_args() + + args.enable_spec_aug = False + args.enable_musan = False + args.return_cuts = True + args.concatenate_cuts = False + + params = get_params() + params.update(vars(args)) + + setup_logger(f"{params.exp_dir}/log-ali") + + sp = spm.SentencePieceProcessor() + sp.load(params.bpe_model) + + # is defined in local/train_bpe_model.py + params.blank_id = sp.piece_to_id("") + params.vocab_size = sp.get_piece_size() + + logging.info(f"Computing alignments for {params.dataset} - started") + logging.info(params) + + device = torch.device("cpu") + if torch.cuda.is_available(): + device = torch.device("cuda", 0) + logging.info(f"Device: {device}") + + out_dir = Path(params.out_dir) + out_dir.mkdir(exist_ok=True) + + out_ali_filename = out_dir / f"token_ali_{params.dataset}.h5" + out_manifest_filename = out_dir / f"cuts_{params.dataset}.json.gz" + + done_file = out_dir / f".{params.dataset}.done" + if done_file.is_file(): + logging.info(f"{done_file} exists - skipping") + exit() + + logging.info("About to create model") + model = get_transducer_model(params) + + if params.avg == 1: + load_checkpoint(f"{params.exp_dir}/epoch-{params.epoch}.pt", model) + else: + start = params.epoch - params.avg + 1 + filenames = [] + for i in range(start, params.epoch + 1): + if start >= 0: + filenames.append(f"{params.exp_dir}/epoch-{i}.pt") + logging.info(f"averaging {filenames}") + model.to(device) + model.load_state_dict( + average_checkpoints(filenames, device=device), strict=False + ) + + model.to(device) + model.eval() + model.device = device + + num_param = sum([p.numel() for p in model.parameters()]) + logging.info(f"Number of model parameters: {num_param}") + + librispeech = LibriSpeechAsrDataModule(args) + if params.dataset == "test-clean": + test_clean_cuts = librispeech.test_clean_cuts() + dl = librispeech.test_dataloaders(test_clean_cuts) + elif params.dataset == "test-other": + test_other_cuts = librispeech.test_other_cuts() + dl = 
librispeech.test_dataloaders(test_other_cuts) + elif params.dataset == "train-clean-100": + train_clean_100_cuts = librispeech.train_clean_100_cuts() + dl = librispeech.train_dataloaders(train_clean_100_cuts) + elif params.dataset == "train-clean-360": + train_clean_360_cuts = librispeech.train_clean_360_cuts() + dl = librispeech.train_dataloaders(train_clean_360_cuts) + elif params.dataset == "train-other-500": + train_other_500_cuts = librispeech.train_other_500_cuts() + dl = librispeech.train_dataloaders(train_other_500_cuts) + elif params.dataset == "dev-clean": + dev_clean_cuts = librispeech.dev_clean_cuts() + dl = librispeech.valid_dataloaders(dev_clean_cuts) + else: + assert params.dataset == "dev-other", f"{params.dataset}" + dev_other_cuts = librispeech.dev_other_cuts() + dl = librispeech.valid_dataloaders(dev_other_cuts) + + logging.info(f"Processing {params.dataset}") + + with NumpyHdf5Writer(out_ali_filename) as ali_writer: + cut_set = compute_alignments( + model=model, + dl=dl, + ali_writer=ali_writer, + params=params, + sp=sp, + ) + + cut_set.to_file(out_manifest_filename) + + logging.info( + f"For dataset {params.dataset}, its framewise token alignments are " + f"saved to {out_ali_filename} and the cut manifest " + f"file is {out_manifest_filename}. Number of cuts: {len(cut_set)}" + ) + done_file.touch() + + +if __name__ == "__main__": + main() diff --git a/egs/librispeech/ASR/transducer_stateless/test_compute_ali.py b/egs/librispeech/ASR/transducer_stateless/test_compute_ali.py new file mode 100755 index 000000000..99d5b3788 --- /dev/null +++ b/egs/librispeech/ASR/transducer_stateless/test_compute_ali.py @@ -0,0 +1,167 @@ +#!/usr/bin/env python3 +# Copyright 2022 Xiaomi Corp. (authors: Fangjun Kuang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +""" +This script shows how to get word starting time +from framewise token alignment. 
+ +Usage: + ./transducer_stateless/compute_ali.py \ + --exp-dir ./transducer_stateless/exp \ + --bpe-model ./data/lang_bpe_500/bpe.model \ + --epoch 20 \ + --avg 10 \ + --max-duration 300 \ + --dataset train-clean-100 \ + --out-dir data/ali + +And the you can run: + + ./transducer_stateless/test_compute_ali.py \ + --bpe-model ./data/lang_bpe_500/bpe.model \ + --ali-dir data/ali \ + --dataset train-clean-100 +""" +import argparse +import logging +from pathlib import Path + +import sentencepiece as spm +import torch +from alignment import get_word_starting_frames +from lhotse import CutSet, load_manifest +from lhotse.dataset import K2SpeechRecognitionDataset, SingleCutSampler +from lhotse.dataset.collation import collate_custom_field + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--bpe-model", + type=str, + default="data/lang_bpe_500/bpe.model", + help="Path to the BPE model", + ) + + parser.add_argument( + "--ali-dir", + type=Path, + default="./data/ali", + help="It specifies the directory where alignments can be found.", + ) + + parser.add_argument( + "--dataset", + type=str, + required=True, + help="""The name of the dataset: + Possible values are: + - test-clean. + - test-other + - train-clean-100 + - train-clean-360 + - train-other-500 + - dev-clean + - dev-other + """, + ) + + return parser + + +def main(): + args = get_parser().parse_args() + + sp = spm.SentencePieceProcessor() + sp.load(args.bpe_model) + + cuts_json = args.ali_dir / f"cuts_{args.dataset}.json.gz" + + logging.info(f"Loading {cuts_json}") + cuts = load_manifest(cuts_json) + + sampler = SingleCutSampler( + cuts, + max_duration=30, + shuffle=False, + ) + + dataset = K2SpeechRecognitionDataset(return_cuts=True) + + dl = torch.utils.data.DataLoader( + dataset, + sampler=sampler, + batch_size=None, + num_workers=1, + persistent_workers=False, + ) + + frame_shift = 10 # ms + subsampling_factor = 4 + + frame_shift_in_second = frame_shift * subsampling_factor / 1000.0 + + # key: cut.id + # value: a list of pairs (word, time_in_second) + word_starting_time_dict = {} + for batch in dl: + supervisions = batch["supervisions"] + cuts = supervisions["cut"] + + token_alignment, token_alignment_length = collate_custom_field( + CutSet.from_cuts(cuts), "token_alignment" + ) + + for i in range(len(cuts)): + assert ( + (cuts[i].features.num_frames - 1) // 2 - 1 + ) // 2 == token_alignment_length[i] + + word_starting_frames = get_word_starting_frames( + token_alignment[i, : token_alignment_length[i]].tolist(), sp=sp + ) + word_starting_time = [ + "{:.2f}".format(i * frame_shift_in_second) + for i in word_starting_frames + ] + + words = supervisions["text"][i].split() + + assert len(word_starting_frames) == len(words) + word_starting_time_dict[cuts[i].id] = list( + zip(words, word_starting_time) + ) + + # This is a demo script and we exit here after processing + # one batch. + # You can find word starting time in the dict "word_starting_time_dict" + for cut_id, word_time in word_starting_time_dict.items(): + print(f"{cut_id}\n{word_time}\n") + break + + +if __name__ == "__main__": + formatter = ( + "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s" + ) + + logging.basicConfig(format=formatter, level=logging.INFO) + main() From bb7f6ed6b74df6e3c1b4ae31ae54c3f0cd32b705 Mon Sep 17 00:00:00 2001 From: Fangjun Kuang Date: Sat, 12 Mar 2022 16:16:55 +0800 Subject: [PATCH 22/25] Add modified beam search for pruned rnn-t. 
(#248) * Add modified beam search for pruned rnn-t. * Fix style issues. * Update RESULTS.md. * Fix typos. * Minor fixes. * Test the pre-trained model using GitHub actions. * Let the user install optimized_transducer on her own. * Fix errors in GitHub CI. --- .../workflows/run-librispeech-2022-03-12.yml | 157 ++++++++++++++++++ README.md | 2 +- egs/librispeech/ASR/README.md | 1 + egs/librispeech/ASR/RESULTS.md | 108 +++++++++++- .../beam_search.py | 144 ++++++++++++++-- .../ASR/pruned_transducer_stateless/decode.py | 105 +++--------- .../ASR/pruned_transducer_stateless/export.py | 76 +-------- .../pruned_transducer_stateless/pretrained.py | 99 +++-------- requirements.txt | 1 - 9 files changed, 439 insertions(+), 254 deletions(-) create mode 100644 .github/workflows/run-librispeech-2022-03-12.yml diff --git a/.github/workflows/run-librispeech-2022-03-12.yml b/.github/workflows/run-librispeech-2022-03-12.yml new file mode 100644 index 000000000..74052312e --- /dev/null +++ b/.github/workflows/run-librispeech-2022-03-12.yml @@ -0,0 +1,157 @@ +# Copyright 2021 Fangjun Kuang (csukuangfj@gmail.com) + +# See ../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +name: run-librispeech-2022-03-12 +# stateless transducer + k2 pruned rnnt-loss + +on: + push: + branches: + - master + pull_request: + types: [labeled] + +jobs: + run_librispeech_2022_03_12: + if: github.event.label.name == 'ready' || github.event_name == 'push' + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-18.04] + python-version: [3.7, 3.8, 3.9] + torch: ["1.10.0"] + torchaudio: ["0.10.0"] + k2-version: ["1.9.dev20211101"] + + fail-fast: false + + steps: + - uses: actions/checkout@v2 + with: + fetch-depth: 0 + + - name: Setup Python ${{ matrix.python-version }} + uses: actions/setup-python@v1 + with: + python-version: ${{ matrix.python-version }} + + - name: Install Python dependencies + run: | + python3 -m pip install --upgrade pip pytest + # numpy 1.20.x does not support python 3.6 + pip install numpy==1.19 + pip install torch==${{ matrix.torch }}+cpu torchaudio==${{ matrix.torchaudio }}+cpu -f https://download.pytorch.org/whl/cpu/torch_stable.html + pip install k2==${{ matrix.k2-version }}+cpu.torch${{ matrix.torch }} -f https://k2-fsa.org/nightly/ + + python3 -m pip install git+https://github.com/lhotse-speech/lhotse + python3 -m pip install kaldifeat + # We are in ./icefall and there is a file: requirements.txt in it + pip install -r requirements.txt + + - name: Install graphviz + shell: bash + run: | + python3 -m pip install -qq graphviz + sudo apt-get -qq install graphviz + + - name: Download pre-trained model + shell: bash + run: | + sudo apt-get -qq install git-lfs tree sox + cd egs/librispeech/ASR + mkdir tmp + cd tmp + git lfs install + git clone https://huggingface.co/csukuangfj/icefall-asr-librispeech-pruned-transducer-stateless-2022-03-12 + cd .. 
+ tree tmp + soxi tmp/icefall-asr-librispeech-pruned-transducer-stateless-2022-03-12/test_wavs/*.wav + ls -lh tmp/icefall-asr-librispeech-pruned-transducer-stateless-2022-03-12/test_wavs/*.wav + + - name: Run greedy search decoding (max-sym-per-frame 1) + shell: bash + run: | + export PYTHONPATH=$PWD:PYTHONPATH + dir=./tmp/icefall-asr-librispeech-pruned-transducer-stateless-2022-03-12 + cd egs/librispeech/ASR + ./pruned_transducer_stateless/pretrained.py \ + --method greedy_search \ + --max-sym-per-frame 1 \ + --checkpoint $dir/exp/pretrained.pt \ + --bpe-model $dir/data/lang_bpe_500/bpe.model \ + $dir/test_wavs/1089-134686-0001.wav \ + $dir/test_wavs/1221-135766-0001.wav \ + $dir/test_wavs/1221-135766-0002.wav + + - name: Run greedy search decoding (max-sym-per-frame 2) + shell: bash + run: | + export PYTHONPATH=$PWD:PYTHONPATH + dir=./tmp/icefall-asr-librispeech-pruned-transducer-stateless-2022-03-12 + cd egs/librispeech/ASR + ./pruned_transducer_stateless/pretrained.py \ + --method greedy_search \ + --max-sym-per-frame 2 \ + --checkpoint $dir/exp/pretrained.pt \ + --bpe-model $dir/data/lang_bpe_500/bpe.model \ + $dir/test_wavs/1089-134686-0001.wav \ + $dir/test_wavs/1221-135766-0001.wav \ + $dir/test_wavs/1221-135766-0002.wav + + - name: Run greedy search decoding (max-sym-per-frame 3) + shell: bash + run: | + export PYTHONPATH=$PWD:PYTHONPATH + dir=./tmp/icefall-asr-librispeech-pruned-transducer-stateless-2022-03-12 + cd egs/librispeech/ASR + ./pruned_transducer_stateless/pretrained.py \ + --method greedy_search \ + --max-sym-per-frame 3 \ + --checkpoint $dir/exp/pretrained.pt \ + --bpe-model $dir/data/lang_bpe_500/bpe.model \ + $dir/test_wavs/1089-134686-0001.wav \ + $dir/test_wavs/1221-135766-0001.wav \ + $dir/test_wavs/1221-135766-0002.wav + + - name: Run beam search decoding + shell: bash + run: | + export PYTHONPATH=$PWD:$PYTHONPATH + dir=./tmp/icefall-asr-librispeech-pruned-transducer-stateless-2022-03-12 + cd egs/librispeech/ASR + ./pruned_transducer_stateless/pretrained.py \ + --method beam_search \ + --beam-size 4 \ + --checkpoint $dir/exp/pretrained.pt \ + --bpe-model $dir/data/lang_bpe_500/bpe.model \ + $dir/test_wavs/1089-134686-0001.wav \ + $dir/test_wavs/1221-135766-0001.wav \ + $dir/test_wavs/1221-135766-0002.wav + + - name: Run modified beam search decoding + shell: bash + run: | + export PYTHONPATH=$PWD:$PYTHONPATH + dir=./tmp/icefall-asr-librispeech-pruned-transducer-stateless-2022-03-12 + cd egs/librispeech/ASR + ./pruned_transducer_stateless/pretrained.py \ + --method modified_beam_search \ + --beam-size 4 \ + --checkpoint $dir/exp/pretrained.pt \ + --bpe-model $dir/data/lang_bpe_500/bpe.model \ + $dir/test_wavs/1089-134686-0001.wav \ + $dir/test_wavs/1221-135766-0001.wav \ + $dir/test_wavs/1221-135766-0002.wav diff --git a/README.md b/README.md index a49b30df0..79d8039ff 100644 --- a/README.md +++ b/README.md @@ -84,7 +84,7 @@ The best WER using modified beam search with beam size 4 is: | | test-clean | test-other | |-----|------------|------------| -| WER | 2.61 | 6.46 | +| WER | 2.56 | 6.27 | Note: No auxiliary losses are used in the training and no LMs are used in the decoding. diff --git a/egs/librispeech/ASR/README.md b/egs/librispeech/ASR/README.md index 30b5c5c6f..a7b2e2c3b 100644 --- a/egs/librispeech/ASR/README.md +++ b/egs/librispeech/ASR/README.md @@ -15,6 +15,7 @@ The following table lists the differences among them. 
| `transducer_stateless` | Conformer | Embedding + Conv1d | | | `transducer_lstm` | LSTM | LSTM | | | `transducer_stateless_multi_datasets` | Conformer | Embedding + Conv1d | Using data from GigaSpeech as extra training data | +| `pruned_transducer_stateless` | Conformer | Embedding + Conv1d | Using k2 pruned RNN-T loss | The decoder in `transducer_stateless` is modified from the paper [Rnn-Transducer with Stateless Prediction Network](https://ieeexplore.ieee.org/document/9054419/). diff --git a/egs/librispeech/ASR/RESULTS.md b/egs/librispeech/ASR/RESULTS.md index cc2aebac1..6dbc659f7 100644 --- a/egs/librispeech/ASR/RESULTS.md +++ b/egs/librispeech/ASR/RESULTS.md @@ -2,12 +2,111 @@ ### LibriSpeech BPE training results (Pruned Transducer) -#### Conformer encoder + embedding decoder - Conformer encoder + non-current decoder. The decoder contains only an embedding layer, a Conv1d (with kernel size 2) and a linear layer (to transform tensor dim). +#### 2022-03-12 + +[pruned_transducer_stateless](./pruned_transducer_stateless) + +Using commit `1603744469d167d848e074f2ea98c587153205fa`. +See + +The WERs are: + +| | test-clean | test-other | comment | +|-------------------------------------|------------|------------|------------------------------------------| +| greedy search (max sym per frame 1) | 2.62 | 6.37 | --epoch 42, --avg 11, --max-duration 100 | +| greedy search (max sym per frame 2) | 2.62 | 6.37 | --epoch 42, --avg 11, --max-duration 100 | +| greedy search (max sym per frame 3) | 2.62 | 6.37 | --epoch 42, --avg 11, --max-duration 100 | +| modified beam search (beam size 4) | 2.56 | 6.27 | --epoch 42, --avg 11, --max-duration 100 | +| beam search (beam size 4) | 2.57 | 6.27 | --epoch 42, --avg 11, --max-duration 100 | + +The decoding time for `test-clean` and `test-other` is given below: +(A V100 GPU with 32 GB RAM is used for decoding. Note: Not all GPU RAM is used during decoding.) + +| decoding method | test-clean (seconds) | test-other (seconds)| +|---|---:|---:| +| greedy search (--max-sym-per-frame=1) | 160 | 159 | +| greedy search (--max-sym-per-frame=2) | 184 | 177 | +| greedy search (--max-sym-per-frame=3) | 210 | 213 | +| modified beam search (--beam-size 4)| 273 | 269 | +|beam search (--beam-size 4) | 2741 | 2221 | + +We recommend you to use `modified_beam_search`. + +Training command: + +```bash +cd egs/librispeech/ASR/ +./prepare.sh + +export CUDA_VISIBLE_DEVICES="0,1,2,3,4,5,6,7" + +. 
path.sh + +./pruned_transducer_stateless/train.py \ + --world-size 8 \ + --num-epochs 60 \ + --start-epoch 0 \ + --exp-dir pruned_transducer_stateless/exp \ + --full-libri 1 \ + --max-duration 300 \ + --prune-range 5 \ + --lr-factor 5 \ + --lm-scale 0.25 +``` + +The tensorboard training log can be found at + + +The command for decoding is: + +```bash +epoch=42 +avg=11 +sym=1 + +# greedy search + +./pruned_transducer_stateless/decode.py \ + --epoch $epoch \ + --avg $avg \ + --exp-dir ./pruned_transducer_stateless/exp \ + --max-duration 100 \ + --decoding-method greedy_search \ + --beam-size 4 \ + --max-sym-per-frame $sym + +# modified beam search +./pruned_transducer_stateless/decode.py \ + --epoch $epoch \ + --avg $avg \ + --exp-dir ./pruned_transducer_stateless/exp \ + --max-duration 100 \ + --decoding-method modified_beam_search \ + --beam-size 4 + +# beam search +# (not recommended) +./pruned_transducer_stateless/decode.py \ + --epoch $epoch \ + --avg $avg \ + --exp-dir ./pruned_transducer_stateless/exp \ + --max-duration 100 \ + --decoding-method beam_search \ + --beam-size 4 +``` + +You can find a pre-trained model, decoding logs, and decoding results at + + +#### 2022-02-18 + +[pruned_transducer_stateless](./pruned_transducer_stateless) + + The WERs are | | test-clean | test-other | comment | @@ -62,7 +161,7 @@ See ##### 2022-03-01 -Using commit `fill in it after merging`. +Using commit `2332ba312d7ce72f08c7bac1e3312f7e3dd722dc`. It uses [GigaSpeech](https://github.com/SpeechColab/GigaSpeech) as extra training data. 20% of the time it selects a batch from L subset of @@ -129,6 +228,9 @@ sym=1 --beam-size 4 ``` +You can find a pretrained model by visiting + + ##### 2022-02-07 diff --git a/egs/librispeech/ASR/pruned_transducer_stateless/beam_search.py b/egs/librispeech/ASR/pruned_transducer_stateless/beam_search.py index 3d4818509..38ab16507 100644 --- a/egs/librispeech/ASR/pruned_transducer_stateless/beam_search.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless/beam_search.py @@ -17,7 +17,6 @@ from dataclasses import dataclass from typing import Dict, List, Optional -import numpy as np import torch from model import Transducer @@ -48,7 +47,7 @@ def greedy_search( device = model.device decoder_input = torch.tensor( - [blank_id] * context_size, device=device + [blank_id] * context_size, device=device, dtype=torch.int64 ).reshape(1, context_size) decoder_out = model.decoder(decoder_input, need_pad=False) @@ -103,8 +102,9 @@ class Hypothesis: # Newly predicted tokens are appended to `ys`. ys: List[int] - # The log prob of ys - log_prob: float + # The log prob of ys. + # It contains only one entry. + log_prob: torch.Tensor @property def key(self) -> str: @@ -113,7 +113,7 @@ class Hypothesis: class HypothesisList(object): - def __init__(self, data: Optional[Dict[str, Hypothesis]] = None): + def __init__(self, data: Optional[Dict[str, Hypothesis]] = None) -> None: """ Args: data: @@ -125,10 +125,10 @@ class HypothesisList(object): self._data = data @property - def data(self): + def data(self) -> Dict[str, Hypothesis]: return self._data - def add(self, hyp: Hypothesis): + def add(self, hyp: Hypothesis) -> None: """Add a Hypothesis to `self`. 
If `hyp` already exists in `self`, its probability is updated using @@ -140,8 +140,10 @@ class HypothesisList(object): """ key = hyp.key if key in self: - old_hyp = self._data[key] - old_hyp.log_prob = np.logaddexp(old_hyp.log_prob, hyp.log_prob) + old_hyp = self._data[key] # shallow copy + torch.logaddexp( + old_hyp.log_prob, hyp.log_prob, out=old_hyp.log_prob + ) else: self._data[key] = hyp @@ -153,7 +155,8 @@ class HypothesisList(object): length_norm: If True, the `log_prob` of a hypothesis is normalized by the number of tokens in it. - + Returns: + Return the hypothesis that has the largest `log_prob`. """ if length_norm: return max( @@ -165,6 +168,9 @@ class HypothesisList(object): def remove(self, hyp: Hypothesis) -> None: """Remove a given hypothesis. + Caution: + `self` is modified **in-place**. + Args: hyp: The hypothesis to be removed from `self`. @@ -175,7 +181,7 @@ class HypothesisList(object): assert key in self, f"{key} does not exist" del self._data[key] - def filter(self, threshold: float) -> "HypothesisList": + def filter(self, threshold: torch.Tensor) -> "HypothesisList": """Remove all Hypotheses whose log_prob is less than threshold. Caution: @@ -183,10 +189,10 @@ class HypothesisList(object): Returns: Return a new HypothesisList containing all hypotheses from `self` - that have `log_prob` being greater than the given `threshold`. + with `log_prob` being greater than the given `threshold`. """ ans = HypothesisList() - for key, hyp in self._data.items(): + for _, hyp in self._data.items(): if hyp.log_prob > threshold: ans.add(hyp) # shallow copy return ans @@ -216,6 +222,106 @@ class HypothesisList(object): return ", ".join(s) +def modified_beam_search( + model: Transducer, + encoder_out: torch.Tensor, + beam: int = 4, +) -> List[int]: + """It limits the maximum number of symbols per frame to 1. + + Args: + model: + An instance of `Transducer`. + encoder_out: + A tensor of shape (N, T, C) from the encoder. Support only N==1 for now. + beam: + Beam size. + Returns: + Return the decoded result. 
+ """ + + assert encoder_out.ndim == 3 + + # support only batch_size == 1 for now + assert encoder_out.size(0) == 1, encoder_out.size(0) + blank_id = model.decoder.blank_id + context_size = model.decoder.context_size + + device = model.device + + T = encoder_out.size(1) + + B = HypothesisList() + B.add( + Hypothesis( + ys=[blank_id] * context_size, + log_prob=torch.zeros(1, dtype=torch.float32, device=device), + ) + ) + + for t in range(T): + # fmt: off + current_encoder_out = encoder_out[:, t:t+1, :].unsqueeze(2) + # current_encoder_out is of shape (1, 1, 1, encoder_out_dim) + # fmt: on + A = list(B) + B = HypothesisList() + + ys_log_probs = torch.cat([hyp.log_prob.reshape(1, 1) for hyp in A]) + # ys_log_probs is of shape (num_hyps, 1) + + decoder_input = torch.tensor( + [hyp.ys[-context_size:] for hyp in A], + device=device, + dtype=torch.int64, + ) + # decoder_input is of shape (num_hyps, context_size) + + decoder_out = model.decoder(decoder_input, need_pad=False).unsqueeze(1) + # decoder_output is of shape (num_hyps, 1, 1, decoder_output_dim) + + current_encoder_out = current_encoder_out.expand( + decoder_out.size(0), 1, 1, -1 + ) # (num_hyps, 1, 1, encoder_out_dim) + + logits = model.joiner( + current_encoder_out, + decoder_out, + ) + # logits is of shape (num_hyps, 1, 1, vocab_size) + logits = logits.squeeze(1).squeeze(1) + + # now logits is of shape (num_hyps, vocab_size) + log_probs = logits.log_softmax(dim=-1) + + log_probs.add_(ys_log_probs) + + log_probs = log_probs.reshape(-1) + topk_log_probs, topk_indexes = log_probs.topk(beam) + + # topk_hyp_indexes are indexes into `A` + topk_hyp_indexes = topk_indexes // logits.size(-1) + topk_token_indexes = topk_indexes % logits.size(-1) + + topk_hyp_indexes = topk_hyp_indexes.tolist() + topk_token_indexes = topk_token_indexes.tolist() + + for i in range(len(topk_hyp_indexes)): + hyp = A[topk_hyp_indexes[i]] + new_ys = hyp.ys[:] + new_token = topk_token_indexes[i] + if new_token != blank_id: + new_ys.append(new_token) + new_log_prob = topk_log_probs[i] + new_hyp = Hypothesis(ys=new_ys, log_prob=new_log_prob) + B.add(new_hyp) + + best_hyp = B.get_most_probable(length_norm=True) + ys = best_hyp.ys[context_size:] # [context_size:] to remove blanks + + return ys + + def beam_search( model: Transducer, encoder_out: torch.Tensor, @@ -246,7 +352,9 @@ def beam_search( device = model.device decoder_input = torch.tensor( - [blank_id] * context_size, device=device + [blank_id] * context_size, + device=device, + dtype=torch.int64, ).reshape(1, context_size) decoder_out = model.decoder(decoder_input, need_pad=False) @@ -283,7 +391,9 @@ def beam_search( if cached_key not in decoder_cache: decoder_input = torch.tensor( - [y_star.ys[-context_size:]], device=device + [y_star.ys[-context_size:]], + device=device, + dtype=torch.int64, ).reshape(1, context_size) decoder_out = model.decoder(decoder_input, need_pad=False) @@ -297,7 +407,7 @@ def beam_search( current_encoder_out, decoder_out.unsqueeze(1) ) - # TODO(fangjun): Cache the blank posterior + # TODO(fangjun): Scale the blank posterior log_prob = logits.log_softmax(dim=-1) # log_prob is (1, 1, 1, vocab_size) @@ -309,7 +419,7 @@ def beam_search( # First, process the blank symbol skip_log_prob = log_prob[blank_id] - new_y_star_log_prob = y_star.log_prob + skip_log_prob.item() + new_y_star_log_prob = y_star.log_prob + skip_log_prob # ys[:] returns a copy of ys B.add(Hypothesis(ys=y_star.ys[:], log_prob=new_y_star_log_prob)) diff --git a/egs/librispeech/ASR/pruned_transducer_stateless/decode.py 
b/egs/librispeech/ASR/pruned_transducer_stateless/decode.py index 9479d57a8..86ec6172f 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless/decode.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless/decode.py @@ -33,6 +33,15 @@ Usage: --max-duration 100 \ --decoding-method beam_search \ --beam-size 4 + +(3) modified beam search +./pruned_transducer_stateless/decode.py \ + --epoch 28 \ + --avg 15 \ + --exp-dir ./pruned_transducer_stateless/exp \ + --max-duration 100 \ + --decoding-method modified_beam_search \ + --beam-size 4 """ @@ -46,14 +55,10 @@ import sentencepiece as spm import torch import torch.nn as nn from asr_datamodule import LibriSpeechAsrDataModule -from beam_search import beam_search, greedy_search -from conformer import Conformer -from decoder import Decoder -from joiner import Joiner -from model import Transducer +from beam_search import beam_search, greedy_search, modified_beam_search +from train import get_params, get_transducer_model from icefall.checkpoint import average_checkpoints, load_checkpoint -from icefall.env import get_env_info from icefall.utils import ( AttributeDict, setup_logger, @@ -104,6 +109,7 @@ def get_parser(): help="""Possible values are: - greedy_search - beam_search + - modified_beam_search """, ) @@ -111,7 +117,8 @@ def get_parser(): "--beam-size", type=int, default=4, - help="Used only when --decoding-method is beam_search", + help="""Used only when --decoding-method is + beam_search or modified_beam_search""", ) parser.add_argument( @@ -125,78 +132,13 @@ def get_parser(): "--max-sym-per-frame", type=int, default=3, - help="Maximum number of symbols per frame", + help="""Maximum number of symbols per frame. + Used only when --decoding_method is greedy_search""", ) return parser -def get_params() -> AttributeDict: - params = AttributeDict( - { - # parameters for conformer - "feature_dim": 80, - "subsampling_factor": 4, - "attention_dim": 512, - "nhead": 8, - "dim_feedforward": 2048, - "num_encoder_layers": 12, - "vgg_frontend": False, - # parameters for decoder - "embedding_dim": 512, - "env_info": get_env_info(), - } - ) - return params - - -def get_encoder_model(params: AttributeDict) -> nn.Module: - # TODO: We can add an option to switch between Conformer and Transformer - encoder = Conformer( - num_features=params.feature_dim, - output_dim=params.vocab_size, - subsampling_factor=params.subsampling_factor, - d_model=params.attention_dim, - nhead=params.nhead, - dim_feedforward=params.dim_feedforward, - num_encoder_layers=params.num_encoder_layers, - vgg_frontend=params.vgg_frontend, - ) - return encoder - - -def get_decoder_model(params: AttributeDict) -> nn.Module: - decoder = Decoder( - vocab_size=params.vocab_size, - embedding_dim=params.embedding_dim, - blank_id=params.blank_id, - context_size=params.context_size, - ) - return decoder - - -def get_joiner_model(params: AttributeDict) -> nn.Module: - joiner = Joiner( - input_dim=params.vocab_size, - inner_dim=params.embedding_dim, - output_dim=params.vocab_size, - ) - return joiner - - -def get_transducer_model(params: AttributeDict) -> nn.Module: - encoder = get_encoder_model(params) - decoder = get_decoder_model(params) - joiner = get_joiner_model(params) - - model = Transducer( - encoder=encoder, - decoder=decoder, - joiner=joiner, - ) - return model - - def decode_one_batch( params: AttributeDict, model: nn.Module, @@ -258,6 +200,10 @@ def decode_one_batch( hyp = beam_search( model=model, encoder_out=encoder_out_i, beam=params.beam_size ) + elif params.decoding_method == 
"modified_beam_search": + hyp = modified_beam_search( + model=model, encoder_out=encoder_out_i, beam=params.beam_size + ) else: raise ValueError( f"Unsupported decoding method: {params.decoding_method}" @@ -391,11 +337,15 @@ def main(): params = get_params() params.update(vars(args)) - assert params.decoding_method in ("greedy_search", "beam_search") + assert params.decoding_method in ( + "greedy_search", + "beam_search", + "modified_beam_search", + ) params.res_dir = params.exp_dir / params.decoding_method params.suffix = f"epoch-{params.epoch}-avg-{params.avg}" - if params.decoding_method == "beam_search": + if "beam_search" in params.decoding_method: params.suffix += f"-beam-{params.beam_size}" else: params.suffix += f"-context-{params.context_size}" @@ -469,8 +419,5 @@ def main(): logging.info("Done!") -torch.set_num_threads(1) -torch.set_num_interop_threads(1) - if __name__ == "__main__": main() diff --git a/egs/librispeech/ASR/pruned_transducer_stateless/export.py b/egs/librispeech/ASR/pruned_transducer_stateless/export.py index 94987c39a..7d2a07817 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless/export.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless/export.py @@ -39,7 +39,7 @@ you can do: --exp-dir ./pruned_transducer_stateless/exp \ --epoch 9999 \ --avg 1 \ - --max-duration 1 \ + --max-duration 100 \ --bpe-model data/lang_bpe_500/bpe.model """ @@ -49,15 +49,10 @@ from pathlib import Path import sentencepiece as spm import torch -import torch.nn as nn -from conformer import Conformer -from decoder import Decoder -from joiner import Joiner -from model import Transducer +from train import get_params, get_transducer_model from icefall.checkpoint import average_checkpoints, load_checkpoint -from icefall.env import get_env_info -from icefall.utils import AttributeDict, str2bool +from icefall.utils import str2bool def get_parser(): @@ -117,71 +112,6 @@ def get_parser(): return parser -def get_params() -> AttributeDict: - params = AttributeDict( - { - # parameters for conformer - "feature_dim": 80, - "subsampling_factor": 4, - "attention_dim": 512, - "nhead": 8, - "dim_feedforward": 2048, - "num_encoder_layers": 12, - "vgg_frontend": False, - # parameters for decoder - "embedding_dim": 512, - "env_info": get_env_info(), - } - ) - return params - - -def get_encoder_model(params: AttributeDict) -> nn.Module: - encoder = Conformer( - num_features=params.feature_dim, - output_dim=params.vocab_size, - subsampling_factor=params.subsampling_factor, - d_model=params.attention_dim, - nhead=params.nhead, - dim_feedforward=params.dim_feedforward, - num_encoder_layers=params.num_encoder_layers, - vgg_frontend=params.vgg_frontend, - ) - return encoder - - -def get_decoder_model(params: AttributeDict) -> nn.Module: - decoder = Decoder( - vocab_size=params.vocab_size, - embedding_dim=params.embedding_dim, - blank_id=params.blank_id, - context_size=params.context_size, - ) - return decoder - - -def get_joiner_model(params: AttributeDict) -> nn.Module: - joiner = Joiner( - input_dim=params.vocab_size, - inner_dim=params.embedding_dim, - output_dim=params.vocab_size, - ) - return joiner - - -def get_transducer_model(params: AttributeDict) -> nn.Module: - encoder = get_encoder_model(params) - decoder = get_decoder_model(params) - joiner = get_joiner_model(params) - - model = Transducer( - encoder=encoder, - decoder=decoder, - joiner=joiner, - ) - return model - - def main(): args = get_parser().parse_args() args.exp_dir = Path(args.exp_dir) diff --git 
a/egs/librispeech/ASR/pruned_transducer_stateless/pretrained.py b/egs/librispeech/ASR/pruned_transducer_stateless/pretrained.py index 73c5aee5c..e6528b8d7 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless/pretrained.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless/pretrained.py @@ -49,17 +49,10 @@ from typing import List import kaldifeat import sentencepiece as spm import torch -import torch.nn as nn import torchaudio -from beam_search import beam_search, greedy_search -from conformer import Conformer -from decoder import Decoder -from joiner import Joiner -from model import Transducer +from beam_search import beam_search, greedy_search, modified_beam_search from torch.nn.utils.rnn import pad_sequence - -from icefall.env import get_env_info -from icefall.utils import AttributeDict +from train import get_params, get_transducer_model def get_parser(): @@ -91,6 +84,7 @@ def get_parser(): help="""Possible values are: - greedy_search - beam_search + - modified_beam_search """, ) @@ -104,11 +98,18 @@ def get_parser(): "The sample rate has to be 16kHz.", ) + parser.add_argument( + "--sample-rate", + type=int, + default=16000, + help="The sample rate of the input sound file", + ) + parser.add_argument( "--beam-size", type=int, default=4, - help="Used only when --method is beam_search", + help="Used only when --method is beam_search and modified_beam_search", ) parser.add_argument( @@ -130,72 +131,6 @@ def get_parser(): return parser -def get_params() -> AttributeDict: - params = AttributeDict( - { - "sample_rate": 16000, - # parameters for conformer - "feature_dim": 80, - "subsampling_factor": 4, - "attention_dim": 512, - "nhead": 8, - "dim_feedforward": 2048, - "num_encoder_layers": 12, - "vgg_frontend": False, - # parameters for decoder - "embedding_dim": 512, - "env_info": get_env_info(), - } - ) - return params - - -def get_encoder_model(params: AttributeDict) -> nn.Module: - encoder = Conformer( - num_features=params.feature_dim, - output_dim=params.vocab_size, - subsampling_factor=params.subsampling_factor, - d_model=params.attention_dim, - nhead=params.nhead, - dim_feedforward=params.dim_feedforward, - num_encoder_layers=params.num_encoder_layers, - vgg_frontend=params.vgg_frontend, - ) - return encoder - - -def get_decoder_model(params: AttributeDict) -> nn.Module: - decoder = Decoder( - vocab_size=params.vocab_size, - embedding_dim=params.embedding_dim, - blank_id=params.blank_id, - context_size=params.context_size, - ) - return decoder - - -def get_joiner_model(params: AttributeDict) -> nn.Module: - joiner = Joiner( - input_dim=params.vocab_size, - inner_dim=params.embedding_dim, - output_dim=params.vocab_size, - ) - return joiner - - -def get_transducer_model(params: AttributeDict) -> nn.Module: - encoder = get_encoder_model(params) - decoder = get_decoder_model(params) - joiner = get_joiner_model(params) - - model = Transducer( - encoder=encoder, - decoder=decoder, - joiner=joiner, - ) - return model - - def read_sound_files( filenames: List[str], expected_sample_rate: float ) -> List[torch.Tensor]: @@ -220,6 +155,7 @@ def read_sound_files( return ans +@torch.no_grad() def main(): parser = get_parser() args = parser.parse_args() @@ -278,10 +214,9 @@ def main(): feature_lengths = torch.tensor(feature_lengths, device=device) - with torch.no_grad(): - encoder_out, encoder_out_lens = model.encoder( - x=features, x_lens=feature_lengths - ) + encoder_out, encoder_out_lens = model.encoder( + x=features, x_lens=feature_lengths + ) num_waves = encoder_out.size(0) hyps = [] @@ 
-303,6 +238,10 @@ def main(): hyp = beam_search( model=model, encoder_out=encoder_out_i, beam=params.beam_size ) + elif params.method == "modified_beam_search": + hyp = modified_beam_search( + model=model, encoder_out=encoder_out_i, beam=params.beam_size + ) else: raise ValueError(f"Unsupported method: {params.method}") diff --git a/requirements.txt b/requirements.txt index 09d9ef69f..4eaa86a67 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,4 +3,3 @@ kaldialign sentencepiece>=0.1.96 tensorboard typeguard -optimized_transducer From d0d806560f58a5ae7a127fd4039734a7d603e78b Mon Sep 17 00:00:00 2001 From: Mingshuang Luo <37799481+luomingshuang@users.noreply.github.com> Date: Mon, 14 Mar 2022 00:30:58 +0800 Subject: [PATCH 23/25] Change for asr_datamodule.py (#241) * change for asr_datamodule.py * fix style check * do a fix --- .../ASR/tdnn_lstm_ctc/asr_datamodule.py | 14 +- .../asr_datamodule.py | 16 +- .../ASR/conformer_mmi/asr_datamodule.py | 357 +----------------- .../ASR/tdnn_lstm_ctc/asr_datamodule.py | 14 +- .../asr_datamodule.py | 16 +- .../ASR/tdnn_ligru_ctc/asr_datamodule.py | 331 +--------------- egs/timit/ASR/tdnn_lstm_ctc/asr_datamodule.py | 15 +- 7 files changed, 69 insertions(+), 694 deletions(-) mode change 100644 => 120000 egs/librispeech/ASR/conformer_mmi/asr_datamodule.py mode change 100644 => 120000 egs/timit/ASR/tdnn_ligru_ctc/asr_datamodule.py diff --git a/egs/aishell/ASR/tdnn_lstm_ctc/asr_datamodule.py b/egs/aishell/ASR/tdnn_lstm_ctc/asr_datamodule.py index 65caa656e..507db2933 100644 --- a/egs/aishell/ASR/tdnn_lstm_ctc/asr_datamodule.py +++ b/egs/aishell/ASR/tdnn_lstm_ctc/asr_datamodule.py @@ -1,4 +1,5 @@ # Copyright 2021 Piotr Żelasko +# Copyright 2022 Xiaomi Corporation (Author: Mingshuang Luo) # # See ../../../../LICENSE for clarification regarding multiple authors # @@ -16,6 +17,7 @@ import argparse +import inspect import logging from functools import lru_cache from pathlib import Path @@ -210,10 +212,20 @@ class AishellAsrDataModule: logging.info( f"Time warp factor: {self.args.spec_aug_time_warp_factor}" ) + # Set the value of num_frame_masks according to Lhotse's version. + # In different Lhotse's versions, the default of num_frame_masks is + # different. + num_frame_masks = 10 + num_frame_masks_parameter = inspect.signature( + SpecAugment.__init__ + ).parameters["num_frame_masks"] + if num_frame_masks_parameter.default == 1: + num_frame_masks = 2 + logging.info(f"Num frame mask: {num_frame_masks}") input_transforms.append( SpecAugment( time_warp_factor=self.args.spec_aug_time_warp_factor, - num_frame_masks=2, + num_frame_masks=num_frame_masks, features_mask_size=27, num_feature_masks=2, frames_mask_size=100, diff --git a/egs/aishell/ASR/transducer_stateless_modified-2/asr_datamodule.py b/egs/aishell/ASR/transducer_stateless_modified-2/asr_datamodule.py index fe0d0a872..20eb8155c 100644 --- a/egs/aishell/ASR/transducer_stateless_modified-2/asr_datamodule.py +++ b/egs/aishell/ASR/transducer_stateless_modified-2/asr_datamodule.py @@ -1,5 +1,6 @@ # Copyright 2021 Piotr Żelasko -# 2022 Xiaomi Corp. (authors: Fangjun Kuang) +# 2022 Xiaomi Corp. (authors: Fangjun Kuang +# Mingshuang Luo) # # See ../../../../LICENSE for clarification regarding multiple authors # @@ -16,6 +17,7 @@ # limitations under the License. 
import argparse +import inspect import logging from pathlib import Path from typing import Optional @@ -180,10 +182,20 @@ class AsrDataModule: logging.info( f"Time warp factor: {self.args.spec_aug_time_warp_factor}" ) + # Set the value of num_frame_masks according to Lhotse's version. + # In different Lhotse's versions, the default of num_frame_masks is + # different. + num_frame_masks = 10 + num_frame_masks_parameter = inspect.signature( + SpecAugment.__init__ + ).parameters["num_frame_masks"] + if num_frame_masks_parameter.default == 1: + num_frame_masks = 2 + logging.info(f"Num frame mask: {num_frame_masks}") input_transforms.append( SpecAugment( time_warp_factor=self.args.spec_aug_time_warp_factor, - num_frame_masks=2, + num_frame_masks=num_frame_masks, features_mask_size=27, num_feature_masks=2, frames_mask_size=100, diff --git a/egs/librispeech/ASR/conformer_mmi/asr_datamodule.py b/egs/librispeech/ASR/conformer_mmi/asr_datamodule.py deleted file mode 100644 index d3eab87a9..000000000 --- a/egs/librispeech/ASR/conformer_mmi/asr_datamodule.py +++ /dev/null @@ -1,356 +0,0 @@ -# Copyright 2021 Piotr Żelasko -# -# See ../../../../LICENSE for clarification regarding multiple authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import argparse -import logging -from functools import lru_cache -from pathlib import Path -from typing import List, Union - -from lhotse import CutSet, Fbank, FbankConfig, load_manifest -from lhotse.dataset import ( - BucketingSampler, - CutConcatenate, - CutMix, - K2SpeechRecognitionDataset, - PrecomputedFeatures, - SingleCutSampler, - SpecAugment, -) -from lhotse.dataset.input_strategies import OnTheFlyFeatures -from torch.utils.data import DataLoader - -from icefall.dataset.datamodule import DataModule -from icefall.utils import str2bool - - -class LibriSpeechAsrDataModule(DataModule): - """ - DataModule for k2 ASR experiments. - It assumes there is always one train and valid dataloader, - but there can be multiple test dataloaders (e.g. LibriSpeech test-clean - and test-other). - - It contains all the common data pipeline modules used in ASR - experiments, e.g.: - - dynamic batch size, - - bucketing samplers, - - cut concatenation, - - augmentation, - - on-the-fly feature extraction - - This class should be derived for specific corpora used in ASR tasks. - """ - - @classmethod - def add_arguments(cls, parser: argparse.ArgumentParser): - super().add_arguments(parser) - group = parser.add_argument_group( - title="ASR data related options", - description="These options are used for the preparation of " - "PyTorch DataLoaders from Lhotse CutSet's -- they control the " - "effective batch sizes, sampling strategies, applied data " - "augmentations, etc.", - ) - group.add_argument( - "--full-libri", - type=str2bool, - default=True, - help="When enabled, use 960h LibriSpeech. 
" - "Otherwise, use 100h subset.", - ) - group.add_argument( - "--feature-dir", - type=Path, - default=Path("data/fbank"), - help="Path to directory with train/valid/test cuts.", - ) - group.add_argument( - "--max-duration", - type=int, - default=200.0, - help="Maximum pooled recordings duration (seconds) in a " - "single batch. You can reduce it if it causes CUDA OOM.", - ) - group.add_argument( - "--bucketing-sampler", - type=str2bool, - default=True, - help="When enabled, the batches will come from buckets of " - "similar duration (saves padding frames).", - ) - group.add_argument( - "--num-buckets", - type=int, - default=30, - help="The number of buckets for the BucketingSampler" - "(you might want to increase it for larger datasets).", - ) - group.add_argument( - "--concatenate-cuts", - type=str2bool, - default=False, - help="When enabled, utterances (cuts) will be concatenated " - "to minimize the amount of padding.", - ) - group.add_argument( - "--duration-factor", - type=float, - default=1.0, - help="Determines the maximum duration of a concatenated cut " - "relative to the duration of the longest cut in a batch.", - ) - group.add_argument( - "--gap", - type=float, - default=1.0, - help="The amount of padding (in seconds) inserted between " - "concatenated cuts. This padding is filled with noise when " - "noise augmentation is used.", - ) - group.add_argument( - "--on-the-fly-feats", - type=str2bool, - default=False, - help="When enabled, use on-the-fly cut mixing and feature " - "extraction. Will drop existing precomputed feature manifests " - "if available.", - ) - group.add_argument( - "--shuffle", - type=str2bool, - default=True, - help="When enabled (=default), the examples will be " - "shuffled for each epoch.", - ) - group.add_argument( - "--return-cuts", - type=str2bool, - default=True, - help="When enabled, each batch will have the " - "field: batch['supervisions']['cut'] with the cuts that " - "were used to construct it.", - ) - - group.add_argument( - "--num-workers", - type=int, - default=2, - help="The number of training dataloader workers that " - "collect the batches.", - ) - - def train_dataloaders(self) -> DataLoader: - logging.info("About to get train cuts") - cuts_train = self.train_cuts() - - logging.info("About to get Musan cuts") - cuts_musan = load_manifest(self.args.feature_dir / "cuts_musan.json.gz") - - logging.info("About to create train dataset") - transforms = [ - CutMix(cuts=cuts_musan, prob=0.5, snr=(10, 20), preserve_id=True) - ] - if self.args.concatenate_cuts: - logging.info( - f"Using cut concatenation with duration factor " - f"{self.args.duration_factor} and gap {self.args.gap}." - ) - # Cut concatenation should be the first transform in the list, - # so that if we e.g. mix noise in, it will fill the gaps between - # different utterances. - transforms = [ - CutConcatenate( - duration_factor=self.args.duration_factor, gap=self.args.gap - ) - ] + transforms - - input_transforms = [ - SpecAugment( - num_frame_masks=2, - features_mask_size=27, - num_feature_masks=2, - frames_mask_size=100, - ) - ] - - train = K2SpeechRecognitionDataset( - cut_transforms=transforms, - input_transforms=input_transforms, - return_cuts=self.args.return_cuts, - ) - - if self.args.on_the_fly_feats: - # NOTE: the PerturbSpeed transform should be added only if we - # remove it from data prep stage. - # Add on-the-fly speed perturbation; since originally it would - # have increased epoch size by 3, we will apply prob 2/3 and use - # 3x more epochs. 
- # Speed perturbation probably should come first before - # concatenation, but in principle the transforms order doesn't have - # to be strict (e.g. could be randomized) - # transforms = [PerturbSpeed(factors=[0.9, 1.1], p=2/3)] + transforms # noqa - # Drop feats to be on the safe side. - train = K2SpeechRecognitionDataset( - cut_transforms=transforms, - input_strategy=OnTheFlyFeatures( - Fbank(FbankConfig(num_mel_bins=80)) - ), - input_transforms=input_transforms, - return_cuts=self.args.return_cuts, - ) - - if self.args.bucketing_sampler: - logging.info("Using BucketingSampler.") - train_sampler = BucketingSampler( - cuts_train, - max_duration=self.args.max_duration, - shuffle=self.args.shuffle, - num_buckets=self.args.num_buckets, - bucket_method="equal_duration", - drop_last=True, - ) - else: - logging.info("Using SingleCutSampler.") - train_sampler = SingleCutSampler( - cuts_train, - max_duration=self.args.max_duration, - shuffle=self.args.shuffle, - ) - logging.info("About to create train dataloader") - - train_dl = DataLoader( - train, - sampler=train_sampler, - batch_size=None, - num_workers=self.args.num_workers, - persistent_workers=False, - ) - - return train_dl - - def valid_dataloaders(self) -> DataLoader: - logging.info("About to get dev cuts") - cuts_valid = self.valid_cuts() - - transforms = [] - if self.args.concatenate_cuts: - transforms = [ - CutConcatenate( - duration_factor=self.args.duration_factor, gap=self.args.gap - ) - ] + transforms - - logging.info("About to create dev dataset") - if self.args.on_the_fly_feats: - validate = K2SpeechRecognitionDataset( - cut_transforms=transforms, - input_strategy=OnTheFlyFeatures( - Fbank(FbankConfig(num_mel_bins=80)) - ), - return_cuts=self.args.return_cuts, - ) - else: - validate = K2SpeechRecognitionDataset( - cut_transforms=transforms, - return_cuts=self.args.return_cuts, - ) - valid_sampler = SingleCutSampler( - cuts_valid, - max_duration=self.args.max_duration, - shuffle=False, - ) - logging.info("About to create dev dataloader") - valid_dl = DataLoader( - validate, - sampler=valid_sampler, - batch_size=None, - num_workers=2, - persistent_workers=False, - ) - - return valid_dl - - def test_dataloaders(self) -> Union[DataLoader, List[DataLoader]]: - cuts = self.test_cuts() - is_list = isinstance(cuts, list) - test_loaders = [] - if not is_list: - cuts = [cuts] - - for cuts_test in cuts: - logging.debug("About to create test dataset") - test = K2SpeechRecognitionDataset( - input_strategy=OnTheFlyFeatures( - Fbank(FbankConfig(num_mel_bins=80)) - ) - if self.args.on_the_fly_feats - else PrecomputedFeatures(), - return_cuts=self.args.return_cuts, - ) - sampler = SingleCutSampler( - cuts_test, max_duration=self.args.max_duration - ) - logging.debug("About to create test dataloader") - test_dl = DataLoader( - test, batch_size=None, sampler=sampler, num_workers=1 - ) - test_loaders.append(test_dl) - - if is_list: - return test_loaders - else: - return test_loaders[0] - - @lru_cache() - def train_cuts(self) -> CutSet: - logging.info("About to get train cuts") - cuts_train = load_manifest( - self.args.feature_dir / "cuts_train-clean-100.json.gz" - ) - if self.args.full_libri: - cuts_train = ( - cuts_train - + load_manifest( - self.args.feature_dir / "cuts_train-clean-360.json.gz" - ) - + load_manifest( - self.args.feature_dir / "cuts_train-other-500.json.gz" - ) - ) - return cuts_train - - @lru_cache() - def valid_cuts(self) -> CutSet: - logging.info("About to get dev cuts") - cuts_valid = load_manifest( - self.args.feature_dir / 
"cuts_dev-clean.json.gz" - ) + load_manifest(self.args.feature_dir / "cuts_dev-other.json.gz") - return cuts_valid - - @lru_cache() - def test_cuts(self) -> List[CutSet]: - test_sets = ["test-clean", "test-other"] - cuts = [] - for test_set in test_sets: - logging.debug("About to get test cuts") - cuts.append( - load_manifest( - self.args.feature_dir / f"cuts_{test_set}.json.gz" - ) - ) - return cuts diff --git a/egs/librispeech/ASR/conformer_mmi/asr_datamodule.py b/egs/librispeech/ASR/conformer_mmi/asr_datamodule.py new file mode 120000 index 000000000..a73848de9 --- /dev/null +++ b/egs/librispeech/ASR/conformer_mmi/asr_datamodule.py @@ -0,0 +1 @@ +../conformer_ctc/asr_datamodule.py \ No newline at end of file diff --git a/egs/librispeech/ASR/tdnn_lstm_ctc/asr_datamodule.py b/egs/librispeech/ASR/tdnn_lstm_ctc/asr_datamodule.py index 2af2f5e8a..51e10fb2f 100644 --- a/egs/librispeech/ASR/tdnn_lstm_ctc/asr_datamodule.py +++ b/egs/librispeech/ASR/tdnn_lstm_ctc/asr_datamodule.py @@ -1,4 +1,5 @@ # Copyright 2021 Piotr Żelasko +# Copyright 2022 Xiaomi Corporation (Author: Mingshuang Luo) # # See ../../../../LICENSE for clarification regarding multiple authors # @@ -16,6 +17,7 @@ import argparse +import inspect import logging from functools import lru_cache from pathlib import Path @@ -216,10 +218,20 @@ class LibriSpeechAsrDataModule: logging.info( f"Time warp factor: {self.args.spec_aug_time_warp_factor}" ) + # Set the value of num_frame_masks according to Lhotse's version. + # In different Lhotse's versions, the default of num_frame_masks is + # different. + num_frame_masks = 10 + num_frame_masks_parameter = inspect.signature( + SpecAugment.__init__ + ).parameters["num_frame_masks"] + if num_frame_masks_parameter.default == 1: + num_frame_masks = 2 + logging.info(f"Num frame mask: {num_frame_masks}") input_transforms.append( SpecAugment( time_warp_factor=self.args.spec_aug_time_warp_factor, - num_frame_masks=2, + num_frame_masks=num_frame_masks, features_mask_size=27, num_feature_masks=2, frames_mask_size=100, diff --git a/egs/librispeech/ASR/transducer_stateless_multi_datasets/asr_datamodule.py b/egs/librispeech/ASR/transducer_stateless_multi_datasets/asr_datamodule.py index fe0d0a872..669ad1d1b 100644 --- a/egs/librispeech/ASR/transducer_stateless_multi_datasets/asr_datamodule.py +++ b/egs/librispeech/ASR/transducer_stateless_multi_datasets/asr_datamodule.py @@ -1,5 +1,6 @@ # Copyright 2021 Piotr Żelasko -# 2022 Xiaomi Corp. (authors: Fangjun Kuang) +# 2022 Xiaomi Corp. (authors: Fangjun Kuang +# Mingshuang Luo) # # See ../../../../LICENSE for clarification regarding multiple authors # @@ -16,6 +17,7 @@ # limitations under the License. import argparse +import inspect import logging from pathlib import Path from typing import Optional @@ -180,10 +182,20 @@ class AsrDataModule: logging.info( f"Time warp factor: {self.args.spec_aug_time_warp_factor}" ) + # Set the value of num_frame_masks according to Lhotse's version. + # In different Lhotse's versions, the default of num_frame_masks is + # different. 
+ num_frame_masks = 10 + num_frame_masks_parameter = inspect.signature( + SpecAugment.__init__ + ).parameters["num_frame_masks"] + if num_frame_masks_parameter.default == 1: + num_frame_masks = 2 + logging.info(f"Num frame mask: {num_frame_masks}") input_transforms.append( SpecAugment( time_warp_factor=self.args.spec_aug_time_warp_factor, - num_frame_masks=2, + num_frame_masks=num_frame_masks, features_mask_size=27, num_feature_masks=2, frames_mask_size=100, diff --git a/egs/timit/ASR/tdnn_ligru_ctc/asr_datamodule.py b/egs/timit/ASR/tdnn_ligru_ctc/asr_datamodule.py deleted file mode 100644 index 8b20d345d..000000000 --- a/egs/timit/ASR/tdnn_ligru_ctc/asr_datamodule.py +++ /dev/null @@ -1,330 +0,0 @@ -# Copyright 2021 Piotr Żelasko -# 2021 Xiaomi Corp. (authors: Mingshuang Luo) -# -# See ../../../../LICENSE for clarification regarding multiple authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import argparse -import logging -from functools import lru_cache -from pathlib import Path -from typing import List, Union - -from lhotse import CutSet, Fbank, FbankConfig, load_manifest -from lhotse.dataset import ( - BucketingSampler, - CutConcatenate, - CutMix, - K2SpeechRecognitionDataset, - PrecomputedFeatures, - SingleCutSampler, - SpecAugment, -) -from lhotse.dataset.input_strategies import OnTheFlyFeatures -from torch.utils.data import DataLoader - -from icefall.dataset.datamodule import DataModule -from icefall.utils import str2bool - - -class TimitAsrDataModule(DataModule): - """ - DataModule for k2 ASR experiments. - It assumes there is always one train and valid dataloader, - but there can be multiple test dataloaders (e.g. LibriSpeech test-clean - and test-other). - - It contains all the common data pipeline modules used in ASR - experiments, e.g.: - - dynamic batch size, - - bucketing samplers, - - cut concatenation, - - augmentation, - - on-the-fly feature extraction - - This class should be derived for specific corpora used in ASR tasks. - """ - - @classmethod - def add_arguments(cls, parser: argparse.ArgumentParser): - super().add_arguments(parser) - group = parser.add_argument_group( - title="ASR data related options", - description="These options are used for the preparation of " - "PyTorch DataLoaders from Lhotse CutSet's -- they control the " - "effective batch sizes, sampling strategies, applied data " - "augmentations, etc.", - ) - group.add_argument( - "--feature-dir", - type=Path, - default=Path("data/fbank"), - help="Path to directory with train/valid/test cuts.", - ) - group.add_argument( - "--max-duration", - type=int, - default=200.0, - help="Maximum pooled recordings duration (seconds) in a " - "single batch. 
You can reduce it if it causes CUDA OOM.", - ) - group.add_argument( - "--bucketing-sampler", - type=str2bool, - default=True, - help="When enabled, the batches will come from buckets of " - "similar duration (saves padding frames).", - ) - group.add_argument( - "--num-buckets", - type=int, - default=30, - help="The number of buckets for the BucketingSampler" - "(you might want to increase it for larger datasets).", - ) - group.add_argument( - "--concatenate-cuts", - type=str2bool, - default=False, - help="When enabled, utterances (cuts) will be concatenated " - "to minimize the amount of padding.", - ) - group.add_argument( - "--duration-factor", - type=float, - default=1.0, - help="Determines the maximum duration of a concatenated cut " - "relative to the duration of the longest cut in a batch.", - ) - group.add_argument( - "--gap", - type=float, - default=1.0, - help="The amount of padding (in seconds) inserted between " - "concatenated cuts. This padding is filled with noise when " - "noise augmentation is used.", - ) - group.add_argument( - "--on-the-fly-feats", - type=str2bool, - default=False, - help="When enabled, use on-the-fly cut mixing and feature " - "extraction. Will drop existing precomputed feature manifests " - "if available.", - ) - group.add_argument( - "--shuffle", - type=str2bool, - default=True, - help="When enabled (=default), the examples will be " - "shuffled for each epoch.", - ) - group.add_argument( - "--return-cuts", - type=str2bool, - default=True, - help="When enabled, each batch will have the " - "field: batch['supervisions']['cut'] with the cuts that " - "were used to construct it.", - ) - - group.add_argument( - "--num-workers", - type=int, - default=2, - help="The number of training dataloader workers that " - "collect the batches.", - ) - - def train_dataloaders(self) -> DataLoader: - logging.info("About to get train cuts") - cuts_train = self.train_cuts() - - logging.info("About to get Musan cuts") - cuts_musan = load_manifest(self.args.feature_dir / "cuts_musan.json.gz") - - logging.info("About to create train dataset") - transforms = [CutMix(cuts=cuts_musan, prob=0.5, snr=(10, 20))] - if self.args.concatenate_cuts: - logging.info( - f"Using cut concatenation with duration factor " - f"{self.args.duration_factor} and gap {self.args.gap}." - ) - # Cut concatenation should be the first transform in the list, - # so that if we e.g. mix noise in, it will fill the gaps between - # different utterances. - transforms = [ - CutConcatenate( - duration_factor=self.args.duration_factor, gap=self.args.gap - ) - ] + transforms - - input_transforms = [ - SpecAugment( - num_frame_masks=2, - features_mask_size=27, - num_feature_masks=2, - frames_mask_size=100, - ) - ] - - train = K2SpeechRecognitionDataset( - cut_transforms=transforms, - input_transforms=input_transforms, - return_cuts=self.args.return_cuts, - ) - - if self.args.on_the_fly_feats: - # NOTE: the PerturbSpeed transform should be added only if we - # remove it from data prep stage. - # Add on-the-fly speed perturbation; since originally it would - # have increased epoch size by 3, we will apply prob 2/3 and use - # 3x more epochs. - # Speed perturbation probably should come first before - # concatenation, but in principle the transforms order doesn't have - # to be strict (e.g. could be randomized) - # transforms = [PerturbSpeed(factors=[0.9, 1.1], p=2/3)] + transforms # noqa - # Drop feats to be on the safe side. 
- train = K2SpeechRecognitionDataset( - cut_transforms=transforms, - input_strategy=OnTheFlyFeatures( - Fbank(FbankConfig(num_mel_bins=80)) - ), - input_transforms=input_transforms, - return_cuts=self.args.return_cuts, - ) - - if self.args.bucketing_sampler: - logging.info("Using BucketingSampler.") - train_sampler = BucketingSampler( - cuts_train, - max_duration=self.args.max_duration, - shuffle=self.args.shuffle, - num_buckets=self.args.num_buckets, - bucket_method="equal_duration", - drop_last=True, - ) - else: - logging.info("Using SingleCutSampler.") - train_sampler = SingleCutSampler( - cuts_train, - max_duration=self.args.max_duration, - shuffle=self.args.shuffle, - ) - logging.info("About to create train dataloader") - - train_dl = DataLoader( - train, - sampler=train_sampler, - batch_size=None, - num_workers=self.args.num_workers, - persistent_workers=False, - ) - - return train_dl - - def valid_dataloaders(self) -> DataLoader: - logging.info("About to get dev cuts") - cuts_valid = self.valid_cuts() - - transforms = [] - if self.args.concatenate_cuts: - transforms = [ - CutConcatenate( - duration_factor=self.args.duration_factor, gap=self.args.gap - ) - ] + transforms - - logging.info("About to create dev dataset") - if self.args.on_the_fly_feats: - validate = K2SpeechRecognitionDataset( - cut_transforms=transforms, - input_strategy=OnTheFlyFeatures( - Fbank(FbankConfig(num_mel_bins=80)) - ), - return_cuts=self.args.return_cuts, - ) - else: - validate = K2SpeechRecognitionDataset( - cut_transforms=transforms, - return_cuts=self.args.return_cuts, - ) - valid_sampler = SingleCutSampler( - cuts_valid, - max_duration=self.args.max_duration, - shuffle=False, - ) - logging.info("About to create dev dataloader") - valid_dl = DataLoader( - validate, - sampler=valid_sampler, - batch_size=None, - num_workers=2, - persistent_workers=False, - ) - - return valid_dl - - def test_dataloaders(self) -> Union[DataLoader, List[DataLoader]]: - cuts = self.test_cuts() - is_list = isinstance(cuts, list) - test_loaders = [] - if not is_list: - cuts = [cuts] - - for cuts_test in cuts: - logging.debug("About to create test dataset") - test = K2SpeechRecognitionDataset( - input_strategy=OnTheFlyFeatures( - Fbank(FbankConfig(num_mel_bins=80)) - ) - if self.args.on_the_fly_feats - else PrecomputedFeatures(), - return_cuts=self.args.return_cuts, - ) - sampler = SingleCutSampler( - cuts_test, max_duration=self.args.max_duration - ) - logging.debug("About to create test dataloader") - test_dl = DataLoader( - test, batch_size=None, sampler=sampler, num_workers=1 - ) - test_loaders.append(test_dl) - - if is_list: - return test_loaders - else: - return test_loaders[0] - - @lru_cache() - def train_cuts(self) -> CutSet: - logging.info("About to get train cuts") - cuts_train = load_manifest(self.args.feature_dir / "cuts_TRAIN.json.gz") - - return cuts_train - - @lru_cache() - def valid_cuts(self) -> CutSet: - logging.info("About to get dev cuts") - cuts_valid = load_manifest(self.args.feature_dir / "cuts_DEV.json.gz") - - return cuts_valid - - @lru_cache() - def test_cuts(self) -> CutSet: - logging.debug("About to get test cuts") - cuts_test = load_manifest(self.args.feature_dir / "cuts_TEST.json.gz") - - return cuts_test diff --git a/egs/timit/ASR/tdnn_ligru_ctc/asr_datamodule.py b/egs/timit/ASR/tdnn_ligru_ctc/asr_datamodule.py new file mode 120000 index 000000000..fa1b8cca3 --- /dev/null +++ b/egs/timit/ASR/tdnn_ligru_ctc/asr_datamodule.py @@ -0,0 +1 @@ +../tdnn_lstm_ctc/asr_datamodule.py \ No newline at end of 
file diff --git a/egs/timit/ASR/tdnn_lstm_ctc/asr_datamodule.py b/egs/timit/ASR/tdnn_lstm_ctc/asr_datamodule.py index b0e28d05d..a7029f514 100644 --- a/egs/timit/ASR/tdnn_lstm_ctc/asr_datamodule.py +++ b/egs/timit/ASR/tdnn_lstm_ctc/asr_datamodule.py @@ -1,5 +1,5 @@ # Copyright 2021 Piotr Żelasko -# 2021 Xiaomi Corp. (authors: Mingshuang Luo) +# 2022 Xiaomi Corporation (Author: Mingshuang Luo) # # See ../../../../LICENSE for clarification regarding multiple authors # @@ -17,6 +17,7 @@ import argparse +import inspect import logging from functools import lru_cache from pathlib import Path @@ -171,9 +172,19 @@ class TimitAsrDataModule(DataModule): ) ] + transforms + # Set the value of num_frame_masks according to Lhotse's version. + # In different Lhotse's versions, the default of num_frame_masks is + # different. + num_frame_masks = 10 + num_frame_masks_parameter = inspect.signature( + SpecAugment.__init__ + ).parameters["num_frame_masks"] + if num_frame_masks_parameter.default == 1: + num_frame_masks = 2 + logging.info(f"Num frame mask: {num_frame_masks}") input_transforms = [ SpecAugment( - num_frame_masks=2, + num_frame_masks=num_frame_masks, features_mask_size=27, num_feature_masks=2, frames_mask_size=100, From a7643301ecc7ca704b1fdc44b30b435fceda663f Mon Sep 17 00:00:00 2001 From: Fangjun Kuang Date: Tue, 15 Mar 2022 15:34:21 +0800 Subject: [PATCH 24/25] Cache pip packages for GitHub actions (#253) * Cache pip packages in GitHub actions. --- .../workflows/run-librispeech-2022-03-12.yml | 99 ++++++++++++------- .../run-pretrained-conformer-ctc.yml | 60 ++++++----- ...-transducer-stateless-librispeech-100h.yml | 70 ++++++++----- ...r-stateless-librispeech-multi-datasets.yml | 70 ++++++++----- ...ransducer-stateless-modified-2-aishell.yml | 70 ++++++++----- ...-transducer-stateless-modified-aishell.yml | 70 ++++++++----- .../run-pretrained-transducer-stateless.yml | 70 ++++++++----- .../workflows/run-pretrained-transducer.yml | 60 ++++++----- .github/workflows/run-yesno-recipe.yml | 20 ++-- requirements-ci.txt | 21 ++++ 10 files changed, 390 insertions(+), 220 deletions(-) create mode 100644 requirements-ci.txt diff --git a/.github/workflows/run-librispeech-2022-03-12.yml b/.github/workflows/run-librispeech-2022-03-12.yml index 74052312e..221104f8f 100644 --- a/.github/workflows/run-librispeech-2022-03-12.yml +++ b/.github/workflows/run-librispeech-2022-03-12.yml @@ -32,9 +32,6 @@ jobs: matrix: os: [ubuntu-18.04] python-version: [3.7, 3.8, 3.9] - torch: ["1.10.0"] - torchaudio: ["0.10.0"] - k2-version: ["1.9.dev20211101"] fail-fast: false @@ -43,49 +40,67 @@ jobs: with: fetch-depth: 0 - - name: Setup Python ${{ matrix.python-version }} - uses: actions/setup-python@v1 - with: - python-version: ${{ matrix.python-version }} - - - name: Install Python dependencies - run: | - python3 -m pip install --upgrade pip pytest - # numpy 1.20.x does not support python 3.6 - pip install numpy==1.19 - pip install torch==${{ matrix.torch }}+cpu torchaudio==${{ matrix.torchaudio }}+cpu -f https://download.pytorch.org/whl/cpu/torch_stable.html - pip install k2==${{ matrix.k2-version }}+cpu.torch${{ matrix.torch }} -f https://k2-fsa.org/nightly/ - - python3 -m pip install git+https://github.com/lhotse-speech/lhotse - python3 -m pip install kaldifeat - # We are in ./icefall and there is a file: requirements.txt in it - pip install -r requirements.txt - - name: Install graphviz shell: bash run: | - python3 -m pip install -qq graphviz sudo apt-get -qq install graphviz + - name: Setup Python ${{ 
matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + cache: 'pip' + cache-dependency-path: '**/requirements-ci.txt' + + - name: Install Python dependencies + run: | + grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install + + - name: Cache kaldifeat + id: my-cache + uses: actions/cache@v2 + with: + path: | + ~/tmp/kaldifeat + key: cache-tmp-${{ matrix.python-version }} + + - name: Install kaldifeat + if: steps.my-cache.outputs.cache-hit != 'true' + shell: bash + run: | + mkdir -p ~/tmp + cd ~/tmp + git clone https://github.com/csukuangfj/kaldifeat + cd kaldifeat + mkdir build + cd build + cmake -DCMAKE_BUILD_TYPE=Release .. + make -j2 _kaldifeat + - name: Download pre-trained model shell: bash run: | - sudo apt-get -qq install git-lfs tree sox - cd egs/librispeech/ASR - mkdir tmp - cd tmp + sudo apt-get -qq install git-lfs + mkdir -p ~/tmp + cd ~/tmp git lfs install git clone https://huggingface.co/csukuangfj/icefall-asr-librispeech-pruned-transducer-stateless-2022-03-12 - cd .. - tree tmp - soxi tmp/icefall-asr-librispeech-pruned-transducer-stateless-2022-03-12/test_wavs/*.wav - ls -lh tmp/icefall-asr-librispeech-pruned-transducer-stateless-2022-03-12/test_wavs/*.wav + + - name: Display test files + shell: bash + run: | + sudo apt-get -qq install tree sox + tree ~/tmp/icefall-asr-librispeech-pruned-transducer-stateless-2022-03-12 + soxi ~/tmp/icefall-asr-librispeech-pruned-transducer-stateless-2022-03-12/test_wavs/*.wav + ls -lh ~/tmp/icefall-asr-librispeech-pruned-transducer-stateless-2022-03-12/test_wavs/*.wav - name: Run greedy search decoding (max-sym-per-frame 1) shell: bash run: | - export PYTHONPATH=$PWD:PYTHONPATH - dir=./tmp/icefall-asr-librispeech-pruned-transducer-stateless-2022-03-12 + export PYTHONPATH=$PWD:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH + dir=~/tmp/icefall-asr-librispeech-pruned-transducer-stateless-2022-03-12 cd egs/librispeech/ASR ./pruned_transducer_stateless/pretrained.py \ --method greedy_search \ @@ -99,8 +114,10 @@ jobs: - name: Run greedy search decoding (max-sym-per-frame 2) shell: bash run: | - export PYTHONPATH=$PWD:PYTHONPATH - dir=./tmp/icefall-asr-librispeech-pruned-transducer-stateless-2022-03-12 + export PYTHONPATH=$PWD:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH + dir=~/tmp/icefall-asr-librispeech-pruned-transducer-stateless-2022-03-12 cd egs/librispeech/ASR ./pruned_transducer_stateless/pretrained.py \ --method greedy_search \ @@ -114,8 +131,10 @@ jobs: - name: Run greedy search decoding (max-sym-per-frame 3) shell: bash run: | - export PYTHONPATH=$PWD:PYTHONPATH - dir=./tmp/icefall-asr-librispeech-pruned-transducer-stateless-2022-03-12 + export PYTHONPATH=$PWD:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH + dir=~/tmp/icefall-asr-librispeech-pruned-transducer-stateless-2022-03-12 cd egs/librispeech/ASR ./pruned_transducer_stateless/pretrained.py \ --method greedy_search \ @@ -130,7 +149,9 @@ jobs: shell: bash run: | export PYTHONPATH=$PWD:$PYTHONPATH - dir=./tmp/icefall-asr-librispeech-pruned-transducer-stateless-2022-03-12 + export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH + 
dir=~/tmp/icefall-asr-librispeech-pruned-transducer-stateless-2022-03-12 cd egs/librispeech/ASR ./pruned_transducer_stateless/pretrained.py \ --method beam_search \ @@ -145,7 +166,9 @@ jobs: shell: bash run: | export PYTHONPATH=$PWD:$PYTHONPATH - dir=./tmp/icefall-asr-librispeech-pruned-transducer-stateless-2022-03-12 + export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH + dir=~/tmp/icefall-asr-librispeech-pruned-transducer-stateless-2022-03-12 cd egs/librispeech/ASR ./pruned_transducer_stateless/pretrained.py \ --method modified_beam_search \ diff --git a/.github/workflows/run-pretrained-conformer-ctc.yml b/.github/workflows/run-pretrained-conformer-ctc.yml index 1758a3521..cd24c9c44 100644 --- a/.github/workflows/run-pretrained-conformer-ctc.yml +++ b/.github/workflows/run-pretrained-conformer-ctc.yml @@ -31,9 +31,6 @@ jobs: matrix: os: [ubuntu-18.04] python-version: [3.7, 3.8, 3.9] - torch: ["1.10.0"] - torchaudio: ["0.10.0"] - k2-version: ["1.9.dev20211101"] fail-fast: false @@ -42,30 +39,43 @@ jobs: with: fetch-depth: 0 - - name: Setup Python ${{ matrix.python-version }} - uses: actions/setup-python@v1 - with: - python-version: ${{ matrix.python-version }} - - - name: Install Python dependencies - run: | - python3 -m pip install --upgrade pip pytest - # numpy 1.20.x does not support python 3.6 - pip install numpy==1.19 - pip install torch==${{ matrix.torch }}+cpu torchaudio==${{ matrix.torchaudio }}+cpu -f https://download.pytorch.org/whl/cpu/torch_stable.html - pip install k2==${{ matrix.k2-version }}+cpu.torch${{ matrix.torch }} -f https://k2-fsa.org/nightly/ - - python3 -m pip install git+https://github.com/lhotse-speech/lhotse - python3 -m pip install kaldifeat - # We are in ./icefall and there is a file: requirements.txt in it - pip install -r requirements.txt - - name: Install graphviz shell: bash run: | - python3 -m pip install -qq graphviz sudo apt-get -qq install graphviz + - name: Setup Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + cache: 'pip' + cache-dependency-path: '**/requirements-ci.txt' + + - name: Install Python dependencies + run: | + grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install + + - name: Cache kaldifeat + id: my-cache + uses: actions/cache@v2 + with: + path: | + ~/tmp/kaldifeat + key: cache-tmp-${{ matrix.python-version }} + + - name: Install kaldifeat + if: steps.my-cache.outputs.cache-hit != 'true' + shell: bash + run: | + mkdir -p ~/tmp + cd ~/tmp + git clone https://github.com/csukuangfj/kaldifeat + cd kaldifeat + mkdir build + cd build + cmake -DCMAKE_BUILD_TYPE=Release .. 
+ make -j2 _kaldifeat + - name: Download pre-trained model shell: bash run: | @@ -83,7 +93,9 @@ jobs: - name: Run CTC decoding shell: bash run: | - export PYTHONPATH=$PWD:PYTHONPATH + export PYTHONPATH=$PWD:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH cd egs/librispeech/ASR ./conformer_ctc/pretrained.py \ --num-classes 500 \ @@ -98,6 +110,8 @@ jobs: shell: bash run: | export PYTHONPATH=$PWD:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH cd egs/librispeech/ASR ./conformer_ctc/pretrained.py \ --num-classes 500 \ diff --git a/.github/workflows/run-pretrained-transducer-stateless-librispeech-100h.yml b/.github/workflows/run-pretrained-transducer-stateless-librispeech-100h.yml index efea5366b..b827ec82e 100644 --- a/.github/workflows/run-pretrained-transducer-stateless-librispeech-100h.yml +++ b/.github/workflows/run-pretrained-transducer-stateless-librispeech-100h.yml @@ -31,9 +31,6 @@ jobs: matrix: os: [ubuntu-18.04] python-version: [3.7, 3.8, 3.9] - torch: ["1.10.0"] - torchaudio: ["0.10.0"] - k2-version: ["1.9.dev20211101"] fail-fast: false @@ -42,30 +39,43 @@ jobs: with: fetch-depth: 0 - - name: Setup Python ${{ matrix.python-version }} - uses: actions/setup-python@v1 - with: - python-version: ${{ matrix.python-version }} - - - name: Install Python dependencies - run: | - python3 -m pip install --upgrade pip pytest - # numpy 1.20.x does not support python 3.6 - pip install numpy==1.19 - pip install torch==${{ matrix.torch }}+cpu torchaudio==${{ matrix.torchaudio }}+cpu -f https://download.pytorch.org/whl/cpu/torch_stable.html - pip install k2==${{ matrix.k2-version }}+cpu.torch${{ matrix.torch }} -f https://k2-fsa.org/nightly/ - - python3 -m pip install git+https://github.com/lhotse-speech/lhotse - python3 -m pip install kaldifeat - # We are in ./icefall and there is a file: requirements.txt in it - pip install -r requirements.txt - - name: Install graphviz shell: bash run: | - python3 -m pip install -qq graphviz sudo apt-get -qq install graphviz + - name: Setup Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + cache: 'pip' + cache-dependency-path: '**/requirements-ci.txt' + + - name: Install Python dependencies + run: | + grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install + + - name: Cache kaldifeat + id: my-cache + uses: actions/cache@v2 + with: + path: | + ~/tmp/kaldifeat + key: cache-tmp-${{ matrix.python-version }} + + - name: Install kaldifeat + if: steps.my-cache.outputs.cache-hit != 'true' + shell: bash + run: | + mkdir -p ~/tmp + cd ~/tmp + git clone https://github.com/csukuangfj/kaldifeat + cd kaldifeat + mkdir build + cd build + cmake -DCMAKE_BUILD_TYPE=Release .. 
+ make -j2 _kaldifeat + - name: Download pre-trained model shell: bash run: | @@ -84,7 +94,9 @@ jobs: - name: Run greedy search decoding (max-sym-per-frame 1) shell: bash run: | - export PYTHONPATH=$PWD:PYTHONPATH + export PYTHONPATH=$PWD:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH cd egs/librispeech/ASR ./transducer_stateless_multi_datasets/pretrained.py \ --method greedy_search \ @@ -98,7 +110,9 @@ jobs: - name: Run greedy search decoding (max-sym-per-frame 2) shell: bash run: | - export PYTHONPATH=$PWD:PYTHONPATH + export PYTHONPATH=$PWD:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH cd egs/librispeech/ASR ./transducer_stateless_multi_datasets/pretrained.py \ --method greedy_search \ @@ -112,7 +126,9 @@ jobs: - name: Run greedy search decoding (max-sym-per-frame 3) shell: bash run: | - export PYTHONPATH=$PWD:PYTHONPATH + export PYTHONPATH=$PWD:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH cd egs/librispeech/ASR ./transducer_stateless_multi_datasets/pretrained.py \ --method greedy_search \ @@ -127,6 +143,8 @@ jobs: shell: bash run: | export PYTHONPATH=$PWD:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH cd egs/librispeech/ASR ./transducer_stateless_multi_datasets/pretrained.py \ --method beam_search \ @@ -141,6 +159,8 @@ jobs: shell: bash run: | export PYTHONPATH=$PWD:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH cd egs/librispeech/ASR ./transducer_stateless_multi_datasets/pretrained.py \ --method modified_beam_search \ diff --git a/.github/workflows/run-pretrained-transducer-stateless-librispeech-multi-datasets.yml b/.github/workflows/run-pretrained-transducer-stateless-librispeech-multi-datasets.yml index ccf9028cb..ffd9bdaec 100644 --- a/.github/workflows/run-pretrained-transducer-stateless-librispeech-multi-datasets.yml +++ b/.github/workflows/run-pretrained-transducer-stateless-librispeech-multi-datasets.yml @@ -31,9 +31,6 @@ jobs: matrix: os: [ubuntu-18.04] python-version: [3.7, 3.8, 3.9] - torch: ["1.10.0"] - torchaudio: ["0.10.0"] - k2-version: ["1.9.dev20211101"] fail-fast: false @@ -42,30 +39,43 @@ jobs: with: fetch-depth: 0 - - name: Setup Python ${{ matrix.python-version }} - uses: actions/setup-python@v1 - with: - python-version: ${{ matrix.python-version }} - - - name: Install Python dependencies - run: | - python3 -m pip install --upgrade pip pytest - # numpy 1.20.x does not support python 3.6 - pip install numpy==1.19 - pip install torch==${{ matrix.torch }}+cpu torchaudio==${{ matrix.torchaudio }}+cpu -f https://download.pytorch.org/whl/cpu/torch_stable.html - pip install k2==${{ matrix.k2-version }}+cpu.torch${{ matrix.torch }} -f https://k2-fsa.org/nightly/ - - python3 -m pip install git+https://github.com/lhotse-speech/lhotse - python3 -m pip install kaldifeat - # We are in ./icefall and there is a file: requirements.txt in it - pip install -r requirements.txt - - name: Install graphviz shell: bash run: | - python3 -m pip install -qq graphviz sudo apt-get -qq install graphviz + - name: Setup Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + cache: 'pip' + 
cache-dependency-path: '**/requirements-ci.txt' + + - name: Install Python dependencies + run: | + grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install + + - name: Cache kaldifeat + id: my-cache + uses: actions/cache@v2 + with: + path: | + ~/tmp/kaldifeat + key: cache-tmp-${{ matrix.python-version }} + + - name: Install kaldifeat + if: steps.my-cache.outputs.cache-hit != 'true' + shell: bash + run: | + mkdir -p ~/tmp + cd ~/tmp + git clone https://github.com/csukuangfj/kaldifeat + cd kaldifeat + mkdir build + cd build + cmake -DCMAKE_BUILD_TYPE=Release .. + make -j2 _kaldifeat + - name: Download pre-trained model shell: bash run: | @@ -85,7 +95,9 @@ jobs: - name: Run greedy search decoding (max-sym-per-frame 1) shell: bash run: | - export PYTHONPATH=$PWD:PYTHONPATH + export PYTHONPATH=$PWD:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH cd egs/librispeech/ASR ./transducer_stateless_multi_datasets/pretrained.py \ --method greedy_search \ @@ -99,7 +111,9 @@ jobs: - name: Run greedy search decoding (max-sym-per-frame 2) shell: bash run: | - export PYTHONPATH=$PWD:PYTHONPATH + export PYTHONPATH=$PWD:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH cd egs/librispeech/ASR ./transducer_stateless_multi_datasets/pretrained.py \ --method greedy_search \ @@ -113,7 +127,9 @@ jobs: - name: Run greedy search decoding (max-sym-per-frame 3) shell: bash run: | - export PYTHONPATH=$PWD:PYTHONPATH + export PYTHONPATH=$PWD:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH cd egs/librispeech/ASR ./transducer_stateless_multi_datasets/pretrained.py \ --method greedy_search \ @@ -128,6 +144,8 @@ jobs: shell: bash run: | export PYTHONPATH=$PWD:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH cd egs/librispeech/ASR ./transducer_stateless_multi_datasets/pretrained.py \ --method beam_search \ @@ -143,6 +161,8 @@ jobs: shell: bash run: | export PYTHONPATH=$PWD:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH cd egs/librispeech/ASR ./transducer_stateless_multi_datasets/pretrained.py \ --method modified_beam_search \ diff --git a/.github/workflows/run-pretrained-transducer-stateless-modified-2-aishell.yml b/.github/workflows/run-pretrained-transducer-stateless-modified-2-aishell.yml index c27ffc374..12652a22d 100644 --- a/.github/workflows/run-pretrained-transducer-stateless-modified-2-aishell.yml +++ b/.github/workflows/run-pretrained-transducer-stateless-modified-2-aishell.yml @@ -31,9 +31,6 @@ jobs: matrix: os: [ubuntu-18.04] python-version: [3.7, 3.8, 3.9] - torch: ["1.10.0"] - torchaudio: ["0.10.0"] - k2-version: ["1.9.dev20211101"] fail-fast: false @@ -42,30 +39,43 @@ jobs: with: fetch-depth: 0 - - name: Setup Python ${{ matrix.python-version }} - uses: actions/setup-python@v1 - with: - python-version: ${{ matrix.python-version }} - - - name: Install Python dependencies - run: | - python3 -m pip install --upgrade pip pytest - # numpy 1.20.x does not support python 3.6 - pip install numpy==1.19 - pip install torch==${{ matrix.torch }}+cpu torchaudio==${{ matrix.torchaudio }}+cpu -f https://download.pytorch.org/whl/cpu/torch_stable.html - pip install k2==${{ matrix.k2-version 
}}+cpu.torch${{ matrix.torch }} -f https://k2-fsa.org/nightly/ - - python3 -m pip install git+https://github.com/lhotse-speech/lhotse - python3 -m pip install kaldifeat - # We are in ./icefall and there is a file: requirements.txt in it - pip install -r requirements.txt - - name: Install graphviz shell: bash run: | - python3 -m pip install -qq graphviz sudo apt-get -qq install graphviz + - name: Setup Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + cache: 'pip' + cache-dependency-path: '**/requirements-ci.txt' + + - name: Install Python dependencies + run: | + grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install + + - name: Cache kaldifeat + id: my-cache + uses: actions/cache@v2 + with: + path: | + ~/tmp/kaldifeat + key: cache-tmp-${{ matrix.python-version }} + + - name: Install kaldifeat + if: steps.my-cache.outputs.cache-hit != 'true' + shell: bash + run: | + mkdir -p ~/tmp + cd ~/tmp + git clone https://github.com/csukuangfj/kaldifeat + cd kaldifeat + mkdir build + cd build + cmake -DCMAKE_BUILD_TYPE=Release .. + make -j2 _kaldifeat + - name: Download pre-trained model shell: bash run: | @@ -84,7 +94,9 @@ jobs: - name: Run greedy search decoding (max-sym-per-frame 1) shell: bash run: | - export PYTHONPATH=$PWD:PYTHONPATH + export PYTHONPATH=$PWD:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH cd egs/aishell/ASR ./transducer_stateless_modified-2/pretrained.py \ --method greedy_search \ @@ -98,7 +110,9 @@ jobs: - name: Run greedy search decoding (max-sym-per-frame 2) shell: bash run: | - export PYTHONPATH=$PWD:PYTHONPATH + export PYTHONPATH=$PWD:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH cd egs/aishell/ASR ./transducer_stateless_modified-2/pretrained.py \ --method greedy_search \ @@ -112,7 +126,9 @@ jobs: - name: Run greedy search decoding (max-sym-per-frame 3) shell: bash run: | - export PYTHONPATH=$PWD:PYTHONPATH + export PYTHONPATH=$PWD:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH cd egs/aishell/ASR ./transducer_stateless_modified-2/pretrained.py \ --method greedy_search \ @@ -127,6 +143,8 @@ jobs: shell: bash run: | export PYTHONPATH=$PWD:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH cd egs/aishell/ASR ./transducer_stateless_modified-2/pretrained.py \ --method beam_search \ @@ -142,6 +160,8 @@ jobs: shell: bash run: | export PYTHONPATH=$PWD:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH cd egs/aishell/ASR ./transducer_stateless_modified-2/pretrained.py \ --method modified_beam_search \ diff --git a/.github/workflows/run-pretrained-transducer-stateless-modified-aishell.yml b/.github/workflows/run-pretrained-transducer-stateless-modified-aishell.yml index 2e38abb5a..aa69d1500 100644 --- a/.github/workflows/run-pretrained-transducer-stateless-modified-aishell.yml +++ b/.github/workflows/run-pretrained-transducer-stateless-modified-aishell.yml @@ -31,9 +31,6 @@ jobs: matrix: os: [ubuntu-18.04] python-version: [3.7, 3.8, 3.9] - torch: ["1.10.0"] - torchaudio: ["0.10.0"] - k2-version: ["1.9.dev20211101"] fail-fast: false @@ -42,30 +39,43 @@ jobs: with: fetch-depth: 
0 - - name: Setup Python ${{ matrix.python-version }} - uses: actions/setup-python@v1 - with: - python-version: ${{ matrix.python-version }} - - - name: Install Python dependencies - run: | - python3 -m pip install --upgrade pip pytest - # numpy 1.20.x does not support python 3.6 - pip install numpy==1.19 - pip install torch==${{ matrix.torch }}+cpu torchaudio==${{ matrix.torchaudio }}+cpu -f https://download.pytorch.org/whl/cpu/torch_stable.html - pip install k2==${{ matrix.k2-version }}+cpu.torch${{ matrix.torch }} -f https://k2-fsa.org/nightly/ - - python3 -m pip install git+https://github.com/lhotse-speech/lhotse - python3 -m pip install kaldifeat - # We are in ./icefall and there is a file: requirements.txt in it - pip install -r requirements.txt - - name: Install graphviz shell: bash run: | - python3 -m pip install -qq graphviz sudo apt-get -qq install graphviz + - name: Setup Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + cache: 'pip' + cache-dependency-path: '**/requirements-ci.txt' + + - name: Install Python dependencies + run: | + grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install + + - name: Cache kaldifeat + id: my-cache + uses: actions/cache@v2 + with: + path: | + ~/tmp/kaldifeat + key: cache-tmp-${{ matrix.python-version }} + + - name: Install kaldifeat + if: steps.my-cache.outputs.cache-hit != 'true' + shell: bash + run: | + mkdir -p ~/tmp + cd ~/tmp + git clone https://github.com/csukuangfj/kaldifeat + cd kaldifeat + mkdir build + cd build + cmake -DCMAKE_BUILD_TYPE=Release .. + make -j2 _kaldifeat + - name: Download pre-trained model shell: bash run: | @@ -84,7 +94,9 @@ jobs: - name: Run greedy search decoding (max-sym-per-frame 1) shell: bash run: | - export PYTHONPATH=$PWD:PYTHONPATH + export PYTHONPATH=$PWD:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH cd egs/aishell/ASR ./transducer_stateless_modified/pretrained.py \ --method greedy_search \ @@ -98,7 +110,9 @@ jobs: - name: Run greedy search decoding (max-sym-per-frame 2) shell: bash run: | - export PYTHONPATH=$PWD:PYTHONPATH + export PYTHONPATH=$PWD:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH cd egs/aishell/ASR ./transducer_stateless_modified/pretrained.py \ --method greedy_search \ @@ -112,7 +126,9 @@ jobs: - name: Run greedy search decoding (max-sym-per-frame 3) shell: bash run: | - export PYTHONPATH=$PWD:PYTHONPATH + export PYTHONPATH=$PWD:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH cd egs/aishell/ASR ./transducer_stateless_modified/pretrained.py \ --method greedy_search \ @@ -127,6 +143,8 @@ jobs: shell: bash run: | export PYTHONPATH=$PWD:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH cd egs/aishell/ASR ./transducer_stateless_modified/pretrained.py \ --method beam_search \ @@ -142,6 +160,8 @@ jobs: shell: bash run: | export PYTHONPATH=$PWD:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH cd egs/aishell/ASR ./transducer_stateless_modified/pretrained.py \ --method modified_beam_search \ diff --git a/.github/workflows/run-pretrained-transducer-stateless.yml 
b/.github/workflows/run-pretrained-transducer-stateless.yml index de66b90c5..535e46261 100644 --- a/.github/workflows/run-pretrained-transducer-stateless.yml +++ b/.github/workflows/run-pretrained-transducer-stateless.yml @@ -31,9 +31,6 @@ jobs: matrix: os: [ubuntu-18.04] python-version: [3.7, 3.8, 3.9] - torch: ["1.10.0"] - torchaudio: ["0.10.0"] - k2-version: ["1.9.dev20211101"] fail-fast: false @@ -42,30 +39,43 @@ jobs: with: fetch-depth: 0 - - name: Setup Python ${{ matrix.python-version }} - uses: actions/setup-python@v1 - with: - python-version: ${{ matrix.python-version }} - - - name: Install Python dependencies - run: | - python3 -m pip install --upgrade pip pytest - # numpy 1.20.x does not support python 3.6 - pip install numpy==1.19 - pip install torch==${{ matrix.torch }}+cpu torchaudio==${{ matrix.torchaudio }}+cpu -f https://download.pytorch.org/whl/cpu/torch_stable.html - pip install k2==${{ matrix.k2-version }}+cpu.torch${{ matrix.torch }} -f https://k2-fsa.org/nightly/ - - python3 -m pip install git+https://github.com/lhotse-speech/lhotse - python3 -m pip install kaldifeat - # We are in ./icefall and there is a file: requirements.txt in it - pip install -r requirements.txt - - name: Install graphviz shell: bash run: | - python3 -m pip install -qq graphviz sudo apt-get -qq install graphviz + - name: Setup Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + cache: 'pip' + cache-dependency-path: '**/requirements-ci.txt' + + - name: Install Python dependencies + run: | + grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install + + - name: Cache kaldifeat + id: my-cache + uses: actions/cache@v2 + with: + path: | + ~/tmp/kaldifeat + key: cache-tmp-${{ matrix.python-version }} + + - name: Install kaldifeat + if: steps.my-cache.outputs.cache-hit != 'true' + shell: bash + run: | + mkdir -p ~/tmp + cd ~/tmp + git clone https://github.com/csukuangfj/kaldifeat + cd kaldifeat + mkdir build + cd build + cmake -DCMAKE_BUILD_TYPE=Release .. 
+ make -j2 _kaldifeat + - name: Download pre-trained model shell: bash run: | @@ -83,7 +93,9 @@ jobs: - name: Run greedy search decoding (max-sym-per-frame 1) shell: bash run: | - export PYTHONPATH=$PWD:PYTHONPATH + export PYTHONPATH=$PWD:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH cd egs/librispeech/ASR ./transducer_stateless/pretrained.py \ --method greedy_search \ @@ -97,7 +109,9 @@ jobs: - name: Run greedy search decoding (max-sym-per-frame 2) shell: bash run: | - export PYTHONPATH=$PWD:PYTHONPATH + export PYTHONPATH=$PWD:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH cd egs/librispeech/ASR ./transducer_stateless/pretrained.py \ --method greedy_search \ @@ -111,7 +125,9 @@ jobs: - name: Run greedy search decoding (max-sym-per-frame 3) shell: bash run: | - export PYTHONPATH=$PWD:PYTHONPATH + export PYTHONPATH=$PWD:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH cd egs/librispeech/ASR ./transducer_stateless/pretrained.py \ --method greedy_search \ @@ -126,6 +142,8 @@ jobs: shell: bash run: | export PYTHONPATH=$PWD:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH cd egs/librispeech/ASR ./transducer_stateless/pretrained.py \ --method beam_search \ @@ -140,6 +158,8 @@ jobs: shell: bash run: | export PYTHONPATH=$PWD:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH cd egs/librispeech/ASR ./transducer_stateless/pretrained.py \ --method modified_beam_search \ diff --git a/.github/workflows/run-pretrained-transducer.yml b/.github/workflows/run-pretrained-transducer.yml index f0ebddba3..41e4cfe0d 100644 --- a/.github/workflows/run-pretrained-transducer.yml +++ b/.github/workflows/run-pretrained-transducer.yml @@ -31,9 +31,6 @@ jobs: matrix: os: [ubuntu-18.04] python-version: [3.7, 3.8, 3.9] - torch: ["1.10.0"] - torchaudio: ["0.10.0"] - k2-version: ["1.9.dev20211101"] fail-fast: false @@ -42,30 +39,43 @@ jobs: with: fetch-depth: 0 - - name: Setup Python ${{ matrix.python-version }} - uses: actions/setup-python@v1 - with: - python-version: ${{ matrix.python-version }} - - - name: Install Python dependencies - run: | - python3 -m pip install --upgrade pip pytest - # numpy 1.20.x does not support python 3.6 - pip install numpy==1.19 - pip install torch==${{ matrix.torch }}+cpu torchaudio==${{ matrix.torchaudio }}+cpu -f https://download.pytorch.org/whl/cpu/torch_stable.html - pip install k2==${{ matrix.k2-version }}+cpu.torch${{ matrix.torch }} -f https://k2-fsa.org/nightly/ - - python3 -m pip install git+https://github.com/lhotse-speech/lhotse - python3 -m pip install kaldifeat - # We are in ./icefall and there is a file: requirements.txt in it - pip install -r requirements.txt - - name: Install graphviz shell: bash run: | - python3 -m pip install -qq graphviz sudo apt-get -qq install graphviz + - name: Setup Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + cache: 'pip' + cache-dependency-path: '**/requirements-ci.txt' + + - name: Install Python dependencies + run: | + grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install + + - name: Cache kaldifeat + id: my-cache + uses: 
actions/cache@v2 + with: + path: | + ~/tmp/kaldifeat + key: cache-tmp-${{ matrix.python-version }} + + - name: Install kaldifeat + if: steps.my-cache.outputs.cache-hit != 'true' + shell: bash + run: | + mkdir -p ~/tmp + cd ~/tmp + git clone https://github.com/csukuangfj/kaldifeat + cd kaldifeat + mkdir build + cd build + cmake -DCMAKE_BUILD_TYPE=Release .. + make -j2 _kaldifeat + - name: Download pre-trained model shell: bash run: | @@ -84,7 +94,9 @@ jobs: - name: Run greedy search decoding shell: bash run: | - export PYTHONPATH=$PWD:PYTHONPATH + export PYTHONPATH=$PWD:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH cd egs/librispeech/ASR ./transducer/pretrained.py \ --method greedy_search \ @@ -98,6 +110,8 @@ jobs: shell: bash run: | export PYTHONPATH=$PWD:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH + export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH cd egs/librispeech/ASR ./transducer/pretrained.py \ --method beam_search \ diff --git a/.github/workflows/run-yesno-recipe.yml b/.github/workflows/run-yesno-recipe.yml index 98b2e4ebd..38c36a7c6 100644 --- a/.github/workflows/run-yesno-recipe.yml +++ b/.github/workflows/run-yesno-recipe.yml @@ -33,9 +33,6 @@ jobs: # TODO: enable macOS for CPU testing os: [ubuntu-18.04] python-version: [3.8] - torch: ["1.10.0"] - torchaudio: ["0.10.0"] - k2-version: ["1.9.dev20211101"] fail-fast: false steps: @@ -43,10 +40,17 @@ jobs: with: fetch-depth: 0 + - name: Install graphviz + shell: bash + run: | + sudo apt-get -qq install graphviz + - name: Setup Python ${{ matrix.python-version }} - uses: actions/setup-python@v1 + uses: actions/setup-python@v2 with: python-version: ${{ matrix.python-version }} + cache: 'pip' + cache-dependency-path: '**/requirements-ci.txt' - name: Install libnsdfile and libsox if: startsWith(matrix.os, 'ubuntu') @@ -57,13 +61,7 @@ jobs: - name: Install Python dependencies run: | - python3 -m pip install -U pip - pip install torch==${{ matrix.torch }}+cpu torchaudio==${{ matrix.torchaudio }}+cpu -f https://download.pytorch.org/whl/cpu/torch_stable.html - pip install k2==${{ matrix.k2-version }}+cpu.torch${{ matrix.torch }} -f https://k2-fsa.org/nightly/ - python3 -m pip install git+https://github.com/lhotse-speech/lhotse - - # We are in ./icefall and there is a file: requirements.txt in it - python3 -m pip install -r requirements.txt + grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install - name: Run yesno recipe shell: bash diff --git a/requirements-ci.txt b/requirements-ci.txt new file mode 100644 index 000000000..b5ee6b51c --- /dev/null +++ b/requirements-ci.txt @@ -0,0 +1,21 @@ +# Usage: grep -v '^#' requirements-ci.txt | xargs -n 1 -L 1 pip install +# dependencies for GitHub actions +# +# See https://github.com/actions/setup-python#caching-packages-dependencies + +# numpy 1.20.x does not support python 3.6 +numpy==1.19 +pytest==7.1.0 +graphviz==0.19.1 + +-f https://download.pytorch.org/whl/cpu/torch_stable.html torch==1.10.0+cpu +-f https://download.pytorch.org/whl/cpu/torch_stable.html torchaudio==0.10.0+cpu + +-f https://k2-fsa.org/nightly/ k2==1.9.dev20211101+cpu.torch1.10.0 + +git+https://github.com/lhotse-speech/lhotse +kaldilm==1.11 +kaldialign==0.2 +sentencepiece==0.1.96 +tensorboard==2.8.0 +typeguard==2.13.3 From 518ec6414a676ec0ce583d4e728ea010efc7e2aa Mon Sep 17 00:00:00 2001 From: Mingshuang Luo <37799481+luomingshuang@users.noreply.github.com> Date: Wed, 16 Mar 2022 20:17:45 +0800 
Subject: [PATCH 25/25] Update diagnostics.py (#254) * update diagnostics.py * do some changes --- icefall/diagnostics.py | 167 ++++++++++++++++++++++++++++------------- 1 file changed, 115 insertions(+), 52 deletions(-) diff --git a/icefall/diagnostics.py b/icefall/diagnostics.py index 944f11f64..fa9b98fa0 100644 --- a/icefall/diagnostics.py +++ b/icefall/diagnostics.py @@ -1,5 +1,6 @@ # Copyright 2022 Xiaomi Corp. (authors: Daniel Povey -# Zengwei Yao) +# Zengwei Yao +# Mingshuang Luo) # # See ../LICENSE for clarification regarding multiple authors # @@ -17,7 +18,7 @@ import random -from typing import List, Tuple +from typing import List, Optional, Tuple import torch from torch import Tensor, nn @@ -28,22 +29,29 @@ class TensorDiagnosticOptions(object): Args: memory_limit: - The maximum number of bytes per tensor (limits how many copies - of the tensor we cache). + The maximum number of bytes per tensor + (limits how many copies of the tensor we cache). + max_eig_dim: + The maximum dimension for which we print out eigenvalues + (limited for speed reasons). """ - def __init__(self, memory_limit: int): + def __init__(self, memory_limit: int = (2 ** 20), max_eig_dim: int = 512): self.memory_limit = memory_limit + self.max_eig_dim = max_eig_dim def dim_is_summarized(self, size: int): return size > 10 and size != 31 -def get_sum_abs_stats( - x: Tensor, dim: int, stats_type: str +def get_tensor_stats( + x: Tensor, + dim: int, + stats_type: str, ) -> Tuple[Tensor, int]: - """Returns the sum-of-absolute-value of this Tensor, for each index into - the specified axis/dim of the tensor. + """ + Returns the specified transformation of the Tensor (either x or x.abs() + or (x > 0), summed over all but the index `dim`. Args: x: @@ -51,28 +59,38 @@ def get_sum_abs_stats( dim: Dimension with 0 <= dim < x.ndim stats_type: - Either "mean-abs" in which case the stats represent the mean absolute - value, or "pos-ratio" in which case the stats represent the proportion - of positive values (actually: the tensor is count of positive values, - count is the count of all values). - + The stats_type includes several types: + "abs" -> take abs() before summing + "positive" -> take (x > 0) before summing + "rms" -> square before summing, we'll take sqrt later + "value -> just sum x itself Returns: - (sum_abs, count) where sum_abs is a Tensor of shape (x.shape[dim],), - and the count is an integer saying how many items were counted in - each element of sum_abs. + stats: a Tensor of shape (x.shape[dim],). + count: an integer saying how many items were counted in each element + of stats. """ - if stats_type == "mean-abs": + + count = x.numel() // x.shape[dim] + + if stats_type == "eigs": + x = x.transpose(dim, -1) + x = x.reshape(-1, x.shape[-1]) + # shape of returned tensor: (s, s), + # where s is size of dimension `dim` of original x. + return torch.matmul(x.transpose(0, 1), x), count + elif stats_type == "abs": x = x.abs() - else: - assert stats_type == "pos-ratio" + elif stats_type == "rms": + x = x ** 2 + elif stats_type == "positive": x = (x > 0).to(dtype=torch.float) + else: + assert stats_type == "value" - orig_numel = x.numel() sum_dims = [d for d in range(x.ndim) if d != dim] - x = torch.sum(x, dim=sum_dims) - count = orig_numel // x.numel() + if len(sum_dims) > 0: + x = torch.sum(x, dim=sum_dims) x = x.flatten() - return x, count @@ -83,43 +101,58 @@ def get_diagnostics_for_dim( sizes_same: bool, stats_type: str, ) -> str: - """This function gets diagnostics for a dimension of a module. 
+ """ + This function gets diagnostics for a dimension of a module. Args: dim: - The dimension to analyze, with 0 <= dim < tensors[0].ndim - tensors: - List of cached tensors to get the stats + the dimension to analyze, with 0 <= dim < tensors[0].ndim options: - Options object + options object sizes_same: True if all the tensor sizes are the same on this dimension - stats_type: either "mean-abs" or "pos-ratio", dictates the type of - stats we accumulate, mean-abs is mean absolute value, "pos-ratio" is - proportion of positive to nonnegative values. - + stats_type: either "abs" or "positive" or "eigs" or "value", + imdictates the type of stats we accumulate, abs is mean absolute + value, "positive" is proportion of positive to nonnegative values, + "eigs" is eigenvalues after doing outer product on this dim, sum + over all other dimes. Returns: Diagnostic as a string, either percentiles or the actual values, - see the code. + see the code. Will return the empty string if the diagnostics did + not make sense to print out for this dimension, e.g. dimension + mismatch and stats_type == "eigs". """ # stats_and_counts is a list of pair (Tensor, int) - stats_and_counts = [get_sum_abs_stats(x, dim, stats_type) for x in tensors] + stats_and_counts = [get_tensor_stats(x, dim, stats_type) for x in tensors] stats = [x[0] for x in stats_and_counts] counts = [x[1] for x in stats_and_counts] - if sizes_same: + + if stats_type == "eigs": + try: + stats = torch.stack(stats).sum(dim=0) + except: # noqa + return "" + count = sum(counts) + stats = stats / count + stats, _ = torch.symeig(stats) + stats = stats.abs().sqrt() + # sqrt so it reflects data magnitude, like stddev- not variance + elif sizes_same: stats = torch.stack(stats).sum(dim=0) count = sum(counts) stats = stats / count else: stats = [x[0] / x[1] for x in stats_and_counts] stats = torch.cat(stats, dim=0) + if stats_type == "rms": + stats = stats.sqrt() - # If `summarize` we print percentiles of the stats; - # else, we print out individual elements. + # if `summarize` we print percentiles of the stats; else, + # we print out individual elements. summarize = (not sizes_same) or options.dim_is_summarized(stats.numel()) if summarize: - # Print out percentiles. + # print out percentiles. stats = stats.sort()[0] num_percentiles = 10 size = stats.numel() @@ -129,12 +162,25 @@ def get_diagnostics_for_dim( percentiles.append(stats[index].item()) percentiles = ["%.2g" % x for x in percentiles] percentiles = " ".join(percentiles) - return f"percentiles: [{percentiles}]" + ans = f"percentiles: [{percentiles}]" else: - stats = stats.tolist() - stats = ["%.2g" % x for x in stats] - stats = "[" + " ".join(stats) + "]" - return stats + ans = stats.tolist() + ans = ["%.2g" % x for x in ans] + ans = "[" + " ".join(ans) + "]" + if stats_type == "value": + # This norm is useful because it is strictly less than the largest + # sqrt(eigenvalue) of the variance, which we print out, and shows, + # speaking in an approximate way, how much of that largest eigenvalue + # can be attributed to the mean of the distribution. + norm = (stats ** 2).sum().sqrt().item() + mean = stats.mean().item() + rms = (stats ** 2).mean().sqrt().item() + ans += f", norm={norm:.2g}, mean={mean:.2g}, rms={rms:.2g}" + else: + mean = stats.mean().item() + rms = (stats ** 2).mean().sqrt().item() + ans += f", mean={mean:.2g}, rms={rms:.2g}" + return ans def print_diagnostics_for_dim( @@ -153,17 +199,27 @@ def print_diagnostics_for_dim( Options object. 
""" - for stats_type in ["mean-abs", "pos-ratio"]: - # stats_type will be "mean-abs" or "pos-ratio". + ndim = tensors[0].ndim + if ndim > 1: + stats_types = ["abs", "positive", "value", "rms"] + if tensors[0].shape[dim] <= options.max_eig_dim: + stats_types.append("eigs") + else: + stats_types = ["value", "abs"] + + for stats_type in stats_types: sizes = [x.shape[dim] for x in tensors] sizes_same = all([x == sizes[0] for x in sizes]) s = get_diagnostics_for_dim( dim, tensors, options, sizes_same, stats_type ) + if s == "": + continue min_size = min(sizes) max_size = max(sizes) size_str = f"{min_size}" if sizes_same else f"{min_size}..{max_size}" + # stats_type will be "abs" or "positive". print(f"module={name}, dim={dim}, size={size_str}, {stats_type} {s}") @@ -225,11 +281,15 @@ class TensorDiagnostic(object): # Ensure there is at least one dim. self.saved_tensors = [x.unsqueeze(0) for x in self.saved_tensors] + try: + device = torch.device("cuda") + except: # noqa + device = torch.device("cpu") + ndim = self.saved_tensors[0].ndim + tensors = [x.to(device) for x in self.saved_tensors] for dim in range(ndim): - print_diagnostics_for_dim( - self.name, dim, self.saved_tensors, self.opts - ) + print_diagnostics_for_dim(self.name, dim, tensors, self.opts) class ModelDiagnostic(object): @@ -240,11 +300,14 @@ class ModelDiagnostic(object): Options object. """ - def __init__(self, opts: TensorDiagnosticOptions): + def __init__(self, opts: Optional[TensorDiagnosticOptions] = None): # In this dictionary, the keys are tensors names and the values # are corresponding TensorDiagnostic objects. + if opts is None: + self.opts = TensorDiagnosticOptions() + else: + self.opts = opts self.diagnostics = dict() - self.opts = opts def __getitem__(self, name: str): if name not in self.diagnostics: @@ -321,7 +384,7 @@ def attach_diagnostics( def _test_tensor_diagnostic(): - opts = TensorDiagnosticOptions(2 ** 20) + opts = TensorDiagnosticOptions(2 ** 20, 512) diagnostic = TensorDiagnostic(opts, "foo")