From bc2882ddcc86efda9c9c2e6cd9c109bea6af8772 Mon Sep 17 00:00:00 2001 From: Zengwei Yao Date: Tue, 12 Jul 2022 19:19:58 +0800 Subject: [PATCH 01/38] Simplified memory bank for Emformer (#440) * init files * use average value as memory vector for each chunk * change tail padding length from right_context_length to chunk_length * correct the files, ln -> cp * fix bug in conv_emformer_transducer_stateless2/emformer.py * fix doc in conv_emformer_transducer_stateless/emformer.py * refactor init states for stream * modify .flake8 * fix bug about memory mask when memory_size==0 * add @torch.jit.export for init_states function * update RESULTS.md * minor change * update README.md * modify doc * replace torch.div() with << * fix bug, >> -> << * use i&i-1 to judge if it is a power of 2 * minor fix * fix error in RESULTS.md --- .flake8 | 2 +- egs/librispeech/ASR/README.md | 4 +- egs/librispeech/ASR/RESULTS.md | 312 +++ .../decode.py | 4 +- .../emformer.py | 57 +- .../stream.py | 36 +- .../streaming_decode.py | 2 + .../train.py | 2 +- .../asr_datamodule.py | 1 + .../beam_search.py | 1 + .../decode.py | 657 ++++++ .../decoder.py | 1 + .../emformer.py | 1841 +++++++++++++++++ .../encoder_interface.py | 1 + .../export.py | 287 +++ .../joiner.py | 1 + .../model.py | 1 + .../optim.py | 1 + .../scaling.py | 1 + .../stream.py | 1 + .../streaming_decode.py | 980 +++++++++ .../test_emformer.py | 194 ++ .../train.py | 1136 ++++++++++ 23 files changed, 5476 insertions(+), 47 deletions(-) create mode 120000 egs/librispeech/ASR/conv_emformer_transducer_stateless2/asr_datamodule.py create mode 120000 egs/librispeech/ASR/conv_emformer_transducer_stateless2/beam_search.py create mode 100755 egs/librispeech/ASR/conv_emformer_transducer_stateless2/decode.py create mode 120000 egs/librispeech/ASR/conv_emformer_transducer_stateless2/decoder.py create mode 100644 egs/librispeech/ASR/conv_emformer_transducer_stateless2/emformer.py create mode 120000 egs/librispeech/ASR/conv_emformer_transducer_stateless2/encoder_interface.py create mode 100755 egs/librispeech/ASR/conv_emformer_transducer_stateless2/export.py create mode 120000 egs/librispeech/ASR/conv_emformer_transducer_stateless2/joiner.py create mode 120000 egs/librispeech/ASR/conv_emformer_transducer_stateless2/model.py create mode 120000 egs/librispeech/ASR/conv_emformer_transducer_stateless2/optim.py create mode 120000 egs/librispeech/ASR/conv_emformer_transducer_stateless2/scaling.py create mode 120000 egs/librispeech/ASR/conv_emformer_transducer_stateless2/stream.py create mode 100755 egs/librispeech/ASR/conv_emformer_transducer_stateless2/streaming_decode.py create mode 100644 egs/librispeech/ASR/conv_emformer_transducer_stateless2/test_emformer.py create mode 100755 egs/librispeech/ASR/conv_emformer_transducer_stateless2/train.py diff --git a/.flake8 b/.flake8 index c7c4f1855..b2eb2e943 100644 --- a/.flake8 +++ b/.flake8 @@ -9,7 +9,7 @@ per-file-ignores = egs/*/ASR/pruned_transducer_stateless*/*.py: E501, egs/*/ASR/*/optim.py: E501, egs/*/ASR/*/scaling.py: E501, - egs/librispeech/ASR/conv_emformer_transducer_stateless/*.py: E501, E203 + egs/librispeech/ASR/conv_emformer_transducer_stateless*/*.py: E501, E203 # invalid escape sequence (cause by tex formular), W605 icefall/utils.py: E501, W605 diff --git a/egs/librispeech/ASR/README.md b/egs/librispeech/ASR/README.md index 318d908d1..cbdee53e6 100644 --- a/egs/librispeech/ASR/README.md +++ b/egs/librispeech/ASR/README.md @@ -23,8 +23,8 @@ The following table lists the differences among them. 
| `pruned_transducer_stateless5` | Conformer(modified) | Embedding + Conv1d | same as pruned_transducer_stateless4 + more layers + random combiner|
| `pruned_transducer_stateless6` | Conformer(modified) | Embedding + Conv1d | same as pruned_transducer_stateless4 + distillation with hubert|
| `pruned_stateless_emformer_rnnt2` | Emformer(from torchaudio) | Embedding + Conv1d | Using Emformer from torchaudio for streaming ASR|
-| `conv_emformer_transducer_stateless` | Emformer | Embedding + Conv1d | Using Emformer augmented with convolution for streaming ASR + mechanisms in reworked model |
-
+| `conv_emformer_transducer_stateless` | ConvEmformer | Embedding + Conv1d | Using ConvEmformer for streaming ASR + mechanisms in reworked model |
+| `conv_emformer_transducer_stateless2` | ConvEmformer | Embedding + Conv1d | Using ConvEmformer with simplified memory for streaming ASR + mechanisms in reworked model |

The decoder in `transducer_stateless` is modified from the paper [Rnn-Transducer with Stateless Prediction Network](https://ieeexplore.ieee.org/document/9054419/).

diff --git a/egs/librispeech/ASR/RESULTS.md b/egs/librispeech/ASR/RESULTS.md
index 892a58fb2..d4096884b 100644
--- a/egs/librispeech/ASR/RESULTS.md
+++ b/egs/librispeech/ASR/RESULTS.md
@@ -1,5 +1,317 @@
## Results

+### LibriSpeech BPE training results (Pruned Stateless Conv-Emformer RNN-T 2)
+
+[conv_emformer_transducer_stateless2](./conv_emformer_transducer_stateless2)
+
+It implements the [Emformer](https://arxiv.org/abs/2010.10759) model, augmented with a convolution module and a simplified memory bank, for streaming ASR.
+It is modified from [torchaudio](https://github.com/pytorch/audio).
+
+See #440 for more details.
+
+#### With lower latency setup, training on full librispeech
+
+In this model, the lengths of chunk and right context are 32 frames (i.e., 0.32s) and 8 frames (i.e., 0.08s), respectively.
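+
+For reference, here is a minimal sketch of the arithmetic behind these settings (assuming the usual 10 ms fbank frame shift and the 4x subsampling used by the recipe); the power-of-two check mirrors the `(chunk_length - 1) & chunk_length == 0` assertion that this PR adds in `emformer.py`:
+
+```python
+def is_power_of_two(n: int) -> bool:
+    # same trick as the new assertion: a power of 2 satisfies (n - 1) & n == 0
+    return n > 0 and (n - 1) & n == 0
+
+frame_shift_s = 0.01  # 10 ms fbank frame shift (assumed)
+subsampling = 4       # Conv2dSubsampling reduces the length to 1/4
+
+for name, frames in [("chunk", 32), ("right context", 8)]:
+    print(f"{name}: {frames} frames = {frames * frame_shift_s:.2f} s,"
+          f" {frames // subsampling} frames after subsampling")
+
+chunk = 32 // subsampling       # chunk length at the encoder frame rate (assumed)
+assert is_power_of_two(chunk)   # needed so that floor division becomes a shift
+shift = chunk.bit_length() - 1  # 8 -> 3
+assert (100 // chunk) == (100 >> shift)
+```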
+ +The WERs are: + +| | test-clean | test-other | comment | decoding mode | +|-------------------------------------|------------|------------|----------------------|----------------------| +| greedy search (max sym per frame 1) | 3.5 | 9.09 | --epoch 30 --avg 10 | simulated streaming | +| greedy search (max sym per frame 1) | 3.57 | 9.1 | --epoch 30 --avg 10 | streaming | +| fast beam search | 3.5 | 8.91 | --epoch 30 --avg 10 | simulated streaming | +| fast beam search | 3.54 | 8.91 | --epoch 30 --avg 10 | streaming | +| modified beam search | 3.43 | 8.86 | --epoch 30 --avg 10 | simulated streaming | +| modified beam search | 3.48 | 8.88 | --epoch 30 --avg 10 | streaming | + +The training command is: + +```bash +./conv_emformer_transducer_stateless2/train.py \ + --world-size 6 \ + --num-epochs 30 \ + --start-epoch 1 \ + --exp-dir conv_emformer_transducer_stateless2/exp \ + --full-libri 1 \ + --max-duration 280 \ + --master-port 12321 \ + --num-encoder-layers 12 \ + --chunk-length 32 \ + --cnn-module-kernel 31 \ + --left-context-length 32 \ + --right-context-length 8 \ + --memory-size 32 +``` + +The tensorboard log can be found at + + +The simulated streaming decoding command using greedy search is: +```bash +./conv_emformer_transducer_stateless2/decode.py \ + --epoch 30 \ + --avg 10 \ + --exp-dir conv_emformer_transducer_stateless2/exp \ + --max-duration 300 \ + --num-encoder-layers 12 \ + --chunk-length 32 \ + --cnn-module-kernel 31 \ + --left-context-length 32 \ + --right-context-length 8 \ + --memory-size 32 \ + --decoding-method greedy_search \ + --use-averaged-model True +``` + +The simulated streaming decoding command using fast beam search is: +```bash +./conv_emformer_transducer_stateless2/decode.py \ + --epoch 30 \ + --avg 10 \ + --exp-dir conv_emformer_transducer_stateless2/exp \ + --max-duration 300 \ + --num-encoder-layers 12 \ + --chunk-length 32 \ + --cnn-module-kernel 31 \ + --left-context-length 32 \ + --right-context-length 8 \ + --memory-size 32 \ + --decoding-method fast_beam_search \ + --use-averaged-model True \ + --beam 4 \ + --max-contexts 4 \ + --max-states 8 +``` + +The simulated streaming decoding command using modified beam search is: +```bash +./conv_emformer_transducer_stateless2/decode.py \ + --epoch 30 \ + --avg 10 \ + --exp-dir conv_emformer_transducer_stateless2/exp \ + --max-duration 300 \ + --num-encoder-layers 12 \ + --chunk-length 32 \ + --cnn-module-kernel 31 \ + --left-context-length 32 \ + --right-context-length 8 \ + --memory-size 32 \ + --decoding-method modified_beam_search \ + --use-averaged-model True \ + --beam-size 4 +``` + +The streaming decoding command using greedy search is: +```bash +./conv_emformer_transducer_stateless2/streaming_decode.py \ + --epoch 30 \ + --avg 10 \ + --exp-dir conv_emformer_transducer_stateless2/exp \ + --num-decode-streams 2000 \ + --num-encoder-layers 12 \ + --chunk-length 32 \ + --cnn-module-kernel 31 \ + --left-context-length 32 \ + --right-context-length 8 \ + --memory-size 32 \ + --decoding-method greedy_search \ + --use-averaged-model True +``` + +The streaming decoding command using fast beam search is: +```bash +./conv_emformer_transducer_stateless2/streaming_decode.py \ + --epoch 30 \ + --avg 10 \ + --exp-dir conv_emformer_transducer_stateless2/exp \ + --num-decode-streams 2000 \ + --num-encoder-layers 12 \ + --chunk-length 32 \ + --cnn-module-kernel 31 \ + --left-context-length 32 \ + --right-context-length 8 \ + --memory-size 32 \ + --decoding-method fast_beam_search \ + --use-averaged-model True \ + 
--beam 4 \ + --max-contexts 4 \ + --max-states 8 +``` + +The streaming decoding command using modified beam search is: +```bash +./conv_emformer_transducer_stateless2/streaming_decode.py \ + --epoch 30 \ + --avg 10 \ + --exp-dir conv_emformer_transducer_stateless2/exp \ + --num-decode-streams 2000 \ + --num-encoder-layers 12 \ + --chunk-length 32 \ + --cnn-module-kernel 31 \ + --left-context-length 32 \ + --right-context-length 8 \ + --memory-size 32 \ + --decoding-method modified_beam_search \ + --use-averaged-model True \ + --beam-size 4 +``` + +Pretrained models, training logs, decoding logs, and decoding results +are available at + + +#### With higher latency setup, training on full librispeech + +In this model, the lengths of chunk and right context are 64 frames (i.e., 0.64s) and 16 frames (i.e., 0.16s), respectively. + +The WERs are: + +| | test-clean | test-other | comment | decoding mode | +|-------------------------------------|------------|------------|----------------------|----------------------| +| greedy search (max sym per frame 1) | 3.3 | 8.71 | --epoch 30 --avg 10 | simulated streaming | +| greedy search (max sym per frame 1) | 3.35 | 8.65 | --epoch 30 --avg 10 | streaming | +| fast beam search | 3.27 | 8.58 | --epoch 30 --avg 10 | simulated streaming | +| fast beam search | 3.31 | 8.48 | --epoch 30 --avg 10 | streaming | +| modified beam search | 3.26 | 8.56 | --epoch 30 --avg 10 | simulated streaming | +| modified beam search | 3.29 | 8.47 | --epoch 30 --avg 10 | streaming | + +The training command is: + +```bash +./conv_emformer_transducer_stateless2/train.py \ + --world-size 4 \ + --num-epochs 30 \ + --start-epoch 1 \ + --exp-dir conv_emformer_transducer_stateless2/exp \ + --full-libri 1 \ + --max-duration 280 \ + --master-port 12321 \ + --num-encoder-layers 12 \ + --chunk-length 64 \ + --cnn-module-kernel 31 \ + --left-context-length 64 \ + --right-context-length 16 \ + --memory-size 32 +``` + +The tensorboard log can be found at + + +The simulated streaming decoding command using greedy search is: +```bash +./conv_emformer_transducer_stateless2/decode.py \ + --epoch 30 \ + --avg 10 \ + --exp-dir conv_emformer_transducer_stateless2/exp \ + --max-duration 300 \ + --num-encoder-layers 12 \ + --chunk-length 64 \ + --cnn-module-kernel 31 \ + --left-context-length 64 \ + --right-context-length 16 \ + --memory-size 32 \ + --decoding-method greedy_search \ + --use-averaged-model True +``` + +The simulated streaming decoding command using fast beam search is: +```bash +./conv_emformer_transducer_stateless2/decode.py \ + --epoch 30 \ + --avg 10 \ + --exp-dir conv_emformer_transducer_stateless2/exp \ + --max-duration 300 \ + --num-encoder-layers 12 \ + --chunk-length 64 \ + --cnn-module-kernel 31 \ + --left-context-length 64 \ + --right-context-length 16 \ + --memory-size 32 \ + --decoding-method fast_beam_search \ + --use-averaged-model True \ + --beam 4 \ + --max-contexts 4 \ + --max-states 8 +``` + +The simulated streaming decoding command using modified beam search is: +```bash +./conv_emformer_transducer_stateless2/decode.py \ + --epoch 30 \ + --avg 10 \ + --exp-dir conv_emformer_transducer_stateless2/exp \ + --max-duration 300 \ + --num-encoder-layers 12 \ + --chunk-length 64 \ + --cnn-module-kernel 31 \ + --left-context-length 64 \ + --right-context-length 16 \ + --memory-size 32 \ + --decoding-method modified_beam_search \ + --use-averaged-model True \ + --beam-size 4 +``` + +The streaming decoding command using greedy search is: +```bash 
+./conv_emformer_transducer_stateless2/streaming_decode.py \ + --epoch 30 \ + --avg 10 \ + --exp-dir conv_emformer_transducer_stateless2/exp \ + --num-decode-streams 2000 \ + --num-encoder-layers 12 \ + --chunk-length 64 \ + --cnn-module-kernel 31 \ + --left-context-length 64 \ + --right-context-length 16 \ + --memory-size 32 \ + --decoding-method greedy_search \ + --use-averaged-model True +``` + +The streaming decoding command using fast beam search is: +```bash +./conv_emformer_transducer_stateless2/streaming_decode.py \ + --epoch 30 \ + --avg 10 \ + --exp-dir conv_emformer_transducer_stateless2/exp \ + --num-decode-streams 2000 \ + --num-encoder-layers 12 \ + --chunk-length 64 \ + --cnn-module-kernel 31 \ + --left-context-length 64 \ + --right-context-length 16 \ + --memory-size 32 \ + --decoding-method fast_beam_search \ + --use-averaged-model True \ + --beam 4 \ + --max-contexts 4 \ + --max-states 8 +``` + +The streaming decoding command using modified beam search is: +```bash +./conv_emformer_transducer_stateless2/streaming_decode.py \ + --epoch 30 \ + --avg 10 \ + --exp-dir conv_emformer_transducer_stateless2/exp \ + --num-decode-streams 2000 \ + --num-encoder-layers 12 \ + --chunk-length 64 \ + --cnn-module-kernel 31 \ + --left-context-length 64 \ + --right-context-length 16 \ + --memory-size 32 \ + --decoding-method modified_beam_search \ + --use-averaged-model True \ + --beam-size 4 +``` + +Pretrained models, training logs, decoding logs, and decoding results +are available at + + + ### LibriSpeech BPE training results (Pruned Stateless Streaming Conformer RNN-T) #### [pruned_transducer_stateless](./pruned_transducer_stateless) diff --git a/egs/librispeech/ASR/conv_emformer_transducer_stateless/decode.py b/egs/librispeech/ASR/conv_emformer_transducer_stateless/decode.py index aadac2ae4..287fb94df 100755 --- a/egs/librispeech/ASR/conv_emformer_transducer_stateless/decode.py +++ b/egs/librispeech/ASR/conv_emformer_transducer_stateless/decode.py @@ -277,10 +277,10 @@ def decode_one_batch( supervisions = batch["supervisions"] feature_lens = supervisions["num_frames"].to(device) - feature_lens += params.right_context_length + feature_lens += params.chunk_length feature = torch.nn.functional.pad( feature, - pad=(0, 0, 0, params.right_context_length), + pad=(0, 0, 0, params.chunk_length), value=LOG_EPS, ) diff --git a/egs/librispeech/ASR/conv_emformer_transducer_stateless/emformer.py b/egs/librispeech/ASR/conv_emformer_transducer_stateless/emformer.py index 46993da48..8ca7d5568 100644 --- a/egs/librispeech/ASR/conv_emformer_transducer_stateless/emformer.py +++ b/egs/librispeech/ASR/conv_emformer_transducer_stateless/emformer.py @@ -1141,8 +1141,8 @@ class EmformerEncoderLayer(nn.Module): - output utterance, with shape (U, B, D); - output right_context, with shape (R, B, D); - output memory, with shape (1, B, D) or (0, B, D). - - output state. - - updated conv_cache. + - updated attention cache. + - updated convolution cache. """ R = right_context.size(0) src = torch.cat([right_context, utterance]) @@ -1252,6 +1252,11 @@ class EmformerEncoder(nn.Module): ): super().__init__() + assert ( + chunk_length - 1 + ) & chunk_length == 0, "chunk_length should be a power of 2." + self.shift = int(math.log(chunk_length, 2)) + self.use_memory = memory_size > 0 self.init_memory_op = nn.AvgPool1d( kernel_size=chunk_length, @@ -1525,7 +1530,6 @@ class EmformerEncoder(nn.Module): right_context at the end. 
states (List[torch.Tensor, List[List[torch.Tensor]], List[torch.Tensor]]: # noqa Cached states containing: - - past_lens: number of past frames for each sample in batch - attn_caches: attention states from preceding chunk's computation, where each element corresponds to each emformer layer - conv_caches: left context for causal convolution, where each @@ -1580,13 +1584,15 @@ class EmformerEncoder(nn.Module): # calcualte padding mask to mask out initial zero caches chunk_mask = make_pad_mask(output_lengths).to(x.device) memory_mask = ( - torch.div( - num_processed_frames, self.chunk_length, rounding_mode="floor" - ).view(x.size(1), 1) - <= torch.arange(self.memory_size, device=x.device).expand( - x.size(1), self.memory_size - ) - ).flip(1) + ( + (num_processed_frames >> self.shift).view(x.size(1), 1) + <= torch.arange(self.memory_size, device=x.device).expand( + x.size(1), self.memory_size + ) + ).flip(1) + if self.use_memory + else torch.empty(0).to(dtype=torch.bool, device=x.device) + ) left_context_mask = ( num_processed_frames.view(x.size(1), 1) <= torch.arange(self.left_context_length, device=x.device).expand( @@ -1631,6 +1637,31 @@ class EmformerEncoder(nn.Module): ) return output, output_lengths, output_states + @torch.jit.export + def init_states(self, device: torch.device = torch.device("cpu")): + """Create initial states.""" + attn_caches = [ + [ + torch.zeros(self.memory_size, self.d_model, device=device), + torch.zeros( + self.left_context_length, self.d_model, device=device + ), + torch.zeros( + self.left_context_length, self.d_model, device=device + ), + ] + for _ in range(self.num_encoder_layers) + ] + conv_caches = [ + torch.zeros(self.d_model, self.cnn_module_kernel - 1, device=device) + for _ in range(self.num_encoder_layers) + ] + states: Tuple[List[List[torch.Tensor]], List[torch.Tensor]] = ( + attn_caches, + conv_caches, + ) + return states + class Emformer(EncoderInterface): def __init__( @@ -1655,6 +1686,7 @@ class Emformer(EncoderInterface): self.subsampling_factor = subsampling_factor self.right_context_length = right_context_length + self.chunk_length = chunk_length if subsampling_factor != 4: raise NotImplementedError("Support only 'subsampling_factor=4'.") if chunk_length % subsampling_factor != 0: @@ -1803,6 +1835,11 @@ class Emformer(EncoderInterface): return output, output_lengths, output_states + @torch.jit.export + def init_states(self, device: torch.device = torch.device("cpu")): + """Create initial states.""" + return self.encoder.init_states(device) + class Conv2dSubsampling(nn.Module): """Convolutional 2D subsampling (to 1/4 length). diff --git a/egs/librispeech/ASR/conv_emformer_transducer_stateless/stream.py b/egs/librispeech/ASR/conv_emformer_transducer_stateless/stream.py index 31ad3f50a..69ee7ee9a 100644 --- a/egs/librispeech/ASR/conv_emformer_transducer_stateless/stream.py +++ b/egs/librispeech/ASR/conv_emformer_transducer_stateless/stream.py @@ -43,15 +43,12 @@ class Stream(object): device: The device to run this stream. """ - self.device = device self.LOG_EPS = LOG_EPS # Containing attention caches and convolution caches self.states: Optional[ Tuple[List[List[torch.Tensor]], List[torch.Tensor]] ] = None - # Initailize zero states. - self.init_states(params) # It uses different attributes for different decoding methods. 
self.context_size = params.context_size @@ -107,34 +104,11 @@ class Stream(object): def set_ground_truth(self, ground_truth: str) -> None: self.ground_truth = ground_truth - def init_states(self, params: AttributeDict) -> None: - attn_caches = [ - [ - torch.zeros( - params.memory_size, params.encoder_dim, device=self.device - ), - torch.zeros( - params.left_context_length // params.subsampling_factor, - params.encoder_dim, - device=self.device, - ), - torch.zeros( - params.left_context_length // params.subsampling_factor, - params.encoder_dim, - device=self.device, - ), - ] - for _ in range(params.num_encoder_layers) - ] - conv_caches = [ - torch.zeros( - params.encoder_dim, - params.cnn_module_kernel - 1, - device=self.device, - ) - for _ in range(params.num_encoder_layers) - ] - self.states = (attn_caches, conv_caches) + def set_states( + self, states: Tuple[List[List[torch.Tensor]], List[torch.Tensor]] + ) -> None: + """Set states.""" + self.states = states def get_feature_chunk(self) -> torch.Tensor: """Get a chunk of feature frames. diff --git a/egs/librispeech/ASR/conv_emformer_transducer_stateless/streaming_decode.py b/egs/librispeech/ASR/conv_emformer_transducer_stateless/streaming_decode.py index 4fac405b0..0a6bbfa8b 100755 --- a/egs/librispeech/ASR/conv_emformer_transducer_stateless/streaming_decode.py +++ b/egs/librispeech/ASR/conv_emformer_transducer_stateless/streaming_decode.py @@ -683,6 +683,8 @@ def decode_dataset( LOG_EPS=LOG_EPSILON, ) + stream.set_states(model.encoder.init_states(device)) + audio: np.ndarray = cut.load_audio() # audio.shape: (1, num_samples) assert len(audio.shape) == 2 diff --git a/egs/librispeech/ASR/conv_emformer_transducer_stateless/train.py b/egs/librispeech/ASR/conv_emformer_transducer_stateless/train.py index acaf1397f..7dc9314f8 100755 --- a/egs/librispeech/ASR/conv_emformer_transducer_stateless/train.py +++ b/egs/librispeech/ASR/conv_emformer_transducer_stateless/train.py @@ -28,7 +28,7 @@ export CUDA_VISIBLE_DEVICES="0,1,2,3" --start-epoch 1 \ --exp-dir conv_emformer_transducer_stateless/exp \ --full-libri 1 \ - --max-duration 300 \ + --max-duration 280 \ --master-port 12321 \ --num-encoder-layers 12 \ --chunk-length 32 \ diff --git a/egs/librispeech/ASR/conv_emformer_transducer_stateless2/asr_datamodule.py b/egs/librispeech/ASR/conv_emformer_transducer_stateless2/asr_datamodule.py new file mode 120000 index 000000000..104eeea5d --- /dev/null +++ b/egs/librispeech/ASR/conv_emformer_transducer_stateless2/asr_datamodule.py @@ -0,0 +1 @@ +../conv_emformer_transducer_stateless/asr_datamodule.py \ No newline at end of file diff --git a/egs/librispeech/ASR/conv_emformer_transducer_stateless2/beam_search.py b/egs/librispeech/ASR/conv_emformer_transducer_stateless2/beam_search.py new file mode 120000 index 000000000..8554e44cc --- /dev/null +++ b/egs/librispeech/ASR/conv_emformer_transducer_stateless2/beam_search.py @@ -0,0 +1 @@ +../pruned_transducer_stateless2/beam_search.py \ No newline at end of file diff --git a/egs/librispeech/ASR/conv_emformer_transducer_stateless2/decode.py b/egs/librispeech/ASR/conv_emformer_transducer_stateless2/decode.py new file mode 100755 index 000000000..402ec4293 --- /dev/null +++ b/egs/librispeech/ASR/conv_emformer_transducer_stateless2/decode.py @@ -0,0 +1,657 @@ +#!/usr/bin/env python3 +# +# Copyright 2021-2022 Xiaomi Corporation (Author: Fangjun Kuang, +# Zengwei Yao) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# 
you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Usage: +(1) greedy search +./conv_emformer_transducer_stateless2/decode.py \ + --epoch 30 \ + --avg 10 \ + --exp-dir conv_emformer_transducer_stateless2/exp \ + --max-duration 300 \ + --num-encoder-layers 12 \ + --chunk-length 32 \ + --cnn-module-kernel 31 \ + --left-context-length 32 \ + --right-context-length 8 \ + --memory-size 32 \ + --decoding-method greedy_search \ + --use-averaged-model True + +(2) modified beam search +./conv_emformer_transducer_stateless2/decode.py \ + --epoch 30 \ + --avg 10 \ + --exp-dir conv_emformer_transducer_stateless2/exp \ + --max-duration 300 \ + --num-encoder-layers 12 \ + --chunk-length 32 \ + --cnn-module-kernel 31 \ + --left-context-length 32 \ + --right-context-length 8 \ + --memory-size 32 \ + --decoding-method modified_beam_search \ + --use-averaged-model True \ + --beam-size 4 + +(3) fast beam search +./conv_emformer_transducer_stateless2/decode.py \ + --epoch 30 \ + --avg 10 \ + --exp-dir conv_emformer_transducer_stateless2/exp \ + --max-duration 300 \ + --num-encoder-layers 12 \ + --chunk-length 32 \ + --cnn-module-kernel 31 \ + --left-context-length 32 \ + --right-context-length 8 \ + --memory-size 32 \ + --decoding-method fast_beam_search \ + --use-averaged-model True \ + --beam 4 \ + --max-contexts 4 \ + --max-states 8 +""" + + +import argparse +import logging +import math +from collections import defaultdict +from pathlib import Path +from typing import Dict, List, Optional, Tuple + +import k2 +import sentencepiece as spm +import torch +import torch.nn as nn +from asr_datamodule import LibriSpeechAsrDataModule +from beam_search import ( + beam_search, + fast_beam_search_one_best, + greedy_search, + greedy_search_batch, + modified_beam_search, +) +from train import add_model_arguments, get_params, get_transducer_model + +from icefall.checkpoint import ( + average_checkpoints, + average_checkpoints_with_averaged_model, + find_checkpoints, + load_checkpoint, +) +from icefall.utils import ( + AttributeDict, + setup_logger, + store_transcripts, + str2bool, + write_error_stats, +) + +LOG_EPS = math.log(1e-10) + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--epoch", + type=int, + default=30, + help="""It specifies the checkpoint to use for decoding. + Note: Epoch counts from 1. + You can specify --avg to use more checkpoints for model averaging.""", + ) + + parser.add_argument( + "--iter", + type=int, + default=0, + help="""If positive, --epoch is ignored and it + will use the checkpoint exp_dir/checkpoint-iter.pt. + You can specify --avg to use more checkpoints for model averaging. + """, + ) + + parser.add_argument( + "--avg", + type=int, + default=10, + help="Number of checkpoints to average. Automatically select " + "consecutive checkpoints before the checkpoint specified by " + "'--epoch' and '--iter'", + ) + + parser.add_argument( + "--use-averaged-model", + type=str2bool, + default=True, + help="Whether to load averaged model. 
Currently it only supports " + "using --epoch. If True, it would decode with the averaged model " + "over the epoch range from `epoch-avg` (excluded) to `epoch`." + "Actually only the models with epoch number of `epoch-avg` and " + "`epoch` are loaded for averaging. ", + ) + + parser.add_argument( + "--exp-dir", + type=str, + default="pruned_transducer_stateless4/exp", + help="The experiment dir", + ) + + parser.add_argument( + "--bpe-model", + type=str, + default="data/lang_bpe_500/bpe.model", + help="Path to the BPE model", + ) + + parser.add_argument( + "--decoding-method", + type=str, + default="greedy_search", + help="""Possible values are: + - greedy_search + - modified_beam_search + - fast_beam_search + """, + ) + + parser.add_argument( + "--beam-size", + type=int, + default=4, + help="""An integer indicating how many candidates we will keep for each + frame. Used only when --decoding-method is beam_search or + modified_beam_search.""", + ) + + parser.add_argument( + "--beam", + type=float, + default=4, + help="""A floating point value to calculate the cutoff score during beam + search (i.e., `cutoff = max-score - beam`), which is the same as the + `beam` in Kaldi. + Used only when --decoding-method is fast_beam_search""", + ) + + parser.add_argument( + "--max-contexts", + type=int, + default=4, + help="""Used only when --decoding-method is + fast_beam_search""", + ) + + parser.add_argument( + "--max-states", + type=int, + default=8, + help="""Used only when --decoding-method is + fast_beam_search""", + ) + + parser.add_argument( + "--context-size", + type=int, + default=2, + help="The context size in the decoder. 1 means bigram; " + "2 means tri-gram", + ) + + parser.add_argument( + "--max-sym-per-frame", + type=int, + default=1, + help="""Maximum number of symbols per frame. + Used only when --decoding_method is greedy_search""", + ) + + add_model_arguments(parser) + + return parser + + +def decode_one_batch( + params: AttributeDict, + model: nn.Module, + sp: spm.SentencePieceProcessor, + batch: dict, + decoding_graph: Optional[k2.Fsa] = None, +) -> Dict[str, List[List[str]]]: + """Decode one batch and return the result in a dict. The dict has the + following format: + + - key: It indicates the setting used for decoding. For example, + if greedy_search is used, it would be "greedy_search" + If beam search with a beam size of 7 is used, it would be + "beam_7" + - value: It contains the decoding result. `len(value)` equals to + batch size. `value[i]` is the decoding result for the i-th + utterance in the given batch. + Args: + params: + It's the return value of :func:`get_params`. + model: + The neural model. + sp: + The BPE model. + batch: + It is the return value from iterating + `lhotse.dataset.K2SpeechRecognitionDataset`. See its documentation + for the format of the `batch`. + decoding_graph: + The decoding graph. Can be either a `k2.trivial_graph` or HLG, Used + only when --decoding_method is fast_beam_search. + Returns: + Return the decoding result. See above description for the format of + the returned dict. 
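+
+      For example, with greedy search the returned dict has the form
+      ``{"greedy_search": [["THE", "FIRST", "HYP"], ...]}``, where each
+      inner list holds the predicted words of one utterance in the batch
+      (hypothetical values, shown only for illustration).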
+ """ + device = next(model.parameters()).device + feature = batch["inputs"] + assert feature.ndim == 3 + + feature = feature.to(device) + # at entry, feature is (N, T, C) + + supervisions = batch["supervisions"] + feature_lens = supervisions["num_frames"].to(device) + + feature_lens += params.chunk_length + feature = torch.nn.functional.pad( + feature, + pad=(0, 0, 0, params.chunk_length), + value=LOG_EPS, + ) + + encoder_out, encoder_out_lens = model.encoder( + x=feature, x_lens=feature_lens + ) + hyps = [] + + if params.decoding_method == "fast_beam_search": + hyp_tokens = fast_beam_search_one_best( + model=model, + decoding_graph=decoding_graph, + encoder_out=encoder_out, + encoder_out_lens=encoder_out_lens, + beam=params.beam, + max_contexts=params.max_contexts, + max_states=params.max_states, + ) + for hyp in sp.decode(hyp_tokens): + hyps.append(hyp.split()) + elif ( + params.decoding_method == "greedy_search" + and params.max_sym_per_frame == 1 + ): + hyp_tokens = greedy_search_batch( + model=model, + encoder_out=encoder_out, + encoder_out_lens=encoder_out_lens, + ) + for hyp in sp.decode(hyp_tokens): + hyps.append(hyp.split()) + elif params.decoding_method == "modified_beam_search": + hyp_tokens = modified_beam_search( + model=model, + encoder_out=encoder_out, + encoder_out_lens=encoder_out_lens, + beam=params.beam_size, + ) + for hyp in sp.decode(hyp_tokens): + hyps.append(hyp.split()) + else: + batch_size = encoder_out.size(0) + + for i in range(batch_size): + # fmt: off + encoder_out_i = encoder_out[i:i + 1, :encoder_out_lens[i]] + # fmt: on + if params.decoding_method == "greedy_search": + hyp = greedy_search( + model=model, + encoder_out=encoder_out_i, + max_sym_per_frame=params.max_sym_per_frame, + ) + elif params.decoding_method == "beam_search": + hyp = beam_search( + model=model, + encoder_out=encoder_out_i, + beam=params.beam_size, + ) + else: + raise ValueError( + f"Unsupported decoding method: {params.decoding_method}" + ) + hyps.append(sp.decode(hyp).split()) + + if params.decoding_method == "greedy_search": + return {"greedy_search": hyps} + elif params.decoding_method == "fast_beam_search": + return { + ( + f"beam_{params.beam}_" + f"max_contexts_{params.max_contexts}_" + f"max_states_{params.max_states}" + ): hyps + } + else: + return {f"beam_size_{params.beam_size}": hyps} + + +def decode_dataset( + dl: torch.utils.data.DataLoader, + params: AttributeDict, + model: nn.Module, + sp: spm.SentencePieceProcessor, + decoding_graph: Optional[k2.Fsa] = None, +) -> Dict[str, List[Tuple[List[str], List[str]]]]: + """Decode dataset. + + Args: + dl: + PyTorch's dataloader containing the dataset to decode. + params: + It is returned by :func:`get_params`. + model: + The neural model. + sp: + The BPE model. + decoding_graph: + The decoding graph. Can be either a `k2.trivial_graph` or HLG, Used + only when --decoding_method is fast_beam_search. + Returns: + Return a dict, whose key may be "greedy_search" if greedy search + is used, or it may be "beam_7" if beam size of 7 is used. + Its value is a list of tuples. Each tuple contains two elements: + The first is the reference transcript, and the second is the + predicted result. + """ + num_cuts = 0 + + try: + num_batches = len(dl) + except TypeError: + num_batches = "?" 
+ + if params.decoding_method == "greedy_search": + log_interval = 100 + else: + log_interval = 2 + + results = defaultdict(list) + for batch_idx, batch in enumerate(dl): + texts = batch["supervisions"]["text"] + + hyps_dict = decode_one_batch( + params=params, + model=model, + sp=sp, + decoding_graph=decoding_graph, + batch=batch, + ) + + for name, hyps in hyps_dict.items(): + this_batch = [] + assert len(hyps) == len(texts) + for hyp_words, ref_text in zip(hyps, texts): + ref_words = ref_text.split() + this_batch.append((ref_words, hyp_words)) + + results[name].extend(this_batch) + + num_cuts += len(texts) + + if batch_idx % log_interval == 0: + batch_str = f"{batch_idx}/{num_batches}" + + logging.info( + f"batch {batch_str}, cuts processed until now is {num_cuts}" + ) + return results + + +def save_results( + params: AttributeDict, + test_set_name: str, + results_dict: Dict[str, List[Tuple[List[int], List[int]]]], +): + test_set_wers = dict() + for key, results in results_dict.items(): + recog_path = ( + params.res_dir / f"recogs-{test_set_name}-{key}-{params.suffix}.txt" + ) + store_transcripts(filename=recog_path, texts=results) + logging.info(f"The transcripts are stored in {recog_path}") + + # The following prints out WERs, per-word error statistics and aligned + # ref/hyp pairs. + errs_filename = ( + params.res_dir / f"errs-{test_set_name}-{key}-{params.suffix}.txt" + ) + with open(errs_filename, "w") as f: + wer = write_error_stats( + f, f"{test_set_name}-{key}", results, enable_log=True + ) + test_set_wers[key] = wer + + logging.info("Wrote detailed error stats to {}".format(errs_filename)) + + test_set_wers = sorted(test_set_wers.items(), key=lambda x: x[1]) + errs_info = ( + params.res_dir + / f"wer-summary-{test_set_name}-{key}-{params.suffix}.txt" + ) + with open(errs_info, "w") as f: + print("settings\tWER", file=f) + for key, val in test_set_wers: + print("{}\t{}".format(key, val), file=f) + + s = "\nFor {}, WER of different settings are:\n".format(test_set_name) + note = "\tbest for {}".format(test_set_name) + for key, val in test_set_wers: + s += "{}\t{}{}\n".format(key, val, note) + note = "" + logging.info(s) + + +@torch.no_grad() +def main(): + parser = get_parser() + LibriSpeechAsrDataModule.add_arguments(parser) + args = parser.parse_args() + args.exp_dir = Path(args.exp_dir) + + params = get_params() + params.update(vars(args)) + + assert params.decoding_method in ( + "greedy_search", + "beam_search", + "fast_beam_search", + "modified_beam_search", + ) + params.res_dir = params.exp_dir / params.decoding_method + + if params.iter > 0: + params.suffix = f"iter-{params.iter}-avg-{params.avg}" + else: + params.suffix = f"epoch-{params.epoch}-avg-{params.avg}" + + if "fast_beam_search" in params.decoding_method: + params.suffix += f"-beam-{params.beam}" + params.suffix += f"-max-contexts-{params.max_contexts}" + params.suffix += f"-max-states-{params.max_states}" + elif "beam_search" in params.decoding_method: + params.suffix += ( + f"-{params.decoding_method}-beam-size-{params.beam_size}" + ) + else: + params.suffix += f"-context-{params.context_size}" + params.suffix += f"-max-sym-per-frame-{params.max_sym_per_frame}" + + if params.use_averaged_model: + params.suffix += "-use-averaged-model" + + setup_logger(f"{params.res_dir}/log-decode-{params.suffix}") + logging.info("Decoding started") + + device = torch.device("cpu") + if torch.cuda.is_available(): + device = torch.device("cuda", 0) + + logging.info(f"Device: {device}") + + sp = spm.SentencePieceProcessor() + 
sp.load(params.bpe_model) + + # and is defined in local/train_bpe_model.py + params.blank_id = sp.piece_to_id("") + params.unk_id = sp.piece_to_id("") + params.vocab_size = sp.get_piece_size() + + logging.info(params) + + logging.info("About to create model") + model = get_transducer_model(params) + + if not params.use_averaged_model: + if params.iter > 0: + filenames = find_checkpoints( + params.exp_dir, iteration=-params.iter + )[: params.avg] + if len(filenames) == 0: + raise ValueError( + f"No checkpoints found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + elif len(filenames) < params.avg: + raise ValueError( + f"Not enough checkpoints ({len(filenames)}) found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + logging.info(f"averaging {filenames}") + model.to(device) + model.load_state_dict(average_checkpoints(filenames, device=device)) + elif params.avg == 1: + load_checkpoint(f"{params.exp_dir}/epoch-{params.epoch}.pt", model) + else: + start = params.epoch - params.avg + 1 + filenames = [] + for i in range(start, params.epoch + 1): + if i >= 1: + filenames.append(f"{params.exp_dir}/epoch-{i}.pt") + logging.info(f"averaging {filenames}") + model.to(device) + model.load_state_dict(average_checkpoints(filenames, device=device)) + else: + if params.iter > 0: + filenames = find_checkpoints( + params.exp_dir, iteration=-params.iter + )[: params.avg + 1] + if len(filenames) == 0: + raise ValueError( + f"No checkpoints found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + elif len(filenames) < params.avg + 1: + raise ValueError( + f"Not enough checkpoints ({len(filenames)}) found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + filename_start = filenames[-1] + filename_end = filenames[0] + logging.info( + "Calculating the averaged model over iteration checkpoints" + f" from {filename_start} (excluded) to {filename_end}" + ) + model.to(device) + model.load_state_dict( + average_checkpoints_with_averaged_model( + filename_start=filename_start, + filename_end=filename_end, + device=device, + ) + ) + else: + assert params.avg > 0 + start = params.epoch - params.avg + assert start >= 1 + filename_start = f"{params.exp_dir}/epoch-{start}.pt" + filename_end = f"{params.exp_dir}/epoch-{params.epoch}.pt" + logging.info( + f"Calculating the averaged model over epoch range from " + f"{start} (excluded) to {params.epoch}" + ) + model.to(device) + model.load_state_dict( + average_checkpoints_with_averaged_model( + filename_start=filename_start, + filename_end=filename_end, + device=device, + ) + ) + + model.to(device) + model.eval() + + if params.decoding_method == "fast_beam_search": + decoding_graph = k2.trivial_graph(params.vocab_size - 1, device=device) + else: + decoding_graph = None + + num_param = sum([p.numel() for p in model.parameters()]) + logging.info(f"Number of model parameters: {num_param}") + + librispeech = LibriSpeechAsrDataModule(args) + + test_clean_cuts = librispeech.test_clean_cuts() + test_other_cuts = librispeech.test_other_cuts() + + test_clean_dl = librispeech.test_dataloaders(test_clean_cuts) + test_other_dl = librispeech.test_dataloaders(test_other_cuts) + + test_sets = ["test-clean", "test-other"] + test_dl = [test_clean_dl, test_other_dl] + + for test_set, test_dl in zip(test_sets, test_dl): + results_dict = decode_dataset( + dl=test_dl, + params=params, + model=model, + sp=sp, + decoding_graph=decoding_graph, + ) + + save_results( + params=params, + test_set_name=test_set, + results_dict=results_dict, + ) + + logging.info("Done!") + + 
+if __name__ == "__main__": + main() diff --git a/egs/librispeech/ASR/conv_emformer_transducer_stateless2/decoder.py b/egs/librispeech/ASR/conv_emformer_transducer_stateless2/decoder.py new file mode 120000 index 000000000..1db262df7 --- /dev/null +++ b/egs/librispeech/ASR/conv_emformer_transducer_stateless2/decoder.py @@ -0,0 +1 @@ +../conv_emformer_transducer_stateless/decoder.py \ No newline at end of file diff --git a/egs/librispeech/ASR/conv_emformer_transducer_stateless2/emformer.py b/egs/librispeech/ASR/conv_emformer_transducer_stateless2/emformer.py new file mode 100644 index 000000000..f16f5acc7 --- /dev/null +++ b/egs/librispeech/ASR/conv_emformer_transducer_stateless2/emformer.py @@ -0,0 +1,1841 @@ +# Copyright 2022 Xiaomi Corporation (Author: Zengwei Yao) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# It is modified based on +# 1) https://github.com/pytorch/audio/blob/main/torchaudio/models/emformer.py # noqa +# 2) https://github.com/pytorch/audio/blob/main/torchaudio/prototype/models/conv_emformer.py # noqa + +import math +from typing import List, Optional, Tuple + +import torch +import torch.nn as nn +from encoder_interface import EncoderInterface +from scaling import ( + ActivationBalancer, + BasicNorm, + DoubleSwish, + ScaledConv1d, + ScaledConv2d, + ScaledLinear, +) + +from icefall.utils import make_pad_mask + + +LOG_EPSILON = math.log(1e-10) + + +def unstack_states( + states: Tuple[List[List[torch.Tensor]], List[torch.Tensor]] +) -> List[Tuple[List[List[torch.Tensor]], List[torch.Tensor]]]: + """Unstack the emformer state corresponding to a batch of utterances + into a list of states, where the i-th entry is the state from the i-th + utterance in the batch. + + Args: + states: + A tuple of 2 elements. + ``states[0]`` is the attention caches of a batch of utterance. + ``states[1]`` is the convolution caches of a batch of utterance. + ``len(states[0])`` and ``len(states[1])`` both eqaul to number of layers. # noqa + + Returns: + A list of states. + ``states[i]`` is a tuple of 2 elements of i-th utterance. + ``states[i][0]`` is the attention caches of i-th utterance. + ``states[i][1]`` is the convolution caches of i-th utterance. + ``len(states[i][0])`` and ``len(states[i][1])`` both eqaul to number of layers. 
# noqa + """ + + attn_caches, conv_caches = states + batch_size = conv_caches[0].size(0) + num_layers = len(attn_caches) + + list_attn_caches = [None] * batch_size + for i in range(batch_size): + list_attn_caches[i] = [[] for _ in range(num_layers)] + for li, layer in enumerate(attn_caches): + for s in layer: + s_list = s.unbind(dim=1) + for bi, b in enumerate(list_attn_caches): + b[li].append(s_list[bi]) + + list_conv_caches = [None] * batch_size + for i in range(batch_size): + list_conv_caches[i] = [None] * num_layers + for li, layer in enumerate(conv_caches): + c_list = layer.unbind(dim=0) + for bi, b in enumerate(list_conv_caches): + b[li] = c_list[bi] + + ans = [None] * batch_size + for i in range(batch_size): + ans[i] = [list_attn_caches[i], list_conv_caches[i]] + + return ans + + +def stack_states( + state_list: List[Tuple[List[List[torch.Tensor]], List[torch.Tensor]]] +) -> Tuple[List[List[torch.Tensor]], List[torch.Tensor]]: + """Stack list of emformer states that correspond to separate utterances + into a single emformer state so that it can be used as an input for + emformer when those utterances are formed into a batch. + + Note: + It is the inverse of :func:`unstack_states`. + + Args: + state_list: + Each element in state_list corresponding to the internal state + of the emformer model for a single utterance. + ``states[i]`` is a tuple of 2 elements of i-th utterance. + ``states[i][0]`` is the attention caches of i-th utterance. + ``states[i][1]`` is the convolution caches of i-th utterance. + ``len(states[i][0])`` and ``len(states[i][1])`` both eqaul to number of layers. # noqa + + Returns: + A new state corresponding to a batch of utterances. + See the input argument of :func:`unstack_states` for the meaning + of the returned tensor. + """ + batch_size = len(state_list) + + attn_caches = [] + for layer in state_list[0][0]: + if batch_size > 1: + # Note: We will stack attn_caches[layer][s][] later to get attn_caches[layer][s] # noqa + attn_caches.append([[s] for s in layer]) + else: + attn_caches.append([s.unsqueeze(1) for s in layer]) + for b, states in enumerate(state_list[1:], 1): + for li, layer in enumerate(states[0]): + for si, s in enumerate(layer): + attn_caches[li][si].append(s) + if b == batch_size - 1: + attn_caches[li][si] = torch.stack( + attn_caches[li][si], dim=1 + ) + + conv_caches = [] + for layer in state_list[0][1]: + if batch_size > 1: + # Note: We will stack conv_caches[layer][] later to get conv_caches[layer] # noqa + conv_caches.append([layer]) + else: + conv_caches.append(layer.unsqueeze(0)) + for b, states in enumerate(state_list[1:], 1): + for li, layer in enumerate(states[1]): + conv_caches[li].append(layer) + if b == batch_size - 1: + conv_caches[li] = torch.stack(conv_caches[li], dim=0) + + return [attn_caches, conv_caches] + + +class ConvolutionModule(nn.Module): + """ConvolutionModule. + + Modified from https://github.com/pytorch/audio/blob/main/torchaudio/prototype/models/conv_emformer.py # noqa + + Args: + chunk_length (int): + Length of each chunk. + right_context_length (int): + Length of right context. + channels (int): + The number of input channels and output channels of conv layers. + kernel_size (int): + Kernerl size of conv layers. + bias (bool): + Whether to use bias in conv layers (default=True). 
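+
+    Note:
+        The depthwise convolution is made causal by prepending a cache of
+        ``kernel_size - 1`` frames on the left instead of using symmetric
+        padding. For example, with the recipe's ``--cnn-module-kernel 31``,
+        each layer caches 30 frames of left context, so the convolution over
+        the utterance never looks at future frames.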
+ """ + + def __init__( + self, + chunk_length: int, + right_context_length: int, + channels: int, + kernel_size: int, + bias: bool = True, + ) -> None: + """Construct an ConvolutionModule object.""" + super().__init__() + # kernerl_size should be an odd number for 'SAME' padding + assert (kernel_size - 1) % 2 == 0, kernel_size + + self.chunk_length = chunk_length + self.right_context_length = right_context_length + self.channels = channels + + self.pointwise_conv1 = ScaledConv1d( + channels, + 2 * channels, + kernel_size=1, + stride=1, + padding=0, + bias=bias, + ) + # After pointwise_conv1 we put x through a gated linear unit + # (nn.functional.glu). + # For most layers the normal rms value of channels of x seems to be in + # the range 1 to 4, but sometimes, for some reason, for layer 0 the rms + # ends up being very large, between 50 and 100 for different channels. + # This will cause very peaky and sparse derivatives for the sigmoid + # gating function, which will tend to make the loss function not learn + # effectively. (for most layers the average absolute values are in the + # range 0.5..9.0, and the average p(x>0), i.e. positive proportion, + # at the output of pointwise_conv1.output is around 0.35 to 0.45 for + # different layers, which likely breaks down as 0.5 for the "linear" + # half and 0.2 to 0.3 for the part that goes into the sigmoid. + # The idea is that if we constrain the rms values to a reasonable range + # via a constraint of max_abs=10.0, it will be in a better position to + # start learning something, i.e. to latch onto the correct range. + self.deriv_balancer1 = ActivationBalancer( + channel_dim=1, max_abs=10.0, min_positive=0.05, max_positive=1.0 + ) + + # make it causal by padding cached (kernel_size - 1) frames on the left + self.cache_size = kernel_size - 1 + self.depthwise_conv = ScaledConv1d( + channels, + channels, + kernel_size, + stride=1, + padding=0, + groups=channels, + bias=bias, + ) + + self.deriv_balancer2 = ActivationBalancer( + channel_dim=1, min_positive=0.05, max_positive=1.0 + ) + + self.activation = DoubleSwish() + + self.pointwise_conv2 = ScaledConv1d( + channels, + channels, + kernel_size=1, + stride=1, + padding=0, + bias=bias, + initial_scale=0.25, + ) + + def _split_right_context( + self, + pad_utterance: torch.Tensor, + right_context: torch.Tensor, + ) -> torch.Tensor: + """ + Args: + pad_utterance: + Its shape is (cache_size + U, B, D). + right_context: + Its shape is (R, B, D). + + Returns: + Right context segments padding with corresponding context. + Its shape is (num_segs * B, D, cache_size + right_context_length). 
+ """ + U_, B, D = pad_utterance.size() + R = right_context.size(0) + assert self.right_context_length != 0 + assert R % self.right_context_length == 0 + num_chunks = R // self.right_context_length + right_context = right_context.reshape( + num_chunks, self.right_context_length, B, D + ) + right_context = right_context.permute(0, 2, 1, 3).reshape( + num_chunks * B, self.right_context_length, D + ) + + intervals = torch.arange( + 0, self.chunk_length * (num_chunks - 1), self.chunk_length + ) + first = torch.arange( + self.chunk_length, self.chunk_length + self.cache_size + ) + indexes = intervals.unsqueeze(1) + first.unsqueeze(0) + indexes = torch.cat( + [indexes, torch.arange(U_ - self.cache_size, U_).unsqueeze(0)] + ) + padding = pad_utterance[indexes] # (num_chunks, cache_size, B, D) + padding = padding.permute(0, 2, 1, 3).reshape( + num_chunks * B, self.cache_size, D + ) + + pad_right_context = torch.cat([padding, right_context], dim=1) + # (num_chunks * B, cache_size + right_context_length, D) + return pad_right_context.permute(0, 2, 1) + + def _merge_right_context( + self, right_context: torch.Tensor, B: int + ) -> torch.Tensor: + """ + Args: + right_context: + Right context segments. + It shape is (num_segs * B, D, right_context_length). + B: + Batch size. + + Returns: + A tensor of shape (B, D, R), where + R = num_segs * right_context_length. + """ + right_context = right_context.reshape( + -1, B, self.channels, self.right_context_length + ) + right_context = right_context.permute(1, 2, 0, 3) + right_context = right_context.reshape(B, self.channels, -1) + return right_context + + def forward( + self, + utterance: torch.Tensor, + right_context: torch.Tensor, + ) -> Tuple[torch.Tensor, torch.Tensor]: + """Causal convolution module. + + Args: + utterance (torch.Tensor): + Utterance tensor of shape (U, B, D). + right_context (torch.Tensor): + Right context tensor of shape (R, B, D). + + Returns: + A tuple of 2 tensors: + - output utterance of shape (U, B, D). + - output right_context of shape (R, B, D). 
+ """ + U, B, D = utterance.size() + R, _, _ = right_context.size() + + # point-wise conv and GLU mechanism + x = torch.cat([right_context, utterance], dim=0) # (R + U, B, D) + x = x.permute(1, 2, 0) # (B, D, R + U) + x = self.pointwise_conv1(x) # (B, 2 * D, R + U) + x = self.deriv_balancer1(x) + x = nn.functional.glu(x, dim=1) # (B, D, R + U) + utterance = x[:, :, R:] # (B, D, U) + right_context = x[:, :, :R] # (B, D, R) + + # make causal convolution + cache = torch.zeros( + B, D, self.cache_size, device=x.device, dtype=x.dtype + ) + pad_utterance = torch.cat( + [cache, utterance], dim=2 + ) # (B, D, cache + U) + + # depth-wise conv on utterance + utterance = self.depthwise_conv(pad_utterance) # (B, D, U) + + if self.right_context_length > 0: + # depth-wise conv on right_context + pad_right_context = self._split_right_context( + pad_utterance.permute(2, 0, 1), right_context.permute(2, 0, 1) + ) # (num_segs * B, D, cache_size + right_context_length) + right_context = self.depthwise_conv( + pad_right_context + ) # (num_segs * B, D, right_context_length) + right_context = self._merge_right_context( + right_context, B + ) # (B, D, R) + + x = torch.cat([right_context, utterance], dim=2) # (B, D, R + U) + x = self.deriv_balancer2(x) + x = self.activation(x) + + # point-wise conv + x = self.pointwise_conv2(x) # (B, D, R + U) + + right_context = x[:, :, :R] # (B, D, R) + utterance = x[:, :, R:] # (B, D, U) + return ( + utterance.permute(2, 0, 1), + right_context.permute(2, 0, 1), + ) + + @torch.jit.export + def infer( + self, + utterance: torch.Tensor, + right_context: torch.Tensor, + cache: torch.Tensor, + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """Causal convolution module applied on both utterance and right_context. + + Args: + utterance (torch.Tensor): + Utterance tensor of shape (U, B, D). + right_context (torch.Tensor): + Right context tensor of shape (R, B, D). + cache (torch.Tensor, optional): + Cached tensor for left padding of shape (B, D, cache_size). + + Returns: + A tuple of 3 tensors: + - output utterance of shape (U, B, D). + - output right_context of shape (R, B, D). + - updated cache tensor of shape (B, D, cache_size). + """ + U, B, D = utterance.size() + R, _, _ = right_context.size() + + # point-wise conv + x = torch.cat([utterance, right_context], dim=0) # (U + R, B, D) + x = x.permute(1, 2, 0) # (B, D, U + R) + x = self.pointwise_conv1(x) # (B, 2 * D, U + R) + x = self.deriv_balancer1(x) + x = nn.functional.glu(x, dim=1) # (B, D, U + R) + + # make causal convolution + assert cache.shape == (B, D, self.cache_size), cache.shape + x = torch.cat([cache, x], dim=2) # (B, D, cache_size + U + R) + # update cache + new_cache = x[:, :, -R - self.cache_size : -R] + + # 1-D depth-wise conv + x = self.depthwise_conv(x) # (B, D, U + R) + + x = self.deriv_balancer2(x) + x = self.activation(x) + + # point-wise conv + x = self.pointwise_conv2(x) # (B, D, U + R) + + utterance = x[:, :, :U] # (B, D, U) + right_context = x[:, :, U:] # (B, D, R) + return ( + utterance.permute(2, 0, 1), + right_context.permute(2, 0, 1), + new_cache, + ) + + +class EmformerAttention(nn.Module): + r"""Emformer layer attention module. + + Args: + embed_dim (int): + Embedding dimension. + nhead (int): + Number of attention heads in each Emformer layer. + dropout (float, optional): + Dropout probability. (Default: 0.0) + tanh_on_mem (bool, optional): + If ``True``, applies tanh to memory elements. 
(Default: ``False``) + negative_inf (float, optional): + Value to use for negative infinity in attention weights. (Default: -1e8) + """ + + def __init__( + self, + embed_dim: int, + nhead: int, + dropout: float = 0.0, + tanh_on_mem: bool = False, + negative_inf: float = -1e8, + ): + super().__init__() + + if embed_dim % nhead != 0: + raise ValueError( + f"embed_dim ({embed_dim}) is not a multiple of" + f"nhead ({nhead})." + ) + + self.embed_dim = embed_dim + self.nhead = nhead + self.tanh_on_mem = tanh_on_mem + self.negative_inf = negative_inf + self.head_dim = embed_dim // nhead + self.dropout = dropout + + self.emb_to_key_value = ScaledLinear( + embed_dim, 2 * embed_dim, bias=True + ) + self.emb_to_query = ScaledLinear(embed_dim, embed_dim, bias=True) + self.out_proj = ScaledLinear( + embed_dim, embed_dim, bias=True, initial_scale=0.25 + ) + + def _gen_attention_probs( + self, + attention_weights: torch.Tensor, + attention_mask: torch.Tensor, + padding_mask: Optional[torch.Tensor] = None, + ) -> torch.Tensor: + """Given the entire attention weights, mask out unecessary connections + and optionally with padding positions, to obtain underlying chunk-wise + attention probabilities. + + B: batch size; + Q: length of query; + KV: length of key and value. + + Args: + attention_weights (torch.Tensor): + Attention weights computed on the entire concatenated tensor + with shape (B * nhead, Q, KV). + attention_mask (torch.Tensor): + Mask tensor where chunk-wise connections are filled with `False`, + and other unnecessary connections are filled with `True`, + with shape (Q, KV). + padding_mask (torch.Tensor, optional): + Mask tensor where the padding positions are fill with `True`, + and other positions are filled with `False`, with shapa `(B, KV)`. + + Returns: + A tensor of shape (B * nhead, Q, KV). + """ + attention_weights_float = attention_weights.float() + attention_weights_float = attention_weights_float.masked_fill( + attention_mask.unsqueeze(0), self.negative_inf + ) + if padding_mask is not None: + Q = attention_weights.size(1) + B = attention_weights.size(0) // self.nhead + attention_weights_float = attention_weights_float.view( + B, self.nhead, Q, -1 + ) + attention_weights_float = attention_weights_float.masked_fill( + padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool), + self.negative_inf, + ) + attention_weights_float = attention_weights_float.view( + B * self.nhead, Q, -1 + ) + + attention_probs = nn.functional.softmax( + attention_weights_float, dim=-1 + ).type_as(attention_weights) + + attention_probs = nn.functional.dropout( + attention_probs, p=self.dropout, training=self.training + ) + return attention_probs + + def _forward_impl( + self, + utterance: torch.Tensor, + right_context: torch.Tensor, + memory: torch.Tensor, + attention_mask: torch.Tensor, + padding_mask: Optional[torch.Tensor] = None, + left_context_key: Optional[torch.Tensor] = None, + left_context_val: Optional[torch.Tensor] = None, + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """Underlying chunk-wise attention implementation.""" + U, B, _ = utterance.size() + R = right_context.size(0) + M = memory.size(0) + scaling = float(self.head_dim) ** -0.5 + + # compute query with [right_context, utterance]. + query = self.emb_to_query(torch.cat([right_context, utterance])) + # compute key and value with [memory, right_context, utterance]. 
+        key, value = self.emb_to_key_value(
+            torch.cat([memory, right_context, utterance])
+        ).chunk(chunks=2, dim=2)
+
+        if left_context_key is not None and left_context_val is not None:
+            # now compute key and value with
+            #   [memory, right context, left context, utterance]
+            # this is used in inference mode
+            key = torch.cat([key[: M + R], left_context_key, key[M + R :]])
+            value = torch.cat(
+                [value[: M + R], left_context_val, value[M + R :]]
+            )
+        Q = query.size(0)
+        # KV = key.size(0)
+
+        reshaped_query, reshaped_key, reshaped_value = [
+            tensor.contiguous()
+            .view(-1, B * self.nhead, self.head_dim)
+            .transpose(0, 1)
+            for tensor in [query, key, value]
+        ]  # (B * nhead, Q or KV, head_dim)
+        attention_weights = torch.bmm(
+            reshaped_query * scaling, reshaped_key.transpose(1, 2)
+        )  # (B * nhead, Q, KV)
+
+        # compute attention probabilities
+        attention_probs = self._gen_attention_probs(
+            attention_weights, attention_mask, padding_mask
+        )
+
+        # compute attention outputs
+        attention = torch.bmm(attention_probs, reshaped_value)
+        assert attention.shape == (B * self.nhead, Q, self.head_dim)
+        attention = (
+            attention.transpose(0, 1).contiguous().view(Q, B, self.embed_dim)
+        )
+
+        # apply output projection
+        output_right_context_utterance = self.out_proj(attention)
+
+        return output_right_context_utterance, key, value
+
+    def forward(
+        self,
+        utterance: torch.Tensor,
+        right_context: torch.Tensor,
+        memory: torch.Tensor,
+        attention_mask: torch.Tensor,
+        padding_mask: Optional[torch.Tensor] = None,
+    ) -> torch.Tensor:
+        # TODO: Modify docs.
+        """Forward pass for training and validation mode.
+
+        B: batch size;
+        D: embedding dimension;
+        R: length of the hard-copied right contexts;
+        U: length of full utterance;
+        M: length of memory vectors.
+
+        It computes a `big` attention matrix on the full utterance and
+        then utilizes a pre-computed mask to simulate chunk-wise attention.
+
+        It concatenates two blocks: hard-copied right contexts
+        and full utterance, as a `big` block,
+        to compute the query tensor:
+          query = [right_context, utterance],
+        with length Q = R + U.
+        It concatenates three blocks: memory vectors,
+        hard-copied right contexts, and full utterance as another `big` block,
+        to compute the key and value tensors:
+          key & value = [memory, right_context, utterance],
+        with length KV = M + R + U.
+        Attention scores are computed with the above `big` query and key.
+
+        Then the underlying chunk-wise attention is obtained by applying
+        the attention mask. Suppose
+          c_i: chunk at index i;
+          r_i: right context that c_i can use;
+          l_i: left context that c_i can use;
+          m_i: past memory vectors from previous layer that c_i can use;
+        The target chunk-wise attention is:
+          c_i, r_i (in query) -> l_i, c_i, r_i, m_i (in key)
+
+        Args:
+          utterance (torch.Tensor):
+            Full utterance frames, with shape (U, B, D).
+          right_context (torch.Tensor):
+            Hard-copied right context frames, with shape (R, B, D),
+            where R = num_chunks * right_context_length.
+          memory (torch.Tensor):
+            Memory elements, with shape (M, B, D), where M = num_chunks - 1.
+            It is an empty tensor when not using memory.
+          attention_mask (torch.Tensor):
+            Pre-computed attention mask to simulate underlying chunk-wise
+            attention, with shape (Q, KV).
+          padding_mask (torch.Tensor):
+            Padding mask of key tensor, with shape (B, KV).
+
+        Returns:
+          Output of right context and utterance, with shape (R + U, B, D).
+        """
+        output_right_context_utterance, _, _ = self._forward_impl(
+            utterance,
+            right_context,
+            memory,
+            attention_mask,
+            padding_mask=padding_mask,
+        )
+        return output_right_context_utterance
+
+    @torch.jit.export
+    def infer(
+        self,
+        utterance: torch.Tensor,
+        right_context: torch.Tensor,
+        memory: torch.Tensor,
+        left_context_key: torch.Tensor,
+        left_context_val: torch.Tensor,
+        padding_mask: Optional[torch.Tensor] = None,
+    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+        """Forward pass for inference.
+
+        B: batch size;
+        D: embedding dimension;
+        R: length of right context;
+        U: length of utterance, i.e., current chunk;
+        L: length of cached left context;
+        M: length of cached memory vectors.
+
+        It concatenates the right context and utterance (i.e., current chunk)
+        to compute the query tensor:
+          query = [right_context, utterance],
+        with length Q = R + U.
+        It concatenates the memory vectors, right context, left context, and
+        current chunk, to compute the key and value tensors:
+          key & value = [memory, right_context, left_context, utterance],
+        with length KV = M + R + L + U.
+
+        The chunk-wise attention is:
+          chunk, right context (in query) ->
+            left context, chunk, right context, memory vectors (in key).
+
+        Args:
+          utterance (torch.Tensor):
+            Current chunk frames, with shape (U, B, D), where U = chunk_length.
+          right_context (torch.Tensor):
+            Right context frames, with shape (R, B, D),
+            where R = right_context_length.
+          memory (torch.Tensor):
+            Memory vectors, with shape (M, B, D), or empty tensor.
+          left_context_key (torch.Tensor):
+            Cached attention key of left context from preceding computation,
+            with shape (L, B, D).
+          left_context_val (torch.Tensor):
+            Cached attention value of left context from preceding computation,
+            with shape (L, B, D).
+          padding_mask (torch.Tensor):
+            Padding mask of key tensor, with shape (B, KV).
+
+        Returns:
+          A tuple containing 3 tensors:
+            - output of right context and utterance, with shape (R + U, B, D).
+            - attention key of left context and utterance, which would be cached
+              for next computation, with shape (L + U, B, D).
+            - attention value of left context and utterance, which would be
+              cached for next computation, with shape (L + U, B, D).
+        """
+        U = utterance.size(0)
+        R = right_context.size(0)
+        L = left_context_key.size(0)
+        M = memory.size(0)
+
+        # query = [right context, utterance]
+        Q = R + U
+        # key, value = [memory, right context, left context, utterance]
+        KV = M + R + L + U
+        attention_mask = torch.zeros(Q, KV).to(
+            dtype=torch.bool, device=utterance.device
+        )
+
+        output_right_context_utterance, key, value = self._forward_impl(
+            utterance,
+            right_context,
+            memory,
+            attention_mask,
+            padding_mask=padding_mask,
+            left_context_key=left_context_key,
+            left_context_val=left_context_val,
+        )
+        return (
+            output_right_context_utterance,
+            key[M + R :],
+            value[M + R :],
+        )
+
+
+class EmformerEncoderLayer(nn.Module):
+    """Emformer layer that constitutes Emformer.
+
+    Args:
+      d_model (int):
+        Input dimension.
+      nhead (int):
+        Number of attention heads.
+      dim_feedforward (int):
+        Hidden layer dimension of feedforward network.
+      chunk_length (int):
+        Length of each input segment.
+      dropout (float, optional):
+        Dropout probability. (Default: 0.1)
+      layer_dropout (float, optional):
+        Layer dropout probability. (Default: 0.075)
+      cnn_module_kernel (int):
+        Kernel size of convolution module.
+      left_context_length (int, optional):
+        Length of left context.
(Default: 0) + right_context_length (int, optional): + Length of right context. (Default: 0) + memory_size (int, optional): + Number of memory elements to use. (Default: 0) + tanh_on_mem (bool, optional): + If ``True``, applies tanh to memory elements. (Default: ``False``) + negative_inf (float, optional): + Value to use for negative infinity in attention weights. (Default: -1e8) + """ + + def __init__( + self, + d_model: int, + nhead: int, + dim_feedforward: int, + chunk_length: int, + dropout: float = 0.1, + layer_dropout: float = 0.075, + cnn_module_kernel: int = 31, + left_context_length: int = 0, + right_context_length: int = 0, + memory_size: int = 0, + tanh_on_mem: bool = False, + negative_inf: float = -1e8, + ): + super().__init__() + + self.attention = EmformerAttention( + embed_dim=d_model, + nhead=nhead, + dropout=dropout, + tanh_on_mem=tanh_on_mem, + negative_inf=negative_inf, + ) + self.summary_op = nn.AvgPool1d( + kernel_size=chunk_length, stride=chunk_length, ceil_mode=True + ) + + self.feed_forward_macaron = nn.Sequential( + ScaledLinear(d_model, dim_feedforward), + ActivationBalancer(channel_dim=-1), + DoubleSwish(), + nn.Dropout(dropout), + ScaledLinear(dim_feedforward, d_model, initial_scale=0.25), + ) + + self.feed_forward = nn.Sequential( + ScaledLinear(d_model, dim_feedforward), + ActivationBalancer(channel_dim=-1), + DoubleSwish(), + nn.Dropout(dropout), + ScaledLinear(dim_feedforward, d_model, initial_scale=0.25), + ) + + self.conv_module = ConvolutionModule( + chunk_length, + right_context_length, + d_model, + cnn_module_kernel, + ) + + self.norm_final = BasicNorm(d_model) + + # try to ensure the output is close to zero-mean + # (or at least, zero-median). + self.balancer = ActivationBalancer( + channel_dim=-1, min_positive=0.45, max_positive=0.55, max_abs=6.0 + ) + + self.dropout = nn.Dropout(dropout) + + self.layer_dropout = layer_dropout + self.left_context_length = left_context_length + self.chunk_length = chunk_length + self.memory_size = memory_size + self.d_model = d_model + self.use_memory = memory_size > 0 + + def _update_attn_cache( + self, + next_key: torch.Tensor, + next_val: torch.Tensor, + memory: torch.Tensor, + attn_cache: List[torch.Tensor], + ) -> List[torch.Tensor]: + """Update cached attention state: + 1) output memory of current chunk in the lower layer; + 2) attention key and value in current chunk's computation, which would + be resued in next chunk's computation. 
+        """
+        new_memory = torch.cat([attn_cache[0], memory])
+        new_key = torch.cat([attn_cache[1], next_key])
+        new_val = torch.cat([attn_cache[2], next_val])
+        attn_cache[0] = new_memory[new_memory.size(0) - self.memory_size :]
+        attn_cache[1] = new_key[new_key.size(0) - self.left_context_length :]
+        attn_cache[2] = new_val[new_val.size(0) - self.left_context_length :]
+        return attn_cache
+
+    def _apply_conv_module_forward(
+        self,
+        right_context_utterance: torch.Tensor,
+        R: int,
+    ) -> torch.Tensor:
+        """Apply convolution module in training and validation mode."""
+        utterance = right_context_utterance[R:]
+        right_context = right_context_utterance[:R]
+        utterance, right_context = self.conv_module(utterance, right_context)
+        right_context_utterance = torch.cat([right_context, utterance])
+        return right_context_utterance
+
+    def _apply_conv_module_infer(
+        self,
+        right_context_utterance: torch.Tensor,
+        R: int,
+        conv_cache: torch.Tensor,
+    ) -> Tuple[torch.Tensor, torch.Tensor]:
+        """Apply convolution module on utterance in inference mode."""
+        utterance = right_context_utterance[R:]
+        right_context = right_context_utterance[:R]
+        utterance, right_context, conv_cache = self.conv_module.infer(
+            utterance, right_context, conv_cache
+        )
+        right_context_utterance = torch.cat([right_context, utterance])
+        return right_context_utterance, conv_cache
+
+    def _apply_attention_module_forward(
+        self,
+        right_context_utterance: torch.Tensor,
+        R: int,
+        attention_mask: torch.Tensor,
+        padding_mask: Optional[torch.Tensor] = None,
+    ) -> torch.Tensor:
+        """Apply attention module in training and validation mode."""
+        utterance = right_context_utterance[R:]
+        right_context = right_context_utterance[:R]
+
+        if self.use_memory:
+            memory = self.summary_op(utterance.permute(1, 2, 0)).permute(
+                2, 0, 1
+            )[:-1, :, :]
+        else:
+            memory = torch.empty(0).to(
+                dtype=utterance.dtype, device=utterance.device
+            )
+        output_right_context_utterance = self.attention(
+            utterance=utterance,
+            right_context=right_context,
+            memory=memory,
+            attention_mask=attention_mask,
+            padding_mask=padding_mask,
+        )
+
+        return output_right_context_utterance
+
+    def _apply_attention_module_infer(
+        self,
+        right_context_utterance: torch.Tensor,
+        R: int,
+        attn_cache: List[torch.Tensor],
+        padding_mask: Optional[torch.Tensor] = None,
+    ) -> Tuple[torch.Tensor, List[torch.Tensor]]:
+        """Apply attention module in inference mode.
+        1) Unpack cached states including:
+           - memory from previous chunks;
+           - attention key and value of left context from preceding
+             chunk's computation;
+        2) Apply attention computation;
+        3) Update cached attention states including:
+           - memory of current chunk;
+           - attention key and value in current chunk's computation, which would
+             be reused in next chunk's computation.
+ """ + utterance = right_context_utterance[R:] + right_context = right_context_utterance[:R] + + pre_memory = attn_cache[0] + left_context_key = attn_cache[1] + left_context_val = attn_cache[2] + + if self.use_memory: + memory = self.summary_op(utterance.permute(1, 2, 0)).permute( + 2, 0, 1 + )[:1, :, :] + else: + memory = torch.empty(0).to( + dtype=utterance.dtype, device=utterance.device + ) + ( + output_right_context_utterance, + next_key, + next_val, + ) = self.attention.infer( + utterance=utterance, + right_context=right_context, + memory=pre_memory, + left_context_key=left_context_key, + left_context_val=left_context_val, + padding_mask=padding_mask, + ) + attn_cache = self._update_attn_cache( + next_key, next_val, memory, attn_cache + ) + return output_right_context_utterance, attn_cache + + def forward( + self, + utterance: torch.Tensor, + right_context: torch.Tensor, + attention_mask: torch.Tensor, + padding_mask: Optional[torch.Tensor] = None, + warmup: float = 1.0, + ) -> Tuple[torch.Tensor, torch.Tensor]: + r"""Forward pass for training and validation mode. + + B: batch size; + D: embedding dimension; + R: length of hard-copied right contexts; + U: length of full utterance; + M: length of memory vectors. + + Args: + utterance (torch.Tensor): + Utterance frames, with shape (U, B, D). + right_context (torch.Tensor): + Right context frames, with shape (R, B, D). + attention_mask (torch.Tensor): + Attention mask for underlying attention module, + with shape (Q, KV), where Q = R + U, KV = M + R + U. + padding_mask (torch.Tensor): + Padding mask of ker tensor, with shape (B, KV). + + Returns: + A tuple containing 2 tensors: + - output utterance, with shape (U, B, D). + - output right context, with shape (R, B, D). + """ + R = right_context.size(0) + src = torch.cat([right_context, utterance]) + src_orig = src + + warmup_scale = min(0.1 + warmup, 1.0) + # alpha = 1.0 means fully use this encoder layer, 0.0 would mean + # completely bypass it. + if self.training: + alpha = ( + warmup_scale + if torch.rand(()).item() <= (1.0 - self.layer_dropout) + else 0.1 + ) + else: + alpha = 1.0 + + # macaron style feed forward module + src = src + self.dropout(self.feed_forward_macaron(src)) + + # emformer attention module + src_att = self._apply_attention_module_forward( + src, R, attention_mask, padding_mask=padding_mask + ) + src = src + self.dropout(src_att) + + # convolution module + src_conv = self._apply_conv_module_forward(src, R) + src = src + self.dropout(src_conv) + + # feed forward module + src = src + self.dropout(self.feed_forward(src)) + + src = self.norm_final(self.balancer(src)) + + if alpha != 1.0: + src = alpha * src + (1 - alpha) * src_orig + + output_utterance = src[R:] + output_right_context = src[:R] + return output_utterance, output_right_context + + @torch.jit.export + def infer( + self, + utterance: torch.Tensor, + right_context: torch.Tensor, + attn_cache: List[torch.Tensor], + conv_cache: torch.Tensor, + padding_mask: Optional[torch.Tensor] = None, + ) -> Tuple[torch.Tensor, torch.Tensor, List[torch.Tensor], torch.Tensor]: + """Forward pass for inference. + + B: batch size; + D: embedding dimension; + R: length of right_context; + U: length of utterance; + M: length of memory. + + Args: + utterance (torch.Tensor): + Utterance frames, with shape (U, B, D). + right_context (torch.Tensor): + Right context frames, with shape (R, B, D). 
+ attn_cache (List[torch.Tensor]): + Cached attention tensors generated in preceding computation, + including memory, key and value of left context. + conv_cache (torch.Tensor, optional): + Cache tensor of left context for causal convolution. + padding_mask (torch.Tensor): + Padding mask of ker tensor. + + Returns: + (Tensor, Tensor, List[torch.Tensor], Tensor): + - output utterance, with shape (U, B, D); + - output right_context, with shape (R, B, D); + - output attention cache; + - output convolution cache. + """ + R = right_context.size(0) + src = torch.cat([right_context, utterance]) + + # macaron style feed forward module + src = src + self.dropout(self.feed_forward_macaron(src)) + + # emformer attention module + src_att, attn_cache = self._apply_attention_module_infer( + src, R, attn_cache, padding_mask=padding_mask + ) + src = src + self.dropout(src_att) + + # convolution module + src_conv, conv_cache = self._apply_conv_module_infer(src, R, conv_cache) + src = src + self.dropout(src_conv) + + # feed forward module + src = src + self.dropout(self.feed_forward(src)) + + src = self.norm_final(self.balancer(src)) + + output_utterance = src[R:] + output_right_context = src[:R] + return ( + output_utterance, + output_right_context, + attn_cache, + conv_cache, + ) + + +def _gen_attention_mask_block( + col_widths: List[int], + col_mask: List[bool], + num_rows: int, + device: torch.device, +) -> torch.Tensor: + assert len(col_widths) == len( + col_mask + ), "Length of col_widths must match that of col_mask" + + mask_block = [ + torch.ones(num_rows, col_width, device=device) + if is_ones_col + else torch.zeros(num_rows, col_width, device=device) + for col_width, is_ones_col in zip(col_widths, col_mask) + ] + return torch.cat(mask_block, dim=1) + + +class EmformerEncoder(nn.Module): + """Implements the Emformer architecture introduced in + *Emformer: Efficient Memory Transformer Based Acoustic Model for Low Latency + Streaming Speech Recognition* + [:footcite:`shi2021emformer`]. + + In this model, the memory bank computation is simplifed, using the averaged + value of each chunk as its memory vector. + + Args: + d_model (int): + Input dimension. + nhead (int): + Number of attention heads in each emformer layer. + dim_feedforward (int): + Hidden layer dimension of each emformer layer's feedforward network. + num_encoder_layers (int): + Number of emformer layers to instantiate. + chunk_length (int): + Length of each input segment. + dropout (float, optional): + Dropout probability. (default: 0.0) + layer_dropout (float, optional): + Layer dropout probability. (default: 0.0) + cnn_module_kernel (int): + Kernel size of convolution module. + left_context_length (int, optional): + Length of left context. (default: 0) + right_context_length (int, optional): + Length of right context. (default: 0) + memory_size (int, optional): + Number of memory elements to use. (default: 0) + tanh_on_mem (bool, optional): + If ``true``, applies tanh to memory elements. (default: ``false``) + negative_inf (float, optional): + Value to use for negative infinity in attention weights. 
(default: -1e8) + """ + + def __init__( + self, + chunk_length: int, + d_model: int = 256, + nhead: int = 4, + dim_feedforward: int = 2048, + num_encoder_layers: int = 12, + dropout: float = 0.1, + layer_dropout: float = 0.075, + cnn_module_kernel: int = 31, + left_context_length: int = 0, + right_context_length: int = 0, + memory_size: int = 0, + tanh_on_mem: bool = False, + negative_inf: float = -1e8, + ): + super().__init__() + + assert ( + chunk_length - 1 + ) & chunk_length == 0, "chunk_length should be a power of 2." + self.shift = int(math.log(chunk_length, 2)) + + self.use_memory = memory_size > 0 + + self.emformer_layers = nn.ModuleList( + [ + EmformerEncoderLayer( + d_model=d_model, + nhead=nhead, + dim_feedforward=dim_feedforward, + chunk_length=chunk_length, + dropout=dropout, + layer_dropout=layer_dropout, + cnn_module_kernel=cnn_module_kernel, + left_context_length=left_context_length, + right_context_length=right_context_length, + memory_size=memory_size, + tanh_on_mem=tanh_on_mem, + negative_inf=negative_inf, + ) + for layer_idx in range(num_encoder_layers) + ] + ) + + self.num_encoder_layers = num_encoder_layers + self.d_model = d_model + self.left_context_length = left_context_length + self.right_context_length = right_context_length + self.chunk_length = chunk_length + self.memory_size = memory_size + self.cnn_module_kernel = cnn_module_kernel + + def _gen_right_context(self, x: torch.Tensor) -> torch.Tensor: + """Hard copy each chunk's right context and concat them.""" + T = x.shape[0] + num_chunks = math.ceil( + (T - self.right_context_length) / self.chunk_length + ) + # first (num_chunks - 1) right context block + intervals = torch.arange( + 0, self.chunk_length * (num_chunks - 1), self.chunk_length + ) + first = torch.arange( + self.chunk_length, self.chunk_length + self.right_context_length + ) + indexes = intervals.unsqueeze(1) + first.unsqueeze(0) + # cat last right context block + indexes = torch.cat( + [ + indexes, + torch.arange(T - self.right_context_length, T).unsqueeze(0), + ] + ) + right_context_blocks = x[indexes.reshape(-1)] + return right_context_blocks + + def _gen_attention_mask_col_widths( + self, chunk_idx: int, U: int + ) -> List[int]: + """Calculate column widths (key, value) in attention mask for the + chunk_idx chunk.""" + num_chunks = math.ceil(U / self.chunk_length) + rc = self.right_context_length + lc = self.left_context_length + rc_start = chunk_idx * rc + rc_end = rc_start + rc + chunk_start = max(chunk_idx * self.chunk_length - lc, 0) + chunk_end = min((chunk_idx + 1) * self.chunk_length, U) + R = rc * num_chunks + + if self.use_memory: + m_start = max(chunk_idx - self.memory_size, 0) + M = num_chunks - 1 + col_widths = [ + m_start, # before memory + chunk_idx - m_start, # memory + M - chunk_idx, # after memory + rc_start, # before right context + rc, # right context + R - rc_end, # after right context + chunk_start, # before chunk + chunk_end - chunk_start, # chunk + U - chunk_end, # after chunk + ] + else: + col_widths = [ + rc_start, # before right context + rc, # right context + R - rc_end, # after right context + chunk_start, # before chunk + chunk_end - chunk_start, # chunk + U - chunk_end, # after chunk + ] + + return col_widths + + def _gen_attention_mask(self, utterance: torch.Tensor) -> torch.Tensor: + """Generate attention mask to simulate underlying chunk-wise attention + computation, where chunk-wise connections are filled with `False`, + and other unnecessary connections beyond chunk are filled with `True`. 
+ + R: length of hard-copied right contexts; + U: length of full utterance; + M: length of memory vectors; + Q: length of attention query; + KV: length of attention key and value. + + The shape of attention mask is (Q, KV). + If self.use_memory is `True`: + query = [right_context, utterance]; + key, value = [memory, right_context, utterance]; + Q = R + U, KV = M + R + U. + Otherwise: + query = [right_context, utterance] + key, value = [right_context, utterance] + Q = R + U, KV = R + U. + + Suppose: + c_i: chunk at index i; + r_i: right context that c_i can use; + l_i: left context that c_i can use; + m_i: past memory vectors from previous layer that c_i can use; + The target chunk-wise attention is: + c_i, r_i (in query) -> l_i, c_i, r_i, m_i (in key). + """ + U = utterance.size(0) + num_chunks = math.ceil(U / self.chunk_length) + + right_context_mask = [] + utterance_mask = [] + + if self.use_memory: + num_cols = 9 + # right context and utterance both attend to memory, right context, + # utterance + right_context_utterance_cols_mask = [ + idx in [1, 4, 7] for idx in range(num_cols) + ] + else: + num_cols = 6 + # right context and utterance both attend to right context and + # utterance + right_context_utterance_cols_mask = [ + idx in [1, 4] for idx in range(num_cols) + ] + masks_to_concat = [right_context_mask, utterance_mask] + + for chunk_idx in range(num_chunks): + col_widths = self._gen_attention_mask_col_widths(chunk_idx, U) + + right_context_mask_block = _gen_attention_mask_block( + col_widths, + right_context_utterance_cols_mask, + self.right_context_length, + utterance.device, + ) + right_context_mask.append(right_context_mask_block) + + utterance_mask_block = _gen_attention_mask_block( + col_widths, + right_context_utterance_cols_mask, + min( + self.chunk_length, + U - chunk_idx * self.chunk_length, + ), + utterance.device, + ) + utterance_mask.append(utterance_mask_block) + + attention_mask = ( + 1 - torch.cat([torch.cat(mask) for mask in masks_to_concat]) + ).to(torch.bool) + return attention_mask + + def forward( + self, x: torch.Tensor, lengths: torch.Tensor, warmup: float = 1.0 + ) -> Tuple[torch.Tensor, torch.Tensor]: + """Forward pass for training and validation mode. + + B: batch size; + D: input dimension; + U: length of utterance. + + Args: + x (torch.Tensor): + Utterance frames right-padded with right context frames, + with shape (U + right_context_length, B, D). + lengths (torch.Tensor): + With shape (B,) and i-th element representing number of valid + utterance frames for i-th batch element in x, which contains the + right_context at the end. + + Returns: + A tuple of 2 tensors: + - output utterance frames, with shape (U, B, D). + - output_lengths, with shape (B,), without containing the + right_context at the end. 
+ """ + U = x.size(0) - self.right_context_length + + right_context = self._gen_right_context(x) + utterance = x[:U] + output_lengths = torch.clamp(lengths - self.right_context_length, min=0) + attention_mask = self._gen_attention_mask(utterance) + + M = ( + right_context.size(0) // self.right_context_length - 1 + if self.use_memory + else 0 + ) + padding_mask = make_pad_mask(M + right_context.size(0) + output_lengths) + + output = utterance + for layer in self.emformer_layers: + output, right_context = layer( + output, + right_context, + attention_mask, + padding_mask=padding_mask, + warmup=warmup, + ) + + return output, output_lengths + + @torch.jit.export + def infer( + self, + x: torch.Tensor, + lengths: torch.Tensor, + num_processed_frames: torch.Tensor, + states: Tuple[List[List[torch.Tensor]], List[torch.Tensor]], + ) -> Tuple[ + torch.Tensor, + torch.Tensor, + Tuple[List[List[torch.Tensor]], List[torch.Tensor]], + ]: + """Forward pass for streaming inference. + + B: batch size; + D: input dimension; + U: length of utterance. + + Args: + x (torch.Tensor): + Utterance frames right-padded with right context frames, + with shape (U + right_context_length, B, D). + lengths (torch.Tensor): + With shape (B,) and i-th element representing number of valid + utterance frames for i-th batch element in x, which contains the + right_context at the end. + states (List[torch.Tensor, List[List[torch.Tensor]], List[torch.Tensor]]: # noqa + Cached states containing: + - attn_caches: attention states from preceding chunk's computation, + where each element corresponds to each emformer layer + - conv_caches: left context for causal convolution, where each + element corresponds to each layer. + + Returns: + (Tensor, Tensor, List[List[torch.Tensor]], List[torch.Tensor]): + - output utterance frames, with shape (U, B, D). + - output lengths, with shape (B,), without containing the + right_context at the end. + - updated states from current chunk's computation. 
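+
+        Note:
+          num_processed_frames counts the subsampled frames already consumed
+          by the encoder for each stream; it is only used here to mask out the
+          still-unfilled (zero-initialized) entries of the cached left context
+          and memory.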
+ """ + assert num_processed_frames.shape == (x.size(1),) + + attn_caches = states[0] + assert len(attn_caches) == self.num_encoder_layers, len(attn_caches) + for i in range(len(attn_caches)): + assert attn_caches[i][0].shape == ( + self.memory_size, + x.size(1), + self.d_model, + ), attn_caches[i][0].shape + assert attn_caches[i][1].shape == ( + self.left_context_length, + x.size(1), + self.d_model, + ), attn_caches[i][1].shape + assert attn_caches[i][2].shape == ( + self.left_context_length, + x.size(1), + self.d_model, + ), attn_caches[i][2].shape + + conv_caches = states[1] + assert len(conv_caches) == self.num_encoder_layers, len(conv_caches) + for i in range(len(conv_caches)): + assert conv_caches[i].shape == ( + x.size(1), + self.d_model, + self.cnn_module_kernel - 1, + ), conv_caches[i].shape + + right_context = x[-self.right_context_length :] + utterance = x[: -self.right_context_length] + output_lengths = torch.clamp(lengths - self.right_context_length, min=0) + + # calcualte padding mask to mask out initial zero caches + chunk_mask = make_pad_mask(output_lengths).to(x.device) + memory_mask = ( + ( + (num_processed_frames >> self.shift).view(x.size(1), 1) + <= torch.arange(self.memory_size, device=x.device).expand( + x.size(1), self.memory_size + ) + ).flip(1) + if self.use_memory + else torch.empty(0).to(dtype=torch.bool, device=x.device) + ) + left_context_mask = ( + num_processed_frames.view(x.size(1), 1) + <= torch.arange(self.left_context_length, device=x.device).expand( + x.size(1), self.left_context_length + ) + ).flip(1) + right_context_mask = torch.zeros( + x.size(1), + self.right_context_length, + dtype=torch.bool, + device=x.device, + ) + padding_mask = torch.cat( + [memory_mask, right_context_mask, left_context_mask, chunk_mask], + dim=1, + ) + + output = utterance + output_attn_caches: List[List[torch.Tensor]] = [] + output_conv_caches: List[torch.Tensor] = [] + for layer_idx, layer in enumerate(self.emformer_layers): + ( + output, + right_context, + output_attn_cache, + output_conv_cache, + ) = layer.infer( + output, + right_context, + padding_mask=padding_mask, + attn_cache=attn_caches[layer_idx], + conv_cache=conv_caches[layer_idx], + ) + output_attn_caches.append(output_attn_cache) + output_conv_caches.append(output_conv_cache) + + output_states: Tuple[List[List[torch.Tensor]], List[torch.Tensor]] = ( + output_attn_caches, + output_conv_caches, + ) + return output, output_lengths, output_states + + @torch.jit.export + def init_states(self, device: torch.device = torch.device("cpu")): + """Create initial states.""" + attn_caches = [ + [ + torch.zeros(self.memory_size, self.d_model, device=device), + torch.zeros( + self.left_context_length, self.d_model, device=device + ), + torch.zeros( + self.left_context_length, self.d_model, device=device + ), + ] + for _ in range(self.num_encoder_layers) + ] + conv_caches = [ + torch.zeros(self.d_model, self.cnn_module_kernel - 1, device=device) + for _ in range(self.num_encoder_layers) + ] + states: Tuple[List[List[torch.Tensor]], List[torch.Tensor]] = ( + attn_caches, + conv_caches, + ) + return states + + +class Emformer(EncoderInterface): + def __init__( + self, + num_features: int, + chunk_length: int, + subsampling_factor: int = 4, + d_model: int = 256, + nhead: int = 4, + dim_feedforward: int = 2048, + num_encoder_layers: int = 12, + dropout: float = 0.1, + layer_dropout: float = 0.075, + cnn_module_kernel: int = 3, + left_context_length: int = 0, + right_context_length: int = 0, + memory_size: int = 0, + tanh_on_mem: 
bool = False, + negative_inf: float = -1e8, + ): + super().__init__() + + self.subsampling_factor = subsampling_factor + self.right_context_length = right_context_length + self.chunk_length = chunk_length + if subsampling_factor != 4: + raise NotImplementedError("Support only 'subsampling_factor=4'.") + if chunk_length % subsampling_factor != 0: + raise NotImplementedError( + "chunk_length must be a mutiple of subsampling_factor." + ) + if ( + left_context_length != 0 + and left_context_length % subsampling_factor != 0 + ): + raise NotImplementedError( + "left_context_length must be 0 or a mutiple of subsampling_factor." # noqa + ) + if ( + right_context_length != 0 + and right_context_length % subsampling_factor != 0 + ): + raise NotImplementedError( + "right_context_length must be 0 or a mutiple of subsampling_factor." # noqa + ) + + # self.encoder_embed converts the input of shape (N, T, num_features) + # to the shape (N, T//subsampling_factor, d_model). + # That is, it does two things simultaneously: + # (1) subsampling: T -> T//subsampling_factor + # (2) embedding: num_features -> d_model + self.encoder_embed = Conv2dSubsampling(num_features, d_model) + + self.encoder = EmformerEncoder( + chunk_length=chunk_length // subsampling_factor, + d_model=d_model, + nhead=nhead, + dim_feedforward=dim_feedforward, + num_encoder_layers=num_encoder_layers, + dropout=dropout, + layer_dropout=layer_dropout, + cnn_module_kernel=cnn_module_kernel, + left_context_length=left_context_length // subsampling_factor, + right_context_length=right_context_length // subsampling_factor, + memory_size=memory_size, + tanh_on_mem=tanh_on_mem, + negative_inf=negative_inf, + ) + + def forward( + self, x: torch.Tensor, x_lens: torch.Tensor, warmup: float = 1.0 + ) -> Tuple[torch.Tensor, torch.Tensor]: + """Forward pass for training and non-streaming inference. + + B: batch size; + D: feature dimension; + T: length of utterance. + + Args: + x (torch.Tensor): + Utterance frames right-padded with right context frames, + with shape (B, T, D). + x_lens (torch.Tensor): + With shape (B,) and i-th element representing number of valid + utterance frames for i-th batch element in x, containing the + right_context at the end. + warmup: + A floating point value that gradually increases from 0 throughout + training; when it is >= 1.0 we are "fully warmed up". It is used + to turn modules on sequentially. + + Returns: + (Tensor, Tensor): + - output embedding, with shape (B, T', D), where + T' = ((T - 1) // 2 - 1) // 2 - self.right_context_length // 4. + - output lengths, with shape (B,), without containing the + right_context at the end. + """ + x = self.encoder_embed(x) + x = x.permute(1, 0, 2) # (N, T, C) -> (T, N, C) + + x_lens = (((x_lens - 1) >> 1) - 1) >> 1 + assert x.size(0) == x_lens.max().item() + + output, output_lengths = self.encoder( + x, x_lens, warmup=warmup + ) # (T, N, C) + + output = output.permute(1, 0, 2) # (T, N, C) -> (N, T, C) + + return output, output_lengths + + @torch.jit.export + def infer( + self, + x: torch.Tensor, + x_lens: torch.Tensor, + num_processed_frames: torch.Tensor, + states: Tuple[List[List[torch.Tensor]], List[torch.Tensor]], + ) -> Tuple[ + torch.Tensor, + torch.Tensor, + Tuple[List[List[torch.Tensor]], List[torch.Tensor]], + ]: + """Forward pass for streaming inference. + + B: batch size; + D: feature dimension; + T: length of utterance. + + Args: + x (torch.Tensor): + Utterance frames right-padded with right context frames, + with shape (B, T, D). 
+ lengths (torch.Tensor): + With shape (B,) and i-th element representing number of valid + utterance frames for i-th batch element in x, containing the + right_context at the end. + states (List[torch.Tensor, List[List[torch.Tensor]], List[torch.Tensor]]: # noqa + Cached states containing: + - past_lens: number of past frames for each sample in batch + - attn_caches: attention states from preceding chunk's computation, + where each element corresponds to each emformer layer + - conv_caches: left context for causal convolution, where each + element corresponds to each layer. + Returns: + (Tensor, Tensor): + - output embedding, with shape (B, T', D), where + T' = ((T - 1) // 2 - 1) // 2 - self.right_context_length // 4. + - output lengths, with shape (B,), without containing the + right_context at the end. + - updated states from current chunk's computation. + """ + x = self.encoder_embed(x) + # drop the first and last frames + x = x[:, 1:-1, :] + x = x.permute(1, 0, 2) # (N, T, C) -> (T, N, C) + + # Caution: We assume the subsampling factor is 4! + x_lens = (((x_lens - 1) >> 1) - 1) >> 1 + x_lens -= 2 + assert x.size(0) == x_lens.max().item() + + num_processed_frames = num_processed_frames >> 2 + + output, output_lengths, output_states = self.encoder.infer( + x, x_lens, num_processed_frames, states + ) + + output = output.permute(1, 0, 2) # (T, N, C) -> (N, T, C) + + return output, output_lengths, output_states + + @torch.jit.export + def init_states(self, device: torch.device = torch.device("cpu")): + """Create initial states.""" + return self.encoder.init_states(device) + + +class Conv2dSubsampling(nn.Module): + """Convolutional 2D subsampling (to 1/4 length). + + Convert an input of shape (N, T, idim) to an output + with shape (N, T', odim), where + T' = ((T-1)//2 - 1)//2, which approximates T' == T//4 + + It is based on + https://github.com/espnet/espnet/blob/master/espnet/nets/pytorch_backend/transformer/subsampling.py # noqa + """ + + def __init__( + self, + in_channels: int, + out_channels: int, + layer1_channels: int = 8, + layer2_channels: int = 32, + layer3_channels: int = 128, + ) -> None: + """ + Args: + in_channels: + Number of channels in. The input shape is (N, T, in_channels). + Caution: It requires: T >=7, in_channels >=7 + out_channels + Output dim. The output shape is (N, ((T-1)//2 - 1)//2, out_channels) + layer1_channels: + Number of channels in layer1 + layer1_channels: + Number of channels in layer2 + """ + assert in_channels >= 7 + super().__init__() + + self.conv = nn.Sequential( + ScaledConv2d( + in_channels=1, + out_channels=layer1_channels, + kernel_size=3, + padding=1, + ), + ActivationBalancer(channel_dim=1), + DoubleSwish(), + ScaledConv2d( + in_channels=layer1_channels, + out_channels=layer2_channels, + kernel_size=3, + stride=2, + ), + ActivationBalancer(channel_dim=1), + DoubleSwish(), + ScaledConv2d( + in_channels=layer2_channels, + out_channels=layer3_channels, + kernel_size=3, + stride=2, + ), + ActivationBalancer(channel_dim=1), + DoubleSwish(), + ) + self.out = ScaledLinear( + layer3_channels * (((in_channels - 1) // 2 - 1) // 2), out_channels + ) + # set learn_eps=False because out_norm is preceded by `out`, and `out` + # itself has learned scale, so the extra degree of freedom is not + # needed. + self.out_norm = BasicNorm(out_channels, learn_eps=False) + # constrain median of output to be close to zero. 
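+        # (ActivationBalancer steers the per-channel fraction of positive
+        # values towards the [min_positive, max_positive] range, i.e. roughly
+        # one half here, which is what keeps the median near zero.)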
+ self.out_balancer = ActivationBalancer( + channel_dim=-1, min_positive=0.45, max_positive=0.55 + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """Subsample x. + + Args: + x: + Its shape is (N, T, idim). + + Returns: + Return a tensor of shape (N, ((T-1)//2 - 1)//2, odim) + """ + # On entry, x is (N, T, idim) + x = x.unsqueeze(1) # (N, T, idim) -> (N, 1, T, idim) i.e., (N, C, H, W) + x = self.conv(x) + # Now x is of shape (N, odim, ((T-1)//2 - 1)//2, ((idim-1)//2 - 1)//2) + b, c, t, f = x.size() + x = self.out(x.transpose(1, 2).contiguous().view(b, t, c * f)) + # Now x is of shape (N, ((T-1)//2 - 1))//2, odim) + x = self.out_norm(x) + x = self.out_balancer(x) + return x diff --git a/egs/librispeech/ASR/conv_emformer_transducer_stateless2/encoder_interface.py b/egs/librispeech/ASR/conv_emformer_transducer_stateless2/encoder_interface.py new file mode 120000 index 000000000..ee2f09151 --- /dev/null +++ b/egs/librispeech/ASR/conv_emformer_transducer_stateless2/encoder_interface.py @@ -0,0 +1 @@ +../conv_emformer_transducer_stateless/encoder_interface.py \ No newline at end of file diff --git a/egs/librispeech/ASR/conv_emformer_transducer_stateless2/export.py b/egs/librispeech/ASR/conv_emformer_transducer_stateless2/export.py new file mode 100755 index 000000000..ab15e0241 --- /dev/null +++ b/egs/librispeech/ASR/conv_emformer_transducer_stateless2/export.py @@ -0,0 +1,287 @@ +#!/usr/bin/env python3 +# +# Copyright 2021 Xiaomi Corporation (Author: Fangjun Kuang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script converts several saved checkpoints +# to a single one using model averaging. 
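+# Depending on --jit, the averaged model is saved either as a regular
+# checkpoint (exp_dir/pretrained.pt) or as a TorchScript model
+# (exp_dir/cpu_jit.pt); see the usage examples below.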
+""" +Usage: +./conv_emformer_transducer_stateless2/export.py \ + --exp-dir ./conv_emformer_transducer_stateless2/exp \ + --bpe-model data/lang_bpe_500/bpe.model \ + --epoch 30 \ + --avg 10 \ + --use-averaged-model=True \ + --num-encoder-layers 12 \ + --chunk-length 32 \ + --cnn-module-kernel 31 \ + --left-context-length 32 \ + --right-context-length 8 \ + --memory-size 32 \ + --jit False + +It will generate a file exp_dir/pretrained.pt + +To use the generated file with `conv_emformer_transducer_stateless2/decode.py`, +you can do: + + cd /path/to/exp_dir + ln -s pretrained.pt epoch-9999.pt + + cd /path/to/egs/librispeech/ASR + ./conv_emformer_transducer_stateless2/decode.py \ + --exp-dir ./conv_emformer_transducer_stateless2/exp \ + --epoch 9999 \ + --avg 1 \ + --max-duration 100 \ + --bpe-model data/lang_bpe_500/bpe.model \ + --use-averaged-model=False \ + --num-encoder-layers 12 \ + --chunk-length 32 \ + --cnn-module-kernel 31 \ + --left-context-length 32 \ + --right-context-length 8 \ + --memory-size 32 +""" + +import argparse +import logging +from pathlib import Path + +import sentencepiece as spm +import torch +from train import add_model_arguments, get_params, get_transducer_model + +from icefall.checkpoint import ( + average_checkpoints, + average_checkpoints_with_averaged_model, + find_checkpoints, + load_checkpoint, +) +from icefall.utils import str2bool + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--epoch", + type=int, + default=28, + help="""It specifies the checkpoint to use for averaging. + Note: Epoch counts from 0. + You can specify --avg to use more checkpoints for model averaging.""", + ) + + parser.add_argument( + "--iter", + type=int, + default=0, + help="""If positive, --epoch is ignored and it + will use the checkpoint exp_dir/checkpoint-iter.pt. + You can specify --avg to use more checkpoints for model averaging. + """, + ) + + parser.add_argument( + "--avg", + type=int, + default=15, + help="Number of checkpoints to average. Automatically select " + "consecutive checkpoints before the checkpoint specified by " + "'--epoch' and '--iter'", + ) + + parser.add_argument( + "--exp-dir", + type=str, + default="pruned_transducer_stateless2/exp", + help="""It specifies the directory where all training related + files, e.g., checkpoints, log, etc, are saved + """, + ) + + parser.add_argument( + "--bpe-model", + type=str, + default="data/lang_bpe_500/bpe.model", + help="Path to the BPE model", + ) + + parser.add_argument( + "--jit", + type=str2bool, + default=False, + help="""True to save a model after applying torch.jit.script. + """, + ) + + parser.add_argument( + "--context-size", + type=int, + default=2, + help="The context size in the decoder. 1 means bigram; " + "2 means tri-gram", + ) + + parser.add_argument( + "--use-averaged-model", + type=str2bool, + default=True, + help="Whether to load averaged model. Currently it only supports " + "using --epoch. If True, it would decode with the averaged model " + "over the epoch range from `epoch-avg` (excluded) to `epoch`." + "Actually only the models with epoch number of `epoch-avg` and " + "`epoch` are loaded for averaging. 
", + ) + + add_model_arguments(parser) + + return parser + + +def main(): + args = get_parser().parse_args() + args.exp_dir = Path(args.exp_dir) + + params = get_params() + params.update(vars(args)) + + device = torch.device("cpu") + + logging.info(f"device: {device}") + + sp = spm.SentencePieceProcessor() + sp.load(params.bpe_model) + + # is defined in local/train_bpe_model.py + params.blank_id = sp.piece_to_id("") + params.vocab_size = sp.get_piece_size() + + logging.info(params) + + logging.info("About to create model") + model = get_transducer_model(params) + + if not params.use_averaged_model: + if params.iter > 0: + filenames = find_checkpoints( + params.exp_dir, iteration=-params.iter + )[: params.avg] + if len(filenames) == 0: + raise ValueError( + f"No checkpoints found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + elif len(filenames) < params.avg: + raise ValueError( + f"Not enough checkpoints ({len(filenames)}) found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + logging.info(f"averaging {filenames}") + model.to(device) + model.load_state_dict(average_checkpoints(filenames, device=device)) + elif params.avg == 1: + load_checkpoint(f"{params.exp_dir}/epoch-{params.epoch}.pt", model) + else: + start = params.epoch - params.avg + 1 + filenames = [] + for i in range(start, params.epoch + 1): + if i >= 1: + filenames.append(f"{params.exp_dir}/epoch-{i}.pt") + logging.info(f"averaging {filenames}") + model.to(device) + model.load_state_dict(average_checkpoints(filenames, device=device)) + else: + if params.iter > 0: + filenames = find_checkpoints( + params.exp_dir, iteration=-params.iter + )[: params.avg + 1] + if len(filenames) == 0: + raise ValueError( + f"No checkpoints found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + elif len(filenames) < params.avg + 1: + raise ValueError( + f"Not enough checkpoints ({len(filenames)}) found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + filename_start = filenames[-1] + filename_end = filenames[0] + logging.info( + "Calculating the averaged model over iteration checkpoints" + f" from {filename_start} (excluded) to {filename_end}" + ) + model.to(device) + model.load_state_dict( + average_checkpoints_with_averaged_model( + filename_start=filename_start, + filename_end=filename_end, + device=device, + ) + ) + else: + assert params.avg > 0, params.avg + start = params.epoch - params.avg + assert start >= 1, start + filename_start = f"{params.exp_dir}/epoch-{start}.pt" + filename_end = f"{params.exp_dir}/epoch-{params.epoch}.pt" + logging.info( + f"Calculating the averaged model over epoch range from " + f"{start} (excluded) to {params.epoch}" + ) + model.to(device) + model.load_state_dict( + average_checkpoints_with_averaged_model( + filename_start=filename_start, + filename_end=filename_end, + device=device, + ) + ) + + model.eval() + + if params.jit: + # We won't use the forward() method of the model in C++, so just ignore + # it here. + # Otherwise, one of its arguments is a ragged tensor and is not + # torch scriptabe. 
+ model.__class__.forward = torch.jit.ignore(model.__class__.forward) + logging.info("Using torch.jit.script") + model = torch.jit.script(model) + filename = params.exp_dir / "cpu_jit.pt" + model.save(str(filename)) + logging.info(f"Saved to {filename}") + else: + logging.info("Not using torch.jit.script") + # Save it using a format so that it can be loaded + # by :func:`load_checkpoint` + filename = params.exp_dir / "pretrained.pt" + torch.save({"model": model.state_dict()}, str(filename)) + logging.info(f"Saved to {filename}") + + +if __name__ == "__main__": + formatter = ( + "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s" + ) + + logging.basicConfig(format=formatter, level=logging.INFO) + main() diff --git a/egs/librispeech/ASR/conv_emformer_transducer_stateless2/joiner.py b/egs/librispeech/ASR/conv_emformer_transducer_stateless2/joiner.py new file mode 120000 index 000000000..1eb4dcc83 --- /dev/null +++ b/egs/librispeech/ASR/conv_emformer_transducer_stateless2/joiner.py @@ -0,0 +1 @@ +../conv_emformer_transducer_stateless/joiner.py \ No newline at end of file diff --git a/egs/librispeech/ASR/conv_emformer_transducer_stateless2/model.py b/egs/librispeech/ASR/conv_emformer_transducer_stateless2/model.py new file mode 120000 index 000000000..322b694e0 --- /dev/null +++ b/egs/librispeech/ASR/conv_emformer_transducer_stateless2/model.py @@ -0,0 +1 @@ +../conv_emformer_transducer_stateless/model.py \ No newline at end of file diff --git a/egs/librispeech/ASR/conv_emformer_transducer_stateless2/optim.py b/egs/librispeech/ASR/conv_emformer_transducer_stateless2/optim.py new file mode 120000 index 000000000..8f19a99da --- /dev/null +++ b/egs/librispeech/ASR/conv_emformer_transducer_stateless2/optim.py @@ -0,0 +1 @@ +../conv_emformer_transducer_stateless/optim.py \ No newline at end of file diff --git a/egs/librispeech/ASR/conv_emformer_transducer_stateless2/scaling.py b/egs/librispeech/ASR/conv_emformer_transducer_stateless2/scaling.py new file mode 120000 index 000000000..12f22cf9c --- /dev/null +++ b/egs/librispeech/ASR/conv_emformer_transducer_stateless2/scaling.py @@ -0,0 +1 @@ +../conv_emformer_transducer_stateless/scaling.py \ No newline at end of file diff --git a/egs/librispeech/ASR/conv_emformer_transducer_stateless2/stream.py b/egs/librispeech/ASR/conv_emformer_transducer_stateless2/stream.py new file mode 120000 index 000000000..bf9cbbe2e --- /dev/null +++ b/egs/librispeech/ASR/conv_emformer_transducer_stateless2/stream.py @@ -0,0 +1 @@ +../conv_emformer_transducer_stateless/stream.py \ No newline at end of file diff --git a/egs/librispeech/ASR/conv_emformer_transducer_stateless2/streaming_decode.py b/egs/librispeech/ASR/conv_emformer_transducer_stateless2/streaming_decode.py new file mode 100755 index 000000000..0f687898f --- /dev/null +++ b/egs/librispeech/ASR/conv_emformer_transducer_stateless2/streaming_decode.py @@ -0,0 +1,980 @@ +#!/usr/bin/env python3 +# +# Copyright 2021-2022 Xiaomi Corporation (Author: Fangjun Kuang, +# Zengwei Yao) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +""" +Usage: +(1) greedy search +./conv_emformer_transducer_stateless2/streaming_decode.py \ + --epoch 30 \ + --avg 10 \ + --exp-dir conv_emformer_transducer_stateless2/exp \ + --num-decode-streams 2000 \ + --num-encoder-layers 12 \ + --chunk-length 32 \ + --cnn-module-kernel 31 \ + --left-context-length 32 \ + --right-context-length 8 \ + --memory-size 32 \ + --decoding-method greedy_search \ + --use-averaged-model True + +(2) modified beam search +./conv_emformer_transducer_stateless2/streaming_decode.py \ + --epoch 30 \ + --avg 10 \ + --exp-dir conv_emformer_transducer_stateless2/exp \ + --num-decode-streams 2000 \ + --num-encoder-layers 12 \ + --chunk-length 32 \ + --cnn-module-kernel 31 \ + --left-context-length 32 \ + --right-context-length 8 \ + --memory-size 32 \ + --decoding-method modified_beam_search \ + --use-averaged-model True \ + --beam-size 4 + +(3) fast beam search +./conv_emformer_transducer_stateless2/streaming_decode.py \ + --epoch 30 \ + --avg 10 \ + --exp-dir conv_emformer_transducer_stateless2/exp \ + --num-decode-streams 2000 \ + --num-encoder-layers 12 \ + --chunk-length 32 \ + --cnn-module-kernel 31 \ + --left-context-length 32 \ + --right-context-length 8 \ + --memory-size 32 \ + --decoding-method fast_beam_search \ + --use-averaged-model True \ + --beam 4 \ + --max-contexts 4 \ + --max-states 8 +""" +import argparse +import logging +import warnings +from pathlib import Path +from typing import Dict, List, Optional, Tuple + +import k2 +from lhotse import CutSet +import numpy as np +import sentencepiece as spm +import torch +import torch.nn as nn +from asr_datamodule import LibriSpeechAsrDataModule +from beam_search import Hypothesis, HypothesisList, get_hyps_shape +from emformer import LOG_EPSILON, stack_states, unstack_states +from kaldifeat import Fbank, FbankOptions +from stream import Stream +from torch.nn.utils.rnn import pad_sequence +from train import add_model_arguments, get_params, get_transducer_model + +from icefall.checkpoint import ( + average_checkpoints, + average_checkpoints_with_averaged_model, + find_checkpoints, + load_checkpoint, +) +from icefall.decode import one_best_decoding +from icefall.utils import ( + AttributeDict, + get_texts, + setup_logger, + store_transcripts, + str2bool, + write_error_stats, +) + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--epoch", + type=int, + default=28, + help="It specifies the checkpoint to use for decoding." + "Note: Epoch counts from 0.", + ) + + parser.add_argument( + "--iter", + type=int, + default=0, + help="""If positive, --epoch is ignored and it + will use the checkpoint exp_dir/checkpoint-iter.pt. + You can specify --avg to use more checkpoints for model averaging. + """, + ) + + parser.add_argument( + "--avg", + type=int, + default=15, + help="Number of checkpoints to average. Automatically select " + "consecutive checkpoints before the checkpoint specified by " + "'--epoch'. ", + ) + + parser.add_argument( + "--use-averaged-model", + type=str2bool, + default=False, + help="Whether to load averaged model. Currently it only supports " + "using --epoch. If True, it would decode with the averaged model " + "over the epoch range from `epoch-avg` (excluded) to `epoch`." + "Actually only the models with epoch number of `epoch-avg` and " + "`epoch` are loaded for averaging. 
", + ) + + parser.add_argument( + "--exp-dir", + type=str, + default="transducer_emformer/exp", + help="The experiment dir", + ) + + parser.add_argument( + "--bpe-model", + type=str, + default="data/lang_bpe_500/bpe.model", + help="Path to the BPE model", + ) + + parser.add_argument( + "--decoding-method", + type=str, + default="greedy_search", + help="""Possible values are: + - greedy_search + - modified_beam_search + - fast_beam_search + """, + ) + + parser.add_argument( + "--beam-size", + type=int, + default=4, + help="""An interger indicating how many candidates we will keep for each + frame. Used only when --decoding-method is beam_search or + modified_beam_search.""", + ) + + parser.add_argument( + "--beam", + type=float, + default=4, + help="""A floating point value to calculate the cutoff score during beam + search (i.e., `cutoff = max-score - beam`), which is the same as the + `beam` in Kaldi. + Used only when --decoding-method is fast_beam_search""", + ) + + parser.add_argument( + "--max-contexts", + type=int, + default=4, + help="""Used only when --decoding-method is + fast_beam_search""", + ) + + parser.add_argument( + "--max-states", + type=int, + default=8, + help="""Used only when --decoding-method is + fast_beam_search""", + ) + + parser.add_argument( + "--context-size", + type=int, + default=2, + help="The context size in the decoder. 1 means bigram; " + "2 means tri-gram", + ) + parser.add_argument( + "--max-sym-per-frame", + type=int, + default=1, + help="""Maximum number of symbols per frame. + Used only when --decoding_method is greedy_search""", + ) + + parser.add_argument( + "--sampling-rate", + type=float, + default=16000, + help="Sample rate of the audio", + ) + + parser.add_argument( + "--num-decode-streams", + type=int, + default=2000, + help="The number of streams that can be decoded parallel", + ) + + add_model_arguments(parser) + + return parser + + +def greedy_search( + model: nn.Module, + encoder_out: torch.Tensor, + streams: List[Stream], +) -> None: + """Greedy search in batch mode. It hardcodes --max-sym-per-frame=1. + + Args: + model: + The transducer model. + encoder_out: + Output from the encoder. Its shape is (N, T, C), where N >= 1. + streams: + A list of Stream objects. 
+ """ + assert len(streams) == encoder_out.size(0) + assert encoder_out.ndim == 3 + + blank_id = model.decoder.blank_id + context_size = model.decoder.context_size + device = next(model.parameters()).device + T = encoder_out.size(1) + + encoder_out = model.joiner.encoder_proj(encoder_out) + + decoder_input = torch.tensor( + [stream.hyp[-context_size:] for stream in streams], + device=device, + dtype=torch.int64, + ) + # decoder_out is of shape (batch_size, 1, decoder_out_dim) + decoder_out = model.decoder(decoder_input, need_pad=False) + decoder_out = model.joiner.decoder_proj(decoder_out) + + for t in range(T): + # current_encoder_out's shape: (batch_size, 1, encoder_out_dim) + current_encoder_out = encoder_out[:, t : t + 1, :] # noqa + + logits = model.joiner( + current_encoder_out.unsqueeze(2), + decoder_out.unsqueeze(1), + project_input=False, + ) + # logits'shape (batch_size, vocab_size) + logits = logits.squeeze(1).squeeze(1) + + assert logits.ndim == 2, logits.shape + y = logits.argmax(dim=1).tolist() + emitted = False + for i, v in enumerate(y): + if v != blank_id: + streams[i].hyp.append(v) + emitted = True + if emitted: + # update decoder output + decoder_input = torch.tensor( + [stream.hyp[-context_size:] for stream in streams], + device=device, + dtype=torch.int64, + ) + decoder_out = model.decoder( + decoder_input, + need_pad=False, + ) + decoder_out = model.joiner.decoder_proj(decoder_out) + + +def modified_beam_search( + model: nn.Module, + encoder_out: torch.Tensor, + streams: List[Stream], + beam: int = 4, +): + """Beam search in batch mode with --max-sym-per-frame=1 being hardcoded. + + Args: + model: + The RNN-T model. + encoder_out: + A 3-D tensor of shape (N, T, encoder_out_dim) containing the output of + the encoder model. + streams: + A list of stream objects. + beam: + Number of active paths during the beam search. + """ + assert encoder_out.ndim == 3, encoder_out.shape + assert len(streams) == encoder_out.size(0) + + blank_id = model.decoder.blank_id + context_size = model.decoder.context_size + device = next(model.parameters()).device + batch_size = len(streams) + T = encoder_out.size(1) + + B = [stream.hyps for stream in streams] + + encoder_out = model.joiner.encoder_proj(encoder_out) + + for t in range(T): + current_encoder_out = encoder_out[:, t].unsqueeze(1).unsqueeze(1) + # current_encoder_out's shape: (batch_size, 1, 1, encoder_out_dim) + + hyps_shape = get_hyps_shape(B).to(device) + + A = [list(b) for b in B] + B = [HypothesisList() for _ in range(batch_size)] + + ys_log_probs = torch.stack( + [hyp.log_prob.reshape(1) for hyps in A for hyp in hyps], dim=0 + ) # (num_hyps, 1) + + decoder_input = torch.tensor( + [hyp.ys[-context_size:] for hyps in A for hyp in hyps], + device=device, + dtype=torch.int64, + ) # (num_hyps, context_size) + + decoder_out = model.decoder(decoder_input, need_pad=False).unsqueeze(1) + decoder_out = model.joiner.decoder_proj(decoder_out) + # decoder_out is of shape (num_hyps, 1, 1, decoder_output_dim) + + # Note: For torch 1.7.1 and below, it requires a torch.int64 tensor + # as index, so we use `to(torch.int64)` below. 
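+        # index_select with hyps_shape.row_ids(1) repeats each stream's
+        # encoder frame once per active hypothesis, so the rows line up
+        # with decoder_out (one row per hypothesis).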
+ current_encoder_out = torch.index_select( + current_encoder_out, + dim=0, + index=hyps_shape.row_ids(1).to(torch.int64), + ) # (num_hyps, encoder_out_dim) + + logits = model.joiner( + current_encoder_out, decoder_out, project_input=False + ) + # logits is of shape (num_hyps, 1, 1, vocab_size) + + logits = logits.squeeze(1).squeeze(1) + + log_probs = logits.log_softmax(dim=-1) # (num_hyps, vocab_size) + + log_probs.add_(ys_log_probs) + + vocab_size = log_probs.size(-1) + + log_probs = log_probs.reshape(-1) + + row_splits = hyps_shape.row_splits(1) * vocab_size + log_probs_shape = k2.ragged.create_ragged_shape2( + row_splits=row_splits, cached_tot_size=log_probs.numel() + ) + ragged_log_probs = k2.RaggedTensor( + shape=log_probs_shape, value=log_probs + ) + + for i in range(batch_size): + topk_log_probs, topk_indexes = ragged_log_probs[i].topk(beam) + + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + topk_hyp_indexes = (topk_indexes // vocab_size).tolist() + topk_token_indexes = (topk_indexes % vocab_size).tolist() + + for k in range(len(topk_hyp_indexes)): + hyp_idx = topk_hyp_indexes[k] + hyp = A[i][hyp_idx] + + new_ys = hyp.ys[:] + new_token = topk_token_indexes[k] + if new_token != blank_id: + new_ys.append(new_token) + + new_log_prob = topk_log_probs[k] + new_hyp = Hypothesis(ys=new_ys, log_prob=new_log_prob) + B[i].add(new_hyp) + + for i in range(batch_size): + streams[i].hyps = B[i] + + +def fast_beam_search_one_best( + model: nn.Module, + streams: List[Stream], + encoder_out: torch.Tensor, + processed_lens: torch.Tensor, + beam: float, + max_states: int, + max_contexts: int, +) -> None: + """It limits the maximum number of symbols per frame to 1. + + A lattice is first obtained using modified beam search, and then + the shortest path within the lattice is used as the final output. + + Args: + model: + An instance of `Transducer`. + streams: + A list of stream objects. + encoder_out: + A tensor of shape (N, T, C) from the encoder. + processed_lens: + A tensor of shape (N,) containing the number of processed frames + in `encoder_out` before padding. + beam: + Beam value, similar to the beam used in Kaldi.. + max_states: + Max states per stream per frame. + max_contexts: + Max contexts pre stream per frame. 
+ """ + assert encoder_out.ndim == 3 + + context_size = model.decoder.context_size + vocab_size = model.decoder.vocab_size + + B, T, C = encoder_out.shape + assert B == len(streams) + + config = k2.RnntDecodingConfig( + vocab_size=vocab_size, + decoder_history_len=context_size, + beam=beam, + max_contexts=max_contexts, + max_states=max_states, + ) + individual_streams = [] + for i in range(B): + individual_streams.append(streams[i].rnnt_decoding_stream) + decoding_streams = k2.RnntDecodingStreams(individual_streams, config) + + encoder_out = model.joiner.encoder_proj(encoder_out) + + for t in range(T): + # shape is a RaggedShape of shape (B, context) + # contexts is a Tensor of shape (shape.NumElements(), context_size) + shape, contexts = decoding_streams.get_contexts() + # `nn.Embedding()` in torch below v1.7.1 supports only torch.int64 + contexts = contexts.to(torch.int64) + # decoder_out is of shape (shape.NumElements(), 1, decoder_out_dim) + decoder_out = model.decoder(contexts, need_pad=False) + decoder_out = model.joiner.decoder_proj(decoder_out) + # current_encoder_out is of shape + # (shape.NumElements(), 1, joiner_dim) + # fmt: off + current_encoder_out = torch.index_select( + encoder_out[:, t:t + 1, :], 0, shape.row_ids(1).to(torch.int64) + ) + # fmt: on + logits = model.joiner( + current_encoder_out.unsqueeze(2), + decoder_out.unsqueeze(1), + project_input=False, + ) + logits = logits.squeeze(1).squeeze(1) + log_probs = logits.log_softmax(dim=-1) + decoding_streams.advance(log_probs) + + decoding_streams.terminate_and_flush_to_streams() + + lattice = decoding_streams.format_output(processed_lens.tolist()) + + best_path = one_best_decoding(lattice) + hyps = get_texts(best_path) + + for i in range(B): + streams[i].hyp = hyps[i] + + +def decode_one_chunk( + model: nn.Module, + streams: List[Stream], + params: AttributeDict, + decoding_graph: Optional[k2.Fsa] = None, +) -> List[int]: + """ + Args: + model: + The Transducer model. + streams: + A list of Stream objects. + params: + It is returned by :func:`get_params`. + decoding_graph: + The decoding graph. Can be either a `k2.trivial_graph` or HLG, Used + only when --decoding_method is fast_beam_search. + + Returns: + A list of indexes indicating the finished streams. 
+ """ + device = next(model.parameters()).device + + feature_list = [] + feature_len_list = [] + state_list = [] + num_processed_frames_list = [] + + for stream in streams: + # We should first get `stream.num_processed_frames` + # before calling `stream.get_feature_chunk()` + # since `stream.num_processed_frames` would be updated + num_processed_frames_list.append(stream.num_processed_frames) + feature = stream.get_feature_chunk() + feature_len = feature.size(0) + feature_list.append(feature) + feature_len_list.append(feature_len) + state_list.append(stream.states) + + features = pad_sequence( + feature_list, batch_first=True, padding_value=LOG_EPSILON + ).to(device) + feature_lens = torch.tensor(feature_len_list, device=device) + num_processed_frames = torch.tensor( + num_processed_frames_list, device=device + ) + + # Make sure it has at least 1 frame after subsampling, first-and-last-frame cutting, and right context cutting # noqa + tail_length = ( + 3 * params.subsampling_factor + params.right_context_length + 3 + ) + if features.size(1) < tail_length: + pad_length = tail_length - features.size(1) + feature_lens += pad_length + features = torch.nn.functional.pad( + features, + (0, 0, 0, pad_length), + mode="constant", + value=LOG_EPSILON, + ) + + # Stack states of all streams + states = stack_states(state_list) + + encoder_out, encoder_out_lens, states = model.encoder.infer( + x=features, + x_lens=feature_lens, + states=states, + num_processed_frames=num_processed_frames, + ) + + if params.decoding_method == "greedy_search": + greedy_search( + model=model, + streams=streams, + encoder_out=encoder_out, + ) + elif params.decoding_method == "modified_beam_search": + modified_beam_search( + model=model, + streams=streams, + encoder_out=encoder_out, + beam=params.beam_size, + ) + elif params.decoding_method == "fast_beam_search": + # feature_len is needed to get partial results. + # The rnnt_decoding_stream for fast_beam_search. + fast_beam_search_one_best( + model=model, + streams=streams, + encoder_out=encoder_out, + processed_lens=(num_processed_frames >> 2) + encoder_out_lens, + beam=params.beam, + max_contexts=params.max_contexts, + max_states=params.max_states, + ) + else: + raise ValueError( + f"Unsupported decoding method: {params.decoding_method}" + ) + + # Update cached states of each stream + state_list = unstack_states(states) + for i, s in enumerate(state_list): + streams[i].states = s + + finished_streams = [i for i, stream in enumerate(streams) if stream.done] + return finished_streams + + +def create_streaming_feature_extractor() -> Fbank: + """Create a CPU streaming feature extractor. + + At present, we assume it returns a fbank feature extractor with + fixed options. In the future, we will support passing in the options + from outside. + + Returns: + Return a CPU streaming feature extractor. + """ + opts = FbankOptions() + opts.device = "cpu" + opts.frame_opts.dither = 0 + opts.frame_opts.snip_edges = False + opts.frame_opts.samp_freq = 16000 + opts.mel_opts.num_bins = 80 + return Fbank(opts) + + +def decode_dataset( + cuts: CutSet, + model: nn.Module, + params: AttributeDict, + sp: spm.SentencePieceProcessor, + decoding_graph: Optional[k2.Fsa] = None, +): + """Decode dataset. + + Args: + cuts: + Lhotse Cutset containing the dataset to decode. + params: + It is returned by :func:`get_params`. + model: + The Transducer model. + sp: + The BPE model. + decoding_graph: + The decoding graph. 
Can be either a `k2.trivial_graph` or HLG, Used + only when --decoding_method is fast_beam_search. + + Returns: + Return a dict, whose key may be "greedy_search" if greedy search + is used, or it may be "beam_7" if beam size of 7 is used. + Its value is a list of tuples. Each tuple contains two elements: + The first is the reference transcript, and the second is the + predicted result. + """ + device = next(model.parameters()).device + + log_interval = 300 + + fbank = create_streaming_feature_extractor() + + decode_results = [] + streams = [] + for num, cut in enumerate(cuts): + # Each utterance has a Stream. + stream = Stream( + params=params, + decoding_graph=decoding_graph, + device=device, + LOG_EPS=LOG_EPSILON, + ) + + stream.set_states(model.encoder.init_states(device)) + + audio: np.ndarray = cut.load_audio() + # audio.shape: (1, num_samples) + assert len(audio.shape) == 2 + assert audio.shape[0] == 1, "Should be single channel" + assert audio.dtype == np.float32, audio.dtype + # The trained model is using normalized samples + assert audio.max() <= 1, "Should be normalized to [-1, 1])" + + samples = torch.from_numpy(audio).squeeze(0) + feature = fbank(samples) + stream.set_feature(feature) + stream.set_ground_truth(cut.supervisions[0].text) + + streams.append(stream) + + while len(streams) >= params.num_decode_streams: + finished_streams = decode_one_chunk( + model=model, + streams=streams, + params=params, + decoding_graph=decoding_graph, + ) + + for i in sorted(finished_streams, reverse=True): + decode_results.append( + ( + streams[i].ground_truth.split(), + sp.decode(streams[i].decoding_result()).split(), + ) + ) + del streams[i] + + if num % log_interval == 0: + logging.info(f"Cuts processed until now is {num}.") + + while len(streams) > 0: + finished_streams = decode_one_chunk( + model=model, + streams=streams, + params=params, + decoding_graph=decoding_graph, + ) + + for i in sorted(finished_streams, reverse=True): + decode_results.append( + ( + streams[i].ground_truth.split(), + sp.decode(streams[i].decoding_result()).split(), + ) + ) + del streams[i] + + if params.decoding_method == "greedy_search": + key = "greedy_search" + elif params.decoding_method == "fast_beam_search": + key = ( + f"beam_{params.beam}_" + f"max_contexts_{params.max_contexts}_" + f"max_states_{params.max_states}" + ) + else: + key = f"beam_size_{params.beam_size}" + + return {key: decode_results} + + +def save_results( + params: AttributeDict, + test_set_name: str, + results_dict: Dict[str, List[Tuple[List[str], List[str]]]], +): + test_set_wers = dict() + for key, results in results_dict.items(): + recog_path = ( + params.res_dir / f"recogs-{test_set_name}-{key}-{params.suffix}.txt" + ) + store_transcripts(filename=recog_path, texts=sorted(results)) + logging.info(f"The transcripts are stored in {recog_path}") + + # The following prints out WERs, per-word error statistics and aligned + # ref/hyp pairs. 
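+        # write_error_stats also returns the WER for this decoding key; the
+        # values collected in test_set_wers are written to the wer-summary
+        # file below.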
+        errs_filename = (
+            params.res_dir / f"errs-{test_set_name}-{key}-{params.suffix}.txt"
+        )
+        with open(errs_filename, "w") as f:
+            wer = write_error_stats(
+                f, f"{test_set_name}-{key}", results, enable_log=True
+            )
+            test_set_wers[key] = wer
+
+        logging.info("Wrote detailed error stats to {}".format(errs_filename))
+
+    test_set_wers = sorted(test_set_wers.items(), key=lambda x: x[1])
+    errs_info = (
+        params.res_dir
+        / f"wer-summary-{test_set_name}-{key}-{params.suffix}.txt"
+    )
+    with open(errs_info, "w") as f:
+        print("settings\tWER", file=f)
+        for key, val in test_set_wers:
+            print("{}\t{}".format(key, val), file=f)
+
+    s = "\nFor {}, WER of different settings are:\n".format(test_set_name)
+    note = "\tbest for {}".format(test_set_name)
+    for key, val in test_set_wers:
+        s += "{}\t{}{}\n".format(key, val, note)
+        note = ""
+    logging.info(s)
+
+
+@torch.no_grad()
+def main():
+    parser = get_parser()
+    LibriSpeechAsrDataModule.add_arguments(parser)
+    args = parser.parse_args()
+    args.exp_dir = Path(args.exp_dir)
+
+    params = get_params()
+    params.update(vars(args))
+
+    assert params.decoding_method in (
+        "greedy_search",
+        "fast_beam_search",
+        "modified_beam_search",
+    )
+    params.res_dir = params.exp_dir / "streaming" / params.decoding_method
+
+    if params.iter > 0:
+        params.suffix = f"iter-{params.iter}-avg-{params.avg}"
+    else:
+        params.suffix = f"epoch-{params.epoch}-avg-{params.avg}"
+
+    # for streaming
+    params.suffix += f"-streaming-chunk-length-{params.chunk_length}"
+    params.suffix += f"-left-context-length-{params.left_context_length}"
+    params.suffix += f"-right-context-length-{params.right_context_length}"
+    params.suffix += f"-memory-size-{params.memory_size}"
+
+    if "fast_beam_search" in params.decoding_method:
+        params.suffix += f"-beam-{params.beam}"
+        params.suffix += f"-max-contexts-{params.max_contexts}"
+        params.suffix += f"-max-states-{params.max_states}"
+    elif "beam_search" in params.decoding_method:
+        params.suffix += (
+            f"-{params.decoding_method}-beam-size-{params.beam_size}"
+        )
+    else:
+        params.suffix += f"-context-{params.context_size}"
+        params.suffix += f"-max-sym-per-frame-{params.max_sym_per_frame}"
+
+    if params.use_averaged_model:
+        params.suffix += "-use-averaged-model"
+
+    setup_logger(f"{params.res_dir}/log-streaming-decode")
+    logging.info("Decoding started")
+
+    device = torch.device("cpu")
+    if torch.cuda.is_available():
+        device = torch.device("cuda", 0)
+
+    logging.info(f"Device: {device}")
+
+    sp = spm.SentencePieceProcessor()
+    sp.load(params.bpe_model)
+
+    # <blk> and <unk> are defined in local/train_bpe_model.py
+    params.blank_id = sp.piece_to_id("<blk>")
+    params.unk_id = sp.piece_to_id("<unk>")
+    params.vocab_size = sp.get_piece_size()
+
+    params.device = device
+
+    logging.info(params)
+
+    logging.info("About to create model")
+    model = get_transducer_model(params)
+
+    if not params.use_averaged_model:
+        if params.iter > 0:
+            filenames = find_checkpoints(
+                params.exp_dir, iteration=-params.iter
+            )[: params.avg]
+            if len(filenames) == 0:
+                raise ValueError(
+                    f"No checkpoints found for"
+                    f" --iter {params.iter}, --avg {params.avg}"
+                )
+            elif len(filenames) < params.avg:
+                raise ValueError(
+                    f"Not enough checkpoints ({len(filenames)}) found for"
+                    f" --iter {params.iter}, --avg {params.avg}"
+                )
+            logging.info(f"averaging {filenames}")
+            model.to(device)
+            model.load_state_dict(average_checkpoints(filenames, device=device))
+        elif params.avg == 1:
+            load_checkpoint(f"{params.exp_dir}/epoch-{params.epoch}.pt", model)
+        else:
+            start = params.epoch -
params.avg + 1 + filenames = [] + for i in range(start, params.epoch + 1): + if i >= 1: + filenames.append(f"{params.exp_dir}/epoch-{i}.pt") + logging.info(f"averaging {filenames}") + model.to(device) + model.load_state_dict(average_checkpoints(filenames, device=device)) + else: + if params.iter > 0: + filenames = find_checkpoints( + params.exp_dir, iteration=-params.iter + )[: params.avg + 1] + if len(filenames) == 0: + raise ValueError( + f"No checkpoints found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + elif len(filenames) < params.avg + 1: + raise ValueError( + f"Not enough checkpoints ({len(filenames)}) found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + filename_start = filenames[-1] + filename_end = filenames[0] + logging.info( + "Calculating the averaged model over iteration checkpoints" + f" from {filename_start} (excluded) to {filename_end}" + ) + model.to(device) + model.load_state_dict( + average_checkpoints_with_averaged_model( + filename_start=filename_start, + filename_end=filename_end, + device=device, + ) + ) + else: + assert params.avg > 0, params.avg + start = params.epoch - params.avg + assert start >= 1, start + filename_start = f"{params.exp_dir}/epoch-{start}.pt" + filename_end = f"{params.exp_dir}/epoch-{params.epoch}.pt" + logging.info( + f"Calculating the averaged model over epoch range from " + f"{start} (excluded) to {params.epoch}" + ) + model.to(device) + model.load_state_dict( + average_checkpoints_with_averaged_model( + filename_start=filename_start, + filename_end=filename_end, + device=device, + ) + ) + + model.eval() + + if params.decoding_method == "fast_beam_search": + decoding_graph = k2.trivial_graph(params.vocab_size - 1, device=device) + else: + decoding_graph = None + + num_param = sum([p.numel() for p in model.parameters()]) + logging.info(f"Number of model parameters: {num_param}") + + librispeech = LibriSpeechAsrDataModule(args) + + test_clean_cuts = librispeech.test_clean_cuts() + test_other_cuts = librispeech.test_other_cuts() + + test_sets = ["test-clean", "test-other"] + test_cuts = [test_clean_cuts, test_other_cuts] + + for test_set, test_cut in zip(test_sets, test_cuts): + results_dict = decode_dataset( + cuts=test_cut, + model=model, + params=params, + sp=sp, + decoding_graph=decoding_graph, + ) + + save_results( + params=params, + test_set_name=test_set, + results_dict=results_dict, + ) + + logging.info("Done!") + + +if __name__ == "__main__": + torch.manual_seed(20220410) + main() diff --git a/egs/librispeech/ASR/conv_emformer_transducer_stateless2/test_emformer.py b/egs/librispeech/ASR/conv_emformer_transducer_stateless2/test_emformer.py new file mode 100644 index 000000000..8cde6205b --- /dev/null +++ b/egs/librispeech/ASR/conv_emformer_transducer_stateless2/test_emformer.py @@ -0,0 +1,194 @@ +#!/usr/bin/env python3 +# +# Copyright 2022 Xiaomi Corporation (Author: Fangjun Kuang, +# Zengwei Yao) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +import torch +from emformer import ConvolutionModule, Emformer, stack_states, unstack_states + + +def test_convolution_module_forward(): + B, D = 2, 256 + chunk_length = 4 + right_context_length = 2 + num_chunks = 3 + U = num_chunks * chunk_length + R = num_chunks * right_context_length + kernel_size = 31 + conv_module = ConvolutionModule( + chunk_length, + right_context_length, + D, + kernel_size, + ) + + utterance = torch.randn(U, B, D) + right_context = torch.randn(R, B, D) + + utterance, right_context = conv_module(utterance, right_context) + assert utterance.shape == (U, B, D), utterance.shape + assert right_context.shape == (R, B, D), right_context.shape + + +def test_convolution_module_infer(): + from emformer import ConvolutionModule + + B, D = 2, 256 + chunk_length = 4 + right_context_length = 2 + num_chunks = 1 + U = num_chunks * chunk_length + R = num_chunks * right_context_length + kernel_size = 31 + conv_module = ConvolutionModule( + chunk_length, + right_context_length, + D, + kernel_size, + ) + + utterance = torch.randn(U, B, D) + right_context = torch.randn(R, B, D) + cache = torch.randn(B, D, kernel_size - 1) + + utterance, right_context, new_cache = conv_module.infer( + utterance, right_context, cache + ) + assert utterance.shape == (U, B, D), utterance.shape + assert right_context.shape == (R, B, D), right_context.shape + assert new_cache.shape == (B, D, kernel_size - 1), new_cache.shape + + +def test_state_stack_unstack(): + num_features = 80 + chunk_length = 32 + encoder_dim = 512 + num_encoder_layers = 2 + kernel_size = 31 + left_context_length = 32 + right_context_length = 8 + memory_size = 32 + + model = Emformer( + num_features=num_features, + chunk_length=chunk_length, + subsampling_factor=4, + d_model=encoder_dim, + num_encoder_layers=num_encoder_layers, + cnn_module_kernel=kernel_size, + left_context_length=left_context_length, + right_context_length=right_context_length, + memory_size=memory_size, + ) + + for batch_size in [1, 2]: + attn_caches = [ + [ + torch.zeros(memory_size, batch_size, encoder_dim), + torch.zeros(left_context_length // 4, batch_size, encoder_dim), + torch.zeros( + left_context_length // 4, + batch_size, + encoder_dim, + ), + ] + for _ in range(num_encoder_layers) + ] + conv_caches = [ + torch.zeros(batch_size, encoder_dim, kernel_size - 1) + for _ in range(num_encoder_layers) + ] + states = [attn_caches, conv_caches] + x = torch.randn(batch_size, 23, num_features) + x_lens = torch.full((batch_size,), 23) + num_processed_frames = torch.full((batch_size,), 0) + y, y_lens, states = model.infer( + x, x_lens, num_processed_frames=num_processed_frames, states=states + ) + + state_list = unstack_states(states) + states2 = stack_states(state_list) + + for ss, ss2 in zip(states[0], states2[0]): + for s, s2 in zip(ss, ss2): + assert torch.allclose(s, s2), f"{s.sum()}, {s2.sum()}" + + for s, s2 in zip(states[1], states2[1]): + assert torch.allclose(s, s2), f"{s.sum()}, {s2.sum()}" + + +def test_torchscript_consistency_infer(): + r"""Verify that scripting Emformer does not change the behavior of method `infer`.""" # noqa + num_features = 80 + chunk_length = 32 + encoder_dim = 512 + num_encoder_layers = 2 + kernel_size = 31 + left_context_length = 32 + right_context_length = 8 + memory_size = 32 + batch_size = 2 + + model = Emformer( + num_features=num_features, + chunk_length=chunk_length, + subsampling_factor=4, + d_model=encoder_dim, + 
num_encoder_layers=num_encoder_layers, + cnn_module_kernel=kernel_size, + left_context_length=left_context_length, + right_context_length=right_context_length, + memory_size=memory_size, + ).eval() + attn_caches = [ + [ + torch.zeros(memory_size, batch_size, encoder_dim), + torch.zeros(left_context_length // 4, batch_size, encoder_dim), + torch.zeros( + left_context_length // 4, + batch_size, + encoder_dim, + ), + ] + for _ in range(num_encoder_layers) + ] + conv_caches = [ + torch.zeros(batch_size, encoder_dim, kernel_size - 1) + for _ in range(num_encoder_layers) + ] + states = [attn_caches, conv_caches] + x = torch.randn(batch_size, 23, num_features) + x_lens = torch.full((batch_size,), 23) + num_processed_frames = torch.full((batch_size,), 0) + y, y_lens, out_states = model.infer( + x, x_lens, num_processed_frames=num_processed_frames, states=states + ) + + sc_model = torch.jit.script(model).eval() + sc_y, sc_y_lens, sc_out_states = sc_model.infer( + x, x_lens, num_processed_frames=num_processed_frames, states=states + ) + + assert torch.allclose(y, sc_y) + + +if __name__ == "__main__": + test_convolution_module_forward() + test_convolution_module_infer() + test_state_stack_unstack() + test_torchscript_consistency_infer() diff --git a/egs/librispeech/ASR/conv_emformer_transducer_stateless2/train.py b/egs/librispeech/ASR/conv_emformer_transducer_stateless2/train.py new file mode 100755 index 000000000..dfe1b6136 --- /dev/null +++ b/egs/librispeech/ASR/conv_emformer_transducer_stateless2/train.py @@ -0,0 +1,1136 @@ +#!/usr/bin/env python3 +# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang, +# Wei Kang, +# Mingshuang Luo,) +# Zengwei Yao) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +Usage: + +export CUDA_VISIBLE_DEVICES="0,1,2,3" + +./conv_emformer_transducer_stateless2/train.py \ + --world-size 4 \ + --num-epochs 30 \ + --start-epoch 1 \ + --exp-dir conv_emformer_transducer_stateless2/exp \ + --full-libri 1 \ + --max-duration 280 \ + --master-port 12321 \ + --num-encoder-layers 12 \ + --chunk-length 32 \ + --cnn-module-kernel 31 \ + --left-context-length 32 \ + --right-context-length 8 \ + --memory-size 32 + +# For mix precision training: +./conv_emformer_transducer_stateless2/train.py \ + --world-size 4 \ + --num-epochs 30 \ + --start-epoch 1 \ + --use-fp16 1 \ + --exp-dir conv_emformer_transducer_stateless2/exp \ + --full-libri 1 \ + --max-duration 300 \ + --master-port 12321 \ + --num-encoder-layers 12 \ + --chunk-length 32 \ + --cnn-module-kernel 31 \ + --left-context-length 32 \ + --right-context-length 8 \ + --memory-size 32 +""" + + +import argparse +import copy +import logging +import warnings +from pathlib import Path +from shutil import copyfile +from typing import Any, Dict, Optional, Tuple, Union + +import k2 +import optim +import sentencepiece as spm +import torch +import torch.multiprocessing as mp +import torch.nn as nn +from asr_datamodule import LibriSpeechAsrDataModule +from decoder import Decoder +from emformer import Emformer +from joiner import Joiner +from lhotse.cut import Cut +from lhotse.dataset.sampling.base import CutSampler +from lhotse.utils import fix_random_seed +from model import Transducer +from optim import Eden, Eve +from torch import Tensor +from torch.cuda.amp import GradScaler +from torch.nn.parallel import DistributedDataParallel as DDP +from torch.utils.tensorboard import SummaryWriter + +from icefall import diagnostics +from icefall.checkpoint import load_checkpoint, remove_checkpoints +from icefall.checkpoint import save_checkpoint as save_checkpoint_impl +from icefall.checkpoint import ( + save_checkpoint_with_global_batch_idx, + update_averaged_model, +) +from icefall.dist import cleanup_dist, setup_dist +from icefall.env import get_env_info +from icefall.utils import AttributeDict, MetricsTracker, setup_logger, str2bool + +LRSchedulerType = Union[ + torch.optim.lr_scheduler._LRScheduler, optim.LRScheduler +] + + +def add_model_arguments(parser: argparse.ArgumentParser): + parser.add_argument( + "--encoder-dim", + type=int, + default=512, + help="Attention dim for the Emformer", + ) + + parser.add_argument( + "--nhead", + type=int, + default=8, + help="Number of attention heads for the Emformer", + ) + + parser.add_argument( + "--dim-feedforward", + type=int, + default=2048, + help="Feed-forward dimension for the Emformer", + ) + + parser.add_argument( + "--num-encoder-layers", + type=int, + default=12, + help="Number of encoder layers for the Emformer", + ) + + parser.add_argument( + "--cnn-module-kernel", + type=int, + default=31, + help="Kernel size for the convolution module.", + ) + + parser.add_argument( + "--left-context-length", + type=int, + default=32, + help="""Number of frames before subsampling for left context + in the Emformer.""", + ) + + parser.add_argument( + "--chunk-length", + type=int, + default=32, + help="""Number of frames before subsampling for each chunk + in the Emformer.""", + ) + + parser.add_argument( + "--right-context-length", + type=int, + default=8, + help="""Number of frames before subsampling for right context + in the Emformer.""", + ) + + parser.add_argument( + "--memory-size", + type=int, + default=0, + help="Number of entries in the memory for the Emformer", + ) + + +def 
get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--world-size", + type=int, + default=1, + help="Number of GPUs for DDP training.", + ) + + parser.add_argument( + "--master-port", + type=int, + default=12354, + help="Master port to use for DDP training.", + ) + + parser.add_argument( + "--tensorboard", + type=str2bool, + default=True, + help="Should various information be logged in tensorboard.", + ) + + parser.add_argument( + "--num-epochs", + type=int, + default=30, + help="Number of epochs to train.", + ) + + parser.add_argument( + "--start-epoch", + type=int, + default=1, + help="""Resume training from this epoch. It should be positive. + If larger than 1, it will load checkpoint from + exp-dir/epoch-{start_epoch-1}.pt + """, + ) + + parser.add_argument( + "--start-batch", + type=int, + default=0, + help="""If positive, --start-epoch is ignored and + it loads the checkpoint from exp-dir/checkpoint-{start_batch}.pt + """, + ) + + parser.add_argument( + "--exp-dir", + type=str, + default="pruned_transducer_stateless2/exp", + help="""The experiment dir. + It specifies the directory where all training related + files, e.g., checkpoints, log, etc, are saved + """, + ) + + parser.add_argument( + "--bpe-model", + type=str, + default="data/lang_bpe_500/bpe.model", + help="Path to the BPE model", + ) + + parser.add_argument( + "--initial-lr", + type=float, + default=0.003, + help="""The initial learning rate. This value should not need to be + changed.""", + ) + + parser.add_argument( + "--lr-batches", + type=float, + default=5000, + help="""Number of steps that affects how rapidly the learning rate decreases. + We suggest not to change this.""", + ) + + parser.add_argument( + "--lr-epochs", + type=float, + default=6, + help="""Number of epochs that affects how rapidly the learning rate decreases. + """, + ) + + parser.add_argument( + "--context-size", + type=int, + default=2, + help="The context size in the decoder. 1 means bigram; " + "2 means tri-gram", + ) + + parser.add_argument( + "--prune-range", + type=int, + default=5, + help="The prune range for rnnt loss, it means how many symbols(context)" + "we are using to compute the loss", + ) + + parser.add_argument( + "--lm-scale", + type=float, + default=0.25, + help="The scale to smooth the loss with lm " + "(output of prediction network) part.", + ) + + parser.add_argument( + "--am-scale", + type=float, + default=0.0, + help="The scale to smooth the loss with am (output of encoder network)" + "part.", + ) + + parser.add_argument( + "--simple-loss-scale", + type=float, + default=0.5, + help="To get pruning ranges, we will calculate a simple version" + "loss(joiner is just addition), this simple loss also uses for" + "training (as a regularization item). We will scale the simple loss" + "with this parameter before adding to the final loss.", + ) + + parser.add_argument( + "--seed", + type=int, + default=42, + help="The seed for random generators intended for reproducibility", + ) + + parser.add_argument( + "--print-diagnostics", + type=str2bool, + default=False, + help="Accumulate stats on activations, print them and exit.", + ) + + parser.add_argument( + "--save-every-n", + type=int, + default=8000, + help="""Save checkpoint after processing this number of batches" + periodically. We save checkpoint to exp-dir/ whenever + params.batch_idx_train % save_every_n == 0. 
The checkpoint filename + has the form: f'exp-dir/checkpoint-{params.batch_idx_train}.pt' + Note: It also saves checkpoint to `exp-dir/epoch-xxx.pt` at the + end of each epoch where `xxx` is the epoch number counting from 0. + """, + ) + + parser.add_argument( + "--keep-last-k", + type=int, + default=20, + help="""Only keep this number of checkpoints on disk. + For instance, if it is 3, there are only 3 checkpoints + in the exp-dir with filenames `checkpoint-xxx.pt`. + It does not affect checkpoints with name `epoch-xxx.pt`. + """, + ) + + parser.add_argument( + "--average-period", + type=int, + default=100, + help="""Update the averaged model, namely `model_avg`, after processing + this number of batches. `model_avg` is a separate version of model, + in which each floating-point parameter is the average of all the + parameters from the start of training. Each time we take the average, + we do: `model_avg = model * (average_period / batch_idx_train) + + model_avg * ((batch_idx_train - average_period) / batch_idx_train)`. + """, + ) + + parser.add_argument( + "--use-fp16", + type=str2bool, + default=False, + help="Whether to use half precision training.", + ) + + add_model_arguments(parser) + + return parser + + +def get_params() -> AttributeDict: + """Return a dict containing training parameters. + + All training related parameters that are not passed from the commandline + are saved in the variable `params`. + + Commandline options are merged into `params` after they are parsed, so + you can also access them via `params`. + + Explanation of options saved in `params`: + + - best_train_loss: Best training loss so far. It is used to select + the model that has the lowest training loss. It is + updated during the training. + + - best_valid_loss: Best validation loss so far. It is used to select + the model that has the lowest validation loss. It is + updated during the training. + + - best_train_epoch: It is the epoch that has the best training loss. + + - best_valid_epoch: It is the epoch that has the best validation loss. + + - batch_idx_train: Used to writing statistics to tensorboard. It + contains number of batches trained so far across + epochs. + + - log_interval: Print training loss if batch_idx % log_interval` is 0 + + - reset_interval: Reset statistics if batch_idx % reset_interval is 0 + + - valid_interval: Run validation if batch_idx % valid_interval is 0 + + - feature_dim: The model input dim. It has to match the one used + in computing features. + + - subsampling_factor: The subsampling factor for the model. + + - encoder_dim: Hidden dim for multi-head attention model. + + - num_decoder_layers: Number of decoder layer of transformer decoder. + + - warm_step: The warm_step for Noam optimizer. 
+ """ + params = AttributeDict( + { + "best_train_loss": float("inf"), + "best_valid_loss": float("inf"), + "best_train_epoch": -1, + "best_valid_epoch": -1, + "batch_idx_train": 0, + "log_interval": 50, + "reset_interval": 200, + "valid_interval": 3000, # For the 100h subset, use 800 + # parameters for Emformer + "feature_dim": 80, + "subsampling_factor": 4, + # parameters for decoder + "decoder_dim": 512, + # parameters for joiner + "joiner_dim": 512, + # parameters for Noam + "model_warm_step": 3000, # arg given to model, not for lrate + "env_info": get_env_info(), + } + ) + + return params + + +def get_encoder_model(params: AttributeDict) -> nn.Module: + # TODO: We can add an option to switch between Conformer and Transformer + encoder = Emformer( + num_features=params.feature_dim, + chunk_length=params.chunk_length, + subsampling_factor=params.subsampling_factor, + d_model=params.encoder_dim, + nhead=params.nhead, + dim_feedforward=params.dim_feedforward, + num_encoder_layers=params.num_encoder_layers, + cnn_module_kernel=params.cnn_module_kernel, + left_context_length=params.left_context_length, + right_context_length=params.right_context_length, + memory_size=params.memory_size, + ) + return encoder + + +def get_decoder_model(params: AttributeDict) -> nn.Module: + decoder = Decoder( + vocab_size=params.vocab_size, + decoder_dim=params.decoder_dim, + blank_id=params.blank_id, + context_size=params.context_size, + ) + return decoder + + +def get_joiner_model(params: AttributeDict) -> nn.Module: + joiner = Joiner( + encoder_dim=params.encoder_dim, + decoder_dim=params.decoder_dim, + joiner_dim=params.joiner_dim, + vocab_size=params.vocab_size, + ) + return joiner + + +def get_transducer_model(params: AttributeDict) -> nn.Module: + encoder = get_encoder_model(params) + decoder = get_decoder_model(params) + joiner = get_joiner_model(params) + + model = Transducer( + encoder=encoder, + decoder=decoder, + joiner=joiner, + encoder_dim=params.encoder_dim, + decoder_dim=params.decoder_dim, + joiner_dim=params.joiner_dim, + vocab_size=params.vocab_size, + ) + return model + + +def load_checkpoint_if_available( + params: AttributeDict, + model: nn.Module, + model_avg: nn.Module = None, + optimizer: Optional[torch.optim.Optimizer] = None, + scheduler: Optional[LRSchedulerType] = None, +) -> Optional[Dict[str, Any]]: + """Load checkpoint from file. + + If params.start_batch is positive, it will load the checkpoint from + `params.exp_dir/checkpoint-{params.start_batch}.pt`. Otherwise, if + params.start_epoch is larger than 1, it will load the checkpoint from + `params.start_epoch - 1`. + + Apart from loading state dict for `model` and `optimizer` it also updates + `best_train_epoch`, `best_train_loss`, `best_valid_epoch`, + and `best_valid_loss` in `params`. + + Args: + params: + The return value of :func:`get_params`. + model: + The training model. + model_avg: + The stored model averaged from the start of training. + optimizer: + The optimizer that we are using. + scheduler: + The scheduler that we are using. + Returns: + Return a dict containing previously saved training info. + """ + if params.start_batch > 0: + filename = params.exp_dir / f"checkpoint-{params.start_batch}.pt" + elif params.start_epoch > 1: + filename = params.exp_dir / f"epoch-{params.start_epoch-1}.pt" + else: + return None + + assert filename.is_file(), f"{filename} does not exist!" 
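+    # The call below restores the state of `model` (and, if given,
+    # `model_avg`, `optimizer` and `scheduler`) in place and returns the
+    # saved training stats (e.g. best_train_loss) as a dict.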
+ + saved_params = load_checkpoint( + filename, + model=model, + model_avg=model_avg, + optimizer=optimizer, + scheduler=scheduler, + ) + + keys = [ + "best_train_epoch", + "best_valid_epoch", + "batch_idx_train", + "best_train_loss", + "best_valid_loss", + ] + for k in keys: + params[k] = saved_params[k] + + if params.start_batch > 0: + if "cur_epoch" in saved_params: + params["start_epoch"] = saved_params["cur_epoch"] + + if "cur_batch_idx" in saved_params: + params["cur_batch_idx"] = saved_params["cur_batch_idx"] + + return saved_params + + +def save_checkpoint( + params: AttributeDict, + model: Union[nn.Module, DDP], + model_avg: Optional[nn.Module] = None, + optimizer: Optional[torch.optim.Optimizer] = None, + scheduler: Optional[LRSchedulerType] = None, + sampler: Optional[CutSampler] = None, + scaler: Optional[GradScaler] = None, + rank: int = 0, +) -> None: + """Save model, optimizer, scheduler and training stats to file. + + Args: + params: + It is returned by :func:`get_params`. + model: + The training model. + model_avg: + The stored model averaged from the start of training. + optimizer: + The optimizer used in the training. + sampler: + The sampler for the training dataset. + scaler: + The scaler used for mix precision training. + """ + if rank != 0: + return + filename = params.exp_dir / f"epoch-{params.cur_epoch}.pt" + save_checkpoint_impl( + filename=filename, + model=model, + model_avg=model_avg, + params=params, + optimizer=optimizer, + scheduler=scheduler, + sampler=sampler, + scaler=scaler, + rank=rank, + ) + + if params.best_train_epoch == params.cur_epoch: + best_train_filename = params.exp_dir / "best-train-loss.pt" + copyfile(src=filename, dst=best_train_filename) + + if params.best_valid_epoch == params.cur_epoch: + best_valid_filename = params.exp_dir / "best-valid-loss.pt" + copyfile(src=filename, dst=best_valid_filename) + + +def compute_loss( + params: AttributeDict, + model: Union[nn.Module, DDP], + sp: spm.SentencePieceProcessor, + batch: dict, + is_training: bool, + warmup: float = 1.0, +) -> Tuple[Tensor, MetricsTracker]: + """ + Compute RNN-T loss given the model and its inputs. + + Args: + params: + Parameters for training. See :func:`get_params`. + model: + The model for training. It is an instance of Conformer in our case. + batch: + A batch of data. See `lhotse.dataset.K2SpeechRecognitionDataset()` + for the content in it. + is_training: + True for training. False for validation. When it is True, this + function enables autograd during computation; when it is False, it + disables autograd. + warmup: a floating point value which increases throughout training; + values >= 1.0 are fully warmed up and have all modules present. 
+ """ + device = ( + model.device + if isinstance(model, DDP) + else next(model.parameters()).device + ) + feature = batch["inputs"] + # at entry, feature is (N, T, C) + assert feature.ndim == 3 + feature = feature.to(device) + + supervisions = batch["supervisions"] + feature_lens = supervisions["num_frames"].to(device) + + texts = batch["supervisions"]["text"] + y = sp.encode(texts, out_type=int) + y = k2.RaggedTensor(y).to(device) + + with torch.set_grad_enabled(is_training): + simple_loss, pruned_loss = model( + x=feature, + x_lens=feature_lens, + y=y, + prune_range=params.prune_range, + am_scale=params.am_scale, + lm_scale=params.lm_scale, + warmup=warmup, + ) + # after the main warmup step, we keep pruned_loss_scale small + # for the same amount of time (model_warm_step), to avoid + # overwhelming the simple_loss and causing it to diverge, + # in case it had not fully learned the alignment yet. + pruned_loss_scale = ( + 0.0 + if warmup < 1.0 + else (0.1 if warmup > 1.0 and warmup < 2.0 else 1.0) + ) + loss = ( + params.simple_loss_scale * simple_loss + + pruned_loss_scale * pruned_loss + ) + + assert loss.requires_grad == is_training + + info = MetricsTracker() + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + info["frames"] = ( + (feature_lens // params.subsampling_factor).sum().item() + ) + + # Note: We use reduction=sum while computing the loss. + info["loss"] = loss.detach().cpu().item() + info["simple_loss"] = simple_loss.detach().cpu().item() + info["pruned_loss"] = pruned_loss.detach().cpu().item() + + return loss, info + + +def compute_validation_loss( + params: AttributeDict, + model: Union[nn.Module, DDP], + sp: spm.SentencePieceProcessor, + valid_dl: torch.utils.data.DataLoader, + world_size: int = 1, +) -> MetricsTracker: + """Run the validation process.""" + model.eval() + + tot_loss = MetricsTracker() + + for batch_idx, batch in enumerate(valid_dl): + loss, loss_info = compute_loss( + params=params, + model=model, + sp=sp, + batch=batch, + is_training=False, + ) + assert loss.requires_grad is False + tot_loss = tot_loss + loss_info + + if world_size > 1: + tot_loss.reduce(loss.device) + + loss_value = tot_loss["loss"] / tot_loss["frames"] + if loss_value < params.best_valid_loss: + params.best_valid_epoch = params.cur_epoch + params.best_valid_loss = loss_value + + return tot_loss + + +def train_one_epoch( + params: AttributeDict, + model: Union[nn.Module, DDP], + optimizer: torch.optim.Optimizer, + scheduler: LRSchedulerType, + sp: spm.SentencePieceProcessor, + train_dl: torch.utils.data.DataLoader, + valid_dl: torch.utils.data.DataLoader, + scaler: GradScaler, + model_avg: Optional[nn.Module] = None, + tb_writer: Optional[SummaryWriter] = None, + world_size: int = 1, + rank: int = 0, +) -> None: + """Train the model for one epoch. + + The training loss from the mean of all frames is saved in + `params.train_loss`. It runs the validation process every + `params.valid_interval` batches. + + Args: + params: + It is returned by :func:`get_params`. + model: + The model for training. + optimizer: + The optimizer we are using. + scheduler: + The learning rate scheduler, we call step() every step. + train_dl: + Dataloader for the training dataset. + valid_dl: + Dataloader for the validation dataset. + scaler: + The scaler used for mix precision training. + model_avg: + The stored model averaged from the start of training. + tb_writer: + Writer to write log messages to tensorboard. + world_size: + Number of nodes in DDP training. If it is 1, DDP is disabled. 
+ rank: + The rank of the node in DDP training. If no DDP is used, it should + be set to 0. + """ + model.train() + + tot_loss = MetricsTracker() + + cur_batch_idx = params.get("cur_batch_idx", 0) + + for batch_idx, batch in enumerate(train_dl): + if batch_idx < cur_batch_idx: + continue + cur_batch_idx = batch_idx + + params.batch_idx_train += 1 + batch_size = len(batch["supervisions"]["text"]) + + with torch.cuda.amp.autocast(enabled=params.use_fp16): + loss, loss_info = compute_loss( + params=params, + model=model, + sp=sp, + batch=batch, + is_training=True, + warmup=(params.batch_idx_train / params.model_warm_step), + ) + # summary stats + tot_loss = (tot_loss * (1 - 1 / params.reset_interval)) + loss_info + + # NOTE: We use reduction==sum and loss is computed over utterances + # in the batch and there is no normalization to it so far. + scaler.scale(loss).backward() + scheduler.step_batch(params.batch_idx_train) + scaler.step(optimizer) + scaler.update() + optimizer.zero_grad() + + if params.print_diagnostics and batch_idx == 5: + return + + if ( + rank == 0 + and params.batch_idx_train > 0 + and params.batch_idx_train % params.average_period == 0 + ): + update_averaged_model( + params=params, + model_cur=model, + model_avg=model_avg, + ) + + if ( + params.batch_idx_train > 0 + and params.batch_idx_train % params.save_every_n == 0 + ): + params.cur_batch_idx = batch_idx + save_checkpoint_with_global_batch_idx( + out_dir=params.exp_dir, + global_batch_idx=params.batch_idx_train, + model=model, + model_avg=model_avg, + params=params, + optimizer=optimizer, + scheduler=scheduler, + sampler=train_dl.sampler, + scaler=scaler, + rank=rank, + ) + del params.cur_batch_idx + remove_checkpoints( + out_dir=params.exp_dir, + topk=params.keep_last_k, + rank=rank, + ) + + if batch_idx % params.log_interval == 0: + cur_lr = scheduler.get_last_lr()[0] + logging.info( + f"Epoch {params.cur_epoch}, " + f"batch {batch_idx}, loss[{loss_info}], " + f"tot_loss[{tot_loss}], batch size: {batch_size}, " + f"lr: {cur_lr:.2e}" + ) + + if tb_writer is not None: + tb_writer.add_scalar( + "train/learning_rate", cur_lr, params.batch_idx_train + ) + + loss_info.write_summary( + tb_writer, "train/current_", params.batch_idx_train + ) + tot_loss.write_summary( + tb_writer, "train/tot_", params.batch_idx_train + ) + + if batch_idx > 0 and batch_idx % params.valid_interval == 0: + logging.info("Computing validation loss") + valid_info = compute_validation_loss( + params=params, + model=model, + sp=sp, + valid_dl=valid_dl, + world_size=world_size, + ) + model.train() + logging.info(f"Epoch {params.cur_epoch}, validation: {valid_info}") + if tb_writer is not None: + valid_info.write_summary( + tb_writer, "train/valid_", params.batch_idx_train + ) + + loss_value = tot_loss["loss"] / tot_loss["frames"] + params.train_loss = loss_value + if params.train_loss < params.best_train_loss: + params.best_train_epoch = params.cur_epoch + params.best_train_loss = params.train_loss + + +def run(rank, world_size, args): + """ + Args: + rank: + It is a value between 0 and `world_size-1`, which is + passed automatically by `mp.spawn()` in :func:`main`. + The node with rank 0 is responsible for saving checkpoint. + world_size: + Number of GPUs for DDP training. 
+      args:
+        The return value of get_parser().parse_args()
+    """
+    params = get_params()
+    params.update(vars(args))
+    if params.full_libri is False:
+        params.valid_interval = 1600
+
+    fix_random_seed(params.seed)
+    if world_size > 1:
+        setup_dist(rank, world_size, params.master_port)
+
+    setup_logger(f"{params.exp_dir}/log/log-train")
+    logging.info("Training started")
+
+    if args.tensorboard and rank == 0:
+        tb_writer = SummaryWriter(log_dir=f"{params.exp_dir}/tensorboard")
+    else:
+        tb_writer = None
+
+    device = torch.device("cpu")
+    if torch.cuda.is_available():
+        device = torch.device("cuda", rank)
+    logging.info(f"Device: {device}")
+
+    sp = spm.SentencePieceProcessor()
+    sp.load(params.bpe_model)
+
+    # <blk> is defined in local/train_bpe_model.py
+    params.blank_id = sp.piece_to_id("<blk>")
+    params.vocab_size = sp.get_piece_size()
+
+    logging.info(params)
+
+    logging.info("About to create model")
+    model = get_transducer_model(params)
+
+    num_param = sum([p.numel() for p in model.parameters()])
+    logging.info(f"Number of model parameters: {num_param}")
+
+    assert params.save_every_n >= params.average_period
+    model_avg: Optional[nn.Module] = None
+    if rank == 0:
+        # model_avg is only used with rank 0
+        model_avg = copy.deepcopy(model)
+
+    assert params.start_epoch > 0, params.start_epoch
+    checkpoints = load_checkpoint_if_available(
+        params=params, model=model, model_avg=model_avg
+    )
+
+    model.to(device)
+    if world_size > 1:
+        logging.info("Using DDP")
+        model = DDP(model, device_ids=[rank])
+
+    optimizer = Eve(model.parameters(), lr=params.initial_lr)
+
+    scheduler = Eden(optimizer, params.lr_batches, params.lr_epochs)
+
+    if checkpoints and "optimizer" in checkpoints:
+        logging.info("Loading optimizer state dict")
+        optimizer.load_state_dict(checkpoints["optimizer"])
+
+    if (
+        checkpoints
+        and "scheduler" in checkpoints
+        and checkpoints["scheduler"] is not None
+    ):
+        logging.info("Loading scheduler state dict")
+        scheduler.load_state_dict(checkpoints["scheduler"])
+
+    if params.print_diagnostics:
+        opts = diagnostics.TensorDiagnosticOptions(
+            2 ** 22
+        )  # allow 4 megabytes per sub-module
+        diagnostic = diagnostics.attach_diagnostics(model, opts)
+
+    librispeech = LibriSpeechAsrDataModule(args)
+
+    train_cuts = librispeech.train_clean_100_cuts()
+    if params.full_libri:
+        train_cuts += librispeech.train_clean_360_cuts()
+        train_cuts += librispeech.train_other_500_cuts()
+
+    def remove_short_and_long_utt(c: Cut):
+        # Keep only utterances with duration between 1 second and 20 seconds
+        #
+        # Caution: There is a reason to select 20.0 here.
Please see + # ../local/display_manifest_statistics.py + # + # You should use ../local/display_manifest_statistics.py to get + # an utterance duration distribution for your dataset to select + # the threshold + return 1.0 <= c.duration <= 20.0 + + train_cuts = train_cuts.filter(remove_short_and_long_utt) + + if params.start_batch > 0 and checkpoints and "sampler" in checkpoints: + # We only load the sampler's state dict when it loads a checkpoint + # saved in the middle of an epoch + sampler_state_dict = checkpoints["sampler"] + else: + sampler_state_dict = None + + train_dl = librispeech.train_dataloaders( + train_cuts, sampler_state_dict=sampler_state_dict + ) + + valid_cuts = librispeech.dev_clean_cuts() + valid_cuts += librispeech.dev_other_cuts() + valid_dl = librispeech.valid_dataloaders(valid_cuts) + + if not params.print_diagnostics: + scan_pessimistic_batches_for_oom( + model=model, + train_dl=train_dl, + optimizer=optimizer, + sp=sp, + params=params, + ) + + scaler = GradScaler(enabled=params.use_fp16) + if checkpoints and "grad_scaler" in checkpoints: + logging.info("Loading grad scaler state dict") + scaler.load_state_dict(checkpoints["grad_scaler"]) + + for epoch in range(params.start_epoch, params.num_epochs + 1): + scheduler.step_epoch(epoch - 1) + fix_random_seed(params.seed + epoch - 1) + train_dl.sampler.set_epoch(epoch - 1) + + if tb_writer is not None: + tb_writer.add_scalar("train/epoch", epoch, params.batch_idx_train) + + params.cur_epoch = epoch + + train_one_epoch( + params=params, + model=model, + model_avg=model_avg, + optimizer=optimizer, + scheduler=scheduler, + sp=sp, + train_dl=train_dl, + valid_dl=valid_dl, + scaler=scaler, + tb_writer=tb_writer, + world_size=world_size, + rank=rank, + ) + + if params.print_diagnostics: + diagnostic.print_diagnostics() + break + + save_checkpoint( + params=params, + model=model, + model_avg=model_avg, + optimizer=optimizer, + scheduler=scheduler, + sampler=train_dl.sampler, + scaler=scaler, + rank=rank, + ) + + logging.info("Done!") + + if world_size > 1: + torch.distributed.barrier() + cleanup_dist() + + +def scan_pessimistic_batches_for_oom( + model: Union[nn.Module, DDP], + train_dl: torch.utils.data.DataLoader, + optimizer: torch.optim.Optimizer, + sp: spm.SentencePieceProcessor, + params: AttributeDict, +): + from lhotse.dataset import find_pessimistic_batches + + logging.info( + "Sanity check -- see if any of the batches in epoch 1 would cause OOM." + ) + batches, crit_values = find_pessimistic_batches(train_dl.sampler) + for criterion, cuts in batches.items(): + batch = train_dl.dataset[cuts] + try: + # warmup = 0.0 is so that the derivs for the pruned loss stay zero + # (i.e. are not remembered by the decaying-average in adam), because + # we want to avoid these params being subject to shrinkage in adam. + with torch.cuda.amp.autocast(enabled=params.use_fp16): + loss, _ = compute_loss( + params=params, + model=model, + sp=sp, + batch=batch, + is_training=True, + warmup=0.0, + ) + loss.backward() + optimizer.step() + optimizer.zero_grad() + except RuntimeError as e: + if "CUDA out of memory" in str(e): + logging.error( + "Your GPU ran out of memory with the current " + "max_duration setting. We recommend decreasing " + "max_duration and trying again.\n" + f"Failing criterion: {criterion} " + f"(={crit_values[criterion]}) ..." 
+ ) + raise + + +def main(): + parser = get_parser() + LibriSpeechAsrDataModule.add_arguments(parser) + args = parser.parse_args() + args.exp_dir = Path(args.exp_dir) + + world_size = args.world_size + assert world_size >= 1 + if world_size > 1: + mp.spawn(run, args=(world_size, args), nprocs=world_size, join=True) + else: + run(rank=0, world_size=1, args=args) + + +torch.set_num_threads(1) +torch.set_num_interop_threads(1) + +if __name__ == "__main__": + main() From f8d28f09987d44f017a8741f8dac84357c1da6d6 Mon Sep 17 00:00:00 2001 From: "LIyong.Guo" <839019390@qq.com> Date: Wed, 13 Jul 2022 21:16:45 +0800 Subject: [PATCH 02/38] update multi_quantization installation (#469) * update multi_quantization installation * Update egs/librispeech/ASR/pruned_transducer_stateless6/train.py Co-authored-by: Fangjun Kuang Co-authored-by: Fangjun Kuang --- egs/librispeech/ASR/distillation_with_hubert.sh | 6 +++--- egs/librispeech/ASR/pruned_transducer_stateless6/model.py | 6 ++++-- egs/librispeech/ASR/pruned_transducer_stateless6/train.py | 5 +++++ 3 files changed, 12 insertions(+), 5 deletions(-) diff --git a/egs/librispeech/ASR/distillation_with_hubert.sh b/egs/librispeech/ASR/distillation_with_hubert.sh index 3d4c4856a..9c47e8eae 100755 --- a/egs/librispeech/ASR/distillation_with_hubert.sh +++ b/egs/librispeech/ASR/distillation_with_hubert.sh @@ -77,9 +77,9 @@ if [ $stage -le 0 ] && [ $stop_stage -ge 0 ] && [ ! "$use_extracted_codebook" == fi # Install quantization toolkit: - # pip install git+https://github.com/danpovey/quantization.git@master - # when testing this code: - # commit c17ffe67aa2e6ca6b6855c50fde812f2eed7870b is used. + # pip install git+https://github.com/k2-fsa/multi_quantization.git + # or + # pip install multi_quantization has_quantization=$(python3 -c "import importlib; print(importlib.util.find_spec('quantization') is not None)") if [ $has_quantization == 'False' ]; then diff --git a/egs/librispeech/ASR/pruned_transducer_stateless6/model.py b/egs/librispeech/ASR/pruned_transducer_stateless6/model.py index 66bb33e8d..1ed5636c8 100644 --- a/egs/librispeech/ASR/pruned_transducer_stateless6/model.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless6/model.py @@ -23,7 +23,7 @@ from scaling import ScaledLinear from icefall.utils import add_sos -from quantization.prediction import JointCodebookLoss +from multi_quantization.prediction import JointCodebookLoss class Transducer(nn.Module): @@ -75,7 +75,9 @@ class Transducer(nn.Module): self.simple_lm_proj = ScaledLinear(decoder_dim, vocab_size) if num_codebooks > 0: self.codebook_loss_net = JointCodebookLoss( - predictor_channels=encoder_dim, num_codebooks=num_codebooks + predictor_channels=encoder_dim, + num_codebooks=num_codebooks, + is_joint=False, ) def forward( diff --git a/egs/librispeech/ASR/pruned_transducer_stateless6/train.py b/egs/librispeech/ASR/pruned_transducer_stateless6/train.py index b904e1e59..c054527ca 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless6/train.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless6/train.py @@ -879,6 +879,11 @@ def run(rank, world_size, args): The return value of get_parser().parse_args() """ params = get_params() + + # Note: it's better to set --spec-aug-time-warpi-factor=-1 + # when doing distillation with vq. 
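+    # Time warping in SpecAugment changes the frame-level alignment, which
+    # would likely invalidate the pre-computed codebook indexes used for
+    # distillation; a --spec-aug-time-warp-factor value below 1 disables it.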
+ assert args.spec_aug_time_warp_factor < 1 + params.update(vars(args)) if params.full_libri is False: params.valid_interval = 1600 From c17233eca7d34c3f113abe6517c7e9ea1d74766a Mon Sep 17 00:00:00 2001 From: Yuekai Zhang Date: Thu, 14 Jul 2022 14:46:56 +0800 Subject: [PATCH 03/38] [Ready] [Recipes] add aishell2 (#465) * add aishell2 * fix aishell2 * add manifest stats * update prepare char dict * fix lint * setting max duration * lint * change context size to 1 * update result * update hf link * fix decoding comment * add more decoding methods * update result * change context-size 2 default --- egs/aishell2/ASR/README.md | 19 + egs/aishell2/ASR/RESULTS.md | 89 ++ egs/aishell2/ASR/local/__init__.py | 0 .../ASR/local/compute_fbank_aishell2.py | 114 ++ egs/aishell2/ASR/local/compute_fbank_musan.py | 1 + .../ASR/local/display_manifest_statistics.py | 96 ++ egs/aishell2/ASR/local/prepare_char.py | 1 + egs/aishell2/ASR/local/prepare_lang.py | 1 + egs/aishell2/ASR/local/prepare_words.py | 1 + egs/aishell2/ASR/local/text2segments.py | 1 + egs/aishell2/ASR/local/text2token.py | 1 + egs/aishell2/ASR/prepare.sh | 181 +++ .../pruned_transducer_stateless5/__init__.py | 0 .../asr_datamodule.py | 418 ++++++ .../beam_search.py | 1 + .../pruned_transducer_stateless5/conformer.py | 1 + .../pruned_transducer_stateless5/decode.py | 791 ++++++++++++ .../pruned_transducer_stateless5/decoder.py | 1 + .../encoder_interface.py | 1 + .../pruned_transducer_stateless5/export.py | 274 ++++ .../pruned_transducer_stateless5/joiner.py | 1 + .../ASR/pruned_transducer_stateless5/model.py | 1 + .../ASR/pruned_transducer_stateless5/optim.py | 1 + .../pretrained.py | 342 +++++ .../pruned_transducer_stateless5/scaling.py | 1 + .../ASR/pruned_transducer_stateless5/train.py | 1131 +++++++++++++++++ egs/aishell2/ASR/shared | 1 + egs/wenetspeech/ASR/local/prepare_words.py | 10 + 28 files changed, 3480 insertions(+) create mode 100644 egs/aishell2/ASR/README.md create mode 100644 egs/aishell2/ASR/RESULTS.md create mode 100755 egs/aishell2/ASR/local/__init__.py create mode 100755 egs/aishell2/ASR/local/compute_fbank_aishell2.py create mode 120000 egs/aishell2/ASR/local/compute_fbank_musan.py create mode 100755 egs/aishell2/ASR/local/display_manifest_statistics.py create mode 120000 egs/aishell2/ASR/local/prepare_char.py create mode 120000 egs/aishell2/ASR/local/prepare_lang.py create mode 120000 egs/aishell2/ASR/local/prepare_words.py create mode 120000 egs/aishell2/ASR/local/text2segments.py create mode 120000 egs/aishell2/ASR/local/text2token.py create mode 100755 egs/aishell2/ASR/prepare.sh create mode 100755 egs/aishell2/ASR/pruned_transducer_stateless5/__init__.py create mode 100755 egs/aishell2/ASR/pruned_transducer_stateless5/asr_datamodule.py create mode 120000 egs/aishell2/ASR/pruned_transducer_stateless5/beam_search.py create mode 120000 egs/aishell2/ASR/pruned_transducer_stateless5/conformer.py create mode 100755 egs/aishell2/ASR/pruned_transducer_stateless5/decode.py create mode 120000 egs/aishell2/ASR/pruned_transducer_stateless5/decoder.py create mode 120000 egs/aishell2/ASR/pruned_transducer_stateless5/encoder_interface.py create mode 100755 egs/aishell2/ASR/pruned_transducer_stateless5/export.py create mode 120000 egs/aishell2/ASR/pruned_transducer_stateless5/joiner.py create mode 120000 egs/aishell2/ASR/pruned_transducer_stateless5/model.py create mode 120000 egs/aishell2/ASR/pruned_transducer_stateless5/optim.py create mode 100755 egs/aishell2/ASR/pruned_transducer_stateless5/pretrained.py create mode 120000 
egs/aishell2/ASR/pruned_transducer_stateless5/scaling.py create mode 100755 egs/aishell2/ASR/pruned_transducer_stateless5/train.py create mode 120000 egs/aishell2/ASR/shared diff --git a/egs/aishell2/ASR/README.md b/egs/aishell2/ASR/README.md new file mode 100644 index 000000000..ba38a1ec7 --- /dev/null +++ b/egs/aishell2/ASR/README.md @@ -0,0 +1,19 @@ + +# Introduction + +This recipe includes some different ASR models trained with Aishell2. + +[./RESULTS.md](./RESULTS.md) contains the latest results. + +# Transducers + +There are various folders containing the name `transducer` in this folder. +The following table lists the differences among them. + +| | Encoder | Decoder | Comment | +|---------------------------------------|---------------------|--------------------|-----------------------------| +| `pruned_transducer_stateless5` | Conformer(modified) | Embedding + Conv1d | same as pruned_transducer_stateless5 in librispeech recipe | + +The decoder in `transducer_stateless` is modified from the paper +[Rnn-Transducer with Stateless Prediction Network](https://ieeexplore.ieee.org/document/9054419/). +We place an additional Conv1d layer right after the input embedding layer. diff --git a/egs/aishell2/ASR/RESULTS.md b/egs/aishell2/ASR/RESULTS.md new file mode 100644 index 000000000..7114bd5f5 --- /dev/null +++ b/egs/aishell2/ASR/RESULTS.md @@ -0,0 +1,89 @@ +## Results + +### Aishell2 char-based training results (Pruned Transducer 5) + +#### 2022-07-11 + +Using the codes from this commit https://github.com/k2-fsa/icefall/pull/465. + +When training with context size equals to 1, the WERs are + +| | dev-ios | test-ios | comment | +|------------------------------------|-------|----------|----------------------------------| +| greedy search | 5.57 | 5.89 | --epoch 25, --avg 5, --max-duration 600 | +| modified beam search (beam size 4) | 5.32 | 5.56 | --epoch 25, --avg 5, --max-duration 600 | +| fast beam search (set as default) | 5.5 | 5.78 | --epoch 25, --avg 5, --max-duration 600 | +| fast beam search nbest | 5.46 | 5.74 | --epoch 25, --avg 5, --max-duration 600 | +| fast beam search oracle | 1.92 | 2.2 | --epoch 25, --avg 5, --max-duration 600 | +| fast beam search nbest LG | 5.59 | 5.93 | --epoch 25, --avg 5, --max-duration 600 | + +The training command for reproducing is given below: + +```bash +export CUDA_VISIBLE_DEVICES="0,1,2,3" + +./pruned_transducer_stateless5/train.py \ + --world-size 4 \ + --lang-dir data/lang_char \ + --num-epochs 40 \ + --start-epoch 1 \ + --exp-dir /result \ + --max-duration 300 \ + --use-fp16 0 \ + --num-encoder-layers 24 \ + --dim-feedforward 1536 \ + --nhead 8 \ + --encoder-dim 384 \ + --decoder-dim 512 \ + --joiner-dim 512 \ + --context-size 1 +``` + +The decoding command is: +```bash +for method in greedy_search modified_beam_search \ + fast_beam_search fast_beam_search_nbest \ + fast_beam_search_nbest_oracle fast_beam_search_nbest_LG; do + ./pruned_transducer_stateless5/decode.py \ + --epoch 25 \ + --avg 5 \ + --exp-dir ./pruned_transducer_stateless5/exp \ + --max-duration 600 \ + --decoding-method $method \ + --max-sym-per-frame 1 \ + --num-encoder-layers 24 \ + --dim-feedforward 1536 \ + --nhead 8 \ + --encoder-dim 384 \ + --decoder-dim 512 \ + --joiner-dim 512 \ + --context-size 1 \ + --beam 20.0 \ + --max-contexts 8 \ + --max-states 64 \ + --num-paths 200 \ + --nbest-scale 0.5 \ + --context-size 1 \ + --use-averaged-model True +done +``` +The tensorboard training log can be found at +https://tensorboard.dev/experiment/RXyX4QjQQVKjBS2eQ2Qajg/#scalars + 
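As context for the context-size comparison in these tables: `--context-size` sets how many previously emitted tokens the stateless decoder conditions on (1 means bigram, 2 means trigram), realized as an embedding layer followed by a Conv1d over those tokens. The sketch below is only a rough illustration of that idea; the class and argument names (`StatelessDecoder`, `decoder_dim`, etc.) are placeholders and are not taken from the recipe's `decoder.py`.

```python
import torch
import torch.nn as nn


class StatelessDecoder(nn.Module):
    """Embedding + causal Conv1d over the last `context_size` tokens (sketch only)."""

    def __init__(self, vocab_size: int, decoder_dim: int, context_size: int):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, decoder_dim)
        self.context_size = context_size
        if context_size > 1:
            # Depthwise conv: each embedding channel only mixes across the
            # `context_size` most recent tokens, not across channels.
            self.conv = nn.Conv1d(
                decoder_dim,
                decoder_dim,
                kernel_size=context_size,
                groups=decoder_dim,
            )

    def forward(self, y: torch.Tensor) -> torch.Tensor:
        # y: (N, U) ids of previously emitted tokens.
        emb = self.embedding(y)  # (N, U, decoder_dim)
        if self.context_size > 1:
            emb = emb.permute(0, 2, 1)  # (N, decoder_dim, U)
            # Left-pad so position u only sees tokens u-context_size+1 .. u.
            emb = nn.functional.pad(emb, (self.context_size - 1, 0))
            emb = self.conv(emb).permute(0, 2, 1)
        return torch.relu(emb)


# E.g. with --context-size 2 each decoder frame depends on the two most
# recent tokens:
decoder = StatelessDecoder(vocab_size=500, decoder_dim=512, context_size=2)
out = decoder(torch.randint(0, 500, (4, 10)))  # -> shape (4, 10, 512)
```

With `--context-size 1` the Conv1d is unnecessary and the prediction network reduces to a per-token embedding, which is what makes it "stateless".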
+A pre-trained model and decoding logs can be found at + +When training with context size equals to 2, the WERs are + +| | dev-ios | test-ios | comment | +|------------------------------------|-------|----------|----------------------------------| +| greedy search | 5.47 | 5.81 | --epoch 25, --avg 5, --max-duration 600 | +| modified beam search (beam size 4) | 5.38 | 5.61 | --epoch 25, --avg 5, --max-duration 600 | +| fast beam search (set as default) | 5.36 | 5.61 | --epoch 25, --avg 5, --max-duration 600 | +| fast beam search nbest | 5.37 | 5.6 | --epoch 25, --avg 5, --max-duration 600 | +| fast beam search oracle | 2.04 | 2.2 | --epoch 25, --avg 5, --max-duration 600 | +| fast beam search nbest LG | 5.59 | 5.82 | --epoch 25, --avg 5, --max-duration 600 | + +The tensorboard training log can be found at +https://tensorboard.dev/experiment/5AxJ8LHoSre8kDAuLp4L7Q/#scalars + +A pre-trained model and decoding logs can be found at diff --git a/egs/aishell2/ASR/local/__init__.py b/egs/aishell2/ASR/local/__init__.py new file mode 100755 index 000000000..e69de29bb diff --git a/egs/aishell2/ASR/local/compute_fbank_aishell2.py b/egs/aishell2/ASR/local/compute_fbank_aishell2.py new file mode 100755 index 000000000..7bc969a1a --- /dev/null +++ b/egs/aishell2/ASR/local/compute_fbank_aishell2.py @@ -0,0 +1,114 @@ +#!/usr/bin/env python3 +# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +""" +This file computes fbank features of the aishell2 dataset. +It looks for manifests in the directory data/manifests. + +The generated fbank features are saved in data/fbank. +""" + +import argparse +import logging +import os +from pathlib import Path + +import torch +from lhotse import CutSet, Fbank, FbankConfig, LilcomChunkyWriter +from lhotse.recipes.utils import read_manifests_if_cached + +from icefall.utils import get_executor + +# Torch's multithreaded behavior needs to be disabled or +# it wastes a lot of CPU and slow things down. +# Do this outside of main() in case it needs to take effect +# even when we are not invoking the main (e.g. when spawning subprocesses). +torch.set_num_threads(1) +torch.set_num_interop_threads(1) + + +def compute_fbank_aishell2(num_mel_bins: int = 80): + src_dir = Path("data/manifests") + output_dir = Path("data/fbank") + num_jobs = min(15, os.cpu_count()) + + dataset_parts = ( + "train", + "dev", + "test", + ) + prefix = "aishell2" + suffix = "jsonl.gz" + manifests = read_manifests_if_cached( + dataset_parts=dataset_parts, + output_dir=src_dir, + prefix=prefix, + suffix=suffix, + ) + assert manifests is not None + + extractor = Fbank(FbankConfig(num_mel_bins=num_mel_bins)) + + with get_executor() as ex: # Initialize the executor only once. 
+ for partition, m in manifests.items(): + if (output_dir / f"{prefix}_cuts_{partition}.{suffix}").is_file(): + logging.info(f"{partition} already exists - skipping.") + continue + logging.info(f"Processing {partition}") + cut_set = CutSet.from_manifests( + recordings=m["recordings"], + supervisions=m["supervisions"], + ) + if "train" in partition: + cut_set = ( + cut_set + + cut_set.perturb_speed(0.9) + + cut_set.perturb_speed(1.1) + ) + cut_set = cut_set.compute_and_store_features( + extractor=extractor, + storage_path=f"{output_dir}/{prefix}_feats_{partition}", + # when an executor is specified, make more partitions + num_jobs=num_jobs if ex is None else 80, + executor=ex, + storage_type=LilcomChunkyWriter, + ) + cut_set.to_file(output_dir / f"{prefix}_cuts_{partition}.{suffix}") + + +def get_args(): + parser = argparse.ArgumentParser() + parser.add_argument( + "--num-mel-bins", + type=int, + default=80, + help="""The number of mel bins for Fbank""", + ) + + return parser.parse_args() + + +if __name__ == "__main__": + formatter = ( + "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s" + ) + + logging.basicConfig(format=formatter, level=logging.INFO) + + args = get_args() + compute_fbank_aishell2(num_mel_bins=args.num_mel_bins) diff --git a/egs/aishell2/ASR/local/compute_fbank_musan.py b/egs/aishell2/ASR/local/compute_fbank_musan.py new file mode 120000 index 000000000..5833f2484 --- /dev/null +++ b/egs/aishell2/ASR/local/compute_fbank_musan.py @@ -0,0 +1 @@ +../../../librispeech/ASR/local/compute_fbank_musan.py \ No newline at end of file diff --git a/egs/aishell2/ASR/local/display_manifest_statistics.py b/egs/aishell2/ASR/local/display_manifest_statistics.py new file mode 100755 index 000000000..14844cbf3 --- /dev/null +++ b/egs/aishell2/ASR/local/display_manifest_statistics.py @@ -0,0 +1,96 @@ +#!/usr/bin/env python3 +# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +This file displays duration statistics of utterances in a manifest. +You can use the displayed value to choose minimum/maximum duration +to remove short and long utterances during the training. + +See the function `remove_short_and_long_utt()` in transducer_stateless/train.py +for usage. 
+""" + + +from lhotse import load_manifest_lazy + + +def main(): + paths = [ + "./data/fbank/aishell2_cuts_train.jsonl.gz", + "./data/fbank/aishell2_cuts_dev.jsonl.gz", + "./data/fbank/aishell2_cuts_test.jsonl.gz", + ] + + for path in paths: + print(f"Starting display the statistics for {path}") + cuts = load_manifest_lazy(path) + cuts.describe() + + +if __name__ == "__main__": + main() + +""" +Starting display the statistics for ./data/fbank/aishell2_cuts_train.jsonl.gz +Cuts count: 3026106 +Total duration (hours): 3021.2 +Speech duration (hours): 3021.2 (100.0%) +*** +Duration statistics (seconds): +mean 3.6 +std 1.5 +min 0.3 +25% 2.4 +50% 3.3 +75% 4.4 +99% 8.2 +99.5% 8.9 +99.9% 10.6 +max 21.5 +Starting display the statistics for ./data/fbank/aishell2_cuts_dev.jsonl.gz +Cuts count: 2500 +Total duration (hours): 2.0 +Speech duration (hours): 2.0 (100.0%) +*** +Duration statistics (seconds): +mean 2.9 +std 1.0 +min 1.1 +25% 2.2 +50% 2.7 +75% 3.4 +99% 6.3 +99.5% 6.7 +99.9% 7.8 +max 9.4 +Starting display the statistics for ./data/fbank/aishell2_cuts_test.jsonl.gz +Cuts count: 5000 +Total duration (hours): 4.0 +Speech duration (hours): 4.0 (100.0%) +*** +Duration statistics (seconds): +mean 2.9 +std 1.0 +min 1.1 +25% 2.2 +50% 2.7 +75% 3.3 +99% 6.2 +99.5% 6.6 +99.9% 7.7 +max 8.5 +""" diff --git a/egs/aishell2/ASR/local/prepare_char.py b/egs/aishell2/ASR/local/prepare_char.py new file mode 120000 index 000000000..8779181e5 --- /dev/null +++ b/egs/aishell2/ASR/local/prepare_char.py @@ -0,0 +1 @@ +../../../aidatatang_200zh/ASR/local/prepare_char.py \ No newline at end of file diff --git a/egs/aishell2/ASR/local/prepare_lang.py b/egs/aishell2/ASR/local/prepare_lang.py new file mode 120000 index 000000000..5d88dc1c8 --- /dev/null +++ b/egs/aishell2/ASR/local/prepare_lang.py @@ -0,0 +1 @@ +../../../wenetspeech/ASR/local/prepare_lang.py \ No newline at end of file diff --git a/egs/aishell2/ASR/local/prepare_words.py b/egs/aishell2/ASR/local/prepare_words.py new file mode 120000 index 000000000..e58fabb8f --- /dev/null +++ b/egs/aishell2/ASR/local/prepare_words.py @@ -0,0 +1 @@ +../../../wenetspeech/ASR/local/prepare_words.py \ No newline at end of file diff --git a/egs/aishell2/ASR/local/text2segments.py b/egs/aishell2/ASR/local/text2segments.py new file mode 120000 index 000000000..7d68a39c3 --- /dev/null +++ b/egs/aishell2/ASR/local/text2segments.py @@ -0,0 +1 @@ +../../../wenetspeech/ASR/local/text2segments.py \ No newline at end of file diff --git a/egs/aishell2/ASR/local/text2token.py b/egs/aishell2/ASR/local/text2token.py new file mode 120000 index 000000000..81e459d69 --- /dev/null +++ b/egs/aishell2/ASR/local/text2token.py @@ -0,0 +1 @@ +../../../aidatatang_200zh/ASR/local/text2token.py \ No newline at end of file diff --git a/egs/aishell2/ASR/prepare.sh b/egs/aishell2/ASR/prepare.sh new file mode 100755 index 000000000..06810bfdd --- /dev/null +++ b/egs/aishell2/ASR/prepare.sh @@ -0,0 +1,181 @@ +#!/usr/bin/env bash + +set -eou pipefail + +nj=30 +stage=0 +stop_stage=5 + +# We assume dl_dir (download dir) contains the following +# directories and files. If not, you need to apply aishell2 through +# their official website. +# https://www.aishelltech.com/aishell_2 +# +# - $dl_dir/aishell2 +# +# +# - $dl_dir/musan +# This directory contains the following directories downloaded from +# http://www.openslr.org/17/ +# +# - music +# - noise +# - speech + +dl_dir=$PWD/download + +. shared/parse_options.sh || exit 1 + +# All files generated by this script are saved in "data". 
+# You can safely remove "data" and rerun this script to regenerate it. +mkdir -p data + +log() { + # This function is from espnet + local fname=${BASH_SOURCE[1]##*/} + echo -e "$(date '+%Y-%m-%d %H:%M:%S') (${fname}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*" +} + +log "dl_dir: $dl_dir" + +if [ $stage -le 0 ] && [ $stop_stage -ge 0 ]; then + log "stage 0: Download data" + + # If you have pre-downloaded it to /path/to/aishell2, + # you can create a symlink + # + # ln -sfv /path/to/aishell2 $dl_dir/aishell2 + # + # The directory structure is + # aishell2/ + # |-- AISHELL-2 + # | |-- iOS + # |-- data + # |-- wav + # |-- trans.txt + # |-- dev + # |-- wav + # |-- trans.txt + # |-- test + # |-- wav + # |-- trans.txt + + + # If you have pre-downloaded it to /path/to/musan, + # you can create a symlink + # + # ln -sfv /path/to/musan $dl_dir/musan + # + if [ ! -d $dl_dir/musan ]; then + lhotse download musan $dl_dir + fi +fi + +if [ $stage -le 1 ] && [ $stop_stage -ge 1 ]; then + log "Stage 1: Prepare aishell2 manifest" + # We assume that you have downloaded and unzip the aishell2 corpus + # to $dl_dir/aishell2 + if [ ! -f data/manifests/.aishell2_manifests.done ]; then + mkdir -p data/manifests + lhotse prepare aishell2 $dl_dir/aishell2 data/manifests -j $nj + touch data/manifests/.aishell2_manifests.done + fi +fi + +if [ $stage -le 2 ] && [ $stop_stage -ge 2 ]; then + log "Stage 2: Prepare musan manifest" + # We assume that you have downloaded the musan corpus + # to data/musan + if [ ! -f data/manifests/.musan_manifests.done ]; then + log "It may take 6 minutes" + mkdir -p data/manifests + lhotse prepare musan $dl_dir/musan data/manifests + touch data/manifests/.musan_manifests.done + fi +fi + +if [ $stage -le 3 ] && [ $stop_stage -ge 3 ]; then + log "Stage 3: Compute fbank for aishell2" + if [ ! -f data/fbank/.aishell2.done ]; then + mkdir -p data/fbank + ./local/compute_fbank_aishell2.py + touch data/fbank/.aishell2.done + fi +fi + +if [ $stage -le 4 ] && [ $stop_stage -ge 4 ]; then + log "Stage 4: Compute fbank for musan" + if [ ! -f data/fbank/.msuan.done ]; then + mkdir -p data/fbank + ./local/compute_fbank_musan.py + touch data/fbank/.msuan.done + fi +fi + +lang_char_dir=data/lang_char +if [ $stage -le 5 ] && [ $stop_stage -ge 5 ]; then + log "Stage 5: Prepare char based lang" + mkdir -p $lang_char_dir + + # Prepare text. + # Note: in Linux, you can install jq with the following command: + # 1. wget -O jq https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64 + # 2. chmod +x ./jq + # 3. cp jq /usr/bin + if [ ! -f $lang_char_dir/text ]; then + gunzip -c data/manifests/aishell2_supervisions_train.jsonl.gz \ + | jq '.text' | sed 's/"//g' \ + | ./local/text2token.py -t "char" > $lang_char_dir/text + fi + + # The implementation of chinese word segmentation for text, + # and it will take about 15 minutes. + # If you can't install paddle-tiny with python 3.8, please refer to + # https://github.com/fxsjy/jieba/issues/920 + if [ ! -f $lang_char_dir/text_words_segmentation ]; then + python3 ./local/text2segments.py \ + --input-file $lang_char_dir/text \ + --output-file $lang_char_dir/text_words_segmentation + fi + + cat $lang_char_dir/text_words_segmentation | sed 's/ /\n/g' \ + | sort -u | sed '/^$/d' | uniq > $lang_char_dir/words_no_ids.txt + + if [ ! -f $lang_char_dir/words.txt ]; then + python3 ./local/prepare_words.py \ + --input-file $lang_char_dir/words_no_ids.txt \ + --output-file $lang_char_dir/words.txt + fi + + if [ ! 
-f $lang_char_dir/L_disambig.pt ]; then + python3 ./local/prepare_char.py + fi +fi + +if [ $stage -le 6 ] && [ $stop_stage -ge 6 ]; then + log "Stage 6: Prepare G" + # We assume you have install kaldilm, if not, please install + # it using: pip install kaldilm + + if [ ! -f ${lang_char_dir}/3-gram.unpruned.arpa ]; then + ./shared/make_kn_lm.py \ + -ngram-order 3 \ + -text $lang_char_dir/text_words_segmentation \ + -lm $lang_char_dir/3-gram.unpruned.arpa + fi + + mkdir -p data/lm + if [ ! -f data/lm/G_3_gram.fst.txt ]; then + # It is used in building LG + python3 -m kaldilm \ + --read-symbol-table="$lang_char_dir/words.txt" \ + --disambig-symbol='#0' \ + --max-order=3 \ + $lang_char_dir/3-gram.unpruned.arpa > data/lm/G_3_gram.fst.txt + fi +fi + +if [ $stage -le 7 ] && [ $stop_stage -ge 7 ]; then + log "Stage 7: Compile LG" + ./local/compile_lg.py --lang-dir $lang_char_dir +fi diff --git a/egs/aishell2/ASR/pruned_transducer_stateless5/__init__.py b/egs/aishell2/ASR/pruned_transducer_stateless5/__init__.py new file mode 100755 index 000000000..e69de29bb diff --git a/egs/aishell2/ASR/pruned_transducer_stateless5/asr_datamodule.py b/egs/aishell2/ASR/pruned_transducer_stateless5/asr_datamodule.py new file mode 100755 index 000000000..b7a21f579 --- /dev/null +++ b/egs/aishell2/ASR/pruned_transducer_stateless5/asr_datamodule.py @@ -0,0 +1,418 @@ +# Copyright 2021 Piotr Żelasko +# Copyright 2022 Xiaomi Corporation (Author: Mingshuang Luo) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import argparse +import inspect +import logging +from functools import lru_cache +from pathlib import Path +from typing import Any, Dict, Optional + +import torch +from lhotse import CutSet, Fbank, FbankConfig, load_manifest, load_manifest_lazy +from lhotse.dataset import ( # noqa F401 for PrecomputedFeatures + CutConcatenate, + CutMix, + DynamicBucketingSampler, + K2SpeechRecognitionDataset, + PrecomputedFeatures, + SingleCutSampler, + SpecAugment, +) +from lhotse.dataset.input_strategies import ( # noqa F401 For AudioSamples + AudioSamples, + OnTheFlyFeatures, +) +from lhotse.utils import fix_random_seed +from torch.utils.data import DataLoader + +from icefall.utils import str2bool + + +class _SeedWorkers: + def __init__(self, seed: int): + self.seed = seed + + def __call__(self, worker_id: int): + fix_random_seed(self.seed + worker_id) + + +class AiShell2AsrDataModule: + """ + DataModule for k2 ASR experiments. + It assumes there is always one train and valid dataloader, + but there can be multiple test dataloaders (e.g. ios, android, mic). + + It contains all the common data pipeline modules used in ASR + experiments, e.g.: + - dynamic batch size, + - bucketing samplers, + - cut concatenation, + - augmentation, + - on-the-fly feature extraction + + This class should be derived for specific corpora used in ASR tasks. 
+ """ + + def __init__(self, args: argparse.Namespace): + self.args = args + + @classmethod + def add_arguments(cls, parser: argparse.ArgumentParser): + group = parser.add_argument_group( + title="ASR data related options", + description="These options are used for the preparation of " + "PyTorch DataLoaders from Lhotse CutSet's -- they control the " + "effective batch sizes, sampling strategies, applied data " + "augmentations, etc.", + ) + group.add_argument( + "--manifest-dir", + type=Path, + default=Path("data/fbank"), + help="Path to directory with train/valid/test cuts.", + ) + group.add_argument( + "--max-duration", + type=int, + default=200.0, + help="Maximum pooled recordings duration (seconds) in a " + "single batch. You can reduce it if it causes CUDA OOM.", + ) + group.add_argument( + "--bucketing-sampler", + type=str2bool, + default=True, + help="When enabled, the batches will come from buckets of " + "similar duration (saves padding frames).", + ) + group.add_argument( + "--num-buckets", + type=int, + default=30, + help="The number of buckets for the DynamicBucketingSampler" + "(you might want to increase it for larger datasets).", + ) + group.add_argument( + "--concatenate-cuts", + type=str2bool, + default=False, + help="When enabled, utterances (cuts) will be concatenated " + "to minimize the amount of padding.", + ) + group.add_argument( + "--duration-factor", + type=float, + default=1.0, + help="Determines the maximum duration of a concatenated cut " + "relative to the duration of the longest cut in a batch.", + ) + group.add_argument( + "--gap", + type=float, + default=1.0, + help="The amount of padding (in seconds) inserted between " + "concatenated cuts. This padding is filled with noise when " + "noise augmentation is used.", + ) + group.add_argument( + "--on-the-fly-feats", + type=str2bool, + default=False, + help="When enabled, use on-the-fly cut mixing and feature " + "extraction. Will drop existing precomputed feature manifests " + "if available.", + ) + group.add_argument( + "--shuffle", + type=str2bool, + default=True, + help="When enabled (=default), the examples will be " + "shuffled for each epoch.", + ) + group.add_argument( + "--drop-last", + type=str2bool, + default=True, + help="Whether to drop last batch. Used by sampler.", + ) + group.add_argument( + "--return-cuts", + type=str2bool, + default=True, + help="When enabled, each batch will have the " + "field: batch['supervisions']['cut'] with the cuts that " + "were used to construct it.", + ) + + group.add_argument( + "--num-workers", + type=int, + default=2, + help="The number of training dataloader workers that " + "collect the batches.", + ) + + group.add_argument( + "--enable-spec-aug", + type=str2bool, + default=True, + help="When enabled, use SpecAugment for training dataset.", + ) + + group.add_argument( + "--spec-aug-time-warp-factor", + type=int, + default=80, + help="Used only when --enable-spec-aug is True. " + "It specifies the factor for time warping in SpecAugment. " + "Larger values mean more warping. " + "A value less than 1 means to disable time warp.", + ) + + group.add_argument( + "--enable-musan", + type=str2bool, + default=True, + help="When enabled, select noise from MUSAN and mix it" + "with training dataset. 
", + ) + + group.add_argument( + "--input-strategy", + type=str, + default="PrecomputedFeatures", + help="AudioSamples or PrecomputedFeatures", + ) + + def train_dataloaders( + self, + cuts_train: CutSet, + sampler_state_dict: Optional[Dict[str, Any]] = None, + ) -> DataLoader: + """ + Args: + cuts_train: + CutSet for training. + sampler_state_dict: + The state dict for the training sampler. + """ + transforms = [] + if self.args.enable_musan: + logging.info("Enable MUSAN") + logging.info("About to get Musan cuts") + cuts_musan = load_manifest( + self.args.manifest_dir / "musan_cuts.jsonl.gz" + ) + transforms.append( + CutMix( + cuts=cuts_musan, prob=0.5, snr=(10, 20), preserve_id=True + ) + ) + else: + logging.info("Disable MUSAN") + + if self.args.concatenate_cuts: + logging.info( + f"Using cut concatenation with duration factor " + f"{self.args.duration_factor} and gap {self.args.gap}." + ) + # Cut concatenation should be the first transform in the list, + # so that if we e.g. mix noise in, it will fill the gaps between + # different utterances. + transforms = [ + CutConcatenate( + duration_factor=self.args.duration_factor, gap=self.args.gap + ) + ] + transforms + + input_transforms = [] + if self.args.enable_spec_aug: + logging.info("Enable SpecAugment") + logging.info( + f"Time warp factor: {self.args.spec_aug_time_warp_factor}" + ) + # Set the value of num_frame_masks according to Lhotse's version. + # In different Lhotse's versions, the default of num_frame_masks is + # different. + num_frame_masks = 10 + num_frame_masks_parameter = inspect.signature( + SpecAugment.__init__ + ).parameters["num_frame_masks"] + if num_frame_masks_parameter.default == 1: + num_frame_masks = 2 + logging.info(f"Num frame mask: {num_frame_masks}") + input_transforms.append( + SpecAugment( + time_warp_factor=self.args.spec_aug_time_warp_factor, + num_frame_masks=num_frame_masks, + features_mask_size=27, + num_feature_masks=2, + frames_mask_size=100, + ) + ) + else: + logging.info("Disable SpecAugment") + + logging.info("About to create train dataset") + train = K2SpeechRecognitionDataset( + input_strategy=eval(self.args.input_strategy)(), + cut_transforms=transforms, + input_transforms=input_transforms, + return_cuts=self.args.return_cuts, + ) + + if self.args.on_the_fly_feats: + # NOTE: the PerturbSpeed transform should be added only if we + # remove it from data prep stage. + # Add on-the-fly speed perturbation; since originally it would + # have increased epoch size by 3, we will apply prob 2/3 and use + # 3x more epochs. + # Speed perturbation probably should come first before + # concatenation, but in principle the transforms order doesn't have + # to be strict (e.g. could be randomized) + # transforms = [PerturbSpeed(factors=[0.9, 1.1], p=2/3)] + transforms # noqa + # Drop feats to be on the safe side. 
+ train = K2SpeechRecognitionDataset( + cut_transforms=transforms, + input_strategy=OnTheFlyFeatures( + Fbank(FbankConfig(num_mel_bins=80)) + ), + input_transforms=input_transforms, + return_cuts=self.args.return_cuts, + ) + + if self.args.bucketing_sampler: + logging.info("Using DynamicBucketingSampler.") + train_sampler = DynamicBucketingSampler( + cuts_train, + max_duration=self.args.max_duration, + shuffle=self.args.shuffle, + num_buckets=self.args.num_buckets, + drop_last=self.args.drop_last, + ) + else: + logging.info("Using SingleCutSampler.") + train_sampler = SingleCutSampler( + cuts_train, + max_duration=self.args.max_duration, + shuffle=self.args.shuffle, + ) + logging.info("About to create train dataloader") + + if sampler_state_dict is not None: + logging.info("Loading sampler state dict") + train_sampler.load_state_dict(sampler_state_dict) + + # 'seed' is derived from the current random state, which will have + # previously been set in the main process. + seed = torch.randint(0, 100000, ()).item() + worker_init_fn = _SeedWorkers(seed) + + train_dl = DataLoader( + train, + sampler=train_sampler, + batch_size=None, + num_workers=self.args.num_workers, + persistent_workers=False, + worker_init_fn=worker_init_fn, + ) + + return train_dl + + def valid_dataloaders(self, cuts_valid: CutSet) -> DataLoader: + transforms = [] + if self.args.concatenate_cuts: + transforms = [ + CutConcatenate( + duration_factor=self.args.duration_factor, gap=self.args.gap + ) + ] + transforms + + logging.info("About to create dev dataset") + if self.args.on_the_fly_feats: + validate = K2SpeechRecognitionDataset( + cut_transforms=transforms, + input_strategy=OnTheFlyFeatures( + Fbank(FbankConfig(num_mel_bins=80)) + ), + return_cuts=self.args.return_cuts, + ) + else: + validate = K2SpeechRecognitionDataset( + cut_transforms=transforms, + return_cuts=self.args.return_cuts, + ) + valid_sampler = DynamicBucketingSampler( + cuts_valid, + max_duration=self.args.max_duration, + shuffle=False, + ) + logging.info("About to create dev dataloader") + valid_dl = DataLoader( + validate, + sampler=valid_sampler, + batch_size=None, + num_workers=2, + persistent_workers=False, + ) + + return valid_dl + + def test_dataloaders(self, cuts: CutSet) -> DataLoader: + logging.debug("About to create test dataset") + test = K2SpeechRecognitionDataset( + input_strategy=OnTheFlyFeatures(Fbank(FbankConfig(num_mel_bins=80))) + if self.args.on_the_fly_feats + else eval(self.args.input_strategy)(), + return_cuts=self.args.return_cuts, + ) + sampler = DynamicBucketingSampler( + cuts, + max_duration=self.args.max_duration, + shuffle=False, + ) + logging.debug("About to create test dataloader") + test_dl = DataLoader( + test, + batch_size=None, + sampler=sampler, + num_workers=self.args.num_workers, + ) + return test_dl + + @lru_cache() + def train_cuts(self) -> CutSet: + logging.info("About to gen cuts from aishell2_cuts_train.jsonl.gz") + return load_manifest_lazy( + self.args.manifest_dir / "aishell2_cuts_train.jsonl.gz" + ) + + @lru_cache() + def valid_cuts(self) -> CutSet: + logging.info("About to gen cuts from aishell2_cuts_dev.jsonl.gz") + return load_manifest_lazy( + self.args.manifest_dir / "aishell2_cuts_dev.jsonl.gz" + ) + + @lru_cache() + def test_cuts(self) -> CutSet: + logging.info("About to gen cuts from aishell2_cuts_test.jsonl.gz") + return load_manifest_lazy( + self.args.manifest_dir / "aishell2_cuts_test.jsonl.gz" + ) diff --git a/egs/aishell2/ASR/pruned_transducer_stateless5/beam_search.py 
b/egs/aishell2/ASR/pruned_transducer_stateless5/beam_search.py new file mode 120000 index 000000000..e24eca39f --- /dev/null +++ b/egs/aishell2/ASR/pruned_transducer_stateless5/beam_search.py @@ -0,0 +1 @@ +../../../librispeech/ASR/pruned_transducer_stateless2/beam_search.py \ No newline at end of file diff --git a/egs/aishell2/ASR/pruned_transducer_stateless5/conformer.py b/egs/aishell2/ASR/pruned_transducer_stateless5/conformer.py new file mode 120000 index 000000000..c7c1a4b6e --- /dev/null +++ b/egs/aishell2/ASR/pruned_transducer_stateless5/conformer.py @@ -0,0 +1 @@ +../../../librispeech/ASR/pruned_transducer_stateless5/conformer.py \ No newline at end of file diff --git a/egs/aishell2/ASR/pruned_transducer_stateless5/decode.py b/egs/aishell2/ASR/pruned_transducer_stateless5/decode.py new file mode 100755 index 000000000..f03bd34d3 --- /dev/null +++ b/egs/aishell2/ASR/pruned_transducer_stateless5/decode.py @@ -0,0 +1,791 @@ +#!/usr/bin/env python3 +# +# Copyright 2021-2022 Xiaomi Corporation (Author: Fangjun Kuang, +# Zengwei Yao) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Usage: +(1) greedy search +./pruned_transducer_stateless5/decode.py \ + --epoch 25 \ + --avg 5 \ + --exp-dir ./pruned_transducer_stateless5/exp \ + --lang-dir data/lang_char \ + --max-duration 600 \ + --decoding-method greedy_search + +(2) beam search (not recommended) +./pruned_transducer_stateless5/decode.py \ + --epoch 25 \ + --avg 5 \ + --exp-dir ./pruned_transducer_stateless5/exp \ + --lang-dir data/lang_char \ + --max-duration 600 \ + --decoding-method beam_search \ + --beam-size 4 + +(3) modified beam search +./pruned_transducer_stateless5/decode.py \ + --epoch 25 \ + --avg 5 \ + --exp-dir ./pruned_transducer_stateless5/exp \ + --lang-dir data/lang_char \ + --max-duration 600 \ + --decoding-method modified_beam_search \ + --beam-size 4 + +(4) fast beam search (one best) +./pruned_transducer_stateless5/decode.py \ + --epoch 25 \ + --avg 5 \ + --exp-dir ./pruned_transducer_stateless5/exp \ + --lang-dir data/lang_char \ + --max-duration 600 \ + --decoding-method fast_beam_search \ + --beam 4 \ + --max-contexts 4 \ + --max-states 8 + +(5) fast beam search (nbest) +./pruned_transducer_stateless5/decode.py \ + --epoch 25 \ + --avg 5 \ + --exp-dir ./pruned_transducer_stateless5/exp \ + --lang-dir data/lang_char \ + --max-duration 600 \ + --decoding-method fast_beam_search_nbest \ + --beam 20.0 \ + --max-contexts 8 \ + --max-states 64 \ + --num-paths 200 \ + --nbest-scale 0.5 + +(6) fast beam search (nbest oracle WER) +./pruned_transducer_stateless5/decode.py \ + --epoch 25 \ + --avg 5 \ + --exp-dir ./pruned_transducer_stateless5/exp \ + --lang-dir data/lang_char \ + --max-duration 600 \ + --decoding-method fast_beam_search_nbest_oracle \ + --beam 20.0 \ + --max-contexts 8 \ + --max-states 64 \ + --num-paths 200 \ + --nbest-scale 0.5 + +(7) fast beam search (with LG) +./pruned_transducer_stateless5/decode.py \ + --epoch 25 \ + --avg 5 \ 
+ --exp-dir ./pruned_transducer_stateless5/exp \ + --lang-dir data/lang_char \ + --max-duration 600 \ + --decoding-method fast_beam_search_nbest_LG \ + --beam 20.0 \ + --max-contexts 8 \ + --max-states 64 +""" + + +import argparse +import logging +from collections import defaultdict +from pathlib import Path +from typing import Dict, List, Optional, Tuple + +import k2 +import torch +import torch.nn as nn +from asr_datamodule import AiShell2AsrDataModule +from beam_search import ( + beam_search, + fast_beam_search_nbest, + fast_beam_search_nbest_LG, + fast_beam_search_nbest_oracle, + fast_beam_search_one_best, + greedy_search, + greedy_search_batch, + modified_beam_search, +) +from train import add_model_arguments, get_params, get_transducer_model + +from icefall.char_graph_compiler import CharCtcTrainingGraphCompiler +from icefall.checkpoint import ( + average_checkpoints, + average_checkpoints_with_averaged_model, + find_checkpoints, + load_checkpoint, +) +from icefall.lexicon import Lexicon +from icefall.utils import ( + AttributeDict, + setup_logger, + store_transcripts, + str2bool, + write_error_stats, +) + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--epoch", + type=int, + default=30, + help="""It specifies the checkpoint to use for decoding. + Note: Epoch counts from 1. + You can specify --avg to use more checkpoints for model averaging.""", + ) + + parser.add_argument( + "--iter", + type=int, + default=0, + help="""If positive, --epoch is ignored and it + will use the checkpoint exp_dir/checkpoint-iter.pt. + You can specify --avg to use more checkpoints for model averaging. + """, + ) + + parser.add_argument( + "--avg", + type=int, + default=15, + help="Number of checkpoints to average. Automatically select " + "consecutive checkpoints before the checkpoint specified by " + "'--epoch' and '--iter'", + ) + + parser.add_argument( + "--use-averaged-model", + type=str2bool, + default=True, + help="Whether to load averaged model. Currently it only supports " + "using --epoch. If True, it would decode with the averaged model " + "over the epoch range from `epoch-avg` (excluded) to `epoch`." + "Actually only the models with epoch number of `epoch-avg` and " + "`epoch` are loaded for averaging. ", + ) + + parser.add_argument( + "--exp-dir", + type=str, + default="pruned_transducer_stateless5/exp", + help="The experiment dir", + ) + + parser.add_argument( + "--lang-dir", + type=Path, + default="data/lang_char", + help="The lang dir containing word table and LG graph", + ) + + parser.add_argument( + "--decoding-method", + type=str, + default="greedy_search", + help="""Possible values are: + - greedy_search + - beam_search + - modified_beam_search + - fast_beam_search + - fast_beam_search_nbest + - fast_beam_search_nbest_oracle + - fast_beam_search_nbest_LG + If you use fast_beam_search_nbest_LG, you have to specify + `--lang-dir`, which should contain `LG.pt`. + """, + ) + + parser.add_argument( + "--beam-size", + type=int, + default=4, + help="""An integer indicating how many candidates we will keep for each + frame. Used only when --decoding-method is beam_search or + modified_beam_search.""", + ) + + parser.add_argument( + "--beam", + type=float, + default=20.0, + help="""A floating point value to calculate the cutoff score during beam + search (i.e., `cutoff = max-score - beam`), which is the same as the + `beam` in Kaldi. 
+ Used only when --decoding-method is fast_beam_search, + fast_beam_search_nbest, fast_beam_search_nbest_LG, + and fast_beam_search_nbest_oracle + """, + ) + + parser.add_argument( + "--ngram-lm-scale", + type=float, + default=0.01, + help=""" + Used only when --decoding_method is fast_beam_search_nbest_LG. + It specifies the scale for n-gram LM scores. + """, + ) + + parser.add_argument( + "--max-contexts", + type=int, + default=8, + help="""Used only when --decoding-method is + fast_beam_search, fast_beam_search_nbest, fast_beam_search_nbest_LG, + and fast_beam_search_nbest_oracle""", + ) + + parser.add_argument( + "--max-states", + type=int, + default=64, + help="""Used only when --decoding-method is + fast_beam_search, fast_beam_search_nbest, fast_beam_search_nbest_LG, + and fast_beam_search_nbest_oracle""", + ) + + parser.add_argument( + "--context-size", + type=int, + default=2, + help="The context size in the decoder. 1 means bigram; " + "2 means tri-gram", + ) + parser.add_argument( + "--max-sym-per-frame", + type=int, + default=1, + help="""Maximum number of symbols per frame. + Used only when --decoding_method is greedy_search""", + ) + + parser.add_argument( + "--num-paths", + type=int, + default=200, + help="""Number of paths for nbest decoding. + Used only when the decoding method is fast_beam_search_nbest, + fast_beam_search_nbest_LG, and fast_beam_search_nbest_oracle""", + ) + + parser.add_argument( + "--nbest-scale", + type=float, + default=0.5, + help="""Scale applied to lattice scores when computing nbest paths. + Used only when the decoding method is fast_beam_search_nbest, + fast_beam_search_nbest_LG, and fast_beam_search_nbest_oracle""", + ) + + add_model_arguments(parser) + + return parser + + +def decode_one_batch( + params: AttributeDict, + model: nn.Module, + lexicon: Lexicon, + graph_compiler: CharCtcTrainingGraphCompiler, + batch: dict, + decoding_graph: Optional[k2.Fsa] = None, +) -> Dict[str, List[List[str]]]: + """Decode one batch and return the result in a dict. The dict has the + following format: + + - key: It indicates the setting used for decoding. For example, + if greedy_search is used, it would be "greedy_search" + If beam search with a beam size of 7 is used, it would be + "beam_7" + - value: It contains the decoding result. `len(value)` equals to + batch size. `value[i]` is the decoding result for the i-th + utterance in the given batch. + Args: + params: + It's the return value of :func:`get_params`. + model: + The neural model. + batch: + It is the return value from iterating + `lhotse.dataset.K2SpeechRecognitionDataset`. See its documentation + for the format of the `batch`. + decoding_graph: + The decoding graph. Can be either a `k2.trivial_graph` or HLG, Used + only when --decoding_method is fast_beam_search, fast_beam_search_nbest, + fast_beam_search_nbest_oracle, and fast_beam_search_nbest_LG. + Returns: + Return the decoding result. See above description for the format of + the returned dict. 
+ """ + device = next(model.parameters()).device + feature = batch["inputs"] + assert feature.ndim == 3 + + feature = feature.to(device) + # at entry, feature is (N, T, C) + + supervisions = batch["supervisions"] + feature_lens = supervisions["num_frames"].to(device) + + encoder_out, encoder_out_lens = model.encoder( + x=feature, x_lens=feature_lens + ) + hyps = [] + + if params.decoding_method == "fast_beam_search": + hyp_tokens = fast_beam_search_one_best( + model=model, + decoding_graph=decoding_graph, + encoder_out=encoder_out, + encoder_out_lens=encoder_out_lens, + beam=params.beam, + max_contexts=params.max_contexts, + max_states=params.max_states, + ) + for i in range(encoder_out.size(0)): + hyps.append([lexicon.token_table[idx] for idx in hyp_tokens[i]]) + elif params.decoding_method == "fast_beam_search_nbest_LG": + hyp_tokens = fast_beam_search_nbest_LG( + model=model, + decoding_graph=decoding_graph, + encoder_out=encoder_out, + encoder_out_lens=encoder_out_lens, + beam=params.beam, + max_contexts=params.max_contexts, + max_states=params.max_states, + num_paths=params.num_paths, + nbest_scale=params.nbest_scale, + ) + for hyp in hyp_tokens: + sentence = "".join([lexicon.word_table[i] for i in hyp]) + hyps.append(list(sentence)) + elif params.decoding_method == "fast_beam_search_nbest": + hyp_tokens = fast_beam_search_nbest( + model=model, + decoding_graph=decoding_graph, + encoder_out=encoder_out, + encoder_out_lens=encoder_out_lens, + beam=params.beam, + max_contexts=params.max_contexts, + max_states=params.max_states, + num_paths=params.num_paths, + nbest_scale=params.nbest_scale, + ) + for i in range(encoder_out.size(0)): + hyps.append([lexicon.token_table[idx] for idx in hyp_tokens[i]]) + elif params.decoding_method == "fast_beam_search_nbest_oracle": + hyp_tokens = fast_beam_search_nbest_oracle( + model=model, + decoding_graph=decoding_graph, + encoder_out=encoder_out, + encoder_out_lens=encoder_out_lens, + beam=params.beam, + max_contexts=params.max_contexts, + max_states=params.max_states, + num_paths=params.num_paths, + ref_texts=graph_compiler.texts_to_ids(supervisions["text"]), + nbest_scale=params.nbest_scale, + ) + for i in range(encoder_out.size(0)): + hyps.append([lexicon.token_table[idx] for idx in hyp_tokens[i]]) + elif ( + params.decoding_method == "greedy_search" + and params.max_sym_per_frame == 1 + ): + hyp_tokens = greedy_search_batch( + model=model, + encoder_out=encoder_out, + encoder_out_lens=encoder_out_lens, + ) + for i in range(encoder_out.size(0)): + hyps.append([lexicon.token_table[idx] for idx in hyp_tokens[i]]) + elif params.decoding_method == "modified_beam_search": + hyp_tokens = modified_beam_search( + model=model, + encoder_out=encoder_out, + encoder_out_lens=encoder_out_lens, + beam=params.beam_size, + ) + for i in range(encoder_out.size(0)): + hyps.append([lexicon.token_table[idx] for idx in hyp_tokens[i]]) + else: + batch_size = encoder_out.size(0) + + for i in range(batch_size): + # fmt: off + encoder_out_i = encoder_out[i:i+1, :encoder_out_lens[i]] + # fmt: on + if params.decoding_method == "greedy_search": + hyp = greedy_search( + model=model, + encoder_out=encoder_out_i, + max_sym_per_frame=params.max_sym_per_frame, + ) + elif params.decoding_method == "beam_search": + hyp = beam_search( + model=model, + encoder_out=encoder_out_i, + beam=params.beam_size, + ) + else: + raise ValueError( + f"Unsupported decoding method: {params.decoding_method}" + ) + hyps.append([lexicon.token_table[idx] for idx in hyp]) + + if params.decoding_method == 
"greedy_search": + return {"greedy_search": hyps} + elif "fast_beam_search" in params.decoding_method: + key = f"beam_{params.beam}_" + key += f"max_contexts_{params.max_contexts}_" + key += f"max_states_{params.max_states}" + if "nbest" in params.decoding_method: + key += f"_num_paths_{params.num_paths}_" + key += f"nbest_scale_{params.nbest_scale}" + if "LG" in params.decoding_method: + key += f"_ngram_lm_scale_{params.ngram_lm_scale}" + + return {key: hyps} + else: + return {f"beam_size_{params.beam_size}": hyps} + + +def decode_dataset( + dl: torch.utils.data.DataLoader, + params: AttributeDict, + model: nn.Module, + lexicon: Lexicon, + graph_compiler: CharCtcTrainingGraphCompiler, + decoding_graph: Optional[k2.Fsa] = None, +) -> Dict[str, List[Tuple[List[str], List[str]]]]: + """Decode dataset. + + Args: + dl: + PyTorch's dataloader containing the dataset to decode. + params: + It is returned by :func:`get_params`. + model: + The neural model. + decoding_graph: + The decoding graph. Can be either a `k2.trivial_graph` or HLG, Used + only when --decoding_method is fast_beam_search, fast_beam_search_nbest, + fast_beam_search_nbest_oracle, and fast_beam_search_nbest_LG. + Returns: + Return a dict, whose key may be "greedy_search" if greedy search + is used, or it may be "beam_7" if beam size of 7 is used. + Its value is a list of tuples. Each tuple contains two elements: + The first is the reference transcript, and the second is the + predicted result. + """ + num_cuts = 0 + + try: + num_batches = len(dl) + except TypeError: + num_batches = "?" + + if params.decoding_method == "greedy_search": + log_interval = 50 + else: + log_interval = 20 + + results = defaultdict(list) + for batch_idx, batch in enumerate(dl): + texts = batch["supervisions"]["text"] + + hyps_dict = decode_one_batch( + params=params, + model=model, + lexicon=lexicon, + graph_compiler=graph_compiler, + decoding_graph=decoding_graph, + batch=batch, + ) + + for name, hyps in hyps_dict.items(): + this_batch = [] + assert len(hyps) == len(texts) + for hyp_words, ref_text in zip(hyps, texts): + this_batch.append((ref_text, hyp_words)) + + results[name].extend(this_batch) + + num_cuts += len(texts) + + if batch_idx % log_interval == 0: + batch_str = f"{batch_idx}/{num_batches}" + + logging.info( + f"batch {batch_str}, cuts processed until now is {num_cuts}" + ) + return results + + +def save_results( + params: AttributeDict, + test_set_name: str, + results_dict: Dict[str, List[Tuple[List[int], List[int]]]], +): + test_set_wers = dict() + for key, results in results_dict.items(): + recog_path = ( + params.res_dir / f"recogs-{test_set_name}-{key}-{params.suffix}.txt" + ) + store_transcripts(filename=recog_path, texts=results) + logging.info(f"The transcripts are stored in {recog_path}") + + # The following prints out WERs, per-word error statistics and aligned + # ref/hyp pairs. 
+ errs_filename = ( + params.res_dir / f"errs-{test_set_name}-{key}-{params.suffix}.txt" + ) + with open(errs_filename, "w") as f: + wer = write_error_stats( + f, f"{test_set_name}-{key}", results, enable_log=True + ) + test_set_wers[key] = wer + + logging.info("Wrote detailed error stats to {}".format(errs_filename)) + + test_set_wers = sorted(test_set_wers.items(), key=lambda x: x[1]) + errs_info = ( + params.res_dir + / f"wer-summary-{test_set_name}-{key}-{params.suffix}.txt" + ) + with open(errs_info, "w") as f: + print("settings\tWER", file=f) + for key, val in test_set_wers: + print("{}\t{}".format(key, val), file=f) + + s = "\nFor {}, WER of different settings are:\n".format(test_set_name) + note = "\tbest for {}".format(test_set_name) + for key, val in test_set_wers: + s += "{}\t{}{}\n".format(key, val, note) + note = "" + logging.info(s) + + +@torch.no_grad() +def main(): + parser = get_parser() + AiShell2AsrDataModule.add_arguments(parser) + args = parser.parse_args() + args.exp_dir = Path(args.exp_dir) + + params = get_params() + params.update(vars(args)) + + assert params.decoding_method in ( + "greedy_search", + "beam_search", + "fast_beam_search", + "fast_beam_search_nbest", + "fast_beam_search_nbest_LG", + "fast_beam_search_nbest_oracle", + "modified_beam_search", + ) + params.res_dir = params.exp_dir / params.decoding_method + + if params.iter > 0: + params.suffix = f"iter-{params.iter}-avg-{params.avg}" + else: + params.suffix = f"epoch-{params.epoch}-avg-{params.avg}" + + if "fast_beam_search" in params.decoding_method: + params.suffix += f"-beam-{params.beam}" + params.suffix += f"-max-contexts-{params.max_contexts}" + params.suffix += f"-max-states-{params.max_states}" + if "nbest" in params.decoding_method: + params.suffix += f"-nbest-scale-{params.nbest_scale}" + params.suffix += f"-num-paths-{params.num_paths}" + if "LG" in params.decoding_method: + params.suffix += f"-ngram-lm-scale-{params.ngram_lm_scale}" + elif "beam_search" in params.decoding_method: + params.suffix += ( + f"-{params.decoding_method}-beam-size-{params.beam_size}" + ) + else: + params.suffix += f"-context-{params.context_size}" + params.suffix += f"-max-sym-per-frame-{params.max_sym_per_frame}" + + if params.use_averaged_model: + params.suffix += "-use-averaged-model" + + setup_logger(f"{params.res_dir}/log-decode-{params.suffix}") + logging.info("Decoding started") + + device = torch.device("cpu") + if torch.cuda.is_available(): + device = torch.device("cuda", 0) + + logging.info(f"Device: {device}") + + lexicon = Lexicon(params.lang_dir) + params.blank_id = lexicon.token_table[""] + params.unk_id = lexicon.token_table[""] + params.vocab_size = max(lexicon.tokens) + 1 + + graph_compiler = CharCtcTrainingGraphCompiler( + lexicon=lexicon, + device=device, + ) + + logging.info(params) + + logging.info("About to create model") + model = get_transducer_model(params) + + if not params.use_averaged_model: + if params.iter > 0: + filenames = find_checkpoints( + params.exp_dir, iteration=-params.iter + )[: params.avg] + if len(filenames) == 0: + raise ValueError( + f"No checkpoints found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + elif len(filenames) < params.avg: + raise ValueError( + f"Not enough checkpoints ({len(filenames)}) found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + logging.info(f"averaging {filenames}") + model.to(device) + model.load_state_dict(average_checkpoints(filenames, device=device)) + elif params.avg == 1: + 
load_checkpoint(f"{params.exp_dir}/epoch-{params.epoch}.pt", model) + else: + start = params.epoch - params.avg + 1 + filenames = [] + for i in range(start, params.epoch + 1): + if i >= 1: + filenames.append(f"{params.exp_dir}/epoch-{i}.pt") + logging.info(f"averaging {filenames}") + model.to(device) + model.load_state_dict(average_checkpoints(filenames, device=device)) + else: + if params.iter > 0: + filenames = find_checkpoints( + params.exp_dir, iteration=-params.iter + )[: params.avg + 1] + if len(filenames) == 0: + raise ValueError( + f"No checkpoints found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + elif len(filenames) < params.avg + 1: + raise ValueError( + f"Not enough checkpoints ({len(filenames)}) found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + filename_start = filenames[-1] + filename_end = filenames[0] + logging.info( + "Calculating the averaged model over iteration checkpoints" + f" from {filename_start} (excluded) to {filename_end}" + ) + model.to(device) + model.load_state_dict( + average_checkpoints_with_averaged_model( + filename_start=filename_start, + filename_end=filename_end, + device=device, + ) + ) + else: + assert params.avg > 0, params.avg + start = params.epoch - params.avg + assert start >= 1, start + filename_start = f"{params.exp_dir}/epoch-{start}.pt" + filename_end = f"{params.exp_dir}/epoch-{params.epoch}.pt" + logging.info( + f"Calculating the averaged model over epoch range from " + f"{start} (excluded) to {params.epoch}" + ) + model.to(device) + model.load_state_dict( + average_checkpoints_with_averaged_model( + filename_start=filename_start, + filename_end=filename_end, + device=device, + ) + ) + + model.to(device) + model.eval() + + if "fast_beam_search" in params.decoding_method: + if params.decoding_method == "fast_beam_search_nbest_LG": + lexicon = Lexicon(params.lang_dir) + lg_filename = params.lang_dir / "LG.pt" + logging.info(f"Loading {lg_filename}") + decoding_graph = k2.Fsa.from_dict( + torch.load(lg_filename, map_location=device) + ) + decoding_graph.scores *= params.ngram_lm_scale + else: + decoding_graph = k2.trivial_graph( + params.vocab_size - 1, device=device + ) + else: + decoding_graph = None + + num_param = sum([p.numel() for p in model.parameters()]) + logging.info(f"Number of model parameters: {num_param}") + + aishell2 = AiShell2AsrDataModule(args) + + valid_cuts = aishell2.valid_cuts() + test_cuts = aishell2.test_cuts() + + # use ios sets for dev and test + dev_dl = aishell2.valid_dataloaders(valid_cuts) + test_dl = aishell2.test_dataloaders(test_cuts) + + test_sets = ["dev", "test"] + test_dl = [dev_dl, test_dl] + + for test_set, test_dl in zip(test_sets, test_dl): + results_dict = decode_dataset( + dl=test_dl, + params=params, + model=model, + lexicon=lexicon, + graph_compiler=graph_compiler, + decoding_graph=decoding_graph, + ) + + save_results( + params=params, + test_set_name=test_set, + results_dict=results_dict, + ) + + logging.info("Done!") + + +if __name__ == "__main__": + main() diff --git a/egs/aishell2/ASR/pruned_transducer_stateless5/decoder.py b/egs/aishell2/ASR/pruned_transducer_stateless5/decoder.py new file mode 120000 index 000000000..722e1c894 --- /dev/null +++ b/egs/aishell2/ASR/pruned_transducer_stateless5/decoder.py @@ -0,0 +1 @@ +../../../librispeech/ASR/pruned_transducer_stateless2/decoder.py \ No newline at end of file diff --git a/egs/aishell2/ASR/pruned_transducer_stateless5/encoder_interface.py b/egs/aishell2/ASR/pruned_transducer_stateless5/encoder_interface.py new file 
mode 120000 index 000000000..f58253127 --- /dev/null +++ b/egs/aishell2/ASR/pruned_transducer_stateless5/encoder_interface.py @@ -0,0 +1 @@ +../../../librispeech/ASR/pruned_transducer_stateless2/encoder_interface.py \ No newline at end of file diff --git a/egs/aishell2/ASR/pruned_transducer_stateless5/export.py b/egs/aishell2/ASR/pruned_transducer_stateless5/export.py new file mode 100755 index 000000000..bc7bd71cb --- /dev/null +++ b/egs/aishell2/ASR/pruned_transducer_stateless5/export.py @@ -0,0 +1,274 @@ +#!/usr/bin/env python3 +# +# Copyright 2021 Xiaomi Corporation (Author: Fangjun Kuang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script converts several saved checkpoints +# to a single one using model averaging. +""" +Usage: +./pruned_transducer_stateless5/export.py \ + --exp-dir ./pruned_transducer_stateless5/exp \ + --lang-dir data/lang_char + --epoch 25 \ + --avg 5 + +It will generate a file exp_dir/pretrained.pt + +To use the generated file with `pruned_transducer_stateless5/decode.py`, +you can do: + + cd /path/to/exp_dir + ln -s pretrained.pt epoch-9999.pt + + cd /path/to/egs/aishell2/ASR + ./pruned_transducer_stateless5/decode.py \ + --exp-dir ./pruned_transducer_stateless5/exp \ + --epoch 9999 \ + --avg 1 \ + --max-duration 600 \ + --decoding-method greedy_search \ + --lang-dir data/lang_char +""" + +import argparse +import logging +from pathlib import Path + +import torch +from train import add_model_arguments, get_params, get_transducer_model + +from icefall.checkpoint import ( + average_checkpoints, + average_checkpoints_with_averaged_model, + find_checkpoints, + load_checkpoint, +) +from icefall.lexicon import Lexicon +from icefall.utils import str2bool + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--epoch", + type=int, + default=28, + help="""It specifies the checkpoint to use for averaging. + Note: Epoch counts from 1. + You can specify --avg to use more checkpoints for model averaging.""", + ) + + parser.add_argument( + "--iter", + type=int, + default=0, + help="""If positive, --epoch is ignored and it + will use the checkpoint exp_dir/checkpoint-iter.pt. + You can specify --avg to use more checkpoints for model averaging. + """, + ) + + parser.add_argument( + "--avg", + type=int, + default=15, + help="Number of checkpoints to average. Automatically select " + "consecutive checkpoints before the checkpoint specified by " + "'--epoch' and '--iter'", + ) + + parser.add_argument( + "--use-averaged-model", + type=str2bool, + default=False, + help="Whether to load averaged model. Currently it only supports " + "using --epoch. If True, it would decode with the averaged model " + "over the epoch range from `epoch-avg` (excluded) to `epoch`." + "Actually only the models with epoch number of `epoch-avg` and " + "`epoch` are loaded for averaging. 
", + ) + + parser.add_argument( + "--exp-dir", + type=str, + default="pruned_transducer_stateless5/exp", + help="""It specifies the directory where all training related + files, e.g., checkpoints, log, etc, are saved + """, + ) + + parser.add_argument( + "--lang-dir", + type=str, + default="data/lang_char", + help="The lang dir", + ) + + parser.add_argument( + "--jit", + type=str2bool, + default=False, + help="""True to save a model after applying torch.jit.script. + """, + ) + + parser.add_argument( + "--context-size", + type=int, + default=2, + help="The context size in the decoder. 1 means bigram; " + "2 means tri-gram", + ) + + add_model_arguments(parser) + + return parser + + +def main(): + args = get_parser().parse_args() + args.exp_dir = Path(args.exp_dir) + + params = get_params() + params.update(vars(args)) + + device = torch.device("cpu") + if torch.cuda.is_available(): + device = torch.device("cuda", 0) + + logging.info(f"device: {device}") + + lexicon = Lexicon(params.lang_dir) + params.blank_id = lexicon.token_table[""] + params.unk_id = lexicon.token_table[""] + params.vocab_size = max(lexicon.tokens) + 1 + + logging.info(params) + + logging.info("About to create model") + model = get_transducer_model(params) + + if not params.use_averaged_model: + if params.iter > 0: + filenames = find_checkpoints( + params.exp_dir, iteration=-params.iter + )[: params.avg] + if len(filenames) == 0: + raise ValueError( + f"No checkpoints found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + elif len(filenames) < params.avg: + raise ValueError( + f"Not enough checkpoints ({len(filenames)}) found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + logging.info(f"averaging {filenames}") + model.to(device) + model.load_state_dict(average_checkpoints(filenames, device=device)) + elif params.avg == 1: + load_checkpoint(f"{params.exp_dir}/epoch-{params.epoch}.pt", model) + else: + start = params.epoch - params.avg + 1 + filenames = [] + for i in range(start, params.epoch + 1): + if i >= 1: + filenames.append(f"{params.exp_dir}/epoch-{i}.pt") + logging.info(f"averaging {filenames}") + model.to(device) + model.load_state_dict(average_checkpoints(filenames, device=device)) + else: + if params.iter > 0: + filenames = find_checkpoints( + params.exp_dir, iteration=-params.iter + )[: params.avg + 1] + if len(filenames) == 0: + raise ValueError( + f"No checkpoints found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + elif len(filenames) < params.avg + 1: + raise ValueError( + f"Not enough checkpoints ({len(filenames)}) found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + filename_start = filenames[-1] + filename_end = filenames[0] + logging.info( + "Calculating the averaged model over iteration checkpoints" + f" from {filename_start} (excluded) to {filename_end}" + ) + model.to(device) + model.load_state_dict( + average_checkpoints_with_averaged_model( + filename_start=filename_start, + filename_end=filename_end, + device=device, + ) + ) + else: + assert params.avg > 0, params.avg + start = params.epoch - params.avg + assert start >= 1, start + filename_start = f"{params.exp_dir}/epoch-{start}.pt" + filename_end = f"{params.exp_dir}/epoch-{params.epoch}.pt" + logging.info( + f"Calculating the averaged model over epoch range from " + f"{start} (excluded) to {params.epoch}" + ) + model.to(device) + model.load_state_dict( + average_checkpoints_with_averaged_model( + filename_start=filename_start, + filename_end=filename_end, + device=device, + ) + ) + + model.to("cpu") + 
model.eval() + + if params.jit: + # We won't use the forward() method of the model in C++, so just ignore + # it here. + # Otherwise, one of its arguments is a ragged tensor and is not + # torch scriptabe. + model.__class__.forward = torch.jit.ignore(model.__class__.forward) + logging.info("Using torch.jit.script") + model = torch.jit.script(model) + filename = params.exp_dir / "cpu_jit.pt" + model.save(str(filename)) + logging.info(f"Saved to {filename}") + else: + logging.info("Not using torch.jit.script") + # Save it using a format so that it can be loaded + # by :func:`load_checkpoint` + filename = params.exp_dir / "pretrained.pt" + torch.save({"model": model.state_dict()}, str(filename)) + logging.info(f"Saved to {filename}") + + +if __name__ == "__main__": + formatter = ( + "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s" + ) + + logging.basicConfig(format=formatter, level=logging.INFO) + main() diff --git a/egs/aishell2/ASR/pruned_transducer_stateless5/joiner.py b/egs/aishell2/ASR/pruned_transducer_stateless5/joiner.py new file mode 120000 index 000000000..9052f3cbb --- /dev/null +++ b/egs/aishell2/ASR/pruned_transducer_stateless5/joiner.py @@ -0,0 +1 @@ +../../../librispeech/ASR/pruned_transducer_stateless2/joiner.py \ No newline at end of file diff --git a/egs/aishell2/ASR/pruned_transducer_stateless5/model.py b/egs/aishell2/ASR/pruned_transducer_stateless5/model.py new file mode 120000 index 000000000..a99e74334 --- /dev/null +++ b/egs/aishell2/ASR/pruned_transducer_stateless5/model.py @@ -0,0 +1 @@ +../../../librispeech/ASR/pruned_transducer_stateless2/model.py \ No newline at end of file diff --git a/egs/aishell2/ASR/pruned_transducer_stateless5/optim.py b/egs/aishell2/ASR/pruned_transducer_stateless5/optim.py new file mode 120000 index 000000000..0a2f285aa --- /dev/null +++ b/egs/aishell2/ASR/pruned_transducer_stateless5/optim.py @@ -0,0 +1 @@ +../../../librispeech/ASR/pruned_transducer_stateless2/optim.py \ No newline at end of file diff --git a/egs/aishell2/ASR/pruned_transducer_stateless5/pretrained.py b/egs/aishell2/ASR/pruned_transducer_stateless5/pretrained.py new file mode 100755 index 000000000..09de1bece --- /dev/null +++ b/egs/aishell2/ASR/pruned_transducer_stateless5/pretrained.py @@ -0,0 +1,342 @@ +#!/usr/bin/env python3 +# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
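# Usage note for the export script above: a minimal sketch, assuming the
# default --exp-dir, of how its two artifacts are typically consumed. The
# TorchScript file is self-contained, while pretrained.pt is a plain
# state_dict checkpoint meant for load_checkpoint() / the pretrained.py
# script that follows.

import torch

jit_model = torch.jit.load("pruned_transducer_stateless5/exp/cpu_jit.pt")
jit_model.eval()

ckpt = torch.load(
    "pruned_transducer_stateless5/exp/pretrained.pt", map_location="cpu"
)
# ckpt["model"] is the state_dict written by export.py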
+""" +Usage: + +(1) greedy search +./pruned_transducer_stateless5/pretrained.py \ + --checkpoint ./pruned_transducer_stateless5/exp/pretrained.pt \ + --lang-dir ./data/lang_char \ + --method greedy_search \ + /path/to/foo.wav \ + /path/to/bar.wav + +(2) modified beam search +./pruned_transducer_stateless5/pretrained.py \ + --checkpoint ./pruned_transducer_stateless5/exp/pretrained.pt \ + --lang-dir ./data/lang_char \ + --method modified_beam_search \ + --beam-size 4 \ + /path/to/foo.wav \ + /path/to/bar.wav + +(3) fast beam search +./pruned_transducer_stateless5/pretrained.py \ + --checkpoint ./pruned_transducer_stateless5/exp/pretrained.pt \ + --lang-dir ./data/lang_char \ + --method fast_beam_search \ + --beam-size 4 \ + /path/to/foo.wav \ + /path/to/bar.wav + +You can also use `./pruned_transducer_stateless5/exp/epoch-xx.pt`. + +Note: ./pruned_transducer_stateless5/exp/pretrained.pt is generated by +./pruned_transducer_stateless5/export.py +""" + + +import argparse +import logging +import math +from typing import List + +import k2 +import kaldifeat +import torch +import torchaudio +from beam_search import ( + beam_search, + fast_beam_search_one_best, + greedy_search, + greedy_search_batch, + modified_beam_search, +) +from torch.nn.utils.rnn import pad_sequence +from train import add_model_arguments, get_params, get_transducer_model + +from icefall.lexicon import Lexicon + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--checkpoint", + type=str, + required=True, + help="Path to the checkpoint. " + "The checkpoint is assumed to be saved by " + "icefall.checkpoint.save_checkpoint().", + ) + + parser.add_argument( + "--lang-dir", + type=str, + help="""Path to lang. + """, + ) + + parser.add_argument( + "--method", + type=str, + default="greedy_search", + help="""Possible values are: + - greedy_search + - beam_search + - modified_beam_search + - fast_beam_search + """, + ) + + parser.add_argument( + "sound_files", + type=str, + nargs="+", + help="The input sound file(s) to transcribe. " + "Supported formats are those supported by torchaudio.load(). " + "For example, wav and flac are supported. " + "The sample rate has to be 16kHz.", + ) + + parser.add_argument( + "--sample-rate", + type=int, + default=16000, + help="The sample rate of the input sound file", + ) + + parser.add_argument( + "--beam-size", + type=int, + default=4, + help="""An integer indicating how many candidates we will keep for each + frame. Used only when --method is beam_search or + modified_beam_search.""", + ) + + parser.add_argument( + "--beam", + type=float, + default=4, + help="""A floating point value to calculate the cutoff score during beam + search (i.e., `cutoff = max-score - beam`), which is the same as the + `beam` in Kaldi. + Used only when --method is fast_beam_search""", + ) + + parser.add_argument( + "--max-contexts", + type=int, + default=4, + help="""Used only when --method is fast_beam_search""", + ) + + parser.add_argument( + "--max-states", + type=int, + default=8, + help="""Used only when --method is fast_beam_search""", + ) + + parser.add_argument( + "--context-size", + type=int, + default=2, + help="The context size in the decoder. 1 means bigram; " + "2 means tri-gram", + ) + parser.add_argument( + "--max-sym-per-frame", + type=int, + default=1, + help="""Maximum number of symbols per frame. Used only when + --method is greedy_search. 
+ """, + ) + + add_model_arguments(parser) + + return parser + + +def read_sound_files( + filenames: List[str], expected_sample_rate: float +) -> List[torch.Tensor]: + """Read a list of sound files into a list 1-D float32 torch tensors. + Args: + filenames: + A list of sound filenames. + expected_sample_rate: + The expected sample rate of the sound files. + Returns: + Return a list of 1-D float32 torch tensors. + """ + ans = [] + for f in filenames: + wave, sample_rate = torchaudio.load(f) + assert sample_rate == expected_sample_rate, ( + f"expected sample rate: {expected_sample_rate}. " + f"Given: {sample_rate}" + ) + # We use only the first channel + ans.append(wave[0]) + return ans + + +@torch.no_grad() +def main(): + parser = get_parser() + args = parser.parse_args() + + params = get_params() + + params.update(vars(args)) + + lexicon = Lexicon(params.lang_dir) + params.blank_id = lexicon.token_table[""] + params.unk_id = lexicon.token_table[""] + params.vocab_size = max(lexicon.tokens) + 1 + + logging.info(f"{params}") + + device = torch.device("cpu") + if torch.cuda.is_available(): + device = torch.device("cuda", 0) + + logging.info(f"device: {device}") + + logging.info("Creating model") + model = get_transducer_model(params) + + num_param = sum([p.numel() for p in model.parameters()]) + logging.info(f"Number of model parameters: {num_param}") + + checkpoint = torch.load(args.checkpoint, map_location="cpu") + model.load_state_dict(checkpoint["model"], strict=False) + model.to(device) + model.eval() + model.device = device + + logging.info("Constructing Fbank computer") + opts = kaldifeat.FbankOptions() + opts.device = device + opts.frame_opts.dither = 0 + opts.frame_opts.snip_edges = False + opts.frame_opts.samp_freq = params.sample_rate + opts.mel_opts.num_bins = params.feature_dim + + fbank = kaldifeat.Fbank(opts) + + logging.info(f"Reading sound files: {params.sound_files}") + waves = read_sound_files( + filenames=params.sound_files, expected_sample_rate=params.sample_rate + ) + waves = [w.to(device) for w in waves] + + logging.info("Decoding started") + features = fbank(waves) + feature_lengths = [f.size(0) for f in features] + + features = pad_sequence( + features, batch_first=True, padding_value=math.log(1e-10) + ) + + feature_lengths = torch.tensor(feature_lengths, device=device) + + encoder_out, encoder_out_lens = model.encoder( + x=features, x_lens=feature_lengths + ) + + num_waves = encoder_out.size(0) + hyps = [] + msg = f"Using {params.method}" + if params.method == "beam_search": + msg += f" with beam size {params.beam_size}" + logging.info(msg) + + if params.method == "fast_beam_search": + decoding_graph = k2.trivial_graph(params.vocab_size - 1, device=device) + hyp_tokens = fast_beam_search_one_best( + model=model, + decoding_graph=decoding_graph, + encoder_out=encoder_out, + encoder_out_lens=encoder_out_lens, + beam=params.beam, + max_contexts=params.max_contexts, + max_states=params.max_states, + ) + for i in range(encoder_out.size(0)): + hyps.append([lexicon.token_table[idx] for idx in hyp_tokens[i]]) + elif params.method == "modified_beam_search": + hyp_tokens = modified_beam_search( + model=model, + encoder_out=encoder_out, + encoder_out_lens=encoder_out_lens, + beam=params.beam_size, + ) + + for i in range(encoder_out.size(0)): + hyps.append([lexicon.token_table[idx] for idx in hyp_tokens[i]]) + elif params.method == "greedy_search" and params.max_sym_per_frame == 1: + hyp_tokens = greedy_search_batch( + model=model, + encoder_out=encoder_out, + 
encoder_out_lens=encoder_out_lens, + ) + for i in range(encoder_out.size(0)): + hyps.append([lexicon.token_table[idx] for idx in hyp_tokens[i]]) + else: + for i in range(num_waves): + # fmt: off + encoder_out_i = encoder_out[i:i+1, :encoder_out_lens[i]] + # fmt: on + if params.method == "greedy_search": + hyp = greedy_search( + model=model, + encoder_out=encoder_out_i, + max_sym_per_frame=params.max_sym_per_frame, + ) + elif params.method == "beam_search": + hyp = beam_search( + model=model, + encoder_out=encoder_out_i, + beam=params.beam_size, + ) + else: + raise ValueError(f"Unsupported method: {params.method}") + + hyps.append([lexicon.token_table[idx] for idx in hyp]) + + s = "\n" + for filename, hyp in zip(params.sound_files, hyps): + words = "".join(hyp) + s += f"{filename}:\n{words}\n\n" + logging.info(s) + + logging.info("Decoding Done") + + +if __name__ == "__main__": + formatter = ( + "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s" + ) + + logging.basicConfig(format=formatter, level=logging.INFO) + main() diff --git a/egs/aishell2/ASR/pruned_transducer_stateless5/scaling.py b/egs/aishell2/ASR/pruned_transducer_stateless5/scaling.py new file mode 120000 index 000000000..c10cdfe12 --- /dev/null +++ b/egs/aishell2/ASR/pruned_transducer_stateless5/scaling.py @@ -0,0 +1 @@ +../../../librispeech/ASR/pruned_transducer_stateless2/scaling.py \ No newline at end of file diff --git a/egs/aishell2/ASR/pruned_transducer_stateless5/train.py b/egs/aishell2/ASR/pruned_transducer_stateless5/train.py new file mode 100755 index 000000000..838a0497f --- /dev/null +++ b/egs/aishell2/ASR/pruned_transducer_stateless5/train.py @@ -0,0 +1,1131 @@ +#!/usr/bin/env python3 +# Copyright 2021-2022 Xiaomi Corp. (authors: Fangjun Kuang, +# Wei Kang, +# Mingshuang Luo,) +# Zengwei Yao) +# Copyright 2022 Nvidia (authors: Yuekai Zhang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +Usage: + +export CUDA_VISIBLE_DEVICES="0,1,2,3" + +./pruned_transducer_stateless5/train.py \ + --world-size 4 \ + --lang-dir data/lang_char \ + --num-epochs 40 \ + --start-epoch 1 \ + --exp-dir pruned_transducer_stateless5/exp \ + --max-duration 300 \ + --use-fp16 0 \ + --num-encoder-layers 24 \ + --dim-feedforward 1536 \ + --nhead 8 \ + --encoder-dim 384 \ + --decoder-dim 512 \ + --joiner-dim 512 + +# For mix precision training: + +./pruned_transducer_stateless5/train.py \ + --lang-dir data/lang_char \ + --world-size 4 \ + --num-epochs 30 \ + --start-epoch 1 \ + --use-fp16 1 \ + --exp-dir pruned_transducer_stateless5/exp \ + --max-duration 550 + +""" + + +import argparse +import copy +import logging +import warnings +from pathlib import Path +from shutil import copyfile +from typing import Any, Dict, Optional, Tuple, Union + +import k2 +import optim +import torch +import torch.multiprocessing as mp +import torch.nn as nn +from asr_datamodule import AiShell2AsrDataModule +from conformer import Conformer +from decoder import Decoder +from joiner import Joiner +from lhotse.cut import Cut +from lhotse.dataset.sampling.base import CutSampler +from lhotse.utils import fix_random_seed +from model import Transducer +from optim import Eden, Eve +from torch import Tensor +from torch.cuda.amp import GradScaler +from torch.nn.parallel import DistributedDataParallel as DDP +from torch.utils.tensorboard import SummaryWriter + +from icefall import diagnostics +from icefall.char_graph_compiler import CharCtcTrainingGraphCompiler +from icefall.checkpoint import load_checkpoint, remove_checkpoints +from icefall.checkpoint import save_checkpoint as save_checkpoint_impl +from icefall.checkpoint import ( + save_checkpoint_with_global_batch_idx, + update_averaged_model, +) +from icefall.dist import cleanup_dist, setup_dist +from icefall.env import get_env_info +from icefall.lexicon import Lexicon +from icefall.utils import AttributeDict, MetricsTracker, setup_logger, str2bool + +LRSchedulerType = Union[ + torch.optim.lr_scheduler._LRScheduler, optim.LRScheduler +] + + +def add_model_arguments(parser: argparse.ArgumentParser): + parser.add_argument( + "--num-encoder-layers", + type=int, + default=24, + help="Number of conformer encoder layers..", + ) + + parser.add_argument( + "--dim-feedforward", + type=int, + default=1536, + help="Feedforward dimension of the conformer encoder layer.", + ) + + parser.add_argument( + "--nhead", + type=int, + default=8, + help="Number of attention heads in the conformer encoder layer.", + ) + + parser.add_argument( + "--encoder-dim", + type=int, + default=384, + help="Attention dimension in the conformer encoder layer.", + ) + + parser.add_argument( + "--decoder-dim", + type=int, + default=512, + help="Embedding dimension in the decoder model.", + ) + + parser.add_argument( + "--joiner-dim", + type=int, + default=512, + help="""Dimension used in the joiner model. + Outputs from the encoder and decoder model are projected + to this dimension before adding. 
+ """, + ) + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--world-size", + type=int, + default=1, + help="Number of GPUs for DDP training.", + ) + + parser.add_argument( + "--master-port", + type=int, + default=12354, + help="Master port to use for DDP training.", + ) + + parser.add_argument( + "--tensorboard", + type=str2bool, + default=True, + help="Should various information be logged in tensorboard.", + ) + + parser.add_argument( + "--num-epochs", + type=int, + default=30, + help="Number of epochs to train.", + ) + + parser.add_argument( + "--start-epoch", + type=int, + default=1, + help="""Resume training from this epoch. It should be positive. + If larger than 1, it will load checkpoint from + exp-dir/epoch-{start_epoch-1}.pt + """, + ) + + parser.add_argument( + "--start-batch", + type=int, + default=0, + help="""If positive, --start-epoch is ignored and + it loads the checkpoint from exp-dir/checkpoint-{start_batch}.pt + """, + ) + + parser.add_argument( + "--exp-dir", + type=str, + default="pruned_transducer_stateless5/exp", + help="""The experiment dir. + It specifies the directory where all training related + files, e.g., checkpoints, log, etc, are saved + """, + ) + + parser.add_argument( + "--lang-dir", + type=str, + default="data/lang_char", + help="""The lang dir + It contains language related input files such as + "lexicon.txt" + """, + ) + + parser.add_argument( + "--initial-lr", + type=float, + default=0.003, + help="The initial learning rate. This value should not need " + "to be changed.", + ) + + parser.add_argument( + "--lr-batches", + type=float, + default=5000, + help="""Number of steps that affects how rapidly the learning rate + decreases. We suggest not to change this.""", + ) + + parser.add_argument( + "--lr-epochs", + type=float, + default=6, + help="""Number of epochs that affects how rapidly the learning rate decreases. + """, + ) + + parser.add_argument( + "--context-size", + type=int, + default=2, + help="The context size in the decoder. 1 means bigram; " + "2 means tri-gram", + ) + + parser.add_argument( + "--prune-range", + type=int, + default=5, + help="The prune range for rnnt loss, it means how many symbols(context)" + "we are using to compute the loss", + ) + + parser.add_argument( + "--lm-scale", + type=float, + default=0.25, + help="The scale to smooth the loss with lm " + "(output of prediction network) part.", + ) + + parser.add_argument( + "--am-scale", + type=float, + default=0.0, + help="The scale to smooth the loss with am (output of encoder network)" + "part.", + ) + + parser.add_argument( + "--simple-loss-scale", + type=float, + default=0.5, + help="To get pruning ranges, we will calculate a simple version" + "loss(joiner is just addition), this simple loss also uses for" + "training (as a regularization item). We will scale the simple loss" + "with this parameter before adding to the final loss.", + ) + + parser.add_argument( + "--seed", + type=int, + default=42, + help="The seed for random generators intended for reproducibility", + ) + + parser.add_argument( + "--print-diagnostics", + type=str2bool, + default=False, + help="Accumulate stats on activations, print them and exit.", + ) + + parser.add_argument( + "--save-every-n", + type=int, + default=4000, + help="""Save checkpoint after processing this number of batches" + periodically. We save checkpoint to exp-dir/ whenever + params.batch_idx_train % save_every_n == 0. 
The checkpoint filename + has the form: f'exp-dir/checkpoint-{params.batch_idx_train}.pt' + Note: It also saves checkpoint to `exp-dir/epoch-xxx.pt` at the + end of each epoch where `xxx` is the epoch number counting from 0. + """, + ) + + parser.add_argument( + "--keep-last-k", + type=int, + default=30, + help="""Only keep this number of checkpoints on disk. + For instance, if it is 3, there are only 3 checkpoints + in the exp-dir with filenames `checkpoint-xxx.pt`. + It does not affect checkpoints with name `epoch-xxx.pt`. + """, + ) + + parser.add_argument( + "--average-period", + type=int, + default=100, + help="""Update the averaged model, namely `model_avg`, after processing + this number of batches. `model_avg` is a separate version of model, + in which each floating-point parameter is the average of all the + parameters from the start of training. Each time we take the average, + we do: `model_avg = model * (average_period / batch_idx_train) + + model_avg * ((batch_idx_train - average_period) / batch_idx_train)`. + """, + ) + + parser.add_argument( + "--use-fp16", + type=str2bool, + default=False, + help="Whether to use half precision training.", + ) + + add_model_arguments(parser) + + return parser + + +def get_params() -> AttributeDict: + """Return a dict containing training parameters. + + All training related parameters that are not passed from the commandline + are saved in the variable `params`. + + Commandline options are merged into `params` after they are parsed, so + you can also access them via `params`. + + Explanation of options saved in `params`: + + - best_train_loss: Best training loss so far. It is used to select + the model that has the lowest training loss. It is + updated during the training. + + - best_valid_loss: Best validation loss so far. It is used to select + the model that has the lowest validation loss. It is + updated during the training. + + - best_train_epoch: It is the epoch that has the best training loss. + + - best_valid_epoch: It is the epoch that has the best validation loss. + + - batch_idx_train: Used to writing statistics to tensorboard. It + contains number of batches trained so far across + epochs. + + - log_interval: Print training loss if batch_idx % log_interval` is 0 + + - reset_interval: Reset statistics if batch_idx % reset_interval is 0 + + - valid_interval: Run validation if batch_idx % valid_interval is 0 + + - feature_dim: The model input dim. It has to match the one used + in computing features. + + - subsampling_factor: The subsampling factor for the model. + + - encoder_dim: Hidden dim for multi-head attention model. + + - num_decoder_layers: Number of decoder layer of transformer decoder. + + - warm_step: The warm_step for Noam optimizer. 
+ """ + params = AttributeDict( + { + "best_train_loss": float("inf"), + "best_valid_loss": float("inf"), + "best_train_epoch": -1, + "best_valid_epoch": -1, + "batch_idx_train": 0, + "log_interval": 50, + "reset_interval": 200, + "valid_interval": 3000, + # parameters for conformer + "feature_dim": 80, + "subsampling_factor": 4, + # parameters for Noam + "model_warm_step": 3000, # arg given to model, not for lrate + "env_info": get_env_info(), + } + ) + + return params + + +def get_encoder_model(params: AttributeDict) -> nn.Module: + # TODO: We can add an option to switch between Conformer and Transformer + encoder = Conformer( + num_features=params.feature_dim, + subsampling_factor=params.subsampling_factor, + d_model=params.encoder_dim, + nhead=params.nhead, + dim_feedforward=params.dim_feedforward, + num_encoder_layers=params.num_encoder_layers, + ) + return encoder + + +def get_decoder_model(params: AttributeDict) -> nn.Module: + decoder = Decoder( + vocab_size=params.vocab_size, + decoder_dim=params.decoder_dim, + blank_id=params.blank_id, + context_size=params.context_size, + ) + return decoder + + +def get_joiner_model(params: AttributeDict) -> nn.Module: + joiner = Joiner( + encoder_dim=params.encoder_dim, + decoder_dim=params.decoder_dim, + joiner_dim=params.joiner_dim, + vocab_size=params.vocab_size, + ) + return joiner + + +def get_transducer_model(params: AttributeDict) -> nn.Module: + encoder = get_encoder_model(params) + decoder = get_decoder_model(params) + joiner = get_joiner_model(params) + + model = Transducer( + encoder=encoder, + decoder=decoder, + joiner=joiner, + encoder_dim=params.encoder_dim, + decoder_dim=params.decoder_dim, + joiner_dim=params.joiner_dim, + vocab_size=params.vocab_size, + ) + return model + + +def load_checkpoint_if_available( + params: AttributeDict, + model: nn.Module, + model_avg: nn.Module = None, + optimizer: Optional[torch.optim.Optimizer] = None, + scheduler: Optional[LRSchedulerType] = None, +) -> Optional[Dict[str, Any]]: + """Load checkpoint from file. + + If params.start_batch is positive, it will load the checkpoint from + `params.exp_dir/checkpoint-{params.start_batch}.pt`. Otherwise, if + params.start_epoch is larger than 1, it will load the checkpoint from + `params.start_epoch - 1`. + + Apart from loading state dict for `model` and `optimizer` it also updates + `best_train_epoch`, `best_train_loss`, `best_valid_epoch`, + and `best_valid_loss` in `params`. + + Args: + params: + The return value of :func:`get_params`. + model: + The training model. + model_avg: + The stored model averaged from the start of training. + optimizer: + The optimizer that we are using. + scheduler: + The scheduler that we are using. + Returns: + Return a dict containing previously saved training info. + """ + if params.start_batch > 0: + filename = params.exp_dir / f"checkpoint-{params.start_batch}.pt" + elif params.start_epoch > 1: + filename = params.exp_dir / f"epoch-{params.start_epoch-1}.pt" + else: + return None + + assert filename.is_file(), f"{filename} does not exist!" 
+ + saved_params = load_checkpoint( + filename, + model=model, + model_avg=model_avg, + optimizer=optimizer, + scheduler=scheduler, + ) + + keys = [ + "best_train_epoch", + "best_valid_epoch", + "batch_idx_train", + "best_train_loss", + "best_valid_loss", + ] + for k in keys: + params[k] = saved_params[k] + + if params.start_batch > 0: + if "cur_epoch" in saved_params: + params["start_epoch"] = saved_params["cur_epoch"] + + return saved_params + + +def save_checkpoint( + params: AttributeDict, + model: Union[nn.Module, DDP], + model_avg: Optional[nn.Module] = None, + optimizer: Optional[torch.optim.Optimizer] = None, + scheduler: Optional[LRSchedulerType] = None, + sampler: Optional[CutSampler] = None, + scaler: Optional[GradScaler] = None, + rank: int = 0, +) -> None: + """Save model, optimizer, scheduler and training stats to file. + + Args: + params: + It is returned by :func:`get_params`. + model: + The training model. + model_avg: + The stored model averaged from the start of training. + optimizer: + The optimizer used in the training. + sampler: + The sampler for the training dataset. + scaler: + The scaler used for mix precision training. + """ + if rank != 0: + return + filename = params.exp_dir / f"epoch-{params.cur_epoch}.pt" + save_checkpoint_impl( + filename=filename, + model=model, + model_avg=model_avg, + params=params, + optimizer=optimizer, + scheduler=scheduler, + sampler=sampler, + scaler=scaler, + rank=rank, + ) + + if params.best_train_epoch == params.cur_epoch: + best_train_filename = params.exp_dir / "best-train-loss.pt" + copyfile(src=filename, dst=best_train_filename) + + if params.best_valid_epoch == params.cur_epoch: + best_valid_filename = params.exp_dir / "best-valid-loss.pt" + copyfile(src=filename, dst=best_valid_filename) + + +def compute_loss( + params: AttributeDict, + model: Union[nn.Module, DDP], + graph_compiler: CharCtcTrainingGraphCompiler, + batch: dict, + is_training: bool, + warmup: float = 1.0, +) -> Tuple[Tensor, MetricsTracker]: + """ + Compute RNN-T loss given the model and its inputs. + + Args: + params: + Parameters for training. See :func:`get_params`. + model: + The model for training. It is an instance of Conformer in our case. + batch: + A batch of data. See `lhotse.dataset.K2SpeechRecognitionDataset()` + for the content in it. + is_training: + True for training. False for validation. When it is True, this + function enables autograd during computation; when it is False, it + disables autograd. + warmup: a floating point value which increases throughout training; + values >= 1.0 are fully warmed up and have all modules present. 
+ """ + device = ( + model.device + if isinstance(model, DDP) + else next(model.parameters()).device + ) + feature = batch["inputs"] + # at entry, feature is (N, T, C) + assert feature.ndim == 3 + feature = feature.to(device) + + supervisions = batch["supervisions"] + feature_lens = supervisions["num_frames"].to(device) + + texts = batch["supervisions"]["text"] + y = graph_compiler.texts_to_ids(texts) + assert type(y) == list + y = k2.RaggedTensor(y).to(device) + + with torch.set_grad_enabled(is_training): + simple_loss, pruned_loss = model( + x=feature, + x_lens=feature_lens, + y=y, + prune_range=params.prune_range, + am_scale=params.am_scale, + lm_scale=params.lm_scale, + warmup=warmup, + ) + # after the main warmup step, we keep pruned_loss_scale small + # for the same amount of time (model_warm_step), to avoid + # overwhelming the simple_loss and causing it to diverge, + # in case it had not fully learned the alignment yet. + pruned_loss_scale = ( + 0.0 + if warmup < 1.0 + else (0.1 if warmup > 1.0 and warmup < 2.0 else 1.0) + ) + loss = ( + params.simple_loss_scale * simple_loss + + pruned_loss_scale * pruned_loss + ) + + assert loss.requires_grad == is_training + + info = MetricsTracker() + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + info["frames"] = ( + (feature_lens // params.subsampling_factor).sum().item() + ) + + # Note: We use reduction=sum while computing the loss. + info["loss"] = loss.detach().cpu().item() + info["simple_loss"] = simple_loss.detach().cpu().item() + info["pruned_loss"] = pruned_loss.detach().cpu().item() + + return loss, info + + +def compute_validation_loss( + params: AttributeDict, + model: Union[nn.Module, DDP], + graph_compiler: CharCtcTrainingGraphCompiler, + valid_dl: torch.utils.data.DataLoader, + world_size: int = 1, +) -> MetricsTracker: + """Run the validation process.""" + model.eval() + + tot_loss = MetricsTracker() + + for batch_idx, batch in enumerate(valid_dl): + loss, loss_info = compute_loss( + params=params, + model=model, + graph_compiler=graph_compiler, + batch=batch, + is_training=False, + ) + assert loss.requires_grad is False + tot_loss = tot_loss + loss_info + + if world_size > 1: + tot_loss.reduce(loss.device) + + loss_value = tot_loss["loss"] / tot_loss["frames"] + if loss_value < params.best_valid_loss: + params.best_valid_epoch = params.cur_epoch + params.best_valid_loss = loss_value + + return tot_loss + + +def train_one_epoch( + params: AttributeDict, + model: Union[nn.Module, DDP], + optimizer: torch.optim.Optimizer, + scheduler: LRSchedulerType, + graph_compiler: CharCtcTrainingGraphCompiler, + train_dl: torch.utils.data.DataLoader, + valid_dl: torch.utils.data.DataLoader, + scaler: GradScaler, + model_avg: Optional[nn.Module] = None, + tb_writer: Optional[SummaryWriter] = None, + world_size: int = 1, + rank: int = 0, +) -> None: + """Train the model for one epoch. + + The training loss from the mean of all frames is saved in + `params.train_loss`. It runs the validation process every + `params.valid_interval` batches. + + Args: + params: + It is returned by :func:`get_params`. + model: + The model for training. + optimizer: + The optimizer we are using. + scheduler: + The learning rate scheduler, we call step() every step. + train_dl: + Dataloader for the training dataset. + valid_dl: + Dataloader for the validation dataset. + scaler: + The scaler used for mix precision training. + model_avg: + The stored model averaged from the start of training. 
+ tb_writer: + Writer to write log messages to tensorboard. + world_size: + Number of nodes in DDP training. If it is 1, DDP is disabled. + rank: + The rank of the node in DDP training. If no DDP is used, it should + be set to 0. + """ + model.train() + + tot_loss = MetricsTracker() + + for batch_idx, batch in enumerate(train_dl): + + params.batch_idx_train += 1 + batch_size = len(batch["supervisions"]["text"]) + + try: + with torch.cuda.amp.autocast(enabled=params.use_fp16): + loss, loss_info = compute_loss( + params=params, + model=model, + graph_compiler=graph_compiler, + batch=batch, + is_training=True, + warmup=(params.batch_idx_train / params.model_warm_step), + ) + # summary stats + tot_loss = (tot_loss * (1 - 1 / params.reset_interval)) + loss_info + + # NOTE: We use reduction==sum and loss is computed over utterances + # in the batch and there is no normalization to it so far. + scaler.scale(loss).backward() + scheduler.step_batch(params.batch_idx_train) + scaler.step(optimizer) + scaler.update() + optimizer.zero_grad() + except: # noqa + display_and_save_batch( + batch, params=params, graph_compiler=graph_compiler + ) + raise + + if params.print_diagnostics and batch_idx == 5: + return + + if ( + rank == 0 + and params.batch_idx_train > 0 + and params.batch_idx_train % params.average_period == 0 + ): + update_averaged_model( + params=params, + model_cur=model, + model_avg=model_avg, + ) + + if ( + params.batch_idx_train > 0 + and params.batch_idx_train % params.save_every_n == 0 + ): + save_checkpoint_with_global_batch_idx( + out_dir=params.exp_dir, + global_batch_idx=params.batch_idx_train, + model=model, + model_avg=model_avg, + params=params, + optimizer=optimizer, + scheduler=scheduler, + sampler=train_dl.sampler, + scaler=scaler, + rank=rank, + ) + remove_checkpoints( + out_dir=params.exp_dir, + topk=params.keep_last_k, + rank=rank, + ) + + if batch_idx % params.log_interval == 0: + cur_lr = scheduler.get_last_lr()[0] + logging.info( + f"Epoch {params.cur_epoch}, " + f"batch {batch_idx}, loss[{loss_info}], " + f"tot_loss[{tot_loss}], batch size: {batch_size}, " + f"lr: {cur_lr:.2e}" + ) + + if tb_writer is not None: + tb_writer.add_scalar( + "train/learning_rate", cur_lr, params.batch_idx_train + ) + + loss_info.write_summary( + tb_writer, "train/current_", params.batch_idx_train + ) + tot_loss.write_summary( + tb_writer, "train/tot_", params.batch_idx_train + ) + + if batch_idx > 0 and batch_idx % params.valid_interval == 0: + logging.info("Computing validation loss") + valid_info = compute_validation_loss( + params=params, + model=model, + graph_compiler=graph_compiler, + valid_dl=valid_dl, + world_size=world_size, + ) + model.train() + logging.info(f"Epoch {params.cur_epoch}, validation: {valid_info}") + if tb_writer is not None: + valid_info.write_summary( + tb_writer, "train/valid_", params.batch_idx_train + ) + + loss_value = tot_loss["loss"] / tot_loss["frames"] + params.train_loss = loss_value + if params.train_loss < params.best_train_loss: + params.best_train_epoch = params.cur_epoch + params.best_train_loss = params.train_loss + + +def run(rank, world_size, args): + """ + Args: + rank: + It is a value between 0 and `world_size-1`, which is + passed automatically by `mp.spawn()` in :func:`main`. + The node with rank 0 is responsible for saving checkpoint. + world_size: + Number of GPUs for DDP training. 
+ args: + The return value of get_parser().parse_args() + """ + params = get_params() + params.update(vars(args)) + + fix_random_seed(params.seed) + if world_size > 1: + setup_dist(rank, world_size, params.master_port) + + setup_logger(f"{params.exp_dir}/log/log-train") + logging.info("Training started") + + if args.tensorboard and rank == 0: + tb_writer = SummaryWriter(log_dir=f"{params.exp_dir}/tensorboard") + else: + tb_writer = None + + device = torch.device("cpu") + if torch.cuda.is_available(): + device = torch.device("cuda", rank) + logging.info(f"Device: {device}") + + lexicon = Lexicon(params.lang_dir) + graph_compiler = CharCtcTrainingGraphCompiler( + lexicon=lexicon, + device=device, + ) + + params.blank_id = lexicon.token_table[""] + params.vocab_size = max(lexicon.tokens) + 1 + + logging.info(params) + + logging.info("About to create model") + model = get_transducer_model(params) + + num_param = sum([p.numel() for p in model.parameters()]) + logging.info(f"Number of model parameters: {num_param}") + + assert params.save_every_n >= params.average_period + model_avg: Optional[nn.Module] = None + if rank == 0: + # model_avg is only used with rank 0 + model_avg = copy.deepcopy(model) + + assert params.start_epoch > 0, params.start_epoch + checkpoints = load_checkpoint_if_available( + params=params, model=model, model_avg=model_avg + ) + + model.to(device) + if world_size > 1: + logging.info("Using DDP") + model = DDP(model, device_ids=[rank]) + + optimizer = Eve(model.parameters(), lr=params.initial_lr) + + scheduler = Eden(optimizer, params.lr_batches, params.lr_epochs) + + if checkpoints and "optimizer" in checkpoints: + logging.info("Loading optimizer state dict") + optimizer.load_state_dict(checkpoints["optimizer"]) + + if ( + checkpoints + and "scheduler" in checkpoints + and checkpoints["scheduler"] is not None + ): + logging.info("Loading scheduler state dict") + scheduler.load_state_dict(checkpoints["scheduler"]) + + if params.print_diagnostics: + opts = diagnostics.TensorDiagnosticOptions( + 2 ** 22 + ) # allow 4 megabytes per sub-module + diagnostic = diagnostics.attach_diagnostics(model, opts) + + aishell2 = AiShell2AsrDataModule(args) + + train_cuts = aishell2.train_cuts() + + def remove_short_and_long_utt(c: Cut): + # Keep only utterances with duration between 1 second and 8 seconds + # + # Caution: There is a reason to select 8.0 here. 
Please see + # ../local/display_manifest_statistics.py + # + # You should use ../local/display_manifest_statistics.py to get + # an utterance duration distribution for your dataset to select + # the threshold + return 1.0 <= c.duration <= 8.0 + + train_cuts = train_cuts.filter(remove_short_and_long_utt) + + if params.start_batch > 0 and checkpoints and "sampler" in checkpoints: + # We only load the sampler's state dict when it loads a checkpoint + # saved in the middle of an epoch + sampler_state_dict = checkpoints["sampler"] + else: + sampler_state_dict = None + + train_dl = aishell2.train_dataloaders( + train_cuts, sampler_state_dict=sampler_state_dict + ) + + valid_cuts = aishell2.valid_cuts() + valid_dl = aishell2.valid_dataloaders(valid_cuts) + + if not params.print_diagnostics: + scan_pessimistic_batches_for_oom( + model=model, + train_dl=train_dl, + optimizer=optimizer, + graph_compiler=graph_compiler, + params=params, + warmup=0.0 if params.start_epoch == 1 else 1.0, + ) + + scaler = GradScaler(enabled=params.use_fp16) + if checkpoints and "grad_scaler" in checkpoints: + logging.info("Loading grad scaler state dict") + scaler.load_state_dict(checkpoints["grad_scaler"]) + + for epoch in range(params.start_epoch, params.num_epochs + 1): + scheduler.step_epoch(epoch - 1) + fix_random_seed(params.seed + epoch - 1) + train_dl.sampler.set_epoch(epoch - 1) + + if tb_writer is not None: + tb_writer.add_scalar("train/epoch", epoch, params.batch_idx_train) + + params.cur_epoch = epoch + + train_one_epoch( + params=params, + model=model, + model_avg=model_avg, + optimizer=optimizer, + scheduler=scheduler, + graph_compiler=graph_compiler, + train_dl=train_dl, + valid_dl=valid_dl, + scaler=scaler, + tb_writer=tb_writer, + world_size=world_size, + rank=rank, + ) + + if params.print_diagnostics: + diagnostic.print_diagnostics() + break + + save_checkpoint( + params=params, + model=model, + model_avg=model_avg, + optimizer=optimizer, + scheduler=scheduler, + sampler=train_dl.sampler, + scaler=scaler, + rank=rank, + ) + + logging.info("Done!") + + if world_size > 1: + torch.distributed.barrier() + cleanup_dist() + + +def display_and_save_batch( + batch: dict, + params: AttributeDict, + graph_compiler: CharCtcTrainingGraphCompiler, +) -> None: + """Display the batch statistics and save the batch into disk. + + Args: + batch: + A batch of data. See `lhotse.dataset.K2SpeechRecognitionDataset()` + for the content in it. + params: + Parameters for training. See :func:`get_params`. + """ + from lhotse.utils import uuid4 + + filename = f"{params.exp_dir}/batch-{uuid4()}.pt" + logging.info(f"Saving batch to {filename}") + torch.save(batch, filename) + + supervisions = batch["supervisions"] + features = batch["inputs"] + + logging.info(f"features shape: {features.shape}") + + y = graph_compiler.texts_to_ids(supervisions["text"]) + num_tokens = sum(len(i) for i in y) + logging.info(f"num tokens: {num_tokens}") + + +def scan_pessimistic_batches_for_oom( + model: Union[nn.Module, DDP], + train_dl: torch.utils.data.DataLoader, + optimizer: torch.optim.Optimizer, + graph_compiler: CharCtcTrainingGraphCompiler, + params: AttributeDict, + warmup: float, +): + from lhotse.dataset import find_pessimistic_batches + + logging.info( + "Sanity check -- see if any of the batches in epoch 1 would cause OOM." 
+ ) + batches, crit_values = find_pessimistic_batches(train_dl.sampler) + for criterion, cuts in batches.items(): + batch = train_dl.dataset[cuts] + try: + with torch.cuda.amp.autocast(enabled=params.use_fp16): + loss, _ = compute_loss( + params=params, + model=model, + graph_compiler=graph_compiler, + batch=batch, + is_training=True, + warmup=warmup, + ) + loss.backward() + optimizer.step() + optimizer.zero_grad() + except Exception as e: + if "CUDA out of memory" in str(e): + logging.error( + "Your GPU ran out of memory with the current " + "max_duration setting. We recommend decreasing " + "max_duration and trying again.\n" + f"Failing criterion: {criterion} " + f"(={crit_values[criterion]}) ..." + ) + display_and_save_batch( + batch, params=params, graph_compiler=graph_compiler + ) + raise + + +def main(): + parser = get_parser() + AiShell2AsrDataModule.add_arguments(parser) + args = parser.parse_args() + args.exp_dir = Path(args.exp_dir) + + world_size = args.world_size + assert world_size >= 1 + if world_size > 1: + mp.spawn(run, args=(world_size, args), nprocs=world_size, join=True) + else: + run(rank=0, world_size=1, args=args) + + +torch.set_num_threads(1) +torch.set_num_interop_threads(1) + +if __name__ == "__main__": + main() diff --git a/egs/aishell2/ASR/shared b/egs/aishell2/ASR/shared new file mode 120000 index 000000000..4c5e91438 --- /dev/null +++ b/egs/aishell2/ASR/shared @@ -0,0 +1 @@ +../../../icefall/shared/ \ No newline at end of file diff --git a/egs/wenetspeech/ASR/local/prepare_words.py b/egs/wenetspeech/ASR/local/prepare_words.py index 65aca2983..d5f833db1 100644 --- a/egs/wenetspeech/ASR/local/prepare_words.py +++ b/egs/wenetspeech/ASR/local/prepare_words.py @@ -75,6 +75,16 @@ def main(): logging.info("Starting writing the words.txt") f_out = open(output_file, "w", encoding="utf-8") + + # LG decoding needs below symbols. + id1, id2, id3 = ( + str(len(new_lines)), + str(len(new_lines) + 1), + str(len(new_lines) + 2), + ) + add_words = ["#0 " + id1, " " + id2, " " + id3] + new_lines.extend(add_words) + for line in new_lines: f_out.write(line) f_out.write("\n") From ffca1ae7fb75bc65c6013cde40bcc0545774372f Mon Sep 17 00:00:00 2001 From: ezerhouni <61225408+ezerhouni@users.noreply.github.com> Date: Fri, 15 Jul 2022 04:32:54 +0200 Subject: [PATCH 04/38] [WIP] Rnn-T LM nbest rescoring (#471) --- .../beam_search.py | 187 +++++++++++++++++- .../pruned_transducer_stateless3/decode.py | 177 ++++++++++++++++- 2 files changed, 358 insertions(+), 6 deletions(-) diff --git a/egs/librispeech/ASR/pruned_transducer_stateless2/beam_search.py b/egs/librispeech/ASR/pruned_transducer_stateless2/beam_search.py index 6b6190a09..ed6a6ea82 100644 --- a/egs/librispeech/ASR/pruned_transducer_stateless2/beam_search.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless2/beam_search.py @@ -19,6 +19,7 @@ from dataclasses import dataclass from typing import Dict, List, Optional import k2 +import sentencepiece as spm import torch from model import Transducer @@ -34,6 +35,7 @@ def fast_beam_search_one_best( beam: float, max_states: int, max_contexts: int, + temperature: float = 1.0, ) -> List[List[int]]: """It limits the maximum number of symbols per frame to 1. @@ -56,6 +58,8 @@ def fast_beam_search_one_best( Max states per stream per frame. max_contexts: Max contexts pre stream per frame. + temperature: + Softmax temperature. Returns: Return the decoded result. 
""" @@ -67,6 +71,7 @@ def fast_beam_search_one_best( beam=beam, max_states=max_states, max_contexts=max_contexts, + temperature=temperature, ) best_path = one_best_decoding(lattice) @@ -85,6 +90,7 @@ def fast_beam_search_nbest_LG( num_paths: int, nbest_scale: float = 0.5, use_double_scores: bool = True, + temperature: float = 1.0, ) -> List[List[int]]: """It limits the maximum number of symbols per frame to 1. @@ -120,6 +126,8 @@ def fast_beam_search_nbest_LG( use_double_scores: True to use double precision for computation. False to use single precision. + temperature: + Softmax temperature. Returns: Return the decoded result. """ @@ -131,6 +139,7 @@ def fast_beam_search_nbest_LG( beam=beam, max_states=max_states, max_contexts=max_contexts, + temperature=temperature, ) nbest = Nbest.from_lattice( @@ -201,6 +210,7 @@ def fast_beam_search_nbest( num_paths: int, nbest_scale: float = 0.5, use_double_scores: bool = True, + temperature: float = 1.0, ) -> List[List[int]]: """It limits the maximum number of symbols per frame to 1. @@ -236,6 +246,8 @@ def fast_beam_search_nbest( use_double_scores: True to use double precision for computation. False to use single precision. + temperature: + Softmax temperature. Returns: Return the decoded result. """ @@ -247,6 +259,7 @@ def fast_beam_search_nbest( beam=beam, max_states=max_states, max_contexts=max_contexts, + temperature=temperature, ) nbest = Nbest.from_lattice( @@ -282,6 +295,7 @@ def fast_beam_search_nbest_oracle( ref_texts: List[List[int]], use_double_scores: bool = True, nbest_scale: float = 0.5, + temperature: float = 1.0, ) -> List[List[int]]: """It limits the maximum number of symbols per frame to 1. @@ -321,7 +335,8 @@ def fast_beam_search_nbest_oracle( nbest_scale: It's the scale applied to the lattice.scores. A smaller value yields more unique paths. - + temperature: + Softmax temperature. Returns: Return the decoded result. """ @@ -333,6 +348,7 @@ def fast_beam_search_nbest_oracle( beam=beam, max_states=max_states, max_contexts=max_contexts, + temperature=temperature, ) nbest = Nbest.from_lattice( @@ -373,6 +389,7 @@ def fast_beam_search( beam: float, max_states: int, max_contexts: int, + temperature: float = 1.0, ) -> k2.Fsa: """It limits the maximum number of symbols per frame to 1. @@ -392,6 +409,8 @@ def fast_beam_search( Max states per stream per frame. max_contexts: Max contexts pre stream per frame. + temperature: + Softmax temperature. Returns: Return an FsaVec with axes [utt][state][arc] containing the decoded lattice. Note: When the input graph is a TrivialGraph, the returned @@ -440,7 +459,7 @@ def fast_beam_search( project_input=False, ) logits = logits.squeeze(1).squeeze(1) - log_probs = logits.log_softmax(dim=-1) + log_probs = (logits / temperature).log_softmax(dim=-1) decoding_streams.advance(log_probs) decoding_streams.terminate_and_flush_to_streams() lattice = decoding_streams.format_output(encoder_out_lens.tolist()) @@ -783,6 +802,7 @@ def modified_beam_search( encoder_out: torch.Tensor, encoder_out_lens: torch.Tensor, beam: int = 4, + temperature: float = 1.0, ) -> List[List[int]]: """Beam search in batch mode with --max-sym-per-frame=1 being hardcoded. @@ -796,6 +816,8 @@ def modified_beam_search( encoder_out before padding. beam: Number of active paths during the beam search. + temperature: + Softmax temperature. Returns: Return a list-of-list of token IDs. ans[i] is the decoding results for the i-th utterance. 
@@ -879,7 +901,9 @@ def modified_beam_search( logits = logits.squeeze(1).squeeze(1) # (num_hyps, vocab_size) - log_probs = logits.log_softmax(dim=-1) # (num_hyps, vocab_size) + log_probs = (logits / temperature).log_softmax( + dim=-1 + ) # (num_hyps, vocab_size) log_probs.add_(ys_log_probs) @@ -1043,6 +1067,7 @@ def beam_search( model: Transducer, encoder_out: torch.Tensor, beam: int = 4, + temperature: float = 1.0, ) -> List[int]: """ It implements Algorithm 1 in https://arxiv.org/pdf/1211.3711.pdf @@ -1056,6 +1081,8 @@ def beam_search( A tensor of shape (N, T, C) from the encoder. Support only N==1 for now. beam: Beam size. + temperature: + Softmax temperature. Returns: Return the decoded result. """ @@ -1132,7 +1159,7 @@ def beam_search( ) # TODO(fangjun): Scale the blank posterior - log_prob = logits.log_softmax(dim=-1) + log_prob = (logits / temperature).log_softmax(dim=-1) # log_prob is (1, 1, 1, vocab_size) log_prob = log_prob.squeeze() # Now log_prob is (vocab_size,) @@ -1171,3 +1198,155 @@ def beam_search( best_hyp = B.get_most_probable(length_norm=True) ys = best_hyp.ys[context_size:] # [context_size:] to remove blanks return ys + + +def fast_beam_search_with_nbest_rescoring( + model: Transducer, + decoding_graph: k2.Fsa, + encoder_out: torch.Tensor, + encoder_out_lens: torch.Tensor, + beam: float, + max_states: int, + max_contexts: int, + ngram_lm_scale_list: List[float], + num_paths: int, + G: k2.Fsa, + sp: spm.SentencePieceProcessor, + word_table: k2.SymbolTable, + oov_word: str = "", + use_double_scores: bool = True, + nbest_scale: float = 0.5, + temperature: float = 1.0, +) -> Dict[str, List[List[int]]]: + """It limits the maximum number of symbols per frame to 1. + A lattice is first obtained using modified beam search, and then + the shortest path within the lattice is used as the final output. + Args: + model: + An instance of `Transducer`. + decoding_graph: + Decoding graph used for decoding, may be a TrivialGraph or a HLG. + encoder_out: + A tensor of shape (N, T, C) from the encoder. + encoder_out_lens: + A tensor of shape (N,) containing the number of frames in `encoder_out` + before padding. + beam: + Beam value, similar to the beam used in Kaldi. + max_states: + Max states per stream per frame. + max_contexts: + Max contexts pre stream per frame. + ngram_lm_scale_list: + A list of floats representing LM score scales. + num_paths: + Number of paths to extract from the decoded lattice. + G: + An FsaVec containing only a single FSA. It is an n-gram LM. + sp: + The BPE model. + word_table: + The word symbol table. + oov_word: + OOV words are replaced with this word. + use_double_scores: + True to use double precision for computation. False to use + single precision. + nbest_scale: + It's the scale applied to the lattice.scores. A smaller value + yields more unique paths. + temperature: + Softmax temperature. + Returns: + Return the decoded result in a dict, where the key has the form + 'ngram_lm_scale_xx' and the value is the decoded results. `xx` is the + ngram LM scale value used during decoding, i.e., 0.1. + """ + lattice = fast_beam_search( + model=model, + decoding_graph=decoding_graph, + encoder_out=encoder_out, + encoder_out_lens=encoder_out_lens, + beam=beam, + max_states=max_states, + max_contexts=max_contexts, + temperature=temperature, + ) + + nbest = Nbest.from_lattice( + lattice=lattice, + num_paths=num_paths, + use_double_scores=use_double_scores, + nbest_scale=nbest_scale, + ) + # at this point, nbest.fsa.scores are all zeros. 
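    # In outline, what follows scores each extracted path twice: intersecting
    # the n-best paths with the lattice recovers per-path acoustic scores, and
    # the corresponding word sequences (tokens -> words via sp.decode, with
    # OOVs mapped to oov_word) are composed with the n-gram FSA G to obtain LM
    # scores. The two are combined as am_score + scale * ngram_lm_score for
    # every scale in ngram_lm_scale_list, and the best path per scale is
    # returned under the key f"ngram_lm_scale_{scale}".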
+ + nbest = nbest.intersect(lattice) + # Now nbest.fsa.scores contains acoustic scores + + am_scores = nbest.tot_scores() + + # Now we need to compute the LM scores of each path. + # (1) Get the token IDs of each Path. We assume the decoding_graph + # is an acceptor, i.e., lattice is also an acceptor + tokens_shape = nbest.fsa.arcs.shape().remove_axis(1) # [path][arc] + + tokens = k2.RaggedTensor(tokens_shape, nbest.fsa.labels.contiguous()) + tokens = tokens.remove_values_leq(0) # remove -1 and 0 + + token_list: List[List[int]] = tokens.tolist() + word_list: List[List[str]] = sp.decode(token_list) + + assert isinstance(oov_word, str), oov_word + assert oov_word in word_table, oov_word + oov_word_id = word_table[oov_word] + + word_ids_list: List[List[int]] = [] + + for words in word_list: + this_word_ids = [] + for w in words.split(): + if w in word_table: + this_word_ids.append(word_table[w]) + else: + this_word_ids.append(oov_word_id) + word_ids_list.append(this_word_ids) + + word_fsas = k2.linear_fsa(word_ids_list, device=lattice.device) + word_fsas_with_self_loops = k2.add_epsilon_self_loops(word_fsas) + + num_unique_paths = len(word_ids_list) + + b_to_a_map = torch.zeros( + num_unique_paths, + dtype=torch.int32, + device=lattice.device, + ) + + rescored_word_fsas = k2.intersect_device( + a_fsas=G, + b_fsas=word_fsas_with_self_loops, + b_to_a_map=b_to_a_map, + sorted_match_a=True, + ret_arc_maps=False, + ) + + rescored_word_fsas = k2.remove_epsilon_self_loops(rescored_word_fsas) + rescored_word_fsas = k2.top_sort(k2.connect(rescored_word_fsas)) + ngram_lm_scores = rescored_word_fsas.get_tot_scores( + use_double_scores=True, + log_semiring=False, + ) + + ans: Dict[str, List[List[int]]] = {} + for s in ngram_lm_scale_list: + key = f"ngram_lm_scale_{s}" + tot_scores = am_scores.values + s * ngram_lm_scores + ragged_tot_scores = k2.RaggedTensor(nbest.shape, tot_scores) + max_indexes = ragged_tot_scores.argmax() + best_path = k2.index_fsa(nbest.fsa, max_indexes) + hyps = get_texts(best_path) + + ans[key] = hyps + + return ans diff --git a/egs/librispeech/ASR/pruned_transducer_stateless3/decode.py b/egs/librispeech/ASR/pruned_transducer_stateless3/decode.py index 44fc34640..8f55413e4 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless3/decode.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless3/decode.py @@ -111,6 +111,7 @@ from beam_search import ( fast_beam_search_nbest_LG, fast_beam_search_nbest_oracle, fast_beam_search_one_best, + fast_beam_search_with_nbest_rescoring, greedy_search, greedy_search_batch, modified_beam_search, @@ -312,6 +313,35 @@ def get_parser(): help="left context can be seen during decoding (in frames after subsampling)", ) + parser.add_argument( + "--temperature", + type=float, + default=1.0, + help="""Softmax temperature. + The output of the model is (logits / temperature).log_softmax(). + """, + ) + + parser.add_argument( + "--lm-dir", + type=Path, + default=Path("./data/lm"), + help="""Used only when --decoding-method is + fast_beam_search_with_nbest_rescoring. + It should contain either G_4_gram.pt or G_4_gram.fst.txt + """, + ) + + parser.add_argument( + "--words-txt", + type=Path, + default=Path("./data/lang_bpe_500/words.txt"), + help="""Used only when --decoding-method is + fast_beam_search_with_nbest_rescoring. + It is the word table. 
+ """, + ) + add_model_arguments(parser) return parser @@ -324,6 +354,7 @@ def decode_one_batch( batch: dict, word_table: Optional[k2.SymbolTable] = None, decoding_graph: Optional[k2.Fsa] = None, + G: Optional[k2.Fsa] = None, ) -> Dict[str, List[List[str]]]: """Decode one batch and return the result in a dict. The dict has the following format: @@ -352,6 +383,11 @@ def decode_one_batch( The decoding graph. Can be either a `k2.trivial_graph` or HLG, Used only when --decoding_method is fast_beam_search, fast_beam_search_nbest, fast_beam_search_nbest_oracle, and fast_beam_search_nbest_LG. + G: + Optional. Used only when decoding method is fast_beam_search, + fast_beam_search_nbest, fast_beam_search_nbest_oracle, + or fast_beam_search_with_nbest_rescoring. + It an FsaVec containing an acceptor. Returns: Return the decoding result. See above description for the format of the returned dict. @@ -397,6 +433,7 @@ def decode_one_batch( beam=params.beam, max_contexts=params.max_contexts, max_states=params.max_states, + temperature=params.temperature, ) for hyp in sp.decode(hyp_tokens): hyps.append(hyp.split()) @@ -411,6 +448,7 @@ def decode_one_batch( max_states=params.max_states, num_paths=params.num_paths, nbest_scale=params.nbest_scale, + temperature=params.temperature, ) for hyp in hyp_tokens: hyps.append([word_table[i] for i in hyp]) @@ -425,6 +463,7 @@ def decode_one_batch( max_states=params.max_states, num_paths=params.num_paths, nbest_scale=params.nbest_scale, + temperature=params.temperature, ) for hyp in sp.decode(hyp_tokens): hyps.append(hyp.split()) @@ -440,6 +479,7 @@ def decode_one_batch( num_paths=params.num_paths, ref_texts=sp.encode(supervisions["text"]), nbest_scale=params.nbest_scale, + temperature=params.temperature, ) for hyp in sp.decode(hyp_tokens): hyps.append(hyp.split()) @@ -460,9 +500,32 @@ def decode_one_batch( encoder_out=encoder_out, encoder_out_lens=encoder_out_lens, beam=params.beam_size, + temperature=params.temperature, ) for hyp in sp.decode(hyp_tokens): hyps.append(hyp.split()) + elif params.decoding_method == "fast_beam_search_with_nbest_rescoring": + ngram_lm_scale_list = [-0.5, -0.2, -0.1, -0.05, -0.02, 0] + ngram_lm_scale_list += [0.01, 0.02, 0.05] + ngram_lm_scale_list += [0.1, 0.3, 0.5, 0.8] + ngram_lm_scale_list += [1.0, 1.5, 2.5, 3] + hyp_tokens = fast_beam_search_with_nbest_rescoring( + model=model, + decoding_graph=decoding_graph, + encoder_out=encoder_out, + encoder_out_lens=encoder_out_lens, + beam=params.beam, + max_states=params.max_states, + max_contexts=params.max_contexts, + ngram_lm_scale_list=ngram_lm_scale_list, + num_paths=params.num_paths, + G=G, + sp=sp, + word_table=word_table, + use_double_scores=True, + nbest_scale=params.nbest_scale, + temperature=params.temperature, + ) else: batch_size = encoder_out.size(0) @@ -496,6 +559,7 @@ def decode_one_batch( f"beam_{params.beam}_" f"max_contexts_{params.max_contexts}_" f"max_states_{params.max_states}" + f"temperature_{params.temperature}" ): hyps } elif params.decoding_method == "fast_beam_search": @@ -504,8 +568,23 @@ def decode_one_batch( f"beam_{params.beam}_" f"max_contexts_{params.max_contexts}_" f"max_states_{params.max_states}" + f"temperature_{params.temperature}" ): hyps } + elif params.decoding_method == "fast_beam_search_with_nbest_rescoring": + prefix = ( + f"beam_{params.beam}_" + f"max_contexts_{params.max_contexts}_" + f"max_states_{params.max_states}_" + f"num_paths_{params.num_paths}_" + f"nbest_scale_{params.nbest_scale}_" + f"temperature_{params.temperature}_" + ) + ans: 
Dict[str, List[List[str]]] = {} + for key, hyp in hyp_tokens.items(): + t: List[str] = sp.decode(hyp) + ans[prefix + key] = [s.split() for s in t] + return ans elif "fast_beam_search" in params.decoding_method: key = f"beam_{params.beam}_" key += f"max_contexts_{params.max_contexts}_" @@ -515,10 +594,14 @@ def decode_one_batch( key += f"nbest_scale_{params.nbest_scale}" if "LG" in params.decoding_method: key += f"_ngram_lm_scale_{params.ngram_lm_scale}" - return {key: hyps} else: - return {f"beam_size_{params.beam_size}": hyps} + return { + ( + f"beam_size_{params.beam_size}_" + f"temperature_{params.temperature}" + ): hyps + } def decode_dataset( @@ -528,6 +611,7 @@ def decode_dataset( sp: spm.SentencePieceProcessor, word_table: Optional[k2.SymbolTable] = None, decoding_graph: Optional[k2.Fsa] = None, + G: Optional[k2.Fsa] = None, ) -> Dict[str, List[Tuple[List[str], List[str]]]]: """Decode dataset. @@ -546,6 +630,11 @@ def decode_dataset( The decoding graph. Can be either a `k2.trivial_graph` or HLG, Used only when --decoding_method is fast_beam_search, fast_beam_search_nbest, fast_beam_search_nbest_oracle, and fast_beam_search_nbest_LG. + G: + Optional. Used only when decoding method is fast_beam_search, + fast_beam_search_nbest, fast_beam_search_nbest_oracle, + or fast_beam_search_with_nbest_rescoring. + It's an FsaVec containing an acceptor. Returns: Return a dict, whose key may be "greedy_search" if greedy search is used, or it may be "beam_7" if beam size of 7 is used. @@ -576,6 +665,7 @@ def decode_dataset( word_table=word_table, decoding_graph=decoding_graph, batch=batch, + G=G, ) for name, hyps in hyps_dict.items(): @@ -642,6 +732,71 @@ def save_results( logging.info(s) +def load_ngram_LM( + lm_dir: Path, word_table: k2.SymbolTable, device: torch.device +) -> k2.Fsa: + """Read a ngram model from the given directory. + Args: + lm_dir: + It should contain either G_4_gram.pt or G_4_gram.fst.txt + word_table: + The word table mapping words to IDs and vice versa. + device: + The resulting FSA will be moved to this device. + Returns: + Return an FsaVec containing a single acceptor. + """ + lm_dir = Path(lm_dir) + assert lm_dir.is_dir(), f"{lm_dir} does not exist" + + pt_file = lm_dir / "G_4_gram.pt" + + if pt_file.is_file(): + logging.info(f"Loading pre-compiled {pt_file}") + d = torch.load(pt_file, map_location=device) + G = k2.Fsa.from_dict(d) + G = k2.add_epsilon_self_loops(G) + G = k2.arc_sort(G) + return G + + txt_file = lm_dir / "G_4_gram.fst.txt" + + assert txt_file.is_file(), f"{txt_file} does not exist" + logging.info(f"Loading {txt_file}") + logging.warning("It may take 8 minutes (Will be cached for later use).") + with open(txt_file) as f: + G = k2.Fsa.from_openfst(f.read(), acceptor=False) + + # G.aux_labels is not needed in later computations, so + # remove it here. + del G.aux_labels + # Now G is an acceptor + + first_word_disambig_id = word_table["#0"] + # CAUTION: The following line is crucial. + # Arcs entering the back-off state have label equal to #0. + # We have to change it to 0 here. + G.labels[G.labels >= first_word_disambig_id] = 0 + + # See https://github.com/k2-fsa/k2/issues/874 + # for why we need to set G.properties to None + G.__dict__["_properties"] = None + + G = k2.Fsa.from_fsas([G]).to(device) + + # Save a dummy value so that it can be loaded in C++. + # See https://github.com/pytorch/pytorch/issues/67902 + # for why we need to do this. 
+ G.dummy = 1 + + logging.info(f"Saving to {pt_file} for later use") + torch.save(G.as_dict(), pt_file) + + G = k2.add_epsilon_self_loops(G) + G = k2.arc_sort(G) + return G + + @torch.no_grad() def main(): parser = get_parser() @@ -660,6 +815,7 @@ def main(): "fast_beam_search_nbest_LG", "fast_beam_search_nbest_oracle", "modified_beam_search", + "fast_beam_search_with_nbest_rescoring", ) params.res_dir = params.exp_dir / params.decoding_method @@ -676,6 +832,7 @@ def main(): params.suffix += f"-beam-{params.beam}" params.suffix += f"-max-contexts-{params.max_contexts}" params.suffix += f"-max-states-{params.max_states}" + params.suffix += f"-temperature-{params.temperature}" if "nbest" in params.decoding_method: params.suffix += f"-nbest-scale-{params.nbest_scale}" params.suffix += f"-num-paths-{params.num_paths}" @@ -685,9 +842,11 @@ def main(): params.suffix += ( f"-{params.decoding_method}-beam-size-{params.beam_size}" ) + params.suffix += f"-temperature-{params.temperature}" else: params.suffix += f"-context-{params.context_size}" params.suffix += f"-max-sym-per-frame-{params.max_sym_per_frame}" + params.suffix += f"-temperature-{params.temperature}" setup_logger(f"{params.res_dir}/log-decode-{params.suffix}") logging.info("Decoding started") @@ -760,6 +919,19 @@ def main(): torch.load(lg_filename, map_location=device) ) decoding_graph.scores *= params.ngram_lm_scale + elif params.decoding_method == "fast_beam_search_with_nbest_rescoring": + logging.info(f"Loading word symbol table from {params.words_txt}") + word_table = k2.SymbolTable.from_file(params.words_txt) + + G = load_ngram_LM( + lm_dir=params.lm_dir, + word_table=word_table, + device=device, + ) + decoding_graph = k2.trivial_graph( + params.vocab_size - 1, device=device + ) + logging.info(f"G properties_str: {G.properties_str}") else: word_table = None decoding_graph = k2.trivial_graph( @@ -792,6 +964,7 @@ def main(): sp=sp, word_table=word_table, decoding_graph=decoding_graph, + G=G, ) save_results( From aec222e2fe96bba7b2a7c96bcb2327a2fd45dfdc Mon Sep 17 00:00:00 2001 From: Mingshuang Luo <37799481+luomingshuang@users.noreply.github.com> Date: Mon, 18 Jul 2022 14:36:40 +0800 Subject: [PATCH 05/38] add compile_lg.py for aishell2 recipe (#481) --- egs/aishell2/ASR/local/compile_lg.py | 1 + 1 file changed, 1 insertion(+) create mode 120000 egs/aishell2/ASR/local/compile_lg.py diff --git a/egs/aishell2/ASR/local/compile_lg.py b/egs/aishell2/ASR/local/compile_lg.py new file mode 120000 index 000000000..462d6d3fb --- /dev/null +++ b/egs/aishell2/ASR/local/compile_lg.py @@ -0,0 +1 @@ +../../../librispeech/ASR/local/compile_lg.py \ No newline at end of file From 608473b4eb9a8f44d594aa84cab907f1c44fda74 Mon Sep 17 00:00:00 2001 From: ezerhouni <61225408+ezerhouni@users.noreply.github.com> Date: Mon, 18 Jul 2022 10:52:17 +0200 Subject: [PATCH 06/38] Add RNN-LM rescoring in fast beam search (#475) --- .../beam_search.py | 207 +++++++++++++++++- .../pruned_transducer_stateless3/decode.py | 127 ++++++++++- icefall/decode.py | 2 + 3 files changed, 325 insertions(+), 11 deletions(-) diff --git a/egs/librispeech/ASR/pruned_transducer_stateless2/beam_search.py b/egs/librispeech/ASR/pruned_transducer_stateless2/beam_search.py index ed6a6ea82..769cd2a1d 100644 --- a/egs/librispeech/ASR/pruned_transducer_stateless2/beam_search.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless2/beam_search.py @@ -24,7 +24,7 @@ import torch from model import Transducer from icefall.decode import Nbest, one_best_decoding -from icefall.utils import 
get_texts +from icefall.utils import add_eos, add_sos, get_texts def fast_beam_search_one_best( @@ -46,7 +46,7 @@ def fast_beam_search_one_best( model: An instance of `Transducer`. decoding_graph: - Decoding graph used for decoding, may be a TrivialGraph or a HLG. + Decoding graph used for decoding, may be a TrivialGraph or a LG. encoder_out: A tensor of shape (N, T, C) from the encoder. encoder_out_lens: @@ -106,7 +106,7 @@ def fast_beam_search_nbest_LG( model: An instance of `Transducer`. decoding_graph: - Decoding graph used for decoding, may be a TrivialGraph or a HLG. + Decoding graph used for decoding, may be a TrivialGraph or a LG. encoder_out: A tensor of shape (N, T, C) from the encoder. encoder_out_lens: @@ -226,7 +226,7 @@ def fast_beam_search_nbest( model: An instance of `Transducer`. decoding_graph: - Decoding graph used for decoding, may be a TrivialGraph or a HLG. + Decoding graph used for decoding, may be a TrivialGraph or a LG. encoder_out: A tensor of shape (N, T, C) from the encoder. encoder_out_lens: @@ -311,7 +311,7 @@ def fast_beam_search_nbest_oracle( model: An instance of `Transducer`. decoding_graph: - Decoding graph used for decoding, may be a TrivialGraph or a HLG. + Decoding graph used for decoding, may be a TrivialGraph or a LG. encoder_out: A tensor of shape (N, T, C) from the encoder. encoder_out_lens: @@ -397,7 +397,7 @@ def fast_beam_search( model: An instance of `Transducer`. decoding_graph: - Decoding graph used for decoding, may be a TrivialGraph or a HLG. + Decoding graph used for decoding, may be a TrivialGraph or a LG. encoder_out: A tensor of shape (N, T, C) from the encoder. encoder_out_lens: @@ -1219,13 +1219,15 @@ def fast_beam_search_with_nbest_rescoring( temperature: float = 1.0, ) -> Dict[str, List[List[int]]]: """It limits the maximum number of symbols per frame to 1. - A lattice is first obtained using modified beam search, and then - the shortest path within the lattice is used as the final output. + A lattice is first obtained using fast beam search, num_path are selected + and rescored using a given language model. The shortest path within the + lattice is used as the final output. + Args: model: An instance of `Transducer`. decoding_graph: - Decoding graph used for decoding, may be a TrivialGraph or a HLG. + Decoding graph used for decoding, may be a TrivialGraph or a LG. encoder_out: A tensor of shape (N, T, C) from the encoder. encoder_out_lens: @@ -1350,3 +1352,190 @@ def fast_beam_search_with_nbest_rescoring( ans[key] = hyps return ans + + +def fast_beam_search_with_nbest_rnn_rescoring( + model: Transducer, + decoding_graph: k2.Fsa, + encoder_out: torch.Tensor, + encoder_out_lens: torch.Tensor, + beam: float, + max_states: int, + max_contexts: int, + ngram_lm_scale_list: List[float], + num_paths: int, + G: k2.Fsa, + sp: spm.SentencePieceProcessor, + word_table: k2.SymbolTable, + rnn_lm_model: torch.nn.Module, + rnn_lm_scale_list: List[float], + oov_word: str = "", + use_double_scores: bool = True, + nbest_scale: float = 0.5, + temperature: float = 1.0, +) -> Dict[str, List[List[int]]]: + """It limits the maximum number of symbols per frame to 1. + A lattice is first obtained using fast beam search, num_path are selected + and rescored using a given language model and a rnn-lm. + The shortest path within the lattice is used as the final output. + + Args: + model: + An instance of `Transducer`. + decoding_graph: + Decoding graph used for decoding, may be a TrivialGraph or a LG. 
+ encoder_out: + A tensor of shape (N, T, C) from the encoder. + encoder_out_lens: + A tensor of shape (N,) containing the number of frames in `encoder_out` + before padding. + beam: + Beam value, similar to the beam used in Kaldi. + max_states: + Max states per stream per frame. + max_contexts: + Max contexts pre stream per frame. + ngram_lm_scale_list: + A list of floats representing LM score scales. + num_paths: + Number of paths to extract from the decoded lattice. + G: + An FsaVec containing only a single FSA. It is an n-gram LM. + sp: + The BPE model. + word_table: + The word symbol table. + rnn_lm_model: + A rnn-lm model used for LM rescoring + rnn_lm_scale_list: + A list of floats representing RNN score scales. + oov_word: + OOV words are replaced with this word. + use_double_scores: + True to use double precision for computation. False to use + single precision. + nbest_scale: + It's the scale applied to the lattice.scores. A smaller value + yields more unique paths. + temperature: + Softmax temperature. + Returns: + Return the decoded result in a dict, where the key has the form + 'ngram_lm_scale_xx' and the value is the decoded results. `xx` is the + ngram LM scale value used during decoding, i.e., 0.1. + """ + lattice = fast_beam_search( + model=model, + decoding_graph=decoding_graph, + encoder_out=encoder_out, + encoder_out_lens=encoder_out_lens, + beam=beam, + max_states=max_states, + max_contexts=max_contexts, + temperature=temperature, + ) + + nbest = Nbest.from_lattice( + lattice=lattice, + num_paths=num_paths, + use_double_scores=use_double_scores, + nbest_scale=nbest_scale, + ) + # at this point, nbest.fsa.scores are all zeros. + + nbest = nbest.intersect(lattice) + # Now nbest.fsa.scores contains acoustic scores + + am_scores = nbest.tot_scores() + + # Now we need to compute the LM scores of each path. + # (1) Get the token IDs of each Path. 
We assume the decoding_graph + # is an acceptor, i.e., lattice is also an acceptor + tokens_shape = nbest.fsa.arcs.shape().remove_axis(1) # [path][arc] + + tokens = k2.RaggedTensor(tokens_shape, nbest.fsa.labels.contiguous()) + tokens = tokens.remove_values_leq(0) # remove -1 and 0 + + token_list: List[List[int]] = tokens.tolist() + word_list: List[List[str]] = sp.decode(token_list) + + assert isinstance(oov_word, str), oov_word + assert oov_word in word_table, oov_word + oov_word_id = word_table[oov_word] + + word_ids_list: List[List[int]] = [] + + for words in word_list: + this_word_ids = [] + for w in words.split(): + if w in word_table: + this_word_ids.append(word_table[w]) + else: + this_word_ids.append(oov_word_id) + word_ids_list.append(this_word_ids) + + word_fsas = k2.linear_fsa(word_ids_list, device=lattice.device) + word_fsas_with_self_loops = k2.add_epsilon_self_loops(word_fsas) + + num_unique_paths = len(word_ids_list) + + b_to_a_map = torch.zeros( + num_unique_paths, + dtype=torch.int32, + device=lattice.device, + ) + + rescored_word_fsas = k2.intersect_device( + a_fsas=G, + b_fsas=word_fsas_with_self_loops, + b_to_a_map=b_to_a_map, + sorted_match_a=True, + ret_arc_maps=False, + ) + + rescored_word_fsas = k2.remove_epsilon_self_loops(rescored_word_fsas) + rescored_word_fsas = k2.top_sort(k2.connect(rescored_word_fsas)) + ngram_lm_scores = rescored_word_fsas.get_tot_scores( + use_double_scores=True, + log_semiring=False, + ) + + # Now RNN-LM + blank_id = model.decoder.blank_id + sos_id = sp.piece_to_id("sos_id") + eos_id = sp.piece_to_id("eos_id") + + sos_tokens = add_sos(tokens, sos_id) + tokens_eos = add_eos(tokens, eos_id) + sos_tokens_row_splits = sos_tokens.shape.row_splits(1) + sentence_lengths = sos_tokens_row_splits[1:] - sos_tokens_row_splits[:-1] + + x_tokens = sos_tokens.pad(mode="constant", padding_value=blank_id) + y_tokens = tokens_eos.pad(mode="constant", padding_value=blank_id) + + x_tokens = x_tokens.to(torch.int64) + y_tokens = y_tokens.to(torch.int64) + sentence_lengths = sentence_lengths.to(torch.int64) + + rnn_lm_nll = rnn_lm_model(x=x_tokens, y=y_tokens, lengths=sentence_lengths) + assert rnn_lm_nll.ndim == 2 + assert rnn_lm_nll.shape[0] == len(token_list) + rnn_lm_scores = -1 * rnn_lm_nll.sum(dim=1) + + ans: Dict[str, List[List[int]]] = {} + for n_scale in ngram_lm_scale_list: + for rnn_scale in rnn_lm_scale_list: + key = f"ngram_lm_scale_{n_scale}_rnn_lm_scale_{rnn_scale}" + tot_scores = ( + am_scores.values + + n_scale * ngram_lm_scores + + rnn_scale * rnn_lm_scores + ) + ragged_tot_scores = k2.RaggedTensor(nbest.shape, tot_scores) + max_indexes = ragged_tot_scores.argmax() + best_path = k2.index_fsa(nbest.fsa, max_indexes) + hyps = get_texts(best_path) + + ans[key] = hyps + + return ans diff --git a/egs/librispeech/ASR/pruned_transducer_stateless3/decode.py b/egs/librispeech/ASR/pruned_transducer_stateless3/decode.py index 8f55413e4..c3a03f2e1 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless3/decode.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless3/decode.py @@ -112,6 +112,7 @@ from beam_search import ( fast_beam_search_nbest_oracle, fast_beam_search_one_best, fast_beam_search_with_nbest_rescoring, + fast_beam_search_with_nbest_rnn_rescoring, greedy_search, greedy_search_batch, modified_beam_search, @@ -125,8 +126,10 @@ from icefall.checkpoint import ( load_checkpoint, ) from icefall.lexicon import Lexicon +from icefall.rnn_lm.model import RnnLmModel from icefall.utils import ( AttributeDict, + load_averaged_model, setup_logger, 
store_transcripts, str2bool, @@ -342,6 +345,62 @@ def get_parser(): """, ) + parser.add_argument( + "--rnn-lm-exp-dir", + type=str, + default="rnn_lm/exp", + help="""Used only when --method is rnn-lm. + It specifies the path to RNN LM exp dir. + """, + ) + + parser.add_argument( + "--rnn-lm-epoch", + type=int, + default=7, + help="""Used only when --method is rnn-lm. + It specifies the checkpoint to use. + """, + ) + + parser.add_argument( + "--rnn-lm-avg", + type=int, + default=2, + help="""Used only when --method is rnn-lm. + It specifies the number of checkpoints to average. + """, + ) + + parser.add_argument( + "--rnn-lm-embedding-dim", + type=int, + default=2048, + help="Embedding dim of the model", + ) + + parser.add_argument( + "--rnn-lm-hidden-dim", + type=int, + default=2048, + help="Hidden dim of the model", + ) + + parser.add_argument( + "--rnn-lm-num-layers", + type=int, + default=4, + help="Number of RNN layers the model", + ) + parser.add_argument( + "--rnn-lm-tie-weights", + type=str2bool, + default=True, + help="""True to share the weights between the input embedding layer and the + last output linear layer + """, + ) + add_model_arguments(parser) return parser @@ -355,6 +414,7 @@ def decode_one_batch( word_table: Optional[k2.SymbolTable] = None, decoding_graph: Optional[k2.Fsa] = None, G: Optional[k2.Fsa] = None, + rnn_lm_model: torch.nn.Module = None, ) -> Dict[str, List[List[str]]]: """Decode one batch and return the result in a dict. The dict has the following format: @@ -526,6 +586,30 @@ def decode_one_batch( nbest_scale=params.nbest_scale, temperature=params.temperature, ) + elif params.decoding_method == "fast_beam_search_with_nbest_rnn_rescoring": + ngram_lm_scale_list = [-0.5, -0.2, -0.1, -0.05, -0.02, 0] + ngram_lm_scale_list += [0.01, 0.02, 0.05] + ngram_lm_scale_list += [0.1, 0.3, 0.5, 0.8] + ngram_lm_scale_list += [1.0, 1.5, 2.5, 3] + hyp_tokens = fast_beam_search_with_nbest_rnn_rescoring( + model=model, + decoding_graph=decoding_graph, + encoder_out=encoder_out, + encoder_out_lens=encoder_out_lens, + beam=params.beam, + max_states=params.max_states, + max_contexts=params.max_contexts, + ngram_lm_scale_list=ngram_lm_scale_list, + num_paths=params.num_paths, + G=G, + sp=sp, + word_table=word_table, + rnn_lm_model=rnn_lm_model, + rnn_lm_scale_list=ngram_lm_scale_list, + use_double_scores=True, + nbest_scale=params.nbest_scale, + temperature=params.temperature, + ) else: batch_size = encoder_out.size(0) @@ -571,7 +655,10 @@ def decode_one_batch( f"temperature_{params.temperature}" ): hyps } - elif params.decoding_method == "fast_beam_search_with_nbest_rescoring": + elif params.decoding_method in [ + "fast_beam_search_with_nbest_rescoring", + "fast_beam_search_with_nbest_rnn_rescoring", + ]: prefix = ( f"beam_{params.beam}_" f"max_contexts_{params.max_contexts}_" @@ -612,6 +699,7 @@ def decode_dataset( word_table: Optional[k2.SymbolTable] = None, decoding_graph: Optional[k2.Fsa] = None, G: Optional[k2.Fsa] = None, + rnn_lm_model: torch.nn.Module = None, ) -> Dict[str, List[Tuple[List[str], List[str]]]]: """Decode dataset. 
@@ -666,6 +754,7 @@ def decode_dataset( decoding_graph=decoding_graph, batch=batch, G=G, + rnn_lm_model=rnn_lm_model, ) for name, hyps in hyps_dict.items(): @@ -816,6 +905,7 @@ def main(): "fast_beam_search_nbest_oracle", "modified_beam_search", "fast_beam_search_with_nbest_rescoring", + "fast_beam_search_with_nbest_rnn_rescoring", ) params.res_dir = params.exp_dir / params.decoding_method @@ -919,7 +1009,10 @@ def main(): torch.load(lg_filename, map_location=device) ) decoding_graph.scores *= params.ngram_lm_scale - elif params.decoding_method == "fast_beam_search_with_nbest_rescoring": + elif params.decoding_method in [ + "fast_beam_search_with_nbest_rescoring", + "fast_beam_search_with_nbest_rnn_rescoring", + ]: logging.info(f"Loading word symbol table from {params.words_txt}") word_table = k2.SymbolTable.from_file(params.words_txt) @@ -932,14 +1025,43 @@ def main(): params.vocab_size - 1, device=device ) logging.info(f"G properties_str: {G.properties_str}") + rnn_lm_model = None + if ( + params.decoding_method + == "fast_beam_search_with_nbest_rnn_rescoring" + ): + rnn_lm_model = RnnLmModel( + vocab_size=params.vocab_size, + embedding_dim=params.rnn_lm_embedding_dim, + hidden_dim=params.rnn_lm_hidden_dim, + num_layers=params.rnn_lm_num_layers, + tie_weights=params.rnn_lm_tie_weights, + ) + if params.rnn_lm_avg == 1: + load_checkpoint( + f"{params.rnn_lm_exp_dir}/epoch-{params.rnn_lm_epoch}.pt", + rnn_lm_model, + ) + rnn_lm_model.to(device) + else: + rnn_lm_model = load_averaged_model( + params.rnn_lm_exp_dir, + rnn_lm_model, + params.rnn_lm_epoch, + params.rnn_lm_avg, + device, + ) + rnn_lm_model.eval() else: word_table = None decoding_graph = k2.trivial_graph( params.vocab_size - 1, device=device ) + rnn_lm_model = None else: decoding_graph = None word_table = None + rnn_lm_model = None num_param = sum([p.numel() for p in model.parameters()]) logging.info(f"Number of model parameters: {num_param}") @@ -965,6 +1087,7 @@ def main(): word_table=word_table, decoding_graph=decoding_graph, G=G, + rnn_lm_model=rnn_lm_model, ) save_results( diff --git a/icefall/decode.py b/icefall/decode.py index 680e29619..e596876f4 100644 --- a/icefall/decode.py +++ b/icefall/decode.py @@ -1006,6 +1006,8 @@ def rescore_with_rnn_lm( An FsaVec with axes [utt][state][arc]. num_paths: Number of paths to extract from the given lattice for rescoring. + rnn_lm_model: + A rnn-lm model used for LM rescoring model: A transformer model. See the class "Transformer" in conformer_ctc/transformer.py for its interface. From a35b28cd8d43de40337d8b8bf347bd98fba6e02d Mon Sep 17 00:00:00 2001 From: yaozengwei Date: Tue, 19 Jul 2022 14:29:23 +0800 Subject: [PATCH 07/38] fix for case of None stats --- icefall/diagnostics.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/icefall/diagnostics.py b/icefall/diagnostics.py index 4850308d9..2e6087ad5 100644 --- a/icefall/diagnostics.py +++ b/icefall/diagnostics.py @@ -178,6 +178,9 @@ class TensorDiagnostic(object): def print_diagnostics(self): """Print diagnostics for each dimension of the tensor.""" + if self.stats is None: + print(f"Warning: the stats of {self.name} is None.") + return for dim, this_dim_stats in enumerate(self.stats): for stats_type, stats_list in this_dim_stats.items(): # stats_type could be "rms", "value", "abs", "eigs", "positive". 
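Editorial note (not part of the patches above): the new `fast_beam_search_with_nbest_rnn_rescoring` sweeps a grid of n-gram and RNN-LM scales and, for each pair, picks the best n-best path by total score. A minimal sketch of that combination step in plain PyTorch, with toy score tensors standing in for the k2 ragged scores and a single utterance so the per-utterance argmax reduces to a plain argmax:

```python
# Illustrative only: the two-scale grid search over n-best path scores.
# am_scores / ngram_lm_scores / rnn_lm_scores are random stand-ins here.
import torch

num_paths = 4
am_scores = torch.randn(num_paths)        # acoustic scores of the n-best paths
ngram_lm_scores = torch.randn(num_paths)  # scores from the n-gram LM (G)
rnn_lm_scores = torch.randn(num_paths)    # negated NLL from the RNN LM

ngram_lm_scale_list = [0.1, 0.3, 0.5]
rnn_lm_scale_list = [0.1, 0.3, 0.5]

best_path_per_key = {}
for n_scale in ngram_lm_scale_list:
    for rnn_scale in rnn_lm_scale_list:
        tot_scores = (
            am_scores
            + n_scale * ngram_lm_scores
            + rnn_scale * rnn_lm_scores
        )
        key = f"ngram_lm_scale_{n_scale}_rnn_lm_scale_{rnn_scale}"
        # In the real code this argmax is taken per utterance over a
        # k2.RaggedTensor; with one utterance it is a plain argmax.
        best_path_per_key[key] = int(tot_scores.argmax())

print(best_path_per_key)
```

Each key in the returned dict then maps to the hypotheses decoded from the best path under that particular scale pair, which is what `decode_one_batch` turns into per-key WER results.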
From 3d2986b4c219ae3d65a1a367ac8f1a7f3653071a Mon Sep 17 00:00:00 2001 From: Mingshuang Luo <37799481+luomingshuang@users.noreply.github.com> Date: Wed, 20 Jul 2022 21:32:53 +0800 Subject: [PATCH 08/38] Update conformer.py for aishell4 (#484) * update conformer.py for aishell4 * update conformer.py * add strict=False when model.load_state_dict --- .../pruned_transducer_stateless5/conformer.py | 1333 +---------------- .../pruned_transducer_stateless5/decode.py | 14 +- .../pruned_transducer_stateless5/export.py | 14 +- 3 files changed, 21 insertions(+), 1340 deletions(-) mode change 100644 => 120000 egs/aishell4/ASR/pruned_transducer_stateless5/conformer.py diff --git a/egs/aishell4/ASR/pruned_transducer_stateless5/conformer.py b/egs/aishell4/ASR/pruned_transducer_stateless5/conformer.py deleted file mode 100644 index d7b29f37c..000000000 --- a/egs/aishell4/ASR/pruned_transducer_stateless5/conformer.py +++ /dev/null @@ -1,1332 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) 2021 University of Chinese Academy of Sciences (author: Han Zhu) -# -# See ../../../../LICENSE for clarification regarding multiple authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import copy -import math -import warnings -from typing import List, Optional, Tuple - -import torch -from encoder_interface import EncoderInterface -from scaling import ( - ActivationBalancer, - BasicNorm, - DoubleSwish, - ScaledConv1d, - ScaledConv2d, - ScaledLinear, -) -from torch import Tensor, nn - -from icefall.utils import make_pad_mask - - -class Conformer(EncoderInterface): - """ - Args: - num_features (int): Number of input features - subsampling_factor (int): subsampling factor of encoder (the convolution layers before transformers) - d_model (int): attention dimension, also the output dimension - nhead (int): number of head - dim_feedforward (int): feedforward dimention - num_encoder_layers (int): number of encoder layers - dropout (float): dropout rate - layer_dropout (float): layer-dropout rate. - cnn_module_kernel (int): Kernel size of convolution module - vgg_frontend (bool): whether to use vgg frontend. - """ - - def __init__( - self, - num_features: int, - subsampling_factor: int = 4, - d_model: int = 256, - nhead: int = 4, - dim_feedforward: int = 2048, - num_encoder_layers: int = 12, - dropout: float = 0.1, - layer_dropout: float = 0.075, - cnn_module_kernel: int = 31, - aux_layer_period: int = 3, - ) -> None: - super(Conformer, self).__init__() - - self.num_features = num_features - self.subsampling_factor = subsampling_factor - if subsampling_factor != 4: - raise NotImplementedError("Support only 'subsampling_factor=4'.") - - # self.encoder_embed converts the input of shape (N, T, num_features) - # to the shape (N, T//subsampling_factor, d_model). 
- # That is, it does two things simultaneously: - # (1) subsampling: T -> T//subsampling_factor - # (2) embedding: num_features -> d_model - self.encoder_embed = Conv2dSubsampling(num_features, d_model) - - self.encoder_pos = RelPositionalEncoding(d_model, dropout) - - encoder_layer = ConformerEncoderLayer( - d_model, - nhead, - dim_feedforward, - dropout, - layer_dropout, - cnn_module_kernel, - ) - self.encoder = ConformerEncoder( - encoder_layer, - num_encoder_layers, - aux_layers=list(range(0, num_encoder_layers - 1, aux_layer_period)), - ) - - def forward( - self, x: torch.Tensor, x_lens: torch.Tensor, warmup: float = 1.0 - ) -> Tuple[torch.Tensor, torch.Tensor]: - """ - Args: - x: - The input tensor. Its shape is (batch_size, seq_len, feature_dim). - x_lens: - A tensor of shape (batch_size,) containing the number of frames in - `x` before padding. - warmup: - A floating point value that gradually increases from 0 throughout - training; when it is >= 1.0 we are "fully warmed up". It is used - to turn modules on sequentially. - Returns: - Return a tuple containing 2 tensors: - - embeddings: its shape is (batch_size, output_seq_len, d_model) - - lengths, a tensor of shape (batch_size,) containing the number - of frames in `embeddings` before padding. - """ - x = self.encoder_embed(x) - x, pos_emb = self.encoder_pos(x) - x = x.permute(1, 0, 2) # (N, T, C) -> (T, N, C) - - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - # Caution: We assume the subsampling factor is 4! - lengths = ((x_lens - 1) // 2 - 1) // 2 - assert x.size(0) == lengths.max().item() - mask = make_pad_mask(lengths) - - x = self.encoder( - x, pos_emb, src_key_padding_mask=mask, warmup=warmup - ) # (T, N, C) - - x = x.permute(1, 0, 2) # (T, N, C) ->(N, T, C) - - return x, lengths - - -class ConformerEncoderLayer(nn.Module): - """ - ConformerEncoderLayer is made up of self-attn, feedforward and convolution networks. - See: "Conformer: Convolution-augmented Transformer for Speech Recognition" - - Args: - d_model: the number of expected features in the input (required). - nhead: the number of heads in the multiheadattention models (required). - dim_feedforward: the dimension of the feedforward network model (default=2048). - dropout: the dropout value (default=0.1). - cnn_module_kernel (int): Kernel size of convolution module. 
- - Examples:: - >>> encoder_layer = ConformerEncoderLayer(d_model=512, nhead=8) - >>> src = torch.rand(10, 32, 512) - >>> pos_emb = torch.rand(32, 19, 512) - >>> out = encoder_layer(src, pos_emb) - """ - - def __init__( - self, - d_model: int, - nhead: int, - dim_feedforward: int = 2048, - dropout: float = 0.1, - layer_dropout: float = 0.075, - cnn_module_kernel: int = 31, - ) -> None: - super(ConformerEncoderLayer, self).__init__() - - self.layer_dropout = layer_dropout - - self.d_model = d_model - - self.self_attn = RelPositionMultiheadAttention( - d_model, nhead, dropout=0.0 - ) - - self.feed_forward = nn.Sequential( - ScaledLinear(d_model, dim_feedforward), - ActivationBalancer(channel_dim=-1), - DoubleSwish(), - nn.Dropout(dropout), - ScaledLinear(dim_feedforward, d_model, initial_scale=0.25), - ) - - self.feed_forward_macaron = nn.Sequential( - ScaledLinear(d_model, dim_feedforward), - ActivationBalancer(channel_dim=-1), - DoubleSwish(), - nn.Dropout(dropout), - ScaledLinear(dim_feedforward, d_model, initial_scale=0.25), - ) - - self.conv_module = ConvolutionModule(d_model, cnn_module_kernel) - - self.norm_final = BasicNorm(d_model) - - # try to ensure the output is close to zero-mean (or at least, zero-median). - self.balancer = ActivationBalancer( - channel_dim=-1, min_positive=0.45, max_positive=0.55, max_abs=6.0 - ) - - self.dropout = nn.Dropout(dropout) - - def forward( - self, - src: Tensor, - pos_emb: Tensor, - src_mask: Optional[Tensor] = None, - src_key_padding_mask: Optional[Tensor] = None, - warmup: float = 1.0, - ) -> Tensor: - """ - Pass the input through the encoder layer. - - Args: - src: the sequence to the encoder layer (required). - pos_emb: Positional embedding tensor (required). - src_mask: the mask for the src sequence (optional). - src_key_padding_mask: the mask for the src keys per batch (optional). - warmup: controls selective bypass of of layers; if < 1.0, we will - bypass layers more frequently. - - Shape: - src: (S, N, E). - pos_emb: (N, 2*S-1, E) - src_mask: (S, S). - src_key_padding_mask: (N, S). - S is the source sequence length, N is the batch size, E is the feature number - """ - src_orig = src - - warmup_scale = min(0.1 + warmup, 1.0) - # alpha = 1.0 means fully use this encoder layer, 0.0 would mean - # completely bypass it. - if self.training: - alpha = ( - warmup_scale - if torch.rand(()).item() <= (1.0 - self.layer_dropout) - else 0.1 - ) - else: - alpha = 1.0 - - # macaron style feed forward module - src = src + self.dropout(self.feed_forward_macaron(src)) - - # multi-headed self-attention module - src_att = self.self_attn( - src, - src, - src, - pos_emb=pos_emb, - attn_mask=src_mask, - key_padding_mask=src_key_padding_mask, - )[0] - src = src + self.dropout(src_att) - - # convolution module - src = src + self.dropout(self.conv_module(src)) - - # feed forward module - src = src + self.dropout(self.feed_forward(src)) - - src = self.norm_final(self.balancer(src)) - - if alpha != 1.0: - src = alpha * src + (1 - alpha) * src_orig - - return src - - -class ConformerEncoder(nn.Module): - r"""ConformerEncoder is a stack of N encoder layers - - Args: - encoder_layer: an instance of the ConformerEncoderLayer() class (required). - num_layers: the number of sub-encoder-layers in the encoder (required). 
- - Examples:: - >>> encoder_layer = ConformerEncoderLayer(d_model=512, nhead=8) - >>> conformer_encoder = ConformerEncoder(encoder_layer, num_layers=6) - >>> src = torch.rand(10, 32, 512) - >>> pos_emb = torch.rand(32, 19, 512) - >>> out = conformer_encoder(src, pos_emb) - """ - - def __init__( - self, - encoder_layer: nn.Module, - num_layers: int, - aux_layers: List[int], - ) -> None: - super().__init__() - self.layers = nn.ModuleList( - [copy.deepcopy(encoder_layer) for i in range(num_layers)] - ) - self.num_layers = num_layers - - assert num_layers - 1 not in aux_layers - self.aux_layers = set(aux_layers + [num_layers - 1]) - - num_channels = encoder_layer.norm_final.num_channels - self.combiner = RandomCombine( - num_inputs=len(self.aux_layers), - num_channels=num_channels, - final_weight=0.5, - pure_prob=0.333, - stddev=2.0, - ) - - def forward( - self, - src: Tensor, - pos_emb: Tensor, - mask: Optional[Tensor] = None, - src_key_padding_mask: Optional[Tensor] = None, - warmup: float = 1.0, - ) -> Tensor: - r"""Pass the input through the encoder layers in turn. - - Args: - src: the sequence to the encoder (required). - pos_emb: Positional embedding tensor (required). - mask: the mask for the src sequence (optional). - src_key_padding_mask: the mask for the src keys per batch (optional). - - Shape: - src: (S, N, E). - pos_emb: (N, 2*S-1, E) - mask: (S, S). - src_key_padding_mask: (N, S). - S is the source sequence length, T is the target sequence length, N is the batch size, E is the feature number - - """ - output = src - - outputs = [] - - for i, mod in enumerate(self.layers): - output = mod( - output, - pos_emb, - src_mask=mask, - src_key_padding_mask=src_key_padding_mask, - warmup=warmup, - ) - if i in self.aux_layers: - outputs.append(output) - - output = self.combiner(outputs) - - return output - - -class RelPositionalEncoding(torch.nn.Module): - """Relative positional encoding module. - - See : Appendix B in "Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context" - Modified from https://github.com/espnet/espnet/blob/master/espnet/nets/pytorch_backend/transformer/embedding.py - - Args: - d_model: Embedding dimension. - dropout_rate: Dropout rate. - max_len: Maximum input length. - - """ - - def __init__( - self, d_model: int, dropout_rate: float, max_len: int = 5000 - ) -> None: - """Construct an PositionalEncoding object.""" - super(RelPositionalEncoding, self).__init__() - self.d_model = d_model - self.dropout = torch.nn.Dropout(p=dropout_rate) - self.pe = None - self.extend_pe(torch.tensor(0.0).expand(1, max_len)) - - def extend_pe(self, x: Tensor) -> None: - """Reset the positional encodings.""" - if self.pe is not None: - # self.pe contains both positive and negative parts - # the length of self.pe is 2 * input_len - 1 - if self.pe.size(1) >= x.size(1) * 2 - 1: - # Note: TorchScript doesn't implement operator== for torch.Device - if self.pe.dtype != x.dtype or str(self.pe.device) != str( - x.device - ): - self.pe = self.pe.to(dtype=x.dtype, device=x.device) - return - # Suppose `i` means to the position of query vector and `j` means the - # position of key vector. We use position relative positions when keys - # are to the left (i>j) and negative relative positions otherwise (i Tuple[Tensor, Tensor]: - """Add positional encoding. - - Args: - x (torch.Tensor): Input tensor (batch, time, `*`). - - Returns: - torch.Tensor: Encoded tensor (batch, time, `*`). - torch.Tensor: Encoded tensor (batch, 2*time-1, `*`). 
- - """ - self.extend_pe(x) - pos_emb = self.pe[ - :, - self.pe.size(1) // 2 - - x.size(1) - + 1 : self.pe.size(1) // 2 # noqa E203 - + x.size(1), - ] - return self.dropout(x), self.dropout(pos_emb) - - -class RelPositionMultiheadAttention(nn.Module): - r"""Multi-Head Attention layer with relative position encoding - - See reference: "Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context" - - Args: - embed_dim: total dimension of the model. - num_heads: parallel attention heads. - dropout: a Dropout layer on attn_output_weights. Default: 0.0. - - Examples:: - - >>> rel_pos_multihead_attn = RelPositionMultiheadAttention(embed_dim, num_heads) - >>> attn_output, attn_output_weights = multihead_attn(query, key, value, pos_emb) - """ - - def __init__( - self, - embed_dim: int, - num_heads: int, - dropout: float = 0.0, - ) -> None: - super(RelPositionMultiheadAttention, self).__init__() - self.embed_dim = embed_dim - self.num_heads = num_heads - self.dropout = dropout - self.head_dim = embed_dim // num_heads - assert ( - self.head_dim * num_heads == self.embed_dim - ), "embed_dim must be divisible by num_heads" - - self.in_proj = ScaledLinear(embed_dim, 3 * embed_dim, bias=True) - self.out_proj = ScaledLinear( - embed_dim, embed_dim, bias=True, initial_scale=0.25 - ) - - # linear transformation for positional encoding. - self.linear_pos = ScaledLinear(embed_dim, embed_dim, bias=False) - # these two learnable bias are used in matrix c and matrix d - # as described in "Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context" Section 3.3 - self.pos_bias_u = nn.Parameter(torch.Tensor(num_heads, self.head_dim)) - self.pos_bias_v = nn.Parameter(torch.Tensor(num_heads, self.head_dim)) - self.pos_bias_u_scale = nn.Parameter(torch.zeros(()).detach()) - self.pos_bias_v_scale = nn.Parameter(torch.zeros(()).detach()) - self._reset_parameters() - - def _pos_bias_u(self): - return self.pos_bias_u * self.pos_bias_u_scale.exp() - - def _pos_bias_v(self): - return self.pos_bias_v * self.pos_bias_v_scale.exp() - - def _reset_parameters(self) -> None: - nn.init.normal_(self.pos_bias_u, std=0.01) - nn.init.normal_(self.pos_bias_v, std=0.01) - - def forward( - self, - query: Tensor, - key: Tensor, - value: Tensor, - pos_emb: Tensor, - key_padding_mask: Optional[Tensor] = None, - need_weights: bool = True, - attn_mask: Optional[Tensor] = None, - ) -> Tuple[Tensor, Optional[Tensor]]: - r""" - Args: - query, key, value: map a query and a set of key-value pairs to an output. - pos_emb: Positional embedding tensor - key_padding_mask: if provided, specified padding elements in the key will - be ignored by the attention. When given a binary mask and a value is True, - the corresponding value on the attention layer will be ignored. When given - a byte mask and a value is non-zero, the corresponding value on the attention - layer will be ignored - need_weights: output attn_output_weights. - attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all - the batches while a 3D mask allows to specify a different mask for the entries of each batch. - - Shape: - - Inputs: - - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is - the embedding dimension. - - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is - the embedding dimension. - - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is - the embedding dimension. 
- - pos_emb: :math:`(N, 2*L-1, E)` where L is the target sequence length, N is the batch size, E is - the embedding dimension. - - key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length. - If a ByteTensor is provided, the non-zero positions will be ignored while the position - with the zero positions will be unchanged. If a BoolTensor is provided, the positions with the - value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged. - - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length. - 3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length, - S is the source sequence length. attn_mask ensure that position i is allowed to attend the unmasked - positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend - while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True`` - is not allowed to attend while ``False`` values will be unchanged. If a FloatTensor - is provided, it will be added to the attention weight. - - - Outputs: - - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, - E is the embedding dimension. - - attn_output_weights: :math:`(N, L, S)` where N is the batch size, - L is the target sequence length, S is the source sequence length. - """ - return self.multi_head_attention_forward( - query, - key, - value, - pos_emb, - self.embed_dim, - self.num_heads, - self.in_proj.get_weight(), - self.in_proj.get_bias(), - self.dropout, - self.out_proj.get_weight(), - self.out_proj.get_bias(), - training=self.training, - key_padding_mask=key_padding_mask, - need_weights=need_weights, - attn_mask=attn_mask, - ) - - def rel_shift(self, x: Tensor) -> Tensor: - """Compute relative positional encoding. - - Args: - x: Input tensor (batch, head, time1, 2*time1-1). - time1 means the length of query vector. - - Returns: - Tensor: tensor of shape (batch, head, time1, time2) - (note: time2 has the same value as time1, but it is for - the key, while time1 is for the query). - """ - (batch_size, num_heads, time1, n) = x.shape - assert n == 2 * time1 - 1 - # Note: TorchScript requires explicit arg for stride() - batch_stride = x.stride(0) - head_stride = x.stride(1) - time1_stride = x.stride(2) - n_stride = x.stride(3) - return x.as_strided( - (batch_size, num_heads, time1, time1), - (batch_stride, head_stride, time1_stride - n_stride, n_stride), - storage_offset=n_stride * (time1 - 1), - ) - - def multi_head_attention_forward( - self, - query: Tensor, - key: Tensor, - value: Tensor, - pos_emb: Tensor, - embed_dim_to_check: int, - num_heads: int, - in_proj_weight: Tensor, - in_proj_bias: Tensor, - dropout_p: float, - out_proj_weight: Tensor, - out_proj_bias: Tensor, - training: bool = True, - key_padding_mask: Optional[Tensor] = None, - need_weights: bool = True, - attn_mask: Optional[Tensor] = None, - ) -> Tuple[Tensor, Optional[Tensor]]: - r""" - Args: - query, key, value: map a query and a set of key-value pairs to an output. - pos_emb: Positional embedding tensor - embed_dim_to_check: total dimension of the model. - num_heads: parallel attention heads. - in_proj_weight, in_proj_bias: input projection weight and bias. - dropout_p: probability of an element to be zeroed. - out_proj_weight, out_proj_bias: the output projection weight and bias. - training: apply dropout if is ``True``. 
- key_padding_mask: if provided, specified padding elements in the key will - be ignored by the attention. This is an binary mask. When the value is True, - the corresponding value on the attention layer will be filled with -inf. - need_weights: output attn_output_weights. - attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all - the batches while a 3D mask allows to specify a different mask for the entries of each batch. - - Shape: - Inputs: - - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is - the embedding dimension. - - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is - the embedding dimension. - - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is - the embedding dimension. - - pos_emb: :math:`(N, 2*L-1, E)` or :math:`(1, 2*L-1, E)` where L is the target sequence - length, N is the batch size, E is the embedding dimension. - - key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length. - If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions - will be unchanged. If a BoolTensor is provided, the positions with the - value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged. - - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length. - 3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length, - S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked - positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend - while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True`` - are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor - is provided, it will be added to the attention weight. - - Outputs: - - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, - E is the embedding dimension. - - attn_output_weights: :math:`(N, L, S)` where N is the batch size, - L is the target sequence length, S is the source sequence length. 
- """ - - tgt_len, bsz, embed_dim = query.size() - assert embed_dim == embed_dim_to_check - assert key.size(0) == value.size(0) and key.size(1) == value.size(1) - - head_dim = embed_dim // num_heads - assert ( - head_dim * num_heads == embed_dim - ), "embed_dim must be divisible by num_heads" - - scaling = float(head_dim) ** -0.5 - - if torch.equal(query, key) and torch.equal(key, value): - # self-attention - q, k, v = nn.functional.linear( - query, in_proj_weight, in_proj_bias - ).chunk(3, dim=-1) - - elif torch.equal(key, value): - # encoder-decoder attention - # This is inline in_proj function with in_proj_weight and in_proj_bias - _b = in_proj_bias - _start = 0 - _end = embed_dim - _w = in_proj_weight[_start:_end, :] - if _b is not None: - _b = _b[_start:_end] - q = nn.functional.linear(query, _w, _b) - - # This is inline in_proj function with in_proj_weight and in_proj_bias - _b = in_proj_bias - _start = embed_dim - _end = None - _w = in_proj_weight[_start:, :] - if _b is not None: - _b = _b[_start:] - k, v = nn.functional.linear(key, _w, _b).chunk(2, dim=-1) - - else: - # This is inline in_proj function with in_proj_weight and in_proj_bias - _b = in_proj_bias - _start = 0 - _end = embed_dim - _w = in_proj_weight[_start:_end, :] - if _b is not None: - _b = _b[_start:_end] - q = nn.functional.linear(query, _w, _b) - - # This is inline in_proj function with in_proj_weight and in_proj_bias - _b = in_proj_bias - _start = embed_dim - _end = embed_dim * 2 - _w = in_proj_weight[_start:_end, :] - if _b is not None: - _b = _b[_start:_end] - k = nn.functional.linear(key, _w, _b) - - # This is inline in_proj function with in_proj_weight and in_proj_bias - _b = in_proj_bias - _start = embed_dim * 2 - _end = None - _w = in_proj_weight[_start:, :] - if _b is not None: - _b = _b[_start:] - v = nn.functional.linear(value, _w, _b) - - if attn_mask is not None: - assert ( - attn_mask.dtype == torch.float32 - or attn_mask.dtype == torch.float64 - or attn_mask.dtype == torch.float16 - or attn_mask.dtype == torch.uint8 - or attn_mask.dtype == torch.bool - ), "Only float, byte, and bool types are supported for attn_mask, not {}".format( - attn_mask.dtype - ) - if attn_mask.dtype == torch.uint8: - warnings.warn( - "Byte tensor for attn_mask is deprecated. Use bool tensor instead." - ) - attn_mask = attn_mask.to(torch.bool) - - if attn_mask.dim() == 2: - attn_mask = attn_mask.unsqueeze(0) - if list(attn_mask.size()) != [1, query.size(0), key.size(0)]: - raise RuntimeError( - "The size of the 2D attn_mask is not correct." - ) - elif attn_mask.dim() == 3: - if list(attn_mask.size()) != [ - bsz * num_heads, - query.size(0), - key.size(0), - ]: - raise RuntimeError( - "The size of the 3D attn_mask is not correct." - ) - else: - raise RuntimeError( - "attn_mask's dimension {} is not supported".format( - attn_mask.dim() - ) - ) - # attn_mask's dim is 3 now. - - # convert ByteTensor key_padding_mask to bool - if ( - key_padding_mask is not None - and key_padding_mask.dtype == torch.uint8 - ): - warnings.warn( - "Byte tensor for key_padding_mask is deprecated. Use bool tensor instead." 
- ) - key_padding_mask = key_padding_mask.to(torch.bool) - - q = (q * scaling).contiguous().view(tgt_len, bsz, num_heads, head_dim) - k = k.contiguous().view(-1, bsz, num_heads, head_dim) - v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1) - - src_len = k.size(0) - - if key_padding_mask is not None: - assert key_padding_mask.size(0) == bsz, "{} == {}".format( - key_padding_mask.size(0), bsz - ) - assert key_padding_mask.size(1) == src_len, "{} == {}".format( - key_padding_mask.size(1), src_len - ) - - q = q.transpose(0, 1) # (batch, time1, head, d_k) - - pos_emb_bsz = pos_emb.size(0) - assert pos_emb_bsz in (1, bsz) # actually it is 1 - p = self.linear_pos(pos_emb).view(pos_emb_bsz, -1, num_heads, head_dim) - p = p.transpose(1, 2) # (batch, head, 2*time1-1, d_k) - - q_with_bias_u = (q + self._pos_bias_u()).transpose( - 1, 2 - ) # (batch, head, time1, d_k) - - q_with_bias_v = (q + self._pos_bias_v()).transpose( - 1, 2 - ) # (batch, head, time1, d_k) - - # compute attention score - # first compute matrix a and matrix c - # as described in "Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context" Section 3.3 - k = k.permute(1, 2, 3, 0) # (batch, head, d_k, time2) - matrix_ac = torch.matmul( - q_with_bias_u, k - ) # (batch, head, time1, time2) - - # compute matrix b and matrix d - matrix_bd = torch.matmul( - q_with_bias_v, p.transpose(-2, -1) - ) # (batch, head, time1, 2*time1-1) - matrix_bd = self.rel_shift(matrix_bd) - - attn_output_weights = ( - matrix_ac + matrix_bd - ) # (batch, head, time1, time2) - - attn_output_weights = attn_output_weights.view( - bsz * num_heads, tgt_len, -1 - ) - - assert list(attn_output_weights.size()) == [ - bsz * num_heads, - tgt_len, - src_len, - ] - - if attn_mask is not None: - if attn_mask.dtype == torch.bool: - attn_output_weights.masked_fill_(attn_mask, float("-inf")) - else: - attn_output_weights += attn_mask - - if key_padding_mask is not None: - attn_output_weights = attn_output_weights.view( - bsz, num_heads, tgt_len, src_len - ) - attn_output_weights = attn_output_weights.masked_fill( - key_padding_mask.unsqueeze(1).unsqueeze(2), - float("-inf"), - ) - attn_output_weights = attn_output_weights.view( - bsz * num_heads, tgt_len, src_len - ) - - attn_output_weights = nn.functional.softmax(attn_output_weights, dim=-1) - attn_output_weights = nn.functional.dropout( - attn_output_weights, p=dropout_p, training=training - ) - - attn_output = torch.bmm(attn_output_weights, v) - assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim] - attn_output = ( - attn_output.transpose(0, 1) - .contiguous() - .view(tgt_len, bsz, embed_dim) - ) - attn_output = nn.functional.linear( - attn_output, out_proj_weight, out_proj_bias - ) - - if need_weights: - # average attention weights over heads - attn_output_weights = attn_output_weights.view( - bsz, num_heads, tgt_len, src_len - ) - return attn_output, attn_output_weights.sum(dim=1) / num_heads - else: - return attn_output, None - - -class ConvolutionModule(nn.Module): - """ConvolutionModule in Conformer model. - Modified from https://github.com/espnet/espnet/blob/master/espnet/nets/pytorch_backend/conformer/convolution.py - - Args: - channels (int): The number of channels of conv layers. - kernel_size (int): Kernerl size of conv layers. - bias (bool): Whether to use bias in conv layers (default=True). 
- - """ - - def __init__( - self, channels: int, kernel_size: int, bias: bool = True - ) -> None: - """Construct an ConvolutionModule object.""" - super(ConvolutionModule, self).__init__() - # kernerl_size should be a odd number for 'SAME' padding - assert (kernel_size - 1) % 2 == 0 - - self.pointwise_conv1 = ScaledConv1d( - channels, - 2 * channels, - kernel_size=1, - stride=1, - padding=0, - bias=bias, - ) - - # after pointwise_conv1 we put x through a gated linear unit (nn.functional.glu). - # For most layers the normal rms value of channels of x seems to be in the range 1 to 4, - # but sometimes, for some reason, for layer 0 the rms ends up being very large, - # between 50 and 100 for different channels. This will cause very peaky and - # sparse derivatives for the sigmoid gating function, which will tend to make - # the loss function not learn effectively. (for most layers the average absolute values - # are in the range 0.5..9.0, and the average p(x>0), i.e. positive proportion, - # at the output of pointwise_conv1.output is around 0.35 to 0.45 for different - # layers, which likely breaks down as 0.5 for the "linear" half and - # 0.2 to 0.3 for the part that goes into the sigmoid. The idea is that if we - # constrain the rms values to a reasonable range via a constraint of max_abs=10.0, - # it will be in a better position to start learning something, i.e. to latch onto - # the correct range. - self.deriv_balancer1 = ActivationBalancer( - channel_dim=1, max_abs=10.0, min_positive=0.05, max_positive=1.0 - ) - - self.depthwise_conv = ScaledConv1d( - channels, - channels, - kernel_size, - stride=1, - padding=(kernel_size - 1) // 2, - groups=channels, - bias=bias, - ) - - self.deriv_balancer2 = ActivationBalancer( - channel_dim=1, min_positive=0.05, max_positive=1.0 - ) - - self.activation = DoubleSwish() - - self.pointwise_conv2 = ScaledConv1d( - channels, - channels, - kernel_size=1, - stride=1, - padding=0, - bias=bias, - initial_scale=0.25, - ) - - def forward(self, x: Tensor) -> Tensor: - """Compute convolution module. - - Args: - x: Input tensor (#time, batch, channels). - - Returns: - Tensor: Output tensor (#time, batch, channels). - - """ - # exchange the temporal dimension and the feature dimension - x = x.permute(1, 2, 0) # (#batch, channels, time). - - # GLU mechanism - x = self.pointwise_conv1(x) # (batch, 2*channels, time) - - x = self.deriv_balancer1(x) - x = nn.functional.glu(x, dim=1) # (batch, channels, time) - - # 1D Depthwise Conv - x = self.depthwise_conv(x) - - x = self.deriv_balancer2(x) - x = self.activation(x) - - x = self.pointwise_conv2(x) # (batch, channel, time) - - return x.permute(2, 0, 1) - - -class Conv2dSubsampling(nn.Module): - """Convolutional 2D subsampling (to 1/4 length). - - Convert an input of shape (N, T, idim) to an output - with shape (N, T', odim), where - T' = ((T-1)//2 - 1)//2, which approximates T' == T//4 - - It is based on - https://github.com/espnet/espnet/blob/master/espnet/nets/pytorch_backend/transformer/subsampling.py # noqa - """ - - def __init__( - self, - in_channels: int, - out_channels: int, - layer1_channels: int = 8, - layer2_channels: int = 32, - layer3_channels: int = 128, - ) -> None: - """ - Args: - in_channels: - Number of channels in. The input shape is (N, T, in_channels). - Caution: It requires: T >=7, in_channels >=7 - out_channels - Output dim. 
The output shape is (N, ((T-1)//2 - 1)//2, out_channels) - layer1_channels: - Number of channels in layer1 - layer1_channels: - Number of channels in layer2 - """ - assert in_channels >= 7 - super().__init__() - - self.conv = nn.Sequential( - ScaledConv2d( - in_channels=1, - out_channels=layer1_channels, - kernel_size=3, - padding=1, - ), - ActivationBalancer(channel_dim=1), - DoubleSwish(), - ScaledConv2d( - in_channels=layer1_channels, - out_channels=layer2_channels, - kernel_size=3, - stride=2, - ), - ActivationBalancer(channel_dim=1), - DoubleSwish(), - ScaledConv2d( - in_channels=layer2_channels, - out_channels=layer3_channels, - kernel_size=3, - stride=2, - ), - ActivationBalancer(channel_dim=1), - DoubleSwish(), - ) - self.out = ScaledLinear( - layer3_channels * (((in_channels - 1) // 2 - 1) // 2), out_channels - ) - # set learn_eps=False because out_norm is preceded by `out`, and `out` - # itself has learned scale, so the extra degree of freedom is not - # needed. - self.out_norm = BasicNorm(out_channels, learn_eps=False) - # constrain median of output to be close to zero. - self.out_balancer = ActivationBalancer( - channel_dim=-1, min_positive=0.45, max_positive=0.55 - ) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - """Subsample x. - - Args: - x: - Its shape is (N, T, idim). - - Returns: - Return a tensor of shape (N, ((T-1)//2 - 1)//2, odim) - """ - # On entry, x is (N, T, idim) - x = x.unsqueeze(1) # (N, T, idim) -> (N, 1, T, idim) i.e., (N, C, H, W) - x = self.conv(x) - # Now x is of shape (N, odim, ((T-1)//2 - 1)//2, ((idim-1)//2 - 1)//2) - b, c, t, f = x.size() - x = self.out(x.transpose(1, 2).contiguous().view(b, t, c * f)) - # Now x is of shape (N, ((T-1)//2 - 1))//2, odim) - x = self.out_norm(x) - x = self.out_balancer(x) - return x - - -class RandomCombine(nn.Module): - """ - This module combines a list of Tensors, all with the same shape, to - produce a single output of that same shape which, in training time, - is a random combination of all the inputs; but which in test time - will be just the last input. - - All but the last input will have a linear transform before we - randomly combine them; these linear transforms will be initialized - to the identity transform. - - The idea is that the list of Tensors will be a list of outputs of multiple - conformer layers. This has a similar effect as iterated loss. (See: - DEJA-VU: DOUBLE FEATURE PRESENTATION AND ITERATED LOSS IN DEEP TRANSFORMER - NETWORKS). - """ - - def __init__( - self, - num_inputs: int, - num_channels: int, - final_weight: float = 0.5, - pure_prob: float = 0.5, - stddev: float = 2.0, - ) -> None: - """ - Args: - num_inputs: - The number of tensor inputs, which equals the number of layers' - outputs that are fed into this module. E.g. in an 18-layer neural - net if we output layers 16, 12, 18, num_inputs would be 3. - num_channels: - The number of channels on the input, e.g. 512. - final_weight: - The amount of weight or probability we assign to the - final layer when randomly choosing layers or when choosing - continuous layer weights. - pure_prob: - The probability, on each frame, with which we choose - only a single layer to output (rather than an interpolation) - stddev: - A standard deviation that we add to log-probs for computing - randomized weights. - - The method of choosing which layers, or combinations of layers, to use, - is conceptually as follows:: - - With probability `pure_prob`:: - With probability `final_weight`: choose final layer, - Else: choose random non-final layer. 
- Else:: - Choose initial log-weights that correspond to assigning - weight `final_weight` to the final layer and equal - weights to other layers; then add Gaussian noise - with variance `stddev` to these log-weights, and normalize - to weights (note: the average weight assigned to the - final layer here will not be `final_weight` if stddev>0). - """ - super().__init__() - assert 0 <= pure_prob <= 1, pure_prob - assert 0 < final_weight < 1, final_weight - assert num_inputs >= 1 - - self.linear = nn.ModuleList( - [ - nn.Linear(num_channels, num_channels, bias=True) - for _ in range(num_inputs - 1) - ] - ) - - self.num_inputs = num_inputs - self.final_weight = final_weight - self.pure_prob = pure_prob - self.stddev = stddev - - self.final_log_weight = ( - torch.tensor( - (final_weight / (1 - final_weight)) * (self.num_inputs - 1) - ) - .log() - .item() - ) - self._reset_parameters() - - def _reset_parameters(self): - for i in range(len(self.linear)): - nn.init.eye_(self.linear[i].weight) - nn.init.constant_(self.linear[i].bias, 0.0) - - def forward(self, inputs: List[Tensor]) -> Tensor: - """Forward function. - Args: - inputs: - A list of Tensor, e.g. from various layers of a transformer. - All must be the same shape, of (*, num_channels) - Returns: - A Tensor of shape (*, num_channels). In test mode - this is just the final input. - """ - num_inputs = self.num_inputs - assert len(inputs) == num_inputs - if not self.training: - return inputs[-1] - - # Shape of weights: (*, num_inputs) - num_channels = inputs[0].shape[-1] - num_frames = inputs[0].numel() // num_channels - - mod_inputs = [] - for i in range(num_inputs - 1): - mod_inputs.append(self.linear[i](inputs[i])) - mod_inputs.append(inputs[num_inputs - 1]) - - ndim = inputs[0].ndim - # stacked_inputs: (num_frames, num_channels, num_inputs) - stacked_inputs = torch.stack(mod_inputs, dim=ndim).reshape( - (num_frames, num_channels, num_inputs) - ) - - # weights: (num_frames, num_inputs) - weights = self._get_random_weights( - inputs[0].dtype, inputs[0].device, num_frames - ) - - weights = weights.reshape(num_frames, num_inputs, 1) - # ans: (num_frames, num_channels, 1) - ans = torch.matmul(stacked_inputs, weights) - # ans: (*, num_channels) - ans = ans.reshape(*tuple(inputs[0].shape[:-1]), num_channels) - - if __name__ == "__main__": - # for testing only... - print("Weights = ", weights.reshape(num_frames, num_inputs)) - return ans - - def _get_random_weights( - self, dtype: torch.dtype, device: torch.device, num_frames: int - ) -> Tensor: - """Return a tensor of random weights, of shape - `(num_frames, self.num_inputs)`, - Args: - dtype: - The data-type desired for the answer, e.g. float, double. - device: - The device needed for the answer. - num_frames: - The number of sets of weights desired - Returns: - A tensor of shape (num_frames, self.num_inputs), such that - `ans.sum(dim=1)` is all ones. 
- """ - pure_prob = self.pure_prob - if pure_prob == 0.0: - return self._get_random_mixed_weights(dtype, device, num_frames) - elif pure_prob == 1.0: - return self._get_random_pure_weights(dtype, device, num_frames) - else: - p = self._get_random_pure_weights(dtype, device, num_frames) - m = self._get_random_mixed_weights(dtype, device, num_frames) - return torch.where( - torch.rand(num_frames, 1, device=device) < self.pure_prob, p, m - ) - - def _get_random_pure_weights( - self, dtype: torch.dtype, device: torch.device, num_frames: int - ): - """Return a tensor of random one-hot weights, of shape - `(num_frames, self.num_inputs)`, - Args: - dtype: - The data-type desired for the answer, e.g. float, double. - device: - The device needed for the answer. - num_frames: - The number of sets of weights desired. - Returns: - A one-hot tensor of shape `(num_frames, self.num_inputs)`, with - exactly one weight equal to 1.0 on each frame. - """ - final_prob = self.final_weight - - # final contains self.num_inputs - 1 in all elements - final = torch.full((num_frames,), self.num_inputs - 1, device=device) - # nonfinal contains random integers in [0..num_inputs - 2], these are for non-final weights. - nonfinal = torch.randint( - self.num_inputs - 1, (num_frames,), device=device - ) - - indexes = torch.where( - torch.rand(num_frames, device=device) < final_prob, final, nonfinal - ) - ans = torch.nn.functional.one_hot( - indexes, num_classes=self.num_inputs - ).to(dtype=dtype) - return ans - - def _get_random_mixed_weights( - self, dtype: torch.dtype, device: torch.device, num_frames: int - ): - """Return a tensor of random one-hot weights, of shape - `(num_frames, self.num_inputs)`, - Args: - dtype: - The data-type desired for the answer, e.g. float, double. - device: - The device needed for the answer. - num_frames: - The number of sets of weights desired. - Returns: - A tensor of shape (num_frames, self.num_inputs), which elements - in [0..1] that sum to one over the second axis, i.e. - `ans.sum(dim=1)` is all ones. - """ - logprobs = ( - torch.randn(num_frames, self.num_inputs, dtype=dtype, device=device) - * self.stddev - ) - logprobs[:, -1] += self.final_log_weight - return logprobs.softmax(dim=1) - - -def _test_random_combine(final_weight: float, pure_prob: float, stddev: float): - print( - f"_test_random_combine: final_weight={final_weight}, pure_prob={pure_prob}, stddev={stddev}" - ) - num_inputs = 3 - num_channels = 50 - m = RandomCombine( - num_inputs=num_inputs, - num_channels=num_channels, - final_weight=final_weight, - pure_prob=pure_prob, - stddev=stddev, - ) - - x = [torch.ones(3, 4, num_channels) for _ in range(num_inputs)] - - y = m(x) - assert y.shape == x[0].shape - assert torch.allclose(y, x[0]) # .. since actually all ones. - - -def _test_random_combine_main(): - _test_random_combine(0.999, 0, 0.0) - _test_random_combine(0.5, 0, 0.0) - _test_random_combine(0.999, 0, 0.0) - _test_random_combine(0.5, 0, 0.3) - _test_random_combine(0.5, 1, 0.3) - _test_random_combine(0.5, 0.5, 0.3) - - feature_dim = 50 - c = Conformer( - num_features=feature_dim, output_dim=256, d_model=128, nhead=4 - ) - batch_size = 5 - seq_len = 20 - # Just make sure the forward pass runs. 
- f = c( - torch.randn(batch_size, seq_len, feature_dim), - torch.full((batch_size,), seq_len, dtype=torch.int64), - ) - f # to remove flake8 warnings - - -if __name__ == "__main__": - feature_dim = 50 - c = Conformer(num_features=feature_dim, d_model=128, nhead=4) - batch_size = 5 - seq_len = 20 - # Just make sure the forward pass runs. - f = c( - torch.randn(batch_size, seq_len, feature_dim), - torch.full((batch_size,), seq_len, dtype=torch.int64), - warmup=0.5, - ) - - _test_random_combine_main() diff --git a/egs/aishell4/ASR/pruned_transducer_stateless5/conformer.py b/egs/aishell4/ASR/pruned_transducer_stateless5/conformer.py new file mode 120000 index 000000000..c7c1a4b6e --- /dev/null +++ b/egs/aishell4/ASR/pruned_transducer_stateless5/conformer.py @@ -0,0 +1 @@ +../../../librispeech/ASR/pruned_transducer_stateless5/conformer.py \ No newline at end of file diff --git a/egs/aishell4/ASR/pruned_transducer_stateless5/decode.py b/egs/aishell4/ASR/pruned_transducer_stateless5/decode.py index 705e34647..d329410e1 100755 --- a/egs/aishell4/ASR/pruned_transducer_stateless5/decode.py +++ b/egs/aishell4/ASR/pruned_transducer_stateless5/decode.py @@ -523,7 +523,9 @@ def main(): ) logging.info(f"averaging {filenames}") model.to(device) - model.load_state_dict(average_checkpoints(filenames, device=device)) + model.load_state_dict( + average_checkpoints(filenames, device=device), strict=False + ) elif params.avg == 1: load_checkpoint(f"{params.exp_dir}/epoch-{params.epoch}.pt", model) else: @@ -534,7 +536,9 @@ def main(): filenames.append(f"{params.exp_dir}/epoch-{i}.pt") logging.info(f"averaging {filenames}") model.to(device) - model.load_state_dict(average_checkpoints(filenames, device=device)) + model.load_state_dict( + average_checkpoints(filenames, device=device), strict=False + ) else: if params.iter > 0: filenames = find_checkpoints( @@ -562,7 +566,8 @@ def main(): filename_start=filename_start, filename_end=filename_end, device=device, - ) + ), + strict=False, ) else: assert params.avg > 0, params.avg @@ -580,7 +585,8 @@ def main(): filename_start=filename_start, filename_end=filename_end, device=device, - ) + ), + strict=False, ) model.to(device) diff --git a/egs/aishell4/ASR/pruned_transducer_stateless5/export.py b/egs/aishell4/ASR/pruned_transducer_stateless5/export.py index f42a85373..993341131 100755 --- a/egs/aishell4/ASR/pruned_transducer_stateless5/export.py +++ b/egs/aishell4/ASR/pruned_transducer_stateless5/export.py @@ -184,7 +184,9 @@ def main(): ) logging.info(f"averaging {filenames}") model.to(device) - model.load_state_dict(average_checkpoints(filenames, device=device)) + model.load_state_dict( + average_checkpoints(filenames, device=device), strict=False + ) elif params.avg == 1: load_checkpoint(f"{params.exp_dir}/epoch-{params.epoch}.pt", model) else: @@ -195,7 +197,9 @@ def main(): filenames.append(f"{params.exp_dir}/epoch-{i}.pt") logging.info(f"averaging {filenames}") model.to(device) - model.load_state_dict(average_checkpoints(filenames, device=device)) + model.load_state_dict( + average_checkpoints(filenames, device=device), strict=False + ) else: if params.iter > 0: filenames = find_checkpoints( @@ -223,7 +227,8 @@ def main(): filename_start=filename_start, filename_end=filename_end, device=device, - ) + ), + strict=False, ) else: assert params.avg > 0, params.avg @@ -241,7 +246,8 @@ def main(): filename_start=filename_start, filename_end=filename_end, device=device, - ) + ), + strict=False, ) model.eval() From 116d0cf26dd323a257c5785f213500c0dc13cb8c Mon Sep 17 
00:00:00 2001 From: Quandwang Date: Fri, 22 Jul 2022 15:31:25 +0800 Subject: [PATCH 09/38] CTC attention model with reworked Conformer encoder and reworked Transformer decoder (#462) * ctc attention model with reworked conformer encoder and reworked transformer decoder * remove unnecessary func * resolve flake8 conflicts * fix typos and modify the expr of ScaledEmbedding * use original beam size * minor changes to the scripts * add rnn lm decoding * minor changes * check whether q k v weight is None * check whether q k v weight is None * check whether q k v weight is None * style correction * update results * update results * upload the decoding results of rnn-lm to the RESULTS * upload the decoding results of rnn-lm to the RESULTS * Update egs/librispeech/ASR/RESULTS.md Co-authored-by: Fangjun Kuang * Update egs/librispeech/ASR/RESULTS.md Co-authored-by: Fangjun Kuang * Update egs/librispeech/ASR/RESULTS.md Co-authored-by: Fangjun Kuang Co-authored-by: Fangjun Kuang --- .flake8 | 6 +- egs/librispeech/ASR/RESULTS.md | 114 +- .../ASR/conformer_ctc2/__init__.py | 1 + .../ASR/conformer_ctc2/asr_datamodule.py | 1 + .../ASR/conformer_ctc2/attention.py | 252 ++++ .../ASR/conformer_ctc2/conformer.py | 964 ++++++++++++++ egs/librispeech/ASR/conformer_ctc2/decode.py | 996 +++++++++++++++ egs/librispeech/ASR/conformer_ctc2/export.py | 281 +++++ .../ASR/conformer_ctc2/label_smoothing.py | 1 + egs/librispeech/ASR/conformer_ctc2/optim.py | 1 + egs/librispeech/ASR/conformer_ctc2/scaling.py | 1 + .../ASR/conformer_ctc2/subsampling.py | 121 ++ egs/librispeech/ASR/conformer_ctc2/train.py | 1119 +++++++++++++++++ .../ASR/conformer_ctc2/transformer.py | 1092 ++++++++++++++++ .../pruned_transducer_stateless2/scaling.py | 3 +- 15 files changed, 4949 insertions(+), 4 deletions(-) create mode 120000 egs/librispeech/ASR/conformer_ctc2/__init__.py create mode 120000 egs/librispeech/ASR/conformer_ctc2/asr_datamodule.py create mode 100644 egs/librispeech/ASR/conformer_ctc2/attention.py create mode 100644 egs/librispeech/ASR/conformer_ctc2/conformer.py create mode 100755 egs/librispeech/ASR/conformer_ctc2/decode.py create mode 100755 egs/librispeech/ASR/conformer_ctc2/export.py create mode 120000 egs/librispeech/ASR/conformer_ctc2/label_smoothing.py create mode 120000 egs/librispeech/ASR/conformer_ctc2/optim.py create mode 120000 egs/librispeech/ASR/conformer_ctc2/scaling.py create mode 100644 egs/librispeech/ASR/conformer_ctc2/subsampling.py create mode 100755 egs/librispeech/ASR/conformer_ctc2/train.py create mode 100644 egs/librispeech/ASR/conformer_ctc2/transformer.py diff --git a/.flake8 b/.flake8 index b2eb2e943..cbf0d8484 100644 --- a/.flake8 +++ b/.flake8 @@ -4,12 +4,14 @@ statistics=true max-line-length = 80 per-file-ignores = # line too long - icefall/diagnostics.py: E501 + icefall/diagnostics.py: E501, egs/*/ASR/*/conformer.py: E501, egs/*/ASR/pruned_transducer_stateless*/*.py: E501, egs/*/ASR/*/optim.py: E501, egs/*/ASR/*/scaling.py: E501, - egs/librispeech/ASR/conv_emformer_transducer_stateless*/*.py: E501, E203 + egs/librispeech/ASR/conv_emformer_transducer_stateless*/*.py: E501, E203, + egs/librispeech/ASR/conformer_ctc2/*py: E501, + egs/librispeech/ASR/RESULTS.md: E999, # invalid escape sequence (cause by tex formular), W605 icefall/utils.py: E501, W605 diff --git a/egs/librispeech/ASR/RESULTS.md b/egs/librispeech/ASR/RESULTS.md index d4096884b..b10ae98e6 100644 --- a/egs/librispeech/ASR/RESULTS.md +++ b/egs/librispeech/ASR/RESULTS.md @@ -1,6 +1,6 @@ ## Results -### LibriSpeech BPE training results 
(Pruned Stateless Conv-Emformer RNN-T 2)
+#### LibriSpeech BPE training results (Pruned Stateless Conv-Emformer RNN-T 2)

[conv_emformer_transducer_stateless2](./conv_emformer_transducer_stateless2)

@@ -1998,6 +1998,118 @@ avg=11

You can find the tensorboard log at:

+### LibriSpeech BPE training results (Conformer-CTC 2)
+
+#### [conformer_ctc2](./conformer_ctc2)
+
+#### 2022-07-21
+
+It implements a 'reworked' version of the CTC attention model.
+As demonstrated by pruned_transducer_stateless2, the reworked Conformer model has superior performance compared to the original Conformer.
+This modified CTC attention model therefore uses the reworked Conformer as the encoder and the reworked Transformer as the decoder.
+conformer_ctc2 also adopts the 'averaged model' idea from pruned_transducer_stateless4.
+
+The WERs on the LibriSpeech test sets, compared with a baseline model, are listed below.
+
+The baseline model is the original conformer CTC attention model trained with icefall/egs/librispeech/ASR/conformer_ctc.
+The model is downloaded from .
+This model has 12 Conformer encoder layers and 6 Transformer decoder layers.
+The number of model parameters is 109,226,120.
+It was trained for 90 epochs on the full LibriSpeech dataset.
+
+The reworked CTC attention model has 12 reworked Conformer encoder layers and 6 reworked Transformer decoder layers.
+The number of model parameters is 103,071,035.
+On the full LibriSpeech dataset, it was trained for **only** 30 epochs because the reworked model converges much faster.
+Please refer to to see the loss convergence curve.
+Please find the above trained model at on Hugging Face.
+
+The decoding configuration for the reworked model is --epoch 30, --avg 8, --use-averaged-model True, which gave the best results in our search.
+
+| WER | reworked ctc attention | with --epoch 30 --avg 8 --use-averaged-model True | | ctc attention| with --epoch 77 --avg 55 | |
+|------------------------|-------|------|------|------|------|-----|
+| test sets | test-clean | test-other | Avg | test-clean | test-other | Avg |
+| ctc-greedy-search | 2.98% | 7.14%| 5.06%| 2.90%| 7.47%| 5.19%|
+| ctc-decoding | 2.98% | 7.14%| 5.06%| 2.90%| 7.47%| 5.19%|
+| 1best | 2.93% | 6.37%| 4.65%| 2.70%| 6.49%| 4.60%|
+| nbest | 2.94% | 6.39%| 4.67%| 2.70%| 6.48%| 4.59%|
+| nbest-rescoring | 2.68% | 5.77%| 4.23%| 2.55%| 6.07%| 4.31%|
+| whole-lattice-rescoring| 2.66% | 5.76%| 4.21%| 2.56%| 6.04%| 4.30%|
+| attention-decoder | 2.59% | 5.54%| 4.07%| 2.41%| 5.77%| 4.09%|
+| nbest-oracle | 1.53% | 3.47%| 2.50%| 1.69%| 4.02%| 2.86%|
+| rnn-lm | 2.37% | 4.98%| 3.68%| 2.31%| 5.35%| 3.83%|
+
+conformer_ctc2 also implements CTC greedy search decoding; it gives identical WERs to the ctc-decoding method.
+For the other decoding methods, the average WER over the two test sets is similar for the two models.
+Except for the 1best and nbest methods, the reworked model performs better overall than the baseline model.
+
+To reproduce the above results, use the following commands.
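A quick note on the `--avg` option that appears in the decoding commands below: it averages the parameters of several consecutive checkpoints before decoding. The sketch below illustrates plain checkpoint averaging only; it is not the icefall implementation (that lives in `icefall.checkpoint`, e.g. `average_checkpoints` and `average_checkpoints_with_averaged_model`), and the checkpoint filenames are assumptions.

```python
# Minimal sketch of checkpoint averaging (illustration only, not the icefall code).
# Assumes each epoch-*.pt file stores its parameters under the "model" key.
from typing import Dict, List

import torch


def average_state_dicts(filenames: List[str]) -> Dict[str, torch.Tensor]:
    """Return the element-wise average of the "model" state_dicts in `filenames`."""
    avg: Dict[str, torch.Tensor] = {}
    for f in filenames:
        state = torch.load(f, map_location="cpu")["model"]
        for k, v in state.items():
            # Non-floating-point buffers are simply cast here; the real code is more careful.
            avg[k] = avg.get(k, 0) + v.to(torch.float64)
    return {k: (v / len(filenames)).to(torch.float32) for k, v in avg.items()}


# --epoch 30 --avg 8 (without --use-averaged-model) would average epoch-23.pt .. epoch-30.pt:
filenames = [f"conformer_ctc2/exp/epoch-{i}.pt" for i in range(23, 31)]
# model.load_state_dict(average_state_dicts(filenames), strict=False)
```

With `--use-averaged-model True`, icefall instead derives the average over the epoch range from the running model averages stored in the checkpoints (see `average_checkpoints_with_averaged_model`, which decode.py imports), which is, roughly speaking, what the `--epoch 30 --avg 8 --use-averaged-model True` setting above relies on.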
+ +The training commands are: + +```bash + WORLD_SIZE=8 + export CUDA_VISIBLE_DEVICES="0,1,2,3,4,5,6,7" + ./conformer_ctc2/train.py \ + --manifest-dir data/fbank \ + --exp-dir conformer_ctc2/exp \ + --full-libri 1 \ + --spec-aug-time-warp-factor 80 \ + --max-duration 300 \ + --world-size ${WORLD_SIZE} \ + --start-epoch 1 \ + --num-epochs 30 \ + --att-rate 0.7 \ + --num-decoder-layers 6 +``` + + +And the following commands are for decoding: + +```bash + + +for method in ctc-greedy-search ctc-decoding 1best nbest-oracle; do + python3 ./conformer_ctc2/decode.py \ + --exp-dir conformer_ctc2/exp \ + --use-averaged-model True --epoch 30 --avg 8 --max-duration 200 --method $method +done + +for method in nbest nbest-rescoring whole-lattice-rescoring attention-decoder ; do + python3 ./conformer_ctc2/decode.py \ + --exp-dir conformer_ctc2/exp \ + --use-averaged-model True --epoch 30 --avg 8 --max-duration 20 --method $method +done + +rnn_dir=$(git rev-parse --show-toplevel)/icefall/rnn_lm +./conformer_ctc2/decode.py \ + --exp-dir conformer_ctc2/exp \ + --lang-dir data/lang_bpe_500 \ + --lm-dir data/lm \ + --max-duration 30 \ + --concatenate-cuts 0 \ + --bucketing-sampler 1 \ + --num-paths 1000 \ + --use-averaged-model True \ + --epoch 30 \ + --avg 8 \ + --nbest-scale 0.5 \ + --rnn-lm-exp-dir ${rnn_dir}/exp \ + --rnn-lm-epoch 29 \ + --rnn-lm-avg 3 \ + --rnn-lm-embedding-dim 2048 \ + --rnn-lm-hidden-dim 2048 \ + --rnn-lm-num-layers 3 \ + --rnn-lm-tie-weights true \ + --method rnn-lm +``` + +You can find the RNN-LM pre-trained model at + + + ### LibriSpeech BPE training results (Conformer-CTC) #### 2021-11-09 diff --git a/egs/librispeech/ASR/conformer_ctc2/__init__.py b/egs/librispeech/ASR/conformer_ctc2/__init__.py new file mode 120000 index 000000000..b24e5e357 --- /dev/null +++ b/egs/librispeech/ASR/conformer_ctc2/__init__.py @@ -0,0 +1 @@ +../pruned_transducer_stateless2/__init__.py \ No newline at end of file diff --git a/egs/librispeech/ASR/conformer_ctc2/asr_datamodule.py b/egs/librispeech/ASR/conformer_ctc2/asr_datamodule.py new file mode 120000 index 000000000..a074d6085 --- /dev/null +++ b/egs/librispeech/ASR/conformer_ctc2/asr_datamodule.py @@ -0,0 +1 @@ +../pruned_transducer_stateless2/asr_datamodule.py \ No newline at end of file diff --git a/egs/librispeech/ASR/conformer_ctc2/attention.py b/egs/librispeech/ASR/conformer_ctc2/attention.py new file mode 100644 index 000000000..1375d7245 --- /dev/null +++ b/egs/librispeech/ASR/conformer_ctc2/attention.py @@ -0,0 +1,252 @@ +# Copyright 2022 Xiaomi Corp. (author: Quandong Wang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Optional, Tuple + +import torch +import torch.nn as nn +from torch import Tensor +from torch.nn.init import xavier_normal_ + +from scaling import ScaledLinear + + +class MultiheadAttention(nn.Module): + r"""Allows the model to jointly attend to information + from different representation subspaces. 
+ See `Attention Is All You Need `_. + + .. math:: + \text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O + + where :math:`head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V)`. + + Args: + embed_dim: Total dimension of the model. + num_heads: Number of parallel attention heads. Note that ``embed_dim`` will be split + across ``num_heads`` (i.e. each head will have dimension ``embed_dim // num_heads``). + dropout: Dropout probability on ``attn_output_weights``. Default: ``0.0`` (no dropout). + bias: If specified, adds bias to input / output projection layers. Default: ``True``. + add_bias_kv: If specified, adds bias to the key and value sequences at dim=0. Default: ``False``. + add_zero_attn: If specified, adds a new batch of zeros to the key and value sequences at dim=1. + Default: ``False``. + kdim: Total number of features for keys. Default: ``None`` (uses ``kdim=embed_dim``). + vdim: Total number of features for values. Default: ``None`` (uses ``vdim=embed_dim``). + batch_first: If ``True``, then the input and output tensors are provided + as (batch, seq, feature). Default: ``False`` (seq, batch, feature). + + Examples:: + + >>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads) + >>> attn_output, attn_output_weights = multihead_attn(query, key, value) + """ + __constants__ = ["batch_first"] + bias_k: Optional[torch.Tensor] + bias_v: Optional[torch.Tensor] + + def __init__( + self, + embed_dim, + num_heads, + dropout=0.0, + bias=True, + add_bias_kv=False, + add_zero_attn=False, + kdim=None, + vdim=None, + batch_first=False, + device=None, + dtype=None, + ) -> None: + factory_kwargs = {"device": device, "dtype": dtype} + super(MultiheadAttention, self).__init__() + self.embed_dim = embed_dim + self.kdim = kdim if kdim is not None else embed_dim + self.vdim = vdim if vdim is not None else embed_dim + self._qkv_same_embed_dim = ( + self.kdim == embed_dim and self.vdim == embed_dim + ) + + self.num_heads = num_heads + self.dropout = dropout + self.batch_first = batch_first + self.head_dim = embed_dim // num_heads + assert ( + self.head_dim * num_heads == self.embed_dim + ), "embed_dim must be divisible by num_heads" + + if self._qkv_same_embed_dim is False: + self.q_proj_weight = ScaledLinear(embed_dim, embed_dim, bias=bias) + self.k_proj_weight = ScaledLinear(self.kdim, embed_dim, bias=bias) + self.v_proj_weight = ScaledLinear(self.vdim, embed_dim, bias=bias) + self.register_parameter("in_proj_weight", None) + else: + self.in_proj_weight = ScaledLinear( + embed_dim, 3 * embed_dim, bias=bias + ) + self.register_parameter("q_proj_weight", None) + self.register_parameter("k_proj_weight", None) + self.register_parameter("v_proj_weight", None) + + if not bias: + self.register_parameter("in_proj_bias", None) + + self.out_proj = ScaledLinear(embed_dim, embed_dim, bias=bias) + + if add_bias_kv: + self.bias_k = nn.Parameter( + torch.empty((1, 1, embed_dim), **factory_kwargs) + ) + self.bias_v = nn.Parameter( + torch.empty((1, 1, embed_dim), **factory_kwargs) + ) + else: + self.bias_k = self.bias_v = None + + self.add_zero_attn = add_zero_attn + + self._reset_parameters() + + def _reset_parameters(self): + if self.bias_k is not None: + xavier_normal_(self.bias_k) + if self.bias_v is not None: + xavier_normal_(self.bias_v) + + def __setstate__(self, state): + # Support loading old MultiheadAttention checkpoints generated by v1.1.0 + if "_qkv_same_embed_dim" not in state: + state["_qkv_same_embed_dim"] = True + + super(MultiheadAttention, self).__setstate__(state) + + def forward( 
+ self, + query: Tensor, + key: Tensor, + value: Tensor, + key_padding_mask: Optional[Tensor] = None, + need_weights: bool = True, + attn_mask: Optional[Tensor] = None, + ) -> Tuple[Tensor, Optional[Tensor]]: + r""" + Args: + query: Query embeddings of shape :math:`(L, N, E_q)` when ``batch_first=False`` or :math:`(N, L, E_q)` + when ``batch_first=True``, where :math:`L` is the target sequence length, :math:`N` is the batch size, + and :math:`E_q` is the query embedding dimension ``embed_dim``. Queries are compared against + key-value pairs to produce the output. See "Attention Is All You Need" for more details. + key: Key embeddings of shape :math:`(S, N, E_k)` when ``batch_first=False`` or :math:`(N, S, E_k)` when + ``batch_first=True``, where :math:`S` is the source sequence length, :math:`N` is the batch size, and + :math:`E_k` is the key embedding dimension ``kdim``. See "Attention Is All You Need" for more details. + value: Value embeddings of shape :math:`(S, N, E_v)` when ``batch_first=False`` or :math:`(N, S, E_v)` when + ``batch_first=True``, where :math:`S` is the source sequence length, :math:`N` is the batch size, and + :math:`E_v` is the value embedding dimension ``vdim``. See "Attention Is All You Need" for more details. + key_padding_mask: If specified, a mask of shape :math:`(N, S)` indicating which elements within ``key`` + to ignore for the purpose of attention (i.e. treat as "padding"). Binary and byte masks are supported. + For a binary mask, a ``True`` value indicates that the corresponding ``key`` value will be ignored for + the purpose of attention. For a byte mask, a non-zero value indicates that the corresponding ``key`` + value will be ignored. + need_weights: If specified, returns ``attn_output_weights`` in addition to ``attn_outputs``. + Default: ``True``. + attn_mask: If specified, a 2D or 3D mask preventing attention to certain positions. Must be of shape + :math:`(L, S)` or :math:`(N\cdot\text{num\_heads}, L, S)`, where :math:`N` is the batch size, + :math:`L` is the target sequence length, and :math:`S` is the source sequence length. A 2D mask will be + broadcasted across the batch while a 3D mask allows for a different mask for each entry in the batch. + Binary, byte, and float masks are supported. For a binary mask, a ``True`` value indicates that the + corresponding position is not allowed to attend. For a byte mask, a non-zero value indicates that the + corresponding position is not allowed to attend. For a float mask, the mask values will be added to + the attention weight. + + Outputs: + - **attn_output** - Attention outputs of shape :math:`(L, N, E)` when ``batch_first=False`` or + :math:`(N, L, E)` when ``batch_first=True``, where :math:`L` is the target sequence length, :math:`N` is + the batch size, and :math:`E` is the embedding dimension ``embed_dim``. + - **attn_output_weights** - Attention output weights of shape :math:`(N, L, S)`, where :math:`N` is the batch + size, :math:`L` is the target sequence length, and :math:`S` is the source sequence length. Only returned + when ``need_weights=True``. 
+ """ + if self.batch_first: + query, key, value = [x.transpose(1, 0) for x in (query, key, value)] + + if not self._qkv_same_embed_dim: + q_proj_weight = ( + self.q_proj_weight.get_weight() + if self.q_proj_weight is not None + else None + ) + k_proj_weight = ( + self.k_proj_weight.get_weight() + if self.k_proj_weight is not None + else None + ) + v_proj_weight = ( + self.v_proj_weight.get_weight() + if self.v_proj_weight is not None + else None + ) + ( + attn_output, + attn_output_weights, + ) = nn.functional.multi_head_attention_forward( + query, + key, + value, + self.embed_dim, + self.num_heads, + self.in_proj_weight.get_weight(), + self.in_proj_weight.get_bias(), + self.bias_k, + self.bias_v, + self.add_zero_attn, + self.dropout, + self.out_proj.get_weight(), + self.out_proj.get_bias(), + training=self.training, + key_padding_mask=key_padding_mask, + need_weights=need_weights, + attn_mask=attn_mask, + use_separate_proj_weight=True, + q_proj_weight=q_proj_weight, + k_proj_weight=k_proj_weight, + v_proj_weight=v_proj_weight, + ) + else: + ( + attn_output, + attn_output_weights, + ) = nn.functional.multi_head_attention_forward( + query, + key, + value, + self.embed_dim, + self.num_heads, + self.in_proj_weight.get_weight(), + self.in_proj_weight.get_bias(), + self.bias_k, + self.bias_v, + self.add_zero_attn, + self.dropout, + self.out_proj.get_weight(), + self.out_proj.get_bias(), + training=self.training, + key_padding_mask=key_padding_mask, + need_weights=need_weights, + attn_mask=attn_mask, + ) + if self.batch_first: + return attn_output.transpose(1, 0), attn_output_weights + else: + return attn_output, attn_output_weights diff --git a/egs/librispeech/ASR/conformer_ctc2/conformer.py b/egs/librispeech/ASR/conformer_ctc2/conformer.py new file mode 100644 index 000000000..fb11a5fc8 --- /dev/null +++ b/egs/librispeech/ASR/conformer_ctc2/conformer.py @@ -0,0 +1,964 @@ +#!/usr/bin/env python3 +# Copyright (c) 2021 University of Chinese Academy of Sciences (author: Han Zhu) +# 2022 Xiaomi Corp. (author: Quandong Wang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import copy +import math +import warnings +from typing import Optional, Tuple + +import torch +from scaling import ( + ActivationBalancer, + BasicNorm, + DoubleSwish, + ScaledConv1d, + ScaledLinear, +) +from torch import Tensor, nn +from subsampling import Conv2dSubsampling + +from transformer import Supervisions, Transformer, encoder_padding_mask + + +class Conformer(Transformer): + """ + Args: + num_features (int): Number of input features + num_classes (int): Number of output classes + subsampling_factor (int): subsampling factor of encoder (the convolution layers before transformers) + d_model (int): attention dimension, also the output dimension + nhead (int): number of head + dim_feedforward (int): feedforward dimention + num_encoder_layers (int): number of encoder layers + num_decoder_layers (int): number of decoder layers + dropout (float): dropout rate + layer_dropout (float): layer-dropout rate. + cnn_module_kernel (int): Kernel size of convolution module + vgg_frontend (bool): whether to use vgg frontend. + """ + + def __init__( + self, + num_features: int, + num_classes: int, + subsampling_factor: int = 4, + d_model: int = 256, + nhead: int = 4, + dim_feedforward: int = 2048, + num_encoder_layers: int = 12, + num_decoder_layers: int = 6, + dropout: float = 0.1, + layer_dropout: float = 0.075, + cnn_module_kernel: int = 31, + ) -> None: + super(Conformer, self).__init__( + num_features=num_features, + num_classes=num_classes, + subsampling_factor=subsampling_factor, + d_model=d_model, + nhead=nhead, + dim_feedforward=dim_feedforward, + num_encoder_layers=num_encoder_layers, + num_decoder_layers=num_decoder_layers, + dropout=dropout, + layer_dropout=layer_dropout, + ) + + self.num_features = num_features + self.subsampling_factor = subsampling_factor + if subsampling_factor != 4: + raise NotImplementedError("Support only 'subsampling_factor=4'.") + + # self.encoder_embed converts the input of shape (N, T, num_features) + # to the shape (N, T//subsampling_factor, d_model). + # That is, it does two things simultaneously: + # (1) subsampling: T -> T//subsampling_factor + # (2) embedding: num_features -> d_model + self.encoder_embed = Conv2dSubsampling(num_features, d_model) + + self.encoder_pos = RelPositionalEncoding(d_model, dropout) + + encoder_layer = ConformerEncoderLayer( + d_model, + nhead, + dim_feedforward, + dropout, + layer_dropout, + cnn_module_kernel, + ) + self.encoder = ConformerEncoder(encoder_layer, num_encoder_layers) + + def run_encoder( + self, + x: torch.Tensor, + supervisions: Optional[Supervisions] = None, + warmup: float = 1.0, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: + """ + Args: + x: + The input tensor. Its shape is (batch_size, seq_len, feature_dim). + supervisions: + Supervision in lhotse format. + See https://github.com/lhotse-speech/lhotse/blob/master/lhotse/dataset/speech_recognition.py#L32 # noqa + CAUTION: It contains length information, i.e., start and number of + frames, before subsampling + It is read directly from the batch, without any sorting. It is used + to compute encoder padding mask, which is used as memory key padding + mask for the decoder. + warmup: + A floating point value that gradually increases from 0 throughout + training; when it is >= 1.0 we are "fully warmed up". It is used + to turn modules on sequentially. + Returns: + Tensor: Predictor tensor of dimension (input_length, batch_size, d_model). 
+ Tensor: Mask tensor of dimension (batch_size, input_length) + """ + x = self.encoder_embed(x) + x, pos_emb = self.encoder_pos(x) + x = x.permute(1, 0, 2) # (N, T, C) -> (T, N, C) + mask = encoder_padding_mask(x.size(0), supervisions) + if mask is not None: + mask = mask.to(x.device) + + # Caution: We assume the subsampling factor is 4! + + x = self.encoder( + x, pos_emb, src_key_padding_mask=mask, warmup=warmup + ) # (T, N, C) + + # x = x.permute(1, 0, 2) # (T, N, C) ->(N, T, C) + + # return x, lengths + return x, mask + + +class ConformerEncoderLayer(nn.Module): + """ + ConformerEncoderLayer is made up of self-attn, feedforward and convolution networks. + See: "Conformer: Convolution-augmented Transformer for Speech Recognition" + + Args: + d_model: the number of expected features in the input (required). + nhead: the number of heads in the multiheadattention models (required). + dim_feedforward: the dimension of the feedforward network model (default=2048). + dropout: the dropout value (default=0.1). + cnn_module_kernel (int): Kernel size of convolution module. + + Examples:: + >>> encoder_layer = ConformerEncoderLayer(d_model=512, nhead=8) + >>> src = torch.rand(10, 32, 512) + >>> pos_emb = torch.rand(32, 19, 512) + >>> out = encoder_layer(src, pos_emb) + """ + + def __init__( + self, + d_model: int, + nhead: int, + dim_feedforward: int = 2048, + dropout: float = 0.1, + layer_dropout: float = 0.075, + cnn_module_kernel: int = 31, + ) -> None: + super(ConformerEncoderLayer, self).__init__() + + self.layer_dropout = layer_dropout + + self.d_model = d_model + + self.self_attn = RelPositionMultiheadAttention( + d_model, nhead, dropout=0.0 + ) + + self.feed_forward = nn.Sequential( + ScaledLinear(d_model, dim_feedforward), + ActivationBalancer(channel_dim=-1), + DoubleSwish(), + nn.Dropout(dropout), + ScaledLinear(dim_feedforward, d_model, initial_scale=0.25), + ) + + self.feed_forward_macaron = nn.Sequential( + ScaledLinear(d_model, dim_feedforward), + ActivationBalancer(channel_dim=-1), + DoubleSwish(), + nn.Dropout(dropout), + ScaledLinear(dim_feedforward, d_model, initial_scale=0.25), + ) + + self.conv_module = ConvolutionModule(d_model, cnn_module_kernel) + + self.norm_final = BasicNorm(d_model) + + # try to ensure the output is close to zero-mean (or at least, zero-median). + self.balancer = ActivationBalancer( + channel_dim=-1, min_positive=0.45, max_positive=0.55, max_abs=6.0 + ) + + self.dropout = nn.Dropout(dropout) + + def forward( + self, + src: Tensor, + pos_emb: Tensor, + src_mask: Optional[Tensor] = None, + src_key_padding_mask: Optional[Tensor] = None, + warmup: float = 1.0, + ) -> Tensor: + """ + Pass the input through the encoder layer. + + Args: + src: the sequence to the encoder layer (required). + pos_emb: Positional embedding tensor (required). + src_mask: the mask for the src sequence (optional). + src_key_padding_mask: the mask for the src keys per batch (optional). + warmup: controls selective bypass of of layers; if < 1.0, we will + bypass layers more frequently. + + Shape: + src: (S, N, E). + pos_emb: (N, 2*S-1, E) + src_mask: (S, S). + src_key_padding_mask: (N, S). + S is the source sequence length, N is the batch size, E is the feature number + """ + src_orig = src + + warmup_scale = min(0.1 + warmup, 1.0) + # alpha = 1.0 means fully use this encoder layer, 0.0 would mean + # completely bypass it. 
+ if self.training: + alpha = ( + warmup_scale + if torch.rand(()).item() <= (1.0 - self.layer_dropout) + else 0.1 + ) + else: + alpha = 1.0 + + # macaron style feed forward module + src = src + self.dropout(self.feed_forward_macaron(src)) + + # multi-headed self-attention module + src_att = self.self_attn( + src, + src, + src, + pos_emb=pos_emb, + attn_mask=src_mask, + key_padding_mask=src_key_padding_mask, + )[0] + src = src + self.dropout(src_att) + + # convolution module + src = src + self.dropout(self.conv_module(src)) + + # feed forward module + src = src + self.dropout(self.feed_forward(src)) + + src = self.norm_final(self.balancer(src)) + + if alpha != 1.0: + src = alpha * src + (1 - alpha) * src_orig + + return src + + +class ConformerEncoder(nn.Module): + r"""ConformerEncoder is a stack of N encoder layers + + Args: + encoder_layer: an instance of the ConformerEncoderLayer() class (required). + num_layers: the number of sub-encoder-layers in the encoder (required). + + Examples:: + >>> encoder_layer = ConformerEncoderLayer(d_model=512, nhead=8) + >>> conformer_encoder = ConformerEncoder(encoder_layer, num_layers=6) + >>> src = torch.rand(10, 32, 512) + >>> pos_emb = torch.rand(32, 19, 512) + >>> out = conformer_encoder(src, pos_emb) + """ + + def __init__(self, encoder_layer: nn.Module, num_layers: int) -> None: + super().__init__() + self.layers = nn.ModuleList( + [copy.deepcopy(encoder_layer) for i in range(num_layers)] + ) + self.num_layers = num_layers + + def forward( + self, + src: Tensor, + pos_emb: Tensor, + mask: Optional[Tensor] = None, + src_key_padding_mask: Optional[Tensor] = None, + warmup: float = 1.0, + ) -> Tensor: + r"""Pass the input through the encoder layers in turn. + + Args: + src: the sequence to the encoder (required). + pos_emb: Positional embedding tensor (required). + mask: the mask for the src sequence (optional). + src_key_padding_mask: the mask for the src keys per batch (optional). + + Shape: + src: (S, N, E). + pos_emb: (N, 2*S-1, E) + mask: (S, S). + src_key_padding_mask: (N, S). + S is the source sequence length, T is the target sequence length, N is the batch size, E is the feature number + + """ + output = src + + for i, mod in enumerate(self.layers): + output = mod( + output, + pos_emb, + src_mask=mask, + src_key_padding_mask=src_key_padding_mask, + warmup=warmup, + ) + + return output + + +class RelPositionalEncoding(torch.nn.Module): + """Relative positional encoding module. + + See : Appendix B in "Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context" + Modified from https://github.com/espnet/espnet/blob/master/espnet/nets/pytorch_backend/transformer/embedding.py + + Args: + d_model: Embedding dimension. + dropout_rate: Dropout rate. + max_len: Maximum input length. 
+ + """ + + def __init__( + self, d_model: int, dropout_rate: float, max_len: int = 5000 + ) -> None: + """Construct an PositionalEncoding object.""" + super(RelPositionalEncoding, self).__init__() + self.d_model = d_model + self.dropout = torch.nn.Dropout(p=dropout_rate) + self.pe = None + self.extend_pe(torch.tensor(0.0).expand(1, max_len)) + + def extend_pe(self, x: Tensor) -> None: + """Reset the positional encodings.""" + if self.pe is not None: + # self.pe contains both positive and negative parts + # the length of self.pe is 2 * input_len - 1 + if self.pe.size(1) >= x.size(1) * 2 - 1: + # Note: TorchScript doesn't implement operator== for torch.Device + if self.pe.dtype != x.dtype or str(self.pe.device) != str( + x.device + ): + self.pe = self.pe.to(dtype=x.dtype, device=x.device) + return + # Suppose `i` means to the position of query vecotr and `j` means the + # position of key vector. We use position relative positions when keys + # are to the left (i>j) and negative relative positions otherwise (i Tuple[Tensor, Tensor]: + """Add positional encoding. + + Args: + x (torch.Tensor): Input tensor (batch, time, `*`). + + Returns: + torch.Tensor: Encoded tensor (batch, time, `*`). + torch.Tensor: Encoded tensor (batch, 2*time-1, `*`). + + """ + self.extend_pe(x) + pos_emb = self.pe[ + :, + self.pe.size(1) // 2 + - x.size(1) + + 1 : self.pe.size(1) // 2 # noqa E203 + + x.size(1), + ] + return self.dropout(x), self.dropout(pos_emb) + + +class RelPositionMultiheadAttention(nn.Module): + r"""Multi-Head Attention layer with relative position encoding + + See reference: "Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context" + + Args: + embed_dim: total dimension of the model. + num_heads: parallel attention heads. + dropout: a Dropout layer on attn_output_weights. Default: 0.0. + + Examples:: + + >>> rel_pos_multihead_attn = RelPositionMultiheadAttention(embed_dim, num_heads) + >>> attn_output, attn_output_weights = multihead_attn(query, key, value, pos_emb) + """ + + def __init__( + self, + embed_dim: int, + num_heads: int, + dropout: float = 0.0, + ) -> None: + super(RelPositionMultiheadAttention, self).__init__() + self.embed_dim = embed_dim + self.num_heads = num_heads + self.dropout = dropout + self.head_dim = embed_dim // num_heads + assert ( + self.head_dim * num_heads == self.embed_dim + ), "embed_dim must be divisible by num_heads" + + self.in_proj = ScaledLinear(embed_dim, 3 * embed_dim, bias=True) + self.out_proj = ScaledLinear( + embed_dim, embed_dim, bias=True, initial_scale=0.25 + ) + + # linear transformation for positional encoding. 
+ self.linear_pos = ScaledLinear(embed_dim, embed_dim, bias=False) + # these two learnable bias are used in matrix c and matrix d + # as described in "Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context" Section 3.3 + self.pos_bias_u = nn.Parameter(torch.Tensor(num_heads, self.head_dim)) + self.pos_bias_v = nn.Parameter(torch.Tensor(num_heads, self.head_dim)) + self.pos_bias_u_scale = nn.Parameter(torch.zeros(()).detach()) + self.pos_bias_v_scale = nn.Parameter(torch.zeros(()).detach()) + self._reset_parameters() + + def _pos_bias_u(self): + return self.pos_bias_u * self.pos_bias_u_scale.exp() + + def _pos_bias_v(self): + return self.pos_bias_v * self.pos_bias_v_scale.exp() + + def _reset_parameters(self) -> None: + nn.init.normal_(self.pos_bias_u, std=0.01) + nn.init.normal_(self.pos_bias_v, std=0.01) + + def forward( + self, + query: Tensor, + key: Tensor, + value: Tensor, + pos_emb: Tensor, + key_padding_mask: Optional[Tensor] = None, + need_weights: bool = True, + attn_mask: Optional[Tensor] = None, + ) -> Tuple[Tensor, Optional[Tensor]]: + r""" + Args: + query, key, value: map a query and a set of key-value pairs to an output. + pos_emb: Positional embedding tensor + key_padding_mask: if provided, specified padding elements in the key will + be ignored by the attention. When given a binary mask and a value is True, + the corresponding value on the attention layer will be ignored. When given + a byte mask and a value is non-zero, the corresponding value on the attention + layer will be ignored + need_weights: output attn_output_weights. + attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all + the batches while a 3D mask allows to specify a different mask for the entries of each batch. + + Shape: + - Inputs: + - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is + the embedding dimension. + - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - pos_emb: :math:`(N, 2*L-1, E)` where L is the target sequence length, N is the batch size, E is + the embedding dimension. + - key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length. + If a ByteTensor is provided, the non-zero positions will be ignored while the position + with the zero positions will be unchanged. If a BoolTensor is provided, the positions with the + value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged. + - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length. + 3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length, + S is the source sequence length. attn_mask ensure that position i is allowed to attend the unmasked + positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend + while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True`` + is not allowed to attend while ``False`` values will be unchanged. If a FloatTensor + is provided, it will be added to the attention weight. + + - Outputs: + - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, + E is the embedding dimension. 
+ - attn_output_weights: :math:`(N, L, S)` where N is the batch size, + L is the target sequence length, S is the source sequence length. + """ + return self.multi_head_attention_forward( + query, + key, + value, + pos_emb, + self.embed_dim, + self.num_heads, + self.in_proj.get_weight(), + self.in_proj.get_bias(), + self.dropout, + self.out_proj.get_weight(), + self.out_proj.get_bias(), + training=self.training, + key_padding_mask=key_padding_mask, + need_weights=need_weights, + attn_mask=attn_mask, + ) + + def rel_shift(self, x: Tensor) -> Tensor: + """Compute relative positional encoding. + + Args: + x: Input tensor (batch, head, time1, 2*time1-1). + time1 means the length of query vector. + + Returns: + Tensor: tensor of shape (batch, head, time1, time2) + (note: time2 has the same value as time1, but it is for + the key, while time1 is for the query). + """ + (batch_size, num_heads, time1, n) = x.shape + assert n == 2 * time1 - 1 + # Note: TorchScript requires explicit arg for stride() + batch_stride = x.stride(0) + head_stride = x.stride(1) + time1_stride = x.stride(2) + n_stride = x.stride(3) + return x.as_strided( + (batch_size, num_heads, time1, time1), + (batch_stride, head_stride, time1_stride - n_stride, n_stride), + storage_offset=n_stride * (time1 - 1), + ) + + def multi_head_attention_forward( + self, + query: Tensor, + key: Tensor, + value: Tensor, + pos_emb: Tensor, + embed_dim_to_check: int, + num_heads: int, + in_proj_weight: Tensor, + in_proj_bias: Tensor, + dropout_p: float, + out_proj_weight: Tensor, + out_proj_bias: Tensor, + training: bool = True, + key_padding_mask: Optional[Tensor] = None, + need_weights: bool = True, + attn_mask: Optional[Tensor] = None, + ) -> Tuple[Tensor, Optional[Tensor]]: + r""" + Args: + query, key, value: map a query and a set of key-value pairs to an output. + pos_emb: Positional embedding tensor + embed_dim_to_check: total dimension of the model. + num_heads: parallel attention heads. + in_proj_weight, in_proj_bias: input projection weight and bias. + dropout_p: probability of an element to be zeroed. + out_proj_weight, out_proj_bias: the output projection weight and bias. + training: apply dropout if is ``True``. + key_padding_mask: if provided, specified padding elements in the key will + be ignored by the attention. This is an binary mask. When the value is True, + the corresponding value on the attention layer will be filled with -inf. + need_weights: output attn_output_weights. + attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all + the batches while a 3D mask allows to specify a different mask for the entries of each batch. + + Shape: + Inputs: + - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is + the embedding dimension. + - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - pos_emb: :math:`(N, 2*L-1, E)` or :math:`(1, 2*L-1, E)` where L is the target sequence + length, N is the batch size, E is the embedding dimension. + - key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length. + If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions + will be unchanged. 
If a BoolTensor is provided, the positions with the + value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged. + - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length. + 3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length, + S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked + positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend + while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True`` + are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor + is provided, it will be added to the attention weight. + + Outputs: + - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, + E is the embedding dimension. + - attn_output_weights: :math:`(N, L, S)` where N is the batch size, + L is the target sequence length, S is the source sequence length. + """ + + tgt_len, bsz, embed_dim = query.size() + assert embed_dim == embed_dim_to_check + assert key.size(0) == value.size(0) and key.size(1) == value.size(1) + + head_dim = embed_dim // num_heads + assert ( + head_dim * num_heads == embed_dim + ), "embed_dim must be divisible by num_heads" + + scaling = float(head_dim) ** -0.5 + + if torch.equal(query, key) and torch.equal(key, value): + # self-attention + q, k, v = nn.functional.linear( + query, in_proj_weight, in_proj_bias + ).chunk(3, dim=-1) + + elif torch.equal(key, value): + # encoder-decoder attention + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = 0 + _end = embed_dim + _w = in_proj_weight[_start:_end, :] + if _b is not None: + _b = _b[_start:_end] + q = nn.functional.linear(query, _w, _b) + + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = embed_dim + _end = None + _w = in_proj_weight[_start:, :] + if _b is not None: + _b = _b[_start:] + k, v = nn.functional.linear(key, _w, _b).chunk(2, dim=-1) + + else: + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = 0 + _end = embed_dim + _w = in_proj_weight[_start:_end, :] + if _b is not None: + _b = _b[_start:_end] + q = nn.functional.linear(query, _w, _b) + + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = embed_dim + _end = embed_dim * 2 + _w = in_proj_weight[_start:_end, :] + if _b is not None: + _b = _b[_start:_end] + k = nn.functional.linear(key, _w, _b) + + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = embed_dim * 2 + _end = None + _w = in_proj_weight[_start:, :] + if _b is not None: + _b = _b[_start:] + v = nn.functional.linear(value, _w, _b) + + if attn_mask is not None: + assert ( + attn_mask.dtype == torch.float32 + or attn_mask.dtype == torch.float64 + or attn_mask.dtype == torch.float16 + or attn_mask.dtype == torch.uint8 + or attn_mask.dtype == torch.bool + ), "Only float, byte, and bool types are supported for attn_mask, not {}".format( + attn_mask.dtype + ) + if attn_mask.dtype == torch.uint8: + warnings.warn( + "Byte tensor for attn_mask is deprecated. Use bool tensor instead." 
+ ) + attn_mask = attn_mask.to(torch.bool) + + if attn_mask.dim() == 2: + attn_mask = attn_mask.unsqueeze(0) + if list(attn_mask.size()) != [1, query.size(0), key.size(0)]: + raise RuntimeError( + "The size of the 2D attn_mask is not correct." + ) + elif attn_mask.dim() == 3: + if list(attn_mask.size()) != [ + bsz * num_heads, + query.size(0), + key.size(0), + ]: + raise RuntimeError( + "The size of the 3D attn_mask is not correct." + ) + else: + raise RuntimeError( + "attn_mask's dimension {} is not supported".format( + attn_mask.dim() + ) + ) + # attn_mask's dim is 3 now. + + # convert ByteTensor key_padding_mask to bool + if ( + key_padding_mask is not None + and key_padding_mask.dtype == torch.uint8 + ): + warnings.warn( + "Byte tensor for key_padding_mask is deprecated. Use bool tensor instead." + ) + key_padding_mask = key_padding_mask.to(torch.bool) + + q = (q * scaling).contiguous().view(tgt_len, bsz, num_heads, head_dim) + k = k.contiguous().view(-1, bsz, num_heads, head_dim) + v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1) + + src_len = k.size(0) + + if key_padding_mask is not None: + assert key_padding_mask.size(0) == bsz, "{} == {}".format( + key_padding_mask.size(0), bsz + ) + assert key_padding_mask.size(1) == src_len, "{} == {}".format( + key_padding_mask.size(1), src_len + ) + + q = q.transpose(0, 1) # (batch, time1, head, d_k) + + pos_emb_bsz = pos_emb.size(0) + assert pos_emb_bsz in (1, bsz) # actually it is 1 + p = self.linear_pos(pos_emb).view(pos_emb_bsz, -1, num_heads, head_dim) + p = p.transpose(1, 2) # (batch, head, 2*time1-1, d_k) + + q_with_bias_u = (q + self._pos_bias_u()).transpose( + 1, 2 + ) # (batch, head, time1, d_k) + + q_with_bias_v = (q + self._pos_bias_v()).transpose( + 1, 2 + ) # (batch, head, time1, d_k) + + # compute attention score + # first compute matrix a and matrix c + # as described in "Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context" Section 3.3 + k = k.permute(1, 2, 3, 0) # (batch, head, d_k, time2) + matrix_ac = torch.matmul( + q_with_bias_u, k + ) # (batch, head, time1, time2) + + # compute matrix b and matrix d + matrix_bd = torch.matmul( + q_with_bias_v, p.transpose(-2, -1) + ) # (batch, head, time1, 2*time1-1) + matrix_bd = self.rel_shift(matrix_bd) + + attn_output_weights = ( + matrix_ac + matrix_bd + ) # (batch, head, time1, time2) + + attn_output_weights = attn_output_weights.view( + bsz * num_heads, tgt_len, -1 + ) + + assert list(attn_output_weights.size()) == [ + bsz * num_heads, + tgt_len, + src_len, + ] + + if attn_mask is not None: + if attn_mask.dtype == torch.bool: + attn_output_weights.masked_fill_(attn_mask, float("-inf")) + else: + attn_output_weights += attn_mask + + if key_padding_mask is not None: + attn_output_weights = attn_output_weights.view( + bsz, num_heads, tgt_len, src_len + ) + attn_output_weights = attn_output_weights.masked_fill( + key_padding_mask.unsqueeze(1).unsqueeze(2), + float("-inf"), + ) + attn_output_weights = attn_output_weights.view( + bsz * num_heads, tgt_len, src_len + ) + + attn_output_weights = nn.functional.softmax(attn_output_weights, dim=-1) + attn_output_weights = nn.functional.dropout( + attn_output_weights, p=dropout_p, training=training + ) + + attn_output = torch.bmm(attn_output_weights, v) + assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim] + attn_output = ( + attn_output.transpose(0, 1) + .contiguous() + .view(tgt_len, bsz, embed_dim) + ) + attn_output = nn.functional.linear( + attn_output, out_proj_weight, 
out_proj_bias + ) + + if need_weights: + # average attention weights over heads + attn_output_weights = attn_output_weights.view( + bsz, num_heads, tgt_len, src_len + ) + return attn_output, attn_output_weights.sum(dim=1) / num_heads + else: + return attn_output, None + + +class ConvolutionModule(nn.Module): + """ConvolutionModule in Conformer model. + Modified from https://github.com/espnet/espnet/blob/master/espnet/nets/pytorch_backend/conformer/convolution.py + + Args: + channels (int): The number of channels of conv layers. + kernel_size (int): Kernerl size of conv layers. + bias (bool): Whether to use bias in conv layers (default=True). + + """ + + def __init__( + self, channels: int, kernel_size: int, bias: bool = True + ) -> None: + """Construct an ConvolutionModule object.""" + super(ConvolutionModule, self).__init__() + # kernerl_size should be a odd number for 'SAME' padding + assert (kernel_size - 1) % 2 == 0 + + self.pointwise_conv1 = ScaledConv1d( + channels, + 2 * channels, + kernel_size=1, + stride=1, + padding=0, + bias=bias, + ) + + # after pointwise_conv1 we put x through a gated linear unit (nn.functional.glu). + # For most layers the normal rms value of channels of x seems to be in the range 1 to 4, + # but sometimes, for some reason, for layer 0 the rms ends up being very large, + # between 50 and 100 for different channels. This will cause very peaky and + # sparse derivatives for the sigmoid gating function, which will tend to make + # the loss function not learn effectively. (for most layers the average absolute values + # are in the range 0.5..9.0, and the average p(x>0), i.e. positive proportion, + # at the output of pointwise_conv1.output is around 0.35 to 0.45 for different + # layers, which likely breaks down as 0.5 for the "linear" half and + # 0.2 to 0.3 for the part that goes into the sigmoid. The idea is that if we + # constrain the rms values to a reasonable range via a constraint of max_abs=10.0, + # it will be in a better position to start learning something, i.e. to latch onto + # the correct range. + self.deriv_balancer1 = ActivationBalancer( + channel_dim=1, max_abs=10.0, min_positive=0.05, max_positive=1.0 + ) + + self.depthwise_conv = ScaledConv1d( + channels, + channels, + kernel_size, + stride=1, + padding=(kernel_size - 1) // 2, + groups=channels, + bias=bias, + ) + + self.deriv_balancer2 = ActivationBalancer( + channel_dim=1, min_positive=0.05, max_positive=1.0 + ) + + self.activation = DoubleSwish() + + self.pointwise_conv2 = ScaledConv1d( + channels, + channels, + kernel_size=1, + stride=1, + padding=0, + bias=bias, + initial_scale=0.25, + ) + + def forward(self, x: Tensor) -> Tensor: + """Compute convolution module. + + Args: + x: Input tensor (#time, batch, channels). + + Returns: + Tensor: Output tensor (#time, batch, channels). + + """ + # exchange the temporal dimension and the feature dimension + x = x.permute(1, 2, 0) # (#batch, channels, time). + + # GLU mechanism + x = self.pointwise_conv1(x) # (batch, 2*channels, time) + + x = self.deriv_balancer1(x) + x = nn.functional.glu(x, dim=1) # (batch, channels, time) + + # 1D Depthwise Conv + x = self.depthwise_conv(x) + + x = self.deriv_balancer2(x) + x = self.activation(x) + + x = self.pointwise_conv2(x) # (batch, channel, time) + + return x.permute(2, 0, 1) + + +if __name__ == "__main__": + feature_dim = 50 + c = Conformer(num_features=feature_dim, d_model=128, nhead=4) + batch_size = 5 + seq_len = 20 + # Just make sure the forward pass runs. 
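+    # warmup=0.5 below mimics a model that is halfway through its warm-up
+    # schedule (values >= 1.0 correspond to a fully warmed-up model); only
+    # successful execution is checked, the outputs are not inspected.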
+ f = c( + torch.randn(batch_size, seq_len, feature_dim), + torch.full((batch_size,), seq_len, dtype=torch.int64), + warmup=0.5, + ) diff --git a/egs/librispeech/ASR/conformer_ctc2/decode.py b/egs/librispeech/ASR/conformer_ctc2/decode.py new file mode 100755 index 000000000..8a4cad1ad --- /dev/null +++ b/egs/librispeech/ASR/conformer_ctc2/decode.py @@ -0,0 +1,996 @@ +#!/usr/bin/env python3 +# Copyright 2021 Xiaomi Corporation (Author: Liyong Guo, +# Fangjun Kuang, +# Quandong Wang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import argparse +import logging +from collections import defaultdict +from pathlib import Path +from typing import Dict, List, Optional, Tuple + +import k2 +import sentencepiece as spm +import torch +import torch.nn as nn +from asr_datamodule import LibriSpeechAsrDataModule +from conformer import Conformer + +from icefall.checkpoint import ( + average_checkpoints, + average_checkpoints_with_averaged_model, + find_checkpoints, + load_checkpoint, +) + +from icefall.bpe_graph_compiler import BpeCtcTrainingGraphCompiler +from icefall.decode import ( + get_lattice, + nbest_decoding, + nbest_oracle, + one_best_decoding, + rescore_with_attention_decoder, + rescore_with_n_best_list, + rescore_with_rnn_lm, + rescore_with_whole_lattice, +) +from icefall.env import get_env_info +from icefall.lexicon import Lexicon +from icefall.rnn_lm.model import RnnLmModel +from icefall.utils import ( + AttributeDict, + get_texts, + load_averaged_model, + setup_logger, + store_transcripts, + str2bool, + write_error_stats, +) + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--epoch", + type=int, + default=77, + help="""It specifies the checkpoint to use for decoding. + Note: Epoch counts from 1. + You can specify --avg to use more checkpoints for model averaging.""", + ) + + parser.add_argument( + "--iter", + type=int, + default=0, + help="""If positive, --epoch is ignored and it + will use the checkpoint exp_dir/checkpoint-iter.pt. + You can specify --avg to use more checkpoints for model averaging. + """, + ) + + parser.add_argument( + "--avg", + type=int, + default=15, + help="Number of checkpoints to average. Automatically select " + "consecutive checkpoints before the checkpoint specified by " + "'--epoch' and '--iter'", + ) + + parser.add_argument( + "--method", + type=str, + default="attention-decoder", + help="""Decoding method. + Supported values are: + - (0) ctc-decoding. Use CTC decoding. It uses a sentence piece + model, i.e., lang_dir/bpe.model, to convert word pieces to words. + It needs neither a lexicon nor an n-gram LM. + - (1) ctc-greedy-search. It only use CTC output and a sentence piece + model for decoding. It produces the same results with ctc-decoding. + - (2) 1best. Extract the best path from the decoding lattice as the + decoding result. + - (3) nbest. 
Extract n paths from the decoding lattice; the path + with the highest score is the decoding result. + - (4) nbest-rescoring. Extract n paths from the decoding lattice, + rescore them with an n-gram LM (e.g., a 4-gram LM), the path with + the highest score is the decoding result. + - (5) whole-lattice-rescoring. Rescore the decoding lattice with an + n-gram LM (e.g., a 4-gram LM), the best path of rescored lattice + is the decoding result. + - (6) attention-decoder. Extract n paths from the LM rescored + lattice, the path with the highest score is the decoding result. + - (7) rnn-lm. Rescoring with attention-decoder and RNN LM. We assume + you have trained an RNN LM using ./rnn_lm/train.py + - (8) nbest-oracle. Its WER is the lower bound of any n-best + rescoring method can achieve. Useful for debugging n-best + rescoring method. + """, + ) + + parser.add_argument( + "--use-averaged-model", + type=str2bool, + default=True, + help="Whether to load averaged model. Currently it only supports " + "using --epoch. If True, it would decode with the averaged model " + "over the epoch range from `epoch-avg` (excluded) to `epoch`." + "Actually only the models with epoch number of `epoch-avg` and " + "`epoch` are loaded for averaging. ", + ) + + parser.add_argument( + "--num-decoder-layers", + type=int, + default=6, + help="""Number of decoder layer of transformer decoder. + Setting this to 0 will not create the decoder at all (pure CTC model) + """, + ) + + parser.add_argument( + "--num-paths", + type=int, + default=100, + help="""Number of paths for n-best based decoding method. + Used only when "method" is one of the following values: + nbest, nbest-rescoring, attention-decoder, rnn-lm, and nbest-oracle + """, + ) + + parser.add_argument( + "--nbest-scale", + type=float, + default=0.5, + help="""The scale to be applied to `lattice.scores`. + It's needed if you use any kinds of n-best based rescoring. + Used only when "method" is one of the following values: + nbest, nbest-rescoring, attention-decoder, rnn-lm, and nbest-oracle + A smaller value results in more unique paths. + """, + ) + + parser.add_argument( + "--exp-dir", + type=str, + default="conformer_ctc2/exp", + help="The experiment dir", + ) + + parser.add_argument( + "--lang-dir", + type=str, + default="data/lang_bpe_500", + help="The lang dir", + ) + + parser.add_argument( + "--lm-dir", + type=str, + default="data/lm", + help="""The n-gram LM dir. + It should contain either G_4_gram.pt or G_4_gram.fst.txt + """, + ) + + parser.add_argument( + "--rnn-lm-exp-dir", + type=str, + default="rnn_lm/exp", + help="""Used only when --method is rnn-lm. + It specifies the path to RNN LM exp dir. + """, + ) + + parser.add_argument( + "--rnn-lm-epoch", + type=int, + default=7, + help="""Used only when --method is rnn-lm. + It specifies the checkpoint to use. + """, + ) + + parser.add_argument( + "--rnn-lm-avg", + type=int, + default=2, + help="""Used only when --method is rnn-lm. + It specifies the number of checkpoints to average. 
+ """, + ) + + parser.add_argument( + "--rnn-lm-embedding-dim", + type=int, + default=2048, + help="Embedding dim of the model", + ) + + parser.add_argument( + "--rnn-lm-hidden-dim", + type=int, + default=2048, + help="Hidden dim of the model", + ) + + parser.add_argument( + "--rnn-lm-num-layers", + type=int, + default=4, + help="Number of RNN layers the model", + ) + parser.add_argument( + "--rnn-lm-tie-weights", + type=str2bool, + default=False, + help="""True to share the weights between the input embedding layer and the + last output linear layer + """, + ) + + return parser + + +def get_params() -> AttributeDict: + params = AttributeDict( + { + # parameters for conformer + "subsampling_factor": 4, + "feature_dim": 80, + "nhead": 8, + "dim_feedforward": 2048, + "encoder_dim": 512, + "num_encoder_layers": 12, + # parameters for decoding + "search_beam": 20, + "output_beam": 8, + "min_active_states": 30, + "max_active_states": 10000, + "use_double_scores": True, + "env_info": get_env_info(), + } + ) + return params + + +def ctc_greedy_search( + nnet_output: torch.Tensor, + memory: torch.Tensor, + memory_key_padding_mask: torch.Tensor, +) -> List[List[int]]: + """Apply CTC greedy search + + Args: + speech (torch.Tensor): (batch, max_len, feat_dim) + speech_length (torch.Tensor): (batch, ) + Returns: + List[List[int]]: best path result + """ + batch_size = memory.shape[1] + # Let's assume B = batch_size + encoder_out = memory + encoder_mask = memory_key_padding_mask + maxlen = encoder_out.size(0) + + ctc_probs = nnet_output # (B, maxlen, vocab_size) + topk_prob, topk_index = ctc_probs.topk(1, dim=2) # (B, maxlen, 1) + topk_index = topk_index.view(batch_size, maxlen) # (B, maxlen) + topk_index = topk_index.masked_fill_(encoder_mask, 0) # (B, maxlen) + hyps = [hyp.tolist() for hyp in topk_index] + scores = topk_prob.max(1) + hyps = [remove_duplicates_and_blank(hyp) for hyp in hyps] + return hyps, scores + + +def remove_duplicates_and_blank(hyp: List[int]) -> List[int]: + # from https://github.com/wenet-e2e/wenet/blob/main/wenet/utils/common.py + new_hyp: List[int] = [] + cur = 0 + while cur < len(hyp): + if hyp[cur] != 0: + new_hyp.append(hyp[cur]) + prev = cur + while cur < len(hyp) and hyp[cur] == hyp[prev]: + cur += 1 + return new_hyp + + +def decode_one_batch( + params: AttributeDict, + model: nn.Module, + rnn_lm_model: Optional[nn.Module], + HLG: Optional[k2.Fsa], + H: Optional[k2.Fsa], + bpe_model: Optional[spm.SentencePieceProcessor], + batch: dict, + word_table: k2.SymbolTable, + sos_id: int, + eos_id: int, + G: Optional[k2.Fsa] = None, +) -> Dict[str, List[List[str]]]: + """Decode one batch and return the result in a dict. The dict has the + following format: + + - key: It indicates the setting used for decoding. For example, + if no rescoring is used, the key is the string `no_rescore`. + If LM rescoring is used, the key is the string `lm_scale_xxx`, + where `xxx` is the value of `lm_scale`. An example key is + `lm_scale_0.7` + - value: It contains the decoding result. `len(value)` equals to + batch size. `value[i]` is the decoding result for the i-th + utterance in the given batch. + Args: + params: + It's the return value of :func:`get_params`. + + - params.method is "1best", it uses 1best decoding without LM rescoring. + - params.method is "nbest", it uses nbest decoding without LM rescoring. + - params.method is "nbest-rescoring", it uses nbest LM rescoring. + - params.method is "whole-lattice-rescoring", it uses whole lattice LM + rescoring. + + model: + The neural model. 
+ rnn_lm_model: + The neural model for RNN LM. + HLG: + The decoding graph. Used only when params.method is NOT ctc-decoding. + H: + The ctc topo. Used only when params.method is ctc-decoding. + bpe_model: + The BPE model. Used only when params.method is ctc-decoding. + batch: + It is the return value from iterating + `lhotse.dataset.K2SpeechRecognitionDataset`. See its documentation + for the format of the `batch`. + word_table: + The word symbol table. + sos_id: + The token ID of the SOS. + eos_id: + The token ID of the EOS. + G: + An LM. It is not None when params.method is "nbest-rescoring" + or "whole-lattice-rescoring". In general, the G in HLG + is a 3-gram LM, while this G is a 4-gram LM. + Returns: + Return the decoding result. See above description for the format of + the returned dict. Note: If it decodes to nothing, then return None. + """ + if HLG is not None: + device = HLG.device + else: + device = H.device + feature = batch["inputs"] + assert feature.ndim == 3 + feature = feature.to(device) + # at entry, feature is (N, T, C) + + supervisions = batch["supervisions"] + + nnet_output, memory, memory_key_padding_mask = model(feature, supervisions) + # nnet_output is (N, T, C) + + supervision_segments = torch.stack( + ( + supervisions["sequence_idx"], + torch.div( + supervisions["start_frame"], + params.subsampling_factor, + rounding_mode="trunc", + ), + torch.div( + supervisions["num_frames"], + params.subsampling_factor, + rounding_mode="trunc", + ), + ), + 1, + ).to(torch.int32) + + if H is None: + assert HLG is not None + decoding_graph = HLG + else: + assert HLG is None + assert bpe_model is not None + decoding_graph = H + + lattice = get_lattice( + nnet_output=nnet_output, + decoding_graph=decoding_graph, + supervision_segments=supervision_segments, + search_beam=params.search_beam, + output_beam=params.output_beam, + min_active_states=params.min_active_states, + max_active_states=params.max_active_states, + subsampling_factor=params.subsampling_factor, + ) + + if params.method == "ctc-decoding": + best_path = one_best_decoding( + lattice=lattice, use_double_scores=params.use_double_scores + ) + # Note: `best_path.aux_labels` contains token IDs, not word IDs + # since we are using H, not HLG here. + # + # token_ids is a lit-of-list of IDs + token_ids = get_texts(best_path) + + # hyps is a list of str, e.g., ['xxx yyy zzz', ...] + hyps = bpe_model.decode(token_ids) + + # hyps is a list of list of str, e.g., [['xxx', 'yyy', 'zzz'], ... ] + hyps = [s.split() for s in hyps] + key = "ctc-decoding" + return {key: hyps} + + if params.method == "ctc-greedy-search": + hyps, _ = ctc_greedy_search( + nnet_output, + memory, + memory_key_padding_mask, + ) + + # hyps is a list of str, e.g., ['xxx yyy zzz', ...] + hyps = bpe_model.decode(hyps) + + # hyps is a list of list of str, e.g., [['xxx', 'yyy', 'zzz'], ... ] + hyps = [s.split() for s in hyps] + key = "ctc-greedy-search" + return {key: hyps} + + if params.method == "nbest-oracle": + # Note: You can also pass rescored lattices to it. + # We choose the HLG decoded lattice for speed reasons + # as HLG decoding is faster and the oracle WER + # is only slightly worse than that of rescored lattices. 
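+        # nbest_oracle draws params.num_paths paths from the lattice (with
+        # scores scaled by params.nbest_scale) and keeps the one closest to
+        # the reference transcript, so the resulting WER is the lower bound
+        # that any n-best rescoring method could reach with these paths.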
+ best_path = nbest_oracle( + lattice=lattice, + num_paths=params.num_paths, + ref_texts=supervisions["text"], + word_table=word_table, + nbest_scale=params.nbest_scale, + oov="", + ) + hyps = get_texts(best_path) + hyps = [[word_table[i] for i in ids] for ids in hyps] + key = f"oracle_{params.num_paths}_nbest_scale_{params.nbest_scale}" # noqa + return {key: hyps} + + if params.method in ["1best", "nbest"]: + if params.method == "1best": + best_path = one_best_decoding( + lattice=lattice, use_double_scores=params.use_double_scores + ) + key = "no_rescore" + else: + best_path = nbest_decoding( + lattice=lattice, + num_paths=params.num_paths, + use_double_scores=params.use_double_scores, + nbest_scale=params.nbest_scale, + ) + key = f"no_rescore-nbest-scale-{params.nbest_scale}-{params.num_paths}" # noqa + + hyps = get_texts(best_path) + hyps = [[word_table[i] for i in ids] for ids in hyps] + return {key: hyps} + + assert params.method in [ + "nbest-rescoring", + "whole-lattice-rescoring", + "attention-decoder", + "rnn-lm", + ] + + lm_scale_list = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7] + lm_scale_list += [0.8, 0.9, 1.0, 1.1, 1.2, 1.3] + lm_scale_list += [1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0] + + if params.method == "nbest-rescoring": + best_path_dict = rescore_with_n_best_list( + lattice=lattice, + G=G, + num_paths=params.num_paths, + lm_scale_list=lm_scale_list, + nbest_scale=params.nbest_scale, + ) + elif params.method == "whole-lattice-rescoring": + best_path_dict = rescore_with_whole_lattice( + lattice=lattice, + G_with_epsilon_loops=G, + lm_scale_list=lm_scale_list, + ) + elif params.method == "attention-decoder": + # lattice uses a 3-gram Lm. We rescore it with a 4-gram LM. + rescored_lattice = rescore_with_whole_lattice( + lattice=lattice, + G_with_epsilon_loops=G, + lm_scale_list=None, + ) + # TODO: pass `lattice` instead of `rescored_lattice` to + # `rescore_with_attention_decoder` + + best_path_dict = rescore_with_attention_decoder( + lattice=rescored_lattice, + num_paths=params.num_paths, + model=model, + memory=memory, + memory_key_padding_mask=memory_key_padding_mask, + sos_id=sos_id, + eos_id=eos_id, + nbest_scale=params.nbest_scale, + ) + elif params.method == "rnn-lm": + # lattice uses a 3-gram Lm. We rescore it with a 4-gram LM. + rescored_lattice = rescore_with_whole_lattice( + lattice=lattice, + G_with_epsilon_loops=G, + lm_scale_list=None, + ) + + best_path_dict = rescore_with_rnn_lm( + lattice=rescored_lattice, + num_paths=params.num_paths, + rnn_lm_model=rnn_lm_model, + model=model, + memory=memory, + memory_key_padding_mask=memory_key_padding_mask, + sos_id=sos_id, + eos_id=eos_id, + blank_id=0, + nbest_scale=params.nbest_scale, + ) + else: + assert False, f"Unsupported decoding method: {params.method}" + + ans = dict() + if best_path_dict is not None: + for lm_scale_str, best_path in best_path_dict.items(): + hyps = get_texts(best_path) + hyps = [[word_table[i] for i in ids] for ids in hyps] + ans[lm_scale_str] = hyps + else: + ans = None + return ans + + +def decode_dataset( + dl: torch.utils.data.DataLoader, + params: AttributeDict, + model: nn.Module, + rnn_lm_model: Optional[nn.Module], + HLG: Optional[k2.Fsa], + H: Optional[k2.Fsa], + bpe_model: Optional[spm.SentencePieceProcessor], + word_table: k2.SymbolTable, + sos_id: int, + eos_id: int, + G: Optional[k2.Fsa] = None, +) -> Dict[str, List[Tuple[List[str], List[str]]]]: + """Decode dataset. + + Args: + dl: + PyTorch's dataloader containing the dataset to decode. + params: + It is returned by :func:`get_params`. 
+ model: + The neural model. + rnn_lm_model: + The neural model for RNN LM. + HLG: + The decoding graph. Used only when params.method is NOT ctc-decoding. + H: + The ctc topo. Used only when params.method is ctc-decoding. + bpe_model: + The BPE model. Used only when params.method is ctc-decoding. + word_table: + It is the word symbol table. + sos_id: + The token ID for SOS. + eos_id: + The token ID for EOS. + G: + An LM. It is not None when params.method is "nbest-rescoring" + or "whole-lattice-rescoring". In general, the G in HLG + is a 3-gram LM, while this G is a 4-gram LM. + Returns: + Return a dict, whose key may be "no-rescore" if no LM rescoring + is used, or it may be "lm_scale_0.7" if LM rescoring is used. + Its value is a list of tuples. Each tuple contains two elements: + The first is the reference transcript, and the second is the + predicted result. + """ + num_cuts = 0 + + try: + num_batches = len(dl) + except TypeError: + num_batches = "?" + + results = defaultdict(list) + for batch_idx, batch in enumerate(dl): + texts = batch["supervisions"]["text"] + + hyps_dict = decode_one_batch( + params=params, + model=model, + rnn_lm_model=rnn_lm_model, + HLG=HLG, + H=H, + bpe_model=bpe_model, + batch=batch, + word_table=word_table, + G=G, + sos_id=sos_id, + eos_id=eos_id, + ) + + if hyps_dict is not None: + for lm_scale, hyps in hyps_dict.items(): + this_batch = [] + assert len(hyps) == len(texts) + for hyp_words, ref_text in zip(hyps, texts): + ref_words = ref_text.split() + this_batch.append((ref_words, hyp_words)) + + results[lm_scale].extend(this_batch) + else: + assert ( + len(results) > 0 + ), "It should not decode to empty in the first batch!" + this_batch = [] + hyp_words = [] + for ref_text in texts: + ref_words = ref_text.split() + this_batch.append((ref_words, hyp_words)) + + for lm_scale in results.keys(): + results[lm_scale].extend(this_batch) + + num_cuts += len(texts) + + if batch_idx % 100 == 0: + batch_str = f"{batch_idx}/{num_batches}" + + logging.info( + f"batch {batch_str}, cuts processed until now is {num_cuts}" + ) + return results + + +def save_results( + params: AttributeDict, + test_set_name: str, + results_dict: Dict[str, List[Tuple[List[int], List[int]]]], +): + if params.method in ("attention-decoder", "rnn-lm"): + # Set it to False since there are too many logs. + enable_log = False + else: + enable_log = True + test_set_wers = dict() + for key, results in results_dict.items(): + recog_path = params.exp_dir / f"recogs-{test_set_name}-{key}.txt" + store_transcripts(filename=recog_path, texts=results) + if enable_log: + logging.info(f"The transcripts are stored in {recog_path}") + + # The following prints out WERs, per-word error statistics and aligned + # ref/hyp pairs. 
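+            # write_error_stats() returns the WER for this decoding setting;
+            # the per-setting WERs collected in test_set_wers are sorted and
+            # written to the wer-summary file at the end of this function.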
+ errs_filename = params.exp_dir / f"errs-{test_set_name}-{key}.txt" + with open(errs_filename, "w") as f: + wer = write_error_stats( + f, f"{test_set_name}-{key}", results, enable_log=enable_log + ) + test_set_wers[key] = wer + + if enable_log: + logging.info( + "Wrote detailed error stats to {}".format(errs_filename) + ) + + test_set_wers = sorted(test_set_wers.items(), key=lambda x: x[1]) + errs_info = params.exp_dir / f"wer-summary-{test_set_name}.txt" + with open(errs_info, "w") as f: + print("settings\tWER", file=f) + for key, val in test_set_wers: + print("{}\t{}".format(key, val), file=f) + + s = "\nFor {}, WER of different settings are:\n".format(test_set_name) + note = "\tbest for {}".format(test_set_name) + for key, val in test_set_wers: + s += "{}\t{}{}\n".format(key, val, note) + note = "" + logging.info(s) + + +@torch.no_grad() +def main(): + parser = get_parser() + LibriSpeechAsrDataModule.add_arguments(parser) + args = parser.parse_args() + args.exp_dir = Path(args.exp_dir) + args.lang_dir = Path(args.lang_dir) + args.lm_dir = Path(args.lm_dir) + + params = get_params() + params.update(vars(args)) + + setup_logger(f"{params.exp_dir}/log-{params.method}/log-decode") + logging.info("Decoding started") + logging.info(params) + + lexicon = Lexicon(params.lang_dir) + max_token_id = max(lexicon.tokens) + num_classes = max_token_id + 1 # +1 for the blank + + device = torch.device("cpu") + if torch.cuda.is_available(): + device = torch.device("cuda", 0) + + logging.info(f"device: {device}") + + graph_compiler = BpeCtcTrainingGraphCompiler( + params.lang_dir, + device=device, + sos_token="", + eos_token="", + ) + sos_id = graph_compiler.sos_id + eos_id = graph_compiler.eos_id + + params.num_classes = num_classes + params.sos_id = sos_id + params.eos_id = eos_id + + if params.method == "ctc-decoding" or params.method == "ctc-greedy-search": + HLG = None + H = k2.ctc_topo( + max_token=max_token_id, + modified=False, + device=device, + ) + bpe_model = spm.SentencePieceProcessor() + bpe_model.load(str(params.lang_dir / "bpe.model")) + else: + H = None + bpe_model = None + HLG = k2.Fsa.from_dict( + torch.load(f"{params.lang_dir}/HLG.pt", map_location=device) + ) + assert HLG.requires_grad is False + + if not hasattr(HLG, "lm_scores"): + HLG.lm_scores = HLG.scores.clone() + + if params.method in ( + "nbest-rescoring", + "whole-lattice-rescoring", + "attention-decoder", + "rnn-lm", + ): + if not (params.lm_dir / "G_4_gram.pt").is_file(): + logging.info("Loading G_4_gram.fst.txt") + logging.warning("It may take 8 minutes.") + with open(params.lm_dir / "G_4_gram.fst.txt") as f: + first_word_disambig_id = lexicon.word_table["#0"] + + G = k2.Fsa.from_openfst(f.read(), acceptor=False) + # G.aux_labels is not needed in later computations, so + # remove it here. + del G.aux_labels + # CAUTION: The following line is crucial. + # Arcs entering the back-off state have label equal to #0. + # We have to change it to 0 here. + G.labels[G.labels >= first_word_disambig_id] = 0 + # See https://github.com/k2-fsa/k2/issues/874 + # for why we need to set G.properties to None + G.__dict__["_properties"] = None + G = k2.Fsa.from_fsas([G]).to(device) + G = k2.arc_sort(G) + # Save a dummy value so that it can be loaded in C++. + # See https://github.com/pytorch/pytorch/issues/67902 + # for why we need to do this. 
+ G.dummy = 1 + + torch.save(G.as_dict(), params.lm_dir / "G_4_gram.pt") + else: + logging.info("Loading pre-compiled G_4_gram.pt") + d = torch.load(params.lm_dir / "G_4_gram.pt", map_location=device) + G = k2.Fsa.from_dict(d) + + if params.method in [ + "whole-lattice-rescoring", + "attention-decoder", + "rnn-lm", + ]: + # Add epsilon self-loops to G as we will compose + # it with the whole lattice later + G = k2.add_epsilon_self_loops(G) + G = k2.arc_sort(G) + G = G.to(device) + + # G.lm_scores is used to replace HLG.lm_scores during + # LM rescoring. + G.lm_scores = G.scores.clone() + else: + G = None + + model = Conformer( + num_features=params.feature_dim, + nhead=params.nhead, + d_model=params.encoder_dim, + num_classes=num_classes, + subsampling_factor=params.subsampling_factor, + num_encoder_layers=params.num_encoder_layers, + num_decoder_layers=params.num_decoder_layers, + ) + + if not params.use_averaged_model: + if params.iter > 0: + filenames = find_checkpoints( + params.exp_dir, iteration=-params.iter + )[: params.avg] + if len(filenames) == 0: + raise ValueError( + f"No checkpoints found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + elif len(filenames) < params.avg: + raise ValueError( + f"Not enough checkpoints ({len(filenames)}) found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + logging.info(f"averaging {filenames}") + model.to(device) + model.load_state_dict(average_checkpoints(filenames, device=device)) + elif params.avg == 1: + load_checkpoint(f"{params.exp_dir}/epoch-{params.epoch}.pt", model) + else: + start = params.epoch - params.avg + 1 + filenames = [] + for i in range(start, params.epoch + 1): + if i >= 1: + filenames.append(f"{params.exp_dir}/epoch-{i}.pt") + logging.info(f"averaging {filenames}") + model.to(device) + model.load_state_dict(average_checkpoints(filenames, device=device)) + else: + if params.iter > 0: + filenames = find_checkpoints( + params.exp_dir, iteration=-params.iter + )[: params.avg + 1] + if len(filenames) == 0: + raise ValueError( + f"No checkpoints found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + elif len(filenames) < params.avg + 1: + raise ValueError( + f"Not enough checkpoints ({len(filenames)}) found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + filename_start = filenames[-1] + filename_end = filenames[0] + logging.info( + "Calculating the averaged model over iteration checkpoints" + f" from {filename_start} (excluded) to {filename_end}" + ) + model.to(device) + model.load_state_dict( + average_checkpoints_with_averaged_model( + filename_start=filename_start, + filename_end=filename_end, + device=device, + ) + ) + else: + assert params.avg > 0, params.avg + start = params.epoch - params.avg + assert start >= 1, start + filename_start = f"{params.exp_dir}/epoch-{start}.pt" + filename_end = f"{params.exp_dir}/epoch-{params.epoch}.pt" + logging.info( + f"Calculating the averaged model over epoch range from " + f"{start} (excluded) to {params.epoch}" + ) + model.to(device) + model.load_state_dict( + average_checkpoints_with_averaged_model( + filename_start=filename_start, + filename_end=filename_end, + device=device, + ) + ) + + model.to(device) + model.eval() + num_param = sum([p.numel() for p in model.parameters()]) + logging.info(f"Number of model parameters: {num_param}") + + rnn_lm_model = None + if params.method == "rnn-lm": + rnn_lm_model = RnnLmModel( + vocab_size=params.num_classes, + embedding_dim=params.rnn_lm_embedding_dim, + hidden_dim=params.rnn_lm_hidden_dim, + 
num_layers=params.rnn_lm_num_layers, + tie_weights=params.rnn_lm_tie_weights, + ) + if params.rnn_lm_avg == 1: + load_checkpoint( + f"{params.rnn_lm_exp_dir}/epoch-{params.rnn_lm_epoch}.pt", + rnn_lm_model, + ) + rnn_lm_model.to(device) + else: + rnn_lm_model = load_averaged_model( + params.rnn_lm_exp_dir, + rnn_lm_model, + params.rnn_lm_epoch, + params.rnn_lm_avg, + device, + ) + rnn_lm_model.eval() + + librispeech = LibriSpeechAsrDataModule(args) + + test_clean_cuts = librispeech.test_clean_cuts() + test_other_cuts = librispeech.test_other_cuts() + + test_clean_dl = librispeech.test_dataloaders(test_clean_cuts) + test_other_dl = librispeech.test_dataloaders(test_other_cuts) + + test_sets = ["test-clean", "test-other"] + test_dl = [test_clean_dl, test_other_dl] + + for test_set, test_dl in zip(test_sets, test_dl): + results_dict = decode_dataset( + dl=test_dl, + params=params, + model=model, + rnn_lm_model=rnn_lm_model, + HLG=HLG, + H=H, + bpe_model=bpe_model, + word_table=lexicon.word_table, + G=G, + sos_id=sos_id, + eos_id=eos_id, + ) + + save_results( + params=params, test_set_name=test_set, results_dict=results_dict + ) + + logging.info("Done!") + + +torch.set_num_threads(1) +torch.set_num_interop_threads(1) + +if __name__ == "__main__": + main() diff --git a/egs/librispeech/ASR/conformer_ctc2/export.py b/egs/librispeech/ASR/conformer_ctc2/export.py new file mode 100755 index 000000000..584b3c3fc --- /dev/null +++ b/egs/librispeech/ASR/conformer_ctc2/export.py @@ -0,0 +1,281 @@ +#!/usr/bin/env python3 +# +# Copyright 2021 Xiaomi Corporation (Author: Fangjun Kuang, +# Quandong Wang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script converts several saved checkpoints +# to a single one using model averaging. +""" +Usage: +./conformer_ctc2/export.py \ + --exp-dir ./conformer_ctc2/exp \ + --epoch 20 \ + --avg 10 + +It will generate a file exp_dir/pretrained.pt + +To use the generated file with `conformer_ctc2/decode.py`, +you can do: + + cd /path/to/exp_dir + ln -s pretrained.pt epoch-9999.pt + + cd /path/to/egs/librispeech/ASR + ./conformer_ctc2/decode.py \ + --exp-dir ./conformer_ctc2/exp \ + --epoch 9999 \ + --avg 1 \ + --max-duration 100 +""" + +import argparse +import logging +from pathlib import Path + +import torch +from decode import get_params + +from icefall.checkpoint import ( + average_checkpoints, + average_checkpoints_with_averaged_model, + find_checkpoints, + load_checkpoint, +) +from conformer import Conformer + +from icefall.utils import str2bool +from icefall.lexicon import Lexicon + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--epoch", + type=int, + default=28, + help="""It specifies the checkpoint to use for averaging. + Note: Epoch counts from 0. 
+ You can specify --avg to use more checkpoints for model averaging.""", + ) + + parser.add_argument( + "--iter", + type=int, + default=0, + help="""If positive, --epoch is ignored and it + will use the checkpoint exp_dir/checkpoint-iter.pt. + You can specify --avg to use more checkpoints for model averaging. + """, + ) + + parser.add_argument( + "--avg", + type=int, + default=15, + help="Number of checkpoints to average. Automatically select " + "consecutive checkpoints before the checkpoint specified by " + "'--epoch' and '--iter'", + ) + + parser.add_argument( + "--use-averaged-model", + type=str2bool, + default=True, + help="Whether to load averaged model. Currently it only supports " + "using --epoch. If True, it would decode with the averaged model " + "over the epoch range from `epoch-avg` (excluded) to `epoch`." + "Actually only the models with epoch number of `epoch-avg` and " + "`epoch` are loaded for averaging. ", + ) + + parser.add_argument( + "--num-decoder-layers", + type=int, + default=6, + help="""Number of decoder layer of transformer decoder. + Setting this to 0 will not create the decoder at all (pure CTC model) + """, + ) + + parser.add_argument( + "--exp-dir", + type=str, + default="conformer_ctc2/exp", + help="""It specifies the directory where all training related + files, e.g., checkpoints, log, etc, are saved + """, + ) + + parser.add_argument( + "--lang-dir", + type=str, + default="data/lang_bpe_500", + help="The lang dir", + ) + + parser.add_argument( + "--jit", + type=str2bool, + default=True, + help="""True to save a model after applying torch.jit.script. + """, + ) + + return parser + + +def main(): + args = get_parser().parse_args() + args.exp_dir = Path(args.exp_dir) + args.lang_dir = Path(args.lang_dir) + + params = get_params() + params.update(vars(args)) + + lexicon = Lexicon(params.lang_dir) + max_token_id = max(lexicon.tokens) + num_classes = max_token_id + 1 # +1 for the blank + + device = torch.device("cpu") + if torch.cuda.is_available(): + device = torch.device("cuda", 0) + + logging.info(f"device: {device}") + + logging.info(params) + + logging.info("About to create model") + + model = Conformer( + num_features=params.feature_dim, + nhead=params.nhead, + d_model=params.encoder_dim, + num_classes=num_classes, + subsampling_factor=params.subsampling_factor, + num_encoder_layers=params.num_encoder_layers, + num_decoder_layers=params.num_decoder_layers, + ) + + model.to(device) + + if not params.use_averaged_model: + if params.iter > 0: + filenames = find_checkpoints( + params.exp_dir, iteration=-params.iter + )[: params.avg] + if len(filenames) == 0: + raise ValueError( + f"No checkpoints found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + elif len(filenames) < params.avg: + raise ValueError( + f"Not enough checkpoints ({len(filenames)}) found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + logging.info(f"averaging {filenames}") + model.to(device) + model.load_state_dict(average_checkpoints(filenames, device=device)) + elif params.avg == 1: + load_checkpoint(f"{params.exp_dir}/epoch-{params.epoch}.pt", model) + else: + start = params.epoch - params.avg + 1 + filenames = [] + for i in range(start, params.epoch + 1): + if i >= 1: + filenames.append(f"{params.exp_dir}/epoch-{i}.pt") + logging.info(f"averaging {filenames}") + model.to(device) + model.load_state_dict(average_checkpoints(filenames, device=device)) + else: + if params.iter > 0: + filenames = find_checkpoints( + params.exp_dir, iteration=-params.iter + )[: params.avg + 
1] + if len(filenames) == 0: + raise ValueError( + f"No checkpoints found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + elif len(filenames) < params.avg + 1: + raise ValueError( + f"Not enough checkpoints ({len(filenames)}) found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + filename_start = filenames[-1] + filename_end = filenames[0] + logging.info( + "Calculating the averaged model over iteration checkpoints" + f" from {filename_start} (excluded) to {filename_end}" + ) + model.to(device) + model.load_state_dict( + average_checkpoints_with_averaged_model( + filename_start=filename_start, + filename_end=filename_end, + device=device, + ) + ) + else: + assert params.avg > 0, params.avg + start = params.epoch - params.avg + assert start >= 1, start + filename_start = f"{params.exp_dir}/epoch-{start}.pt" + filename_end = f"{params.exp_dir}/epoch-{params.epoch}.pt" + logging.info( + f"Calculating the averaged model over epoch range from " + f"{start} (excluded) to {params.epoch}" + ) + model.to(device) + model.load_state_dict( + average_checkpoints_with_averaged_model( + filename_start=filename_start, + filename_end=filename_end, + device=device, + ) + ) + + model.eval() + + model.to("cpu") + model.eval() + + if params.jit: + logging.info("Using torch.jit.script") + model = torch.jit.script(model) + filename = params.exp_dir / "cpu_jit.pt" + model.save(str(filename)) + logging.info(f"Saved to {filename}") + else: + logging.info("Not using torch.jit.script") + # Save it using a format so that it can be loaded + # by :func:`load_checkpoint` + filename = params.exp_dir / "pretrained.pt" + torch.save({"model": model.state_dict()}, str(filename)) + logging.info(f"Saved to {filename}") + + +if __name__ == "__main__": + formatter = ( + "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s" + ) + + logging.basicConfig(format=formatter, level=logging.INFO) + main() diff --git a/egs/librispeech/ASR/conformer_ctc2/label_smoothing.py b/egs/librispeech/ASR/conformer_ctc2/label_smoothing.py new file mode 120000 index 000000000..08734abd7 --- /dev/null +++ b/egs/librispeech/ASR/conformer_ctc2/label_smoothing.py @@ -0,0 +1 @@ +../conformer_ctc/label_smoothing.py \ No newline at end of file diff --git a/egs/librispeech/ASR/conformer_ctc2/optim.py b/egs/librispeech/ASR/conformer_ctc2/optim.py new file mode 120000 index 000000000..e2deb4492 --- /dev/null +++ b/egs/librispeech/ASR/conformer_ctc2/optim.py @@ -0,0 +1 @@ +../pruned_transducer_stateless2/optim.py \ No newline at end of file diff --git a/egs/librispeech/ASR/conformer_ctc2/scaling.py b/egs/librispeech/ASR/conformer_ctc2/scaling.py new file mode 120000 index 000000000..09d802cc4 --- /dev/null +++ b/egs/librispeech/ASR/conformer_ctc2/scaling.py @@ -0,0 +1 @@ +../pruned_transducer_stateless2/scaling.py \ No newline at end of file diff --git a/egs/librispeech/ASR/conformer_ctc2/subsampling.py b/egs/librispeech/ASR/conformer_ctc2/subsampling.py new file mode 100644 index 000000000..3fcb4196f --- /dev/null +++ b/egs/librispeech/ASR/conformer_ctc2/subsampling.py @@ -0,0 +1,121 @@ +#!/usr/bin/env python3 +# Copyright (c) 2021 University of Chinese Academy of Sciences (author: Han Zhu) +# 2022 Xiaomi Corporation (author: Quandong Wang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import torch +from scaling import ( + ActivationBalancer, + BasicNorm, + DoubleSwish, + ScaledConv2d, + ScaledLinear, +) +from torch import nn + + +class Conv2dSubsampling(nn.Module): + """Convolutional 2D subsampling (to 1/4 length). + + Convert an input of shape (N, T, idim) to an output + with shape (N, T', odim), where + T' = ((T-1)//2 - 1)//2, which approximates T' == T//4 + + It is based on + https://github.com/espnet/espnet/blob/master/espnet/nets/pytorch_backend/transformer/subsampling.py # noqa + """ + + def __init__( + self, + in_channels: int, + out_channels: int, + layer1_channels: int = 8, + layer2_channels: int = 32, + layer3_channels: int = 128, + ) -> None: + """ + Args: + in_channels: + Number of channels in. The input shape is (N, T, in_channels). + Caution: It requires: T >=7, in_channels >=7 + out_channels + Output dim. The output shape is (N, ((T-1)//2 - 1)//2, out_channels) + layer1_channels: + Number of channels in layer1 + layer1_channels: + Number of channels in layer2 + """ + assert in_channels >= 7 + super().__init__() + + self.conv = nn.Sequential( + ScaledConv2d( + in_channels=1, + out_channels=layer1_channels, + kernel_size=3, + padding=1, + ), + ActivationBalancer(channel_dim=1), + DoubleSwish(), + ScaledConv2d( + in_channels=layer1_channels, + out_channels=layer2_channels, + kernel_size=3, + stride=2, + ), + ActivationBalancer(channel_dim=1), + DoubleSwish(), + ScaledConv2d( + in_channels=layer2_channels, + out_channels=layer3_channels, + kernel_size=3, + stride=2, + ), + ActivationBalancer(channel_dim=1), + DoubleSwish(), + ) + self.out = ScaledLinear( + layer3_channels * (((in_channels - 1) // 2 - 1) // 2), out_channels + ) + # set learn_eps=False because out_norm is preceded by `out`, and `out` + # itself has learned scale, so the extra degree of freedom is not + # needed. + self.out_norm = BasicNorm(out_channels, learn_eps=False) + # constrain median of output to be close to zero. + self.out_balancer = ActivationBalancer( + channel_dim=-1, min_positive=0.45, max_positive=0.55 + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """Subsample x. + + Args: + x: + Its shape is (N, T, idim). + + Returns: + Return a tensor of shape (N, ((T-1)//2 - 1)//2, odim) + """ + # On entry, x is (N, T, idim) + x = x.unsqueeze(1) # (N, T, idim) -> (N, 1, T, idim) i.e., (N, C, H, W) + x = self.conv(x) + # Now x is of shape (N, odim, ((T-1)//2 - 1)//2, ((idim-1)//2 - 1)//2) + b, c, t, f = x.size() + x = self.out(x.transpose(1, 2).contiguous().view(b, t, c * f)) + # Now x is of shape (N, ((T-1)//2 - 1))//2, odim) + x = self.out_norm(x) + x = self.out_balancer(x) + return x diff --git a/egs/librispeech/ASR/conformer_ctc2/train.py b/egs/librispeech/ASR/conformer_ctc2/train.py new file mode 100755 index 000000000..d7baa229f --- /dev/null +++ b/egs/librispeech/ASR/conformer_ctc2/train.py @@ -0,0 +1,1119 @@ +#!/usr/bin/env python3 +# Copyright 2021 Xiaomi Corp. 
(authors: Fangjun Kuang, +# Wei Kang, +# Mingshuang Luo, +# Zengwei Yao, +# Quandong Wang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Usage: + +export CUDA_VISIBLE_DEVICES="0,1,2,3" + +./conformer_ctc2/train.py \ + --world-size 4 \ + --num-epochs 30 \ + --start-epoch 1 \ + --exp-dir conformer_ctc2/exp \ + --full-libri 1 \ + --max-duration 300 + +# For mix precision training: + +./conformer_ctc2/train.py \ + --world-size 4 \ + --num-epochs 30 \ + --start-epoch 1 \ + --use-fp16 1 \ + --exp-dir conformer_ctc2/exp \ + --full-libri 1 \ + --max-duration 550 + +""" + + +import argparse +import copy +import logging +import warnings +from pathlib import Path +from shutil import copyfile +from typing import Any, Dict, Optional, Tuple, Union + +import k2 +import optim +import torch +import torch.multiprocessing as mp +import torch.nn as nn +from asr_datamodule import LibriSpeechAsrDataModule +from conformer import Conformer +from lhotse.cut import Cut +from lhotse.dataset.sampling.base import CutSampler +from lhotse.utils import fix_random_seed +from optim import Eden, Eve +from torch import Tensor +from torch.cuda.amp import GradScaler +from torch.nn.parallel import DistributedDataParallel as DDP +from torch.utils.tensorboard import SummaryWriter + +from icefall.bpe_graph_compiler import BpeCtcTrainingGraphCompiler +from icefall import diagnostics +from icefall.checkpoint import load_checkpoint, remove_checkpoints +from icefall.checkpoint import save_checkpoint as save_checkpoint_impl +from icefall.checkpoint import ( + save_checkpoint_with_global_batch_idx, + update_averaged_model, +) +from icefall.dist import cleanup_dist, setup_dist +from icefall.env import get_env_info +from icefall.graph_compiler import CtcTrainingGraphCompiler +from icefall.lexicon import Lexicon +from icefall.utils import ( + AttributeDict, + MetricsTracker, + encode_supervisions, + setup_logger, + str2bool, +) + +LRSchedulerType = Union[ + torch.optim.lr_scheduler._LRScheduler, optim.LRScheduler +] + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--world-size", + type=int, + default=1, + help="Number of GPUs for DDP training.", + ) + + parser.add_argument( + "--master-port", + type=int, + default=12354, + help="Master port to use for DDP training.", + ) + + parser.add_argument( + "--tensorboard", + type=str2bool, + default=True, + help="Should various information be logged in tensorboard.", + ) + + parser.add_argument( + "--num-epochs", + type=int, + default=30, + help="Number of epochs to train.", + ) + + parser.add_argument( + "--start-epoch", + type=int, + default=1, + help="""Resume training from this epoch. It should be positive. 
+ If larger than 1, it will load checkpoint from + exp-dir/epoch-{start_epoch-1}.pt + """, + ) + + parser.add_argument( + "--start-batch", + type=int, + default=0, + help="""If positive, --start-epoch is ignored and + it loads the checkpoint from exp-dir/checkpoint-{start_batch}.pt + """, + ) + + parser.add_argument( + "--exp-dir", + type=str, + default="conformer_ctc2/exp", + help="""The experiment dir. + It specifies the directory where all training related + files, e.g., checkpoints, log, etc, are saved + """, + ) + + parser.add_argument( + "--lang-dir", + type=str, + default="data/lang_bpe_500", + help="""The lang dir + It contains language related input files such as + "lexicon.txt" + """, + ) + + parser.add_argument( + "--bpe-model", + type=str, + default="data/lang_bpe_500/bpe.model", + help="Path to the BPE model", + ) + + parser.add_argument( + "--initial-lr", + type=float, + default=0.003, + help="""The initial learning rate. This value should not need to be + changed.""", + ) + + parser.add_argument( + "--lr-batches", + type=float, + default=5000, + help="""Number of steps that affects how rapidly the learning rate decreases. + We suggest not to change this.""", + ) + + parser.add_argument( + "--lr-epochs", + type=float, + default=6, + help="""Number of epochs that affects how rapidly the learning rate decreases. + """, + ) + + parser.add_argument( + "--att-rate", + type=float, + default=0.8, + help="""The attention rate. + The total loss is (1 - att_rate) * ctc_loss + att_rate * att_loss + """, + ) + + parser.add_argument( + "--num-decoder-layers", + type=int, + default=6, + help="""Number of decoder layer of transformer decoder. + Setting this to 0 will not create the decoder at all (pure CTC model) + """, + ) + + parser.add_argument( + "--seed", + type=int, + default=42, + help="The seed for random generators intended for reproducibility", + ) + + parser.add_argument( + "--print-diagnostics", + type=str2bool, + default=False, + help="Accumulate stats on activations, print them and exit.", + ) + + parser.add_argument( + "--save-every-n", + type=int, + default=8000, + help="""Save checkpoint after processing this number of batches" + periodically. We save checkpoint to exp-dir/ whenever + params.batch_idx_train % save_every_n == 0. The checkpoint filename + has the form: f'exp-dir/checkpoint-{params.batch_idx_train}.pt' + Note: It also saves checkpoint to `exp-dir/epoch-xxx.pt` at the + end of each epoch where `xxx` is the epoch number counting from 0. + """, + ) + + parser.add_argument( + "--keep-last-k", + type=int, + default=20, + help="""Only keep this number of checkpoints on disk. + For instance, if it is 3, there are only 3 checkpoints + in the exp-dir with filenames `checkpoint-xxx.pt`. + It does not affect checkpoints with name `epoch-xxx.pt`. + """, + ) + + parser.add_argument( + "--average-period", + type=int, + default=100, + help="""Update the averaged model, namely `model_avg`, after processing + this number of batches. `model_avg` is a separate version of model, + in which each floating-point parameter is the average of all the + parameters from the start of training. Each time we take the average, + we do: `model_avg = model * (average_period / batch_idx_train) + + model_avg * ((batch_idx_train - average_period) / batch_idx_train)`. 
+ """, + ) + + parser.add_argument( + "--use-fp16", + type=str2bool, + default=False, + help="Whether to use half precision training.", + ) + + return parser + + +def get_params() -> AttributeDict: + """Return a dict containing training parameters. + + All training related parameters that are not passed from the commandline + are saved in the variable `params`. + + Commandline options are merged into `params` after they are parsed, so + you can also access them via `params`. + + Explanation of options saved in `params`: + + - best_train_loss: Best training loss so far. It is used to select + the model that has the lowest training loss. It is + updated during the training. + + - best_valid_loss: Best validation loss so far. It is used to select + the model that has the lowest validation loss. It is + updated during the training. + + - best_train_epoch: It is the epoch that has the best training loss. + + - best_valid_epoch: It is the epoch that has the best validation loss. + + - batch_idx_train: Used to writing statistics to tensorboard. It + contains number of batches trained so far across + epochs. + + - log_interval: Print training loss if batch_idx % log_interval` is 0 + + - reset_interval: Reset statistics if batch_idx % reset_interval is 0 + + - valid_interval: Run validation if batch_idx % valid_interval is 0 + + - feature_dim: The model input dim. It has to match the one used + in computing features. + + - subsampling_factor: The subsampling factor for the model. + + - encoder_dim: Hidden dim for multi-head attention model. + + - num_decoder_layers: Number of decoder layer of transformer decoder. + + - beam_size: It is used in k2.ctc_loss + + - reduction: It is used in k2.ctc_loss + + - use_double_scores: It is used in k2.ctc_loss + + - warm_step: The warm_step for Noam optimizer. + """ + params = AttributeDict( + { + "best_train_loss": float("inf"), + "best_valid_loss": float("inf"), + "best_train_epoch": -1, + "best_valid_epoch": -1, + "batch_idx_train": 0, + "log_interval": 1, + "reset_interval": 200, + "valid_interval": 3000, # For the 100h subset, use 800 + # parameters for conformer + "feature_dim": 80, + "subsampling_factor": 4, + "encoder_dim": 512, + "nhead": 8, + "dim_feedforward": 2048, + "num_encoder_layers": 12, + # parameters for ctc loss + "beam_size": 10, + "reduction": "sum", + "use_double_scores": True, + # parameters for Noam + "model_warm_step": 3000, # arg given to model, not for lrate + "env_info": get_env_info(), + } + ) + + return params + + +def load_checkpoint_if_available( + params: AttributeDict, + model: nn.Module, + model_avg: nn.Module = None, + optimizer: Optional[torch.optim.Optimizer] = None, + scheduler: Optional[LRSchedulerType] = None, +) -> Optional[Dict[str, Any]]: + """Load checkpoint from file. + + If params.start_batch is positive, it will load the checkpoint from + `params.exp_dir/checkpoint-{params.start_batch}.pt`. Otherwise, if + params.start_epoch is larger than 1, it will load the checkpoint from + `params.start_epoch - 1`. + + Apart from loading state dict for `model` and `optimizer` it also updates + `best_train_epoch`, `best_train_loss`, `best_valid_epoch`, + and `best_valid_loss` in `params`. + + Args: + params: + The return value of :func:`get_params`. + model: + The training model. + model_avg: + The stored model averaged from the start of training. + optimizer: + The optimizer that we are using. + scheduler: + The scheduler that we are using. + Returns: + Return a dict containing previously saved training info. 
+ """ + if params.start_batch > 0: + filename = params.exp_dir / f"checkpoint-{params.start_batch}.pt" + elif params.start_epoch > 1: + filename = params.exp_dir / f"epoch-{params.start_epoch-1}.pt" + else: + return None + + assert filename.is_file(), f"{filename} does not exist!" + + saved_params = load_checkpoint( + filename, + model=model, + model_avg=model_avg, + optimizer=optimizer, + scheduler=scheduler, + ) + + keys = [ + "best_train_epoch", + "best_valid_epoch", + "batch_idx_train", + "best_train_loss", + "best_valid_loss", + ] + for k in keys: + params[k] = saved_params[k] + + if params.start_batch > 0: + if "cur_epoch" in saved_params: + params["start_epoch"] = saved_params["cur_epoch"] + + if "cur_batch_idx" in saved_params: + params["cur_batch_idx"] = saved_params["cur_batch_idx"] + + return saved_params + + +def save_checkpoint( + params: AttributeDict, + model: Union[nn.Module, DDP], + model_avg: Optional[nn.Module] = None, + optimizer: Optional[torch.optim.Optimizer] = None, + scheduler: Optional[LRSchedulerType] = None, + sampler: Optional[CutSampler] = None, + scaler: Optional[GradScaler] = None, + rank: int = 0, +) -> None: + """Save model, optimizer, scheduler and training stats to file. + + Args: + params: + It is returned by :func:`get_params`. + model: + The training model. + model_avg: + The stored model averaged from the start of training. + optimizer: + The optimizer used in the training. + sampler: + The sampler for the training dataset. + scaler: + The scaler used for mix precision training. + """ + if rank != 0: + return + filename = params.exp_dir / f"epoch-{params.cur_epoch}.pt" + save_checkpoint_impl( + filename=filename, + model=model, + model_avg=model_avg, + params=params, + optimizer=optimizer, + scheduler=scheduler, + sampler=sampler, + scaler=scaler, + rank=rank, + ) + + if params.best_train_epoch == params.cur_epoch: + best_train_filename = params.exp_dir / "best-train-loss.pt" + copyfile(src=filename, dst=best_train_filename) + + if params.best_valid_epoch == params.cur_epoch: + best_valid_filename = params.exp_dir / "best-valid-loss.pt" + copyfile(src=filename, dst=best_valid_filename) + + +def compute_loss( + params: AttributeDict, + model: Union[nn.Module, DDP], + batch: dict, + graph_compiler: BpeCtcTrainingGraphCompiler, + is_training: bool, + warmup: float = 1.0, +) -> Tuple[Tensor, MetricsTracker]: + """ + Compute CTC loss given the model and its inputs. + + Args: + params: + Parameters for training. See :func:`get_params`. + model: + The model for training. It is an instance of Conformer in our case. + batch: + A batch of data. See `lhotse.dataset.K2SpeechRecognitionDataset()` + for the content in it. + graph_compiler: + It is used to build a decoding graph from a ctc topo and training + transcript. The training transcript is contained in the given `batch`, + while the ctc topo is built when this compiler is instantiated. + is_training: + True for training. False for validation. When it is True, this + function enables autograd during computation; when it is False, it + disables autograd. + warmup: a floating point value which increases throughout training; + values >= 1.0 are fully warmed up and have all modules present. 
+ """ + device = ( + model.device + if isinstance(model, DDP) + else next(model.parameters()).device + ) + feature = batch["inputs"] + # at entry, feature is (N, T, C) + assert feature.ndim == 3 + feature = feature.to(device) + + supervisions = batch["supervisions"] + feature_lens = supervisions["num_frames"].to(device) + + with torch.set_grad_enabled(is_training): + nnet_output, encoder_memory, memory_mask = model( + feature, supervisions, warmup=warmup + ) + # logging.info('feature shape: {}'.format(feature.shape)) + # logging.info('nnet_output shape: {}'.format(nnet_output.shape)) + # logging.info('encoder_memory shape: {}'.format(encoder_memory.shape)) + # logging.info('memory_mask shape: {}'.format(memory_mask.shape)) + # after the main warmup step, we keep pruned_loss_scale small + # for the same amount of time (model_warm_step), to avoid + # overwhelming the simple_loss and causing it to diverge, + # in case it had not fully learned the alignment yet. + + # NOTE: We need `encode_supervisions` to sort sequences with + # different duration in decreasing order, required by + # `k2.intersect_dense` called in `k2.ctc_loss` + supervision_segments, texts = encode_supervisions( + supervisions, subsampling_factor=params.subsampling_factor + ) + + if isinstance(graph_compiler, BpeCtcTrainingGraphCompiler): + # Works with a BPE model + token_ids = graph_compiler.texts_to_ids(texts) + decoding_graph = graph_compiler.compile(token_ids) + elif isinstance(graph_compiler, CtcTrainingGraphCompiler): + # Works with a phone lexicon + decoding_graph = graph_compiler.compile(texts) + else: + raise ValueError( + f"Unsupported type of graph compiler: {type(graph_compiler)}" + ) + + dense_fsa_vec = k2.DenseFsaVec( + nnet_output, + supervision_segments, + allow_truncate=params.subsampling_factor - 1, + ) + + ctc_loss = k2.ctc_loss( + decoding_graph=decoding_graph, + dense_fsa_vec=dense_fsa_vec, + output_beam=params.beam_size, + reduction=params.reduction, + use_double_scores=params.use_double_scores, + ) + + if params.att_rate != 0.0: + with torch.set_grad_enabled(is_training): + mmodel = model.module if hasattr(model, "module") else model + # Note: We need to generate an unsorted version of token_ids + # `encode_supervisions()` called above sorts text, but + # encoder_memory and memory_mask are not sorted, so we + # use an unsorted version `supervisions["text"]` to regenerate + # the token_ids + # + # See https://github.com/k2-fsa/icefall/issues/97 + # for more details + unsorted_token_ids = graph_compiler.texts_to_ids( + supervisions["text"] + ) + att_loss = mmodel.decoder_forward( + encoder_memory, + memory_mask, + token_ids=unsorted_token_ids, + sos_id=graph_compiler.sos_id, + eos_id=graph_compiler.eos_id, + ) + loss = (1.0 - params.att_rate) * ctc_loss + params.att_rate * att_loss + else: + loss = ctc_loss + att_loss = torch.tensor([0]) + + assert loss.requires_grad == is_training + + info = MetricsTracker() + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + info["frames"] = ( + (feature_lens // params.subsampling_factor).sum().item() + ) + info["ctc_loss"] = ctc_loss.detach().cpu().item() + if params.att_rate != 0.0: + info["att_loss"] = att_loss.detach().cpu().item() + + # Note: We use reduction=sum while computing the loss. 
+ info["loss"] = loss.detach().cpu().item() + + return loss, info + + +def compute_validation_loss( + params: AttributeDict, + model: Union[nn.Module, DDP], + graph_compiler: BpeCtcTrainingGraphCompiler, + valid_dl: torch.utils.data.DataLoader, + world_size: int = 1, +) -> MetricsTracker: + """Run the validation process.""" + model.eval() + + tot_loss = MetricsTracker() + + for batch_idx, batch in enumerate(valid_dl): + loss, loss_info = compute_loss( + params=params, + model=model, + batch=batch, + graph_compiler=graph_compiler, + is_training=False, + ) + assert loss.requires_grad is False + tot_loss = tot_loss + loss_info + + if world_size > 1: + tot_loss.reduce(loss.device) + + loss_value = tot_loss["loss"] / tot_loss["frames"] + if loss_value < params.best_valid_loss: + params.best_valid_epoch = params.cur_epoch + params.best_valid_loss = loss_value + + return tot_loss + + +def train_one_epoch( + params: AttributeDict, + model: Union[nn.Module, DDP], + optimizer: torch.optim.Optimizer, + graph_compiler: BpeCtcTrainingGraphCompiler, + scheduler: LRSchedulerType, + train_dl: torch.utils.data.DataLoader, + valid_dl: torch.utils.data.DataLoader, + scaler: GradScaler, + model_avg: Optional[nn.Module] = None, + tb_writer: Optional[SummaryWriter] = None, + world_size: int = 1, + rank: int = 0, +) -> None: + """Train the model for one epoch. + + The training loss from the mean of all frames is saved in + `params.train_loss`. It runs the validation process every + `params.valid_interval` batches. + + Args: + params: + It is returned by :func:`get_params`. + model: + The model for training. + optimizer: + The optimizer we are using. + graph_compiler: + It is used to convert transcripts to FSAs. + scheduler: + The learning rate scheduler, we call step() every step. + train_dl: + Dataloader for the training dataset. + valid_dl: + Dataloader for the validation dataset. + scaler: + The scaler used for mix precision training. + model_avg: + The stored model averaged from the start of training. + tb_writer: + Writer to write log messages to tensorboard. + world_size: + Number of nodes in DDP training. If it is 1, DDP is disabled. + rank: + The rank of the node in DDP training. If no DDP is used, it should + be set to 0. + """ + model.train() + + tot_loss = MetricsTracker() + + cur_batch_idx = params.get("cur_batch_idx", 0) + + for batch_idx, batch in enumerate(train_dl): + if batch_idx < cur_batch_idx: + continue + cur_batch_idx = batch_idx + + params.batch_idx_train += 1 + batch_size = len(batch["supervisions"]["text"]) + batch_name = batch["supervisions"]["uttid"] + + with torch.cuda.amp.autocast(enabled=params.use_fp16): + loss, loss_info = compute_loss( + params=params, + model=model, + batch=batch, + graph_compiler=graph_compiler, + is_training=True, + warmup=(params.batch_idx_train / params.model_warm_step), + ) + # summary stats + tot_loss = (tot_loss * (1 - 1 / params.reset_interval)) + loss_info + + # NOTE: We use reduction==sum and loss is computed over utterances + # in the batch and there is no normalization to it so far. 
+ # scaler.scale(loss).backward() + + try: + # loss.backward() + scaler.scale(loss).backward() + except RuntimeError as e: + if "CUDA out of memory" in str(e): + logging.error( + f"failing batch size:{batch_size} " + f"failing batch names {batch_name}" + ) + raise + + scheduler.step_batch(params.batch_idx_train) + scaler.step(optimizer) + scaler.update() + optimizer.zero_grad() + + if params.print_diagnostics and batch_idx == 30: + return + + if ( + rank == 0 + and params.batch_idx_train > 0 + and params.batch_idx_train % params.average_period == 0 + ): + update_averaged_model( + params=params, + model_cur=model, + model_avg=model_avg, + ) + + if ( + params.batch_idx_train > 0 + and params.batch_idx_train % params.save_every_n == 0 + ): + params.cur_batch_idx = batch_idx + save_checkpoint_with_global_batch_idx( + out_dir=params.exp_dir, + global_batch_idx=params.batch_idx_train, + model=model, + model_avg=model_avg, + params=params, + optimizer=optimizer, + scheduler=scheduler, + sampler=train_dl.sampler, + scaler=scaler, + rank=rank, + ) + del params.cur_batch_idx + remove_checkpoints( + out_dir=params.exp_dir, + topk=params.keep_last_k, + rank=rank, + ) + + if batch_idx % params.log_interval == 0: + cur_lr = scheduler.get_last_lr()[0] + logging.info( + f"Epoch {params.cur_epoch}, " + f"batch {batch_idx}, loss[{loss_info}], " + f"tot_loss[{tot_loss}], batch size: {batch_size}, " + f"lr: {cur_lr:.2e}" + ) + if loss_info["ctc_loss"] == float("inf") or loss_info[ + "att_loss" + ] == float("inf"): + logging.error( + "Your loss contains inf, something goes wrong" + f"failing batch names {batch_name}" + ) + if tb_writer is not None: + tb_writer.add_scalar( + "train/learning_rate", cur_lr, params.batch_idx_train + ) + + loss_info.write_summary( + tb_writer, "train/current_", params.batch_idx_train + ) + tot_loss.write_summary( + tb_writer, "train/tot_", params.batch_idx_train + ) + + if batch_idx > 0 and batch_idx % params.valid_interval == 0: + logging.info("Computing validation loss") + valid_info = compute_validation_loss( + params=params, + model=model, + graph_compiler=graph_compiler, + valid_dl=valid_dl, + world_size=world_size, + ) + model.train() + logging.info(f"Epoch {params.cur_epoch}, validation: {valid_info}") + if tb_writer is not None: + valid_info.write_summary( + tb_writer, "train/valid_", params.batch_idx_train + ) + + loss_value = tot_loss["loss"] / tot_loss["frames"] + params.train_loss = loss_value + if params.train_loss < params.best_train_loss: + params.best_train_epoch = params.cur_epoch + params.best_train_loss = params.train_loss + + +def run(rank, world_size, args): + """ + Args: + rank: + It is a value between 0 and `world_size-1`, which is + passed automatically by `mp.spawn()` in :func:`main`. + The node with rank 0 is responsible for saving checkpoint. + world_size: + Number of GPUs for DDP training. 
+ args: + The return value of get_parser().parse_args() + """ + params = get_params() + params.update(vars(args)) + if params.full_libri is False: + params.valid_interval = 1600 + + fix_random_seed(params.seed) + if world_size > 1: + setup_dist(rank, world_size, params.master_port) + + setup_logger(f"{params.exp_dir}/log/log-train") + logging.info("Training started") + logging.info(params) + + if args.tensorboard and rank == 0: + tb_writer = SummaryWriter(log_dir=f"{params.exp_dir}/tensorboard") + else: + tb_writer = None + + lexicon = Lexicon(params.lang_dir) + max_token_id = max(lexicon.tokens) + num_classes = max_token_id + 1 # +1 for the blank + + device = torch.device("cpu") + if torch.cuda.is_available(): + device = torch.device("cuda", rank) + + if "lang_bpe" in str(params.lang_dir): + graph_compiler = BpeCtcTrainingGraphCompiler( + params.lang_dir, + device=device, + sos_token="", + eos_token="", + ) + elif "lang_phone" in str(params.lang_dir): + assert params.att_rate == 0, ( + "Attention decoder training does not support phone lang dirs " + "at this time due to a missing symbol. Set --att-rate=0 " + "for pure CTC training when using a phone-based lang dir." + ) + assert params.num_decoder_layers == 0, ( + "Attention decoder training does not support phone lang dirs " + "at this time due to a missing symbol. " + "Set --num-decoder-layers=0 for pure CTC training when using " + "a phone-based lang dir." + ) + graph_compiler = CtcTrainingGraphCompiler( + lexicon, + device=device, + ) + # Manually add the sos/eos ID with their default values + # from the BPE recipe which we're adapting here. + graph_compiler.sos_id = 1 + graph_compiler.eos_id = 1 + else: + raise ValueError( + f"Unsupported type of lang dir (we expected it to have " + f"'lang_bpe' or 'lang_phone' in its name): {params.lang_dir}" + ) + + logging.info("About to create model") + model = Conformer( + num_features=params.feature_dim, + nhead=params.nhead, + d_model=params.encoder_dim, + num_classes=num_classes, + subsampling_factor=params.subsampling_factor, + num_encoder_layers=params.num_encoder_layers, + num_decoder_layers=params.num_decoder_layers, + ) + + print(model) + + num_param = sum([p.numel() for p in model.parameters()]) + logging.info(f"Number of model parameters: {num_param}") + + assert params.save_every_n >= params.average_period + model_avg: Optional[nn.Module] = None + if rank == 0: + # model_avg is only used with rank 0 + model_avg = copy.deepcopy(model) + + assert params.start_epoch > 0, params.start_epoch + checkpoints = load_checkpoint_if_available( + params=params, model=model, model_avg=model_avg + ) + + model.to(device) + if world_size > 1: + logging.info("Using DDP") + model = DDP(model, device_ids=[rank]) + + optimizer = Eve(model.parameters(), lr=params.initial_lr) + + scheduler = Eden(optimizer, params.lr_batches, params.lr_epochs) + + if checkpoints and "optimizer" in checkpoints: + logging.info("Loading optimizer state dict") + optimizer.load_state_dict(checkpoints["optimizer"]) + + if ( + checkpoints + and "scheduler" in checkpoints + and checkpoints["scheduler"] is not None + ): + logging.info("Loading scheduler state dict") + scheduler.load_state_dict(checkpoints["scheduler"]) + + if params.print_diagnostics: + diagnostic = diagnostics.attach_diagnostics(model) + + librispeech = LibriSpeechAsrDataModule(args) + + train_cuts = librispeech.train_clean_100_cuts() + if params.full_libri: + train_cuts += librispeech.train_clean_360_cuts() + train_cuts += librispeech.train_other_500_cuts() + + 
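+    # With --full-libri the three subsets above are combined
+    # (train-clean-100 + train-clean-360 + train-other-500, roughly 960 hours);
+    # otherwise only train-clean-100 is used and valid_interval was already
+    # reduced to 1600 near the top of run().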
def remove_short_and_long_utt(c: Cut): + # Keep only utterances with duration between 1 second and 20 seconds + # + # Caution: There is a reason to select 20.0 here. Please see + # ../local/display_manifest_statistics.py + # + # You should use ../local/display_manifest_statistics.py to get + # an utterance duration distribution for your dataset to select + # the threshold + return 1.0 <= c.duration <= 20.0 + + def remove_invalid_utt_ctc(c: Cut): + # Caution: We assume the subsampling factor is 4! + # num_tokens = len(sp.encode(c.supervisions[0].text, out_type=int)) + num_tokens = len(graph_compiler.texts_to_ids(c.supervisions[0].text)) + min_output_input_ratio = 0.0005 + max_output_input_ratio = 0.1 + return ( + min_output_input_ratio + < num_tokens / float(c.features.num_frames) + < max_output_input_ratio + ) + + train_cuts = train_cuts.filter(remove_short_and_long_utt) + train_cuts = train_cuts.filter(remove_invalid_utt_ctc) + + if params.start_batch > 0 and checkpoints and "sampler" in checkpoints: + # We only load the sampler's state dict when it loads a checkpoint + # saved in the middle of an epoch + sampler_state_dict = checkpoints["sampler"] + else: + sampler_state_dict = None + + train_dl = librispeech.train_dataloaders( + train_cuts, sampler_state_dict=sampler_state_dict + ) + + valid_cuts = librispeech.dev_clean_cuts() + valid_cuts += librispeech.dev_other_cuts() + valid_dl = librispeech.valid_dataloaders(valid_cuts) + + if params.print_diagnostics: + scan_pessimistic_batches_for_oom( + model=model, + train_dl=train_dl, + optimizer=optimizer, + graph_compiler=graph_compiler, + params=params, + ) + + scaler = GradScaler(enabled=params.use_fp16) + if checkpoints and "grad_scaler" in checkpoints: + logging.info("Loading grad scaler state dict") + scaler.load_state_dict(checkpoints["grad_scaler"]) + + for epoch in range(params.start_epoch, params.num_epochs + 1): + scheduler.step_epoch(epoch - 1) + fix_random_seed(params.seed + epoch - 1) + train_dl.sampler.set_epoch(epoch - 1) + + if tb_writer is not None: + tb_writer.add_scalar("train/epoch", epoch, params.batch_idx_train) + + params.cur_epoch = epoch + + train_one_epoch( + params=params, + model=model, + model_avg=model_avg, + optimizer=optimizer, + graph_compiler=graph_compiler, + scheduler=scheduler, + train_dl=train_dl, + valid_dl=valid_dl, + scaler=scaler, + tb_writer=tb_writer, + world_size=world_size, + rank=rank, + ) + + if params.print_diagnostics: + diagnostic.print_diagnostics() + break + + save_checkpoint( + params=params, + model=model, + model_avg=model_avg, + optimizer=optimizer, + scheduler=scheduler, + sampler=train_dl.sampler, + scaler=scaler, + rank=rank, + ) + + logging.info("Done!") + + if world_size > 1: + torch.distributed.barrier() + cleanup_dist() + + +def scan_pessimistic_batches_for_oom( + model: Union[nn.Module, DDP], + train_dl: torch.utils.data.DataLoader, + optimizer: torch.optim.Optimizer, + graph_compiler: BpeCtcTrainingGraphCompiler, + params: AttributeDict, +): + from lhotse.dataset import find_pessimistic_batches + + logging.info( + "Sanity check -- see if any of the batches in epoch 1 would cause OOM." + ) + batches, crit_values = find_pessimistic_batches(train_dl.sampler) + for criterion, cuts in batches.items(): + batch = train_dl.dataset[cuts] + try: + # warmup = 0.0 is so that the derivs for the pruned loss stay zero + # (i.e. are not remembered by the decaying-average in adam), because + # we want to avoid these params being subject to shrinkage in adam. 
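+            # With warmup=0.0 the warmed-up scale in each encoder/decoder layer
+            # is min(0.1 + 0.0, 1.0) = 0.1, i.e. alpha stays at 0.1; every
+            # module still runs, so this probe still exercises the full
+            # activation memory of the worst-case batches.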
+ with torch.cuda.amp.autocast(enabled=params.use_fp16): + loss, _ = compute_loss( + params=params, + model=model, + batch=batch, + graph_compiler=graph_compiler, + is_training=True, + warmup=0.0, + ) + loss.backward() + optimizer.step() + optimizer.zero_grad() + except RuntimeError as e: + if "CUDA out of memory" in str(e): + logging.error( + "Your GPU ran out of memory with the current " + "max_duration setting. We recommend decreasing " + "max_duration and trying again.\n" + f"Failing criterion: {criterion} " + f"(={crit_values[criterion]}) ..." + ) + raise + + +def main(): + parser = get_parser() + LibriSpeechAsrDataModule.add_arguments(parser) + args = parser.parse_args() + args.exp_dir = Path(args.exp_dir) + + world_size = args.world_size + assert world_size >= 1 + if world_size > 1: + mp.spawn(run, args=(world_size, args), nprocs=world_size, join=True) + else: + run(rank=0, world_size=1, args=args) + + +torch.set_num_threads(1) +torch.set_num_interop_threads(1) + +if __name__ == "__main__": + main() diff --git a/egs/librispeech/ASR/conformer_ctc2/transformer.py b/egs/librispeech/ASR/conformer_ctc2/transformer.py new file mode 100644 index 000000000..fa179acc0 --- /dev/null +++ b/egs/librispeech/ASR/conformer_ctc2/transformer.py @@ -0,0 +1,1092 @@ +# Copyright 2021 University of Chinese Academy of Sciences (author: Han Zhu) +# Copyright 2022 Xiaomi Corp. (author: Quandong Wang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy +import math +from typing import Dict, List, Optional, Tuple + +import torch +import torch.nn as nn +from label_smoothing import LabelSmoothingLoss +from subsampling import Conv2dSubsampling +from attention import MultiheadAttention +from torch.nn.utils.rnn import pad_sequence + +from scaling import ( + ActivationBalancer, + BasicNorm, + DoubleSwish, + ScaledLinear, + ScaledEmbedding, +) + + +# Note: TorchScript requires Dict/List/etc. to be fully typed. +Supervisions = Dict[str, torch.Tensor] + + +class Transformer(nn.Module): + def __init__( + self, + num_features: int, + num_classes: int, + subsampling_factor: int = 4, + d_model: int = 256, + nhead: int = 4, + dim_feedforward: int = 2048, + num_encoder_layers: int = 12, + num_decoder_layers: int = 6, + dropout: float = 0.1, + layer_dropout: float = 0.075, + ) -> None: + """ + Args: + num_features: + The input dimension of the model. + num_classes: + The output dimension of the model. + subsampling_factor: + Number of output frames is num_in_frames // subsampling_factor. + Currently, subsampling_factor MUST be 4. + d_model: + Attention dimension. + nhead: + Number of heads in multi-head attention. + Must satisfy d_model // nhead == 0. + dim_feedforward: + The output dimension of the feedforward layers in encoder/decoder. + num_encoder_layers: + Number of encoder layers. + num_decoder_layers: + Number of decoder layers. + dropout: + Dropout in encoder/decoder. + layer_dropout (float): layer-dropout rate. 
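+
+        Example (an illustrative sketch; the shapes are arbitrary and the
+        roughly T/4 output length comes from Conv2dSubsampling)::
+
+            >>> import torch
+            >>> model = Transformer(num_features=80, num_classes=500)
+            >>> x = torch.randn(2, 100, 80)   # (N, T, C) filter-bank features
+            >>> ctc_out, memory, mask = model(x)
+            >>> ctc_out.shape   # (N, T', C) with T' = ((100 - 1) // 2 - 1) // 2 = 24
+            torch.Size([2, 24, 500])
+            >>> memory.shape    # (T', N, d_model)
+            torch.Size([24, 2, 256])
+            >>> mask is None    # no supervision was passed
+            True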
+ """ + super().__init__() + + self.num_features = num_features + self.num_classes = num_classes + self.subsampling_factor = subsampling_factor + if subsampling_factor != 4: + raise NotImplementedError("Support only 'subsampling_factor=4'.") + + # self.encoder_embed converts the input of shape (N, T, num_classes) + # to the shape (N, T//subsampling_factor, d_model). + # That is, it does two things simultaneously: + # (1) subsampling: T -> T//subsampling_factor + # (2) embedding: num_classes -> d_model + self.encoder_embed = Conv2dSubsampling(num_features, d_model) + + self.encoder_pos = PositionalEncoding(d_model, dropout) + + encoder_layer = TransformerEncoderLayer( + d_model=d_model, + nhead=nhead, + dim_feedforward=dim_feedforward, + dropout=dropout, + layer_dropout=layer_dropout, + ) + + self.encoder = TransformerEncoder( + encoder_layer=encoder_layer, + num_layers=num_encoder_layers, + ) + + # TODO(fangjun): remove dropout + self.encoder_output_layer = nn.Sequential( + nn.Dropout(p=dropout), ScaledLinear(d_model, num_classes, bias=True) + ) + + if num_decoder_layers > 0: + self.decoder_num_class = ( + self.num_classes + ) # bpe model already has sos/eos symbol + + self.decoder_embed = ScaledEmbedding( + num_embeddings=self.decoder_num_class, embedding_dim=d_model + ) + self.decoder_pos = PositionalEncoding(d_model, dropout) + + decoder_layer = TransformerDecoderLayer( + d_model=d_model, + nhead=nhead, + dim_feedforward=dim_feedforward, + dropout=dropout, + ) + + self.decoder = TransformerDecoder( + decoder_layer=decoder_layer, + num_layers=num_decoder_layers, + ) + + self.decoder_output_layer = ScaledLinear( + d_model, self.decoder_num_class, bias=True + ) + + self.decoder_criterion = LabelSmoothingLoss() + else: + self.decoder_criterion = None + + def forward( + self, + x: torch.Tensor, + supervision: Optional[Supervisions] = None, + warmup: float = 1.0, + ) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]: + """ + Args: + x: + The input tensor. Its shape is (N, T, C). + supervision: + Supervision in lhotse format. + See https://github.com/lhotse-speech/lhotse/blob/master/lhotse/dataset/speech_recognition.py#L32 # noqa + (CAUTION: It contains length information, i.e., start and number of + frames, before subsampling) + warmup: + A floating point value that gradually increases from 0 throughout + training; when it is >= 1.0 we are "fully warmed up". It is used + to turn modules on sequentially. + + Returns: + Return a tuple containing 3 tensors: + - CTC output for ctc decoding. Its shape is (N, T, C) + - Encoder output with shape (T, N, C). It can be used as key and + value for the decoder. + - Encoder output padding mask. It can be used as + memory_key_padding_mask for the decoder. Its shape is (N, T). + It is None if `supervision` is None. + """ + + encoder_memory, memory_key_padding_mask = self.run_encoder( + x, supervision, warmup + ) + + x = self.ctc_output(encoder_memory) + return x, encoder_memory, memory_key_padding_mask + + def run_encoder( + self, + x: torch.Tensor, + supervisions: Optional[Supervisions] = None, + warmup: float = 1.0, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: + """Run the transformer encoder. + + Args: + x: + The model input. Its shape is (N, T, C). + supervisions: + Supervision in lhotse format. 
+ See https://github.com/lhotse-speech/lhotse/blob/master/lhotse/dataset/speech_recognition.py#L32 # noqa + CAUTION: It contains length information, i.e., start and number of + frames, before subsampling + It is read directly from the batch, without any sorting. It is used + to compute the encoder padding mask, which is used as memory key + padding mask for the decoder. + Returns: + Return a tuple with two tensors: + - The encoder output, with shape (T, N, C) + - encoder padding mask, with shape (N, T). + The mask is None if `supervisions` is None. + It is used as memory key padding mask in the decoder. + """ + x = self.encoder_embed(x) + x = self.encoder_pos(x) + x = x.permute(1, 0, 2) # (N, T, C) -> (T, N, C) + mask = encoder_padding_mask(x.size(0), supervisions) + mask = mask.to(x.device) if mask is not None else None + x = self.encoder( + x, src_key_padding_mask=mask, warmup=warmup + ) # (T, N, C) + + return x, mask + + def ctc_output(self, x: torch.Tensor) -> torch.Tensor: + """ + Args: + x: + The output tensor from the transformer encoder. + Its shape is (T, N, C) + + Returns: + Return a tensor that can be used for CTC decoding. + Its shape is (N, T, C) + """ + x = self.encoder_output_layer(x) + x = x.permute(1, 0, 2) # (T, N, C) ->(N, T, C) + x = nn.functional.log_softmax(x, dim=-1) # (N, T, C) + return x + + @torch.jit.export + def decoder_forward( + self, + memory: torch.Tensor, + memory_key_padding_mask: torch.Tensor, + token_ids: List[List[int]], + sos_id: int, + eos_id: int, + ) -> torch.Tensor: + """ + Args: + memory: + It's the output of the encoder with shape (T, N, C) + memory_key_padding_mask: + The padding mask from the encoder. + token_ids: + A list-of-list IDs. Each sublist contains IDs for an utterance. + The IDs can be either phone IDs or word piece IDs. + sos_id: + sos token id + eos_id: + eos token id + + Returns: + A scalar, the **sum** of label smoothing loss over utterances + in the batch without any normalization. + """ + ys_in = add_sos(token_ids, sos_id=sos_id) + ys_in = [torch.tensor(y) for y in ys_in] + ys_in_pad = pad_sequence( + ys_in, batch_first=True, padding_value=float(eos_id) + ) + + ys_out = add_eos(token_ids, eos_id=eos_id) + ys_out = [torch.tensor(y) for y in ys_out] + ys_out_pad = pad_sequence( + ys_out, batch_first=True, padding_value=float(-1) + ) + + device = memory.device + ys_in_pad = ys_in_pad.to(device) + ys_out_pad = ys_out_pad.to(device) + + tgt_mask = generate_square_subsequent_mask(ys_in_pad.shape[-1]).to( + device + ) + + tgt_key_padding_mask = decoder_padding_mask(ys_in_pad, ignore_id=eos_id) + # TODO: Use length information to create the decoder padding mask + # We set the first column to False since the first column in ys_in_pad + # contains sos_id, which is the same as eos_id in our current setting. 
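+        # Toy illustration (sos_id == eos_id == 1, token_ids == [[5, 7], [9]]):
+        #     ys_in_pad  = [[1, 5, 7],        ys_out_pad = [[5, 7, 1],
+        #                   [1, 9, 1]]                      [9, 1, -1]]
+        # Padding ys_in with eos_id makes padded positions indistinguishable
+        # from real sos/eos tokens, so the mask computed above marks them; the
+        # genuine sos in the first column is then unmasked below.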
+ tgt_key_padding_mask[:, 0] = False + + tgt = self.decoder_embed(ys_in_pad) # (N, T) -> (N, T, C) + tgt = self.decoder_pos(tgt) + tgt = tgt.permute(1, 0, 2) # (N, T, C) -> (T, N, C) + pred_pad = self.decoder( + tgt=tgt, + memory=memory, + tgt_mask=tgt_mask, + tgt_key_padding_mask=tgt_key_padding_mask, + memory_key_padding_mask=memory_key_padding_mask, + ) # (T, N, C) + pred_pad = pred_pad.permute(1, 0, 2) # (T, N, C) -> (N, T, C) + pred_pad = self.decoder_output_layer(pred_pad) # (N, T, C) + + decoder_loss = self.decoder_criterion(pred_pad, ys_out_pad) + + return decoder_loss + + @torch.jit.export + def decoder_nll( + self, + memory: torch.Tensor, + memory_key_padding_mask: torch.Tensor, + token_ids: List[torch.Tensor], + sos_id: int, + eos_id: int, + ) -> torch.Tensor: + """ + Args: + memory: + It's the output of the encoder with shape (T, N, C) + memory_key_padding_mask: + The padding mask from the encoder. + token_ids: + A list-of-list IDs (e.g., word piece IDs). + Each sublist represents an utterance. + sos_id: + The token ID for SOS. + eos_id: + The token ID for EOS. + Returns: + A 2-D tensor of shape (len(token_ids), max_token_length) + representing the cross entropy loss (i.e., negative log-likelihood). + """ + # The common part between this function and decoder_forward could be + # extracted as a separate function. + if isinstance(token_ids[0], torch.Tensor): + # This branch is executed by torchscript in C++. + # See https://github.com/k2-fsa/k2/pull/870 + # https://github.com/k2-fsa/k2/blob/3c1c18400060415b141ccea0115fd4bf0ad6234e/k2/torch/bin/attention_rescore.cu#L286 + token_ids = [tolist(t) for t in token_ids] + + ys_in = add_sos(token_ids, sos_id=sos_id) + ys_in = [torch.tensor(y) for y in ys_in] + ys_in_pad = pad_sequence( + ys_in, batch_first=True, padding_value=float(eos_id) + ) + + ys_out = add_eos(token_ids, eos_id=eos_id) + ys_out = [torch.tensor(y) for y in ys_out] + ys_out_pad = pad_sequence( + ys_out, batch_first=True, padding_value=float(-1) + ) + + device = memory.device + ys_in_pad = ys_in_pad.to(device, dtype=torch.int64) + ys_out_pad = ys_out_pad.to(device, dtype=torch.int64) + + tgt_mask = generate_square_subsequent_mask(ys_in_pad.shape[-1]).to( + device + ) + + tgt_key_padding_mask = decoder_padding_mask(ys_in_pad, ignore_id=eos_id) + # TODO: Use length information to create the decoder padding mask + # We set the first column to False since the first column in ys_in_pad + # contains sos_id, which is the same as eos_id in our current setting. + tgt_key_padding_mask[:, 0] = False + + tgt = self.decoder_embed(ys_in_pad) # (B, T) -> (B, T, F) + tgt = self.decoder_pos(tgt) + tgt = tgt.permute(1, 0, 2) # (B, T, F) -> (T, B, F) + pred_pad = self.decoder( + tgt=tgt, + memory=memory, + tgt_mask=tgt_mask, + tgt_key_padding_mask=tgt_key_padding_mask, + memory_key_padding_mask=memory_key_padding_mask, + ) # (T, B, F) + pred_pad = pred_pad.permute(1, 0, 2) # (T, B, F) -> (B, T, F) + pred_pad = self.decoder_output_layer(pred_pad) # (B, T, F) + # nll: negative log-likelihood + nll = torch.nn.functional.cross_entropy( + pred_pad.view(-1, self.decoder_num_class), + ys_out_pad.view(-1), + ignore_index=-1, + reduction="none", + ) + + nll = nll.view(pred_pad.shape[0], -1) + + return nll + + +class TransformerEncoderLayer(nn.Module): + """ + Modified from torch.nn.TransformerEncoderLayer. + + Args: + d_model: + the number of expected features in the input (required). + nhead: + the number of heads in the multiheadattention models (required). 
+ dim_feedforward: + the dimension of the feedforward network model (default=2048). + dropout: + the dropout value (default=0.1). + activation: + the activation function of intermediate layer, relu or + gelu (default=relu). + + Examples:: + >>> encoder_layer = TransformerEncoderLayer(d_model=512, nhead=8) + >>> src = torch.rand(10, 32, 512) + >>> out = encoder_layer(src) + """ + + def __init__( + self, + d_model: int, + nhead: int, + dim_feedforward: int = 2048, + dropout: float = 0.1, + layer_dropout: float = 0.075, + activation: str = "relu", + ) -> None: + super(TransformerEncoderLayer, self).__init__() + + self.layer_dropout = layer_dropout + + self.self_attn = MultiheadAttention(d_model, nhead, dropout=0.0) + # Implementation of Feedforward model + + self.feed_forward = nn.Sequential( + ScaledLinear(d_model, dim_feedforward), + ActivationBalancer(channel_dim=-1), + DoubleSwish(), + nn.Dropout(dropout), + ScaledLinear(dim_feedforward, d_model, initial_scale=0.25), + ) + + self.norm_final = BasicNorm(d_model) + + # try to ensure the output is close to zero-mean (or at least, zero-median). + self.balancer = ActivationBalancer( + channel_dim=-1, min_positive=0.45, max_positive=0.55, max_abs=6.0 + ) + + self.dropout = nn.Dropout(dropout) + + # def __setstate__(self, state): + # if "activation" not in state: + # state["activation"] = nn.functional.relu + # super(TransformerEncoderLayer, self).__setstate__(state) + + def forward( + self, + src: torch.Tensor, + src_mask: Optional[torch.Tensor] = None, + src_key_padding_mask: Optional[torch.Tensor] = None, + warmup: float = 1.0, + ) -> torch.Tensor: + """ + Pass the input through the encoder layer. + + Args: + src: the sequence to the encoder layer (required). + src_mask: the mask for the src sequence (optional). + src_key_padding_mask: the mask for the src keys per batch (optional) + warmup: controls selective bypass of of layers; if < 1.0, we will + bypass layers more frequently. + + Shape: + src: (S, N, E). + src_mask: (S, S). + src_key_padding_mask: (N, S). + S is the source sequence length, T is the target sequence length, + N is the batch size, E is the feature number + """ + src_orig = src + + warmup_scale = min(0.1 + warmup, 1.0) + # alpha = 1.0 means fully use this encoder layer, 0.0 would mean + # completely bypass it. + if self.training: + alpha = ( + warmup_scale + if torch.rand(()).item() <= (1.0 - self.layer_dropout) + else 0.1 + ) + else: + alpha = 1.0 + + # src_att = self.self_attn(src, src, src, src_mask) + src_att = self.self_attn( + src, + src, + src, + attn_mask=src_mask, + key_padding_mask=src_key_padding_mask, + )[0] + src = src + self.dropout(src_att) + + src = src + self.dropout(self.feed_forward(src)) + + src = self.norm_final(self.balancer(src)) + + if alpha != 1.0: + src = alpha * src + (1 - alpha) * src_orig + + return src + + +class TransformerDecoderLayer(nn.Module): + """ + Modified from torch.nn.TransformerDecoderLayer. + Add support of normalize_before, + i.e., use layer_norm before the first block. + + Args: + d_model: + the number of expected features in the input (required). + nhead: + the number of heads in the multiheadattention models (required). + dim_feedforward: + the dimension of the feedforward network model (default=2048). + dropout: + the dropout value (default=0.1). + activation: + the activation function of intermediate layer, relu or + gelu (default=relu). 
+ + Examples:: + >>> decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8) + >>> memory = torch.rand(10, 32, 512) + >>> tgt = torch.rand(20, 32, 512) + >>> out = decoder_layer(tgt, memory) + """ + + def __init__( + self, + d_model: int, + nhead: int, + dim_feedforward: int = 2048, + dropout: float = 0.1, + layer_dropout: float = 0.075, + # activation: str = "relu", + normalize_before: bool = True, + ) -> None: + super(TransformerDecoderLayer, self).__init__() + self.layer_dropout = layer_dropout + self.self_attn = MultiheadAttention(d_model, nhead, dropout=0.0) + self.src_attn = MultiheadAttention(d_model, nhead, dropout=0.0) + # Implementation of Feedforward model + self.feed_forward = nn.Sequential( + ScaledLinear(d_model, dim_feedforward), + ActivationBalancer(channel_dim=-1), + DoubleSwish(), + nn.Dropout(dropout), + ScaledLinear(dim_feedforward, d_model, initial_scale=0.25), + ) + + self.norm_final = BasicNorm(d_model) + + # try to ensure the output is close to zero-mean (or at least, zero-median). + self.balancer = ActivationBalancer( + channel_dim=-1, min_positive=0.45, max_positive=0.55, max_abs=6.0 + ) + + self.dropout = nn.Dropout(dropout) + + # def __setstate__(self, state): + # if "activation" not in state: + # state["activation"] = nn.functional.relu + # super(TransformerDecoderLayer, self).__setstate__(state) + + def forward( + self, + tgt: torch.Tensor, + memory: torch.Tensor, + tgt_mask: Optional[torch.Tensor] = None, + memory_mask: Optional[torch.Tensor] = None, + tgt_key_padding_mask: Optional[torch.Tensor] = None, + memory_key_padding_mask: Optional[torch.Tensor] = None, + warmup: float = 1.0, + ) -> torch.Tensor: + """Pass the inputs (and mask) through the decoder layer. + + Args: + tgt: + the sequence to the decoder layer (required). + memory: + the sequence from the last layer of the encoder (required). + tgt_mask: + the mask for the tgt sequence (optional). + memory_mask: + the mask for the memory sequence (optional). + tgt_key_padding_mask: + the mask for the tgt keys per batch (optional). + memory_key_padding_mask: + the mask for the memory keys per batch (optional). + warmup: controls selective bypass of of layers; if < 1.0, we will + bypass layers more frequently. + + + + Shape: + tgt: (T, N, E). + memory: (S, N, E). + tgt_mask: (T, T). + memory_mask: (T, S). + tgt_key_padding_mask: (N, T). + memory_key_padding_mask: (N, S). + S is the source sequence length, T is the target sequence length, + N is the batch size, E is the feature number + """ + tgt_orig = tgt + + warmup_scale = min(0.1 + warmup, 1.0) + # alpha = 1.0 means fully use this encoder layer, 0.0 would mean + # completely bypass it. 
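+        # Example with the defaults (layer_dropout=0.075) and warmup=0.25:
+        # warmup_scale = min(0.1 + 0.25, 1.0) = 0.35, so alpha is 0.35 with
+        # probability 0.925 and 0.1 otherwise; the final blend
+        # "alpha * tgt + (1 - alpha) * tgt_orig" realizes the partial bypass.
+        # At inference time (self.training == False) alpha is always 1.0.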
+ if self.training: + alpha = ( + warmup_scale + if torch.rand(()).item() <= (1.0 - self.layer_dropout) + else 0.1 + ) + else: + alpha = 1.0 + + # tgt_att = self.self_attn(tgt, tgt, tgt, tgt_mask) + tgt_att = self.self_attn( + tgt, + tgt, + tgt, + attn_mask=tgt_mask, + key_padding_mask=tgt_key_padding_mask, + )[0] + tgt = tgt + self.dropout(tgt_att) + + # src_att = self.src_attn(tgt, memory, memory, memory_mask) + src_att = self.src_attn( + tgt, + memory, + memory, + attn_mask=memory_mask, + key_padding_mask=memory_key_padding_mask, + )[0] + tgt = tgt + self.dropout(src_att) + + tgt = tgt + self.dropout(self.feed_forward(tgt)) + + tgt = self.norm_final(self.balancer(tgt)) + + if alpha != 1.0: + tgt = alpha * tgt + (1 - alpha) * tgt_orig + + return tgt + + +def _get_activation_fn(activation: str): + if activation == "relu": + return nn.functional.relu + elif activation == "gelu": + return nn.functional.gelu + + raise RuntimeError( + "activation should be relu/gelu, not {}".format(activation) + ) + + +class TransformerEncoder(nn.Module): + r"""TransformerEncoder is a stack of N encoder layers + + Args: + encoder_layer: an instance of the TransformerEncoderLayer() class (required). + num_layers: the number of sub-encoder-layers in the encoder (required). + + Examples:: + >>> encoder_layer = TransformerEncoderLayer(d_model=512, nhead=8) + >>> transformer_encoder = TransformerEncoder(encoder_layer, num_layers=6) + >>> src = torch.rand(10, 32, 512) + >>> out = transformer_encoder(src) + """ + + def __init__(self, encoder_layer: nn.Module, num_layers: int) -> None: + super().__init__() + self.layers = nn.ModuleList( + [copy.deepcopy(encoder_layer) for i in range(num_layers)] + ) + self.num_layers = num_layers + + def forward( + self, + src: torch.Tensor, + mask: Optional[torch.Tensor] = None, + src_key_padding_mask: Optional[torch.Tensor] = None, + warmup: float = 1.0, + ) -> torch.Tensor: + r"""Pass the input through the encoder layers in turn. + + Args: + src: the sequence to the encoder (required). + mask: the mask for the src sequence (optional). + src_key_padding_mask: the mask for the src keys per batch (optional). + + Shape: + src: (S, N, E). + mask: (S, S). + src_key_padding_mask: (N, S). + S is the source sequence length, T is the target sequence length, N is the batch size, E is the feature number + + """ + output = src + + for i, mod in enumerate(self.layers): + output = mod( + output, + src_mask=mask, + src_key_padding_mask=src_key_padding_mask, + warmup=warmup, + ) + + return output + + +class TransformerDecoder(nn.Module): + r"""TransformerDecoder is a stack of N decoder layers + + Args: + decoder_layer: an instance of the TransformerDecoderLayer() class (required). + num_layers: the number of sub-decoder-layers in the decoder (required). 
+ + Examples:: + >>> decoder_layer = TransformerDecoderLayer(d_model=512, nhead=8) + >>> transformer_decoder = TransformerDecoder(decoder_layer, num_layers=6) + >>> memory = torch.rand(10, 32, 512) + >>> tgt = torch.rand(10, 32, 512) + >>> out = transformer_decoder(tgt, memory) + """ + + def __init__(self, decoder_layer: nn.Module, num_layers: int) -> None: + super().__init__() + self.layers = nn.ModuleList( + [copy.deepcopy(decoder_layer) for i in range(num_layers)] + ) + self.num_layers = num_layers + + def forward( + self, + tgt: torch.Tensor, + memory: torch.Tensor, + tgt_mask: Optional[torch.Tensor] = None, + memory_mask: Optional[torch.Tensor] = None, + tgt_key_padding_mask: Optional[torch.Tensor] = None, + memory_key_padding_mask: Optional[torch.Tensor] = None, + warmup: float = 1.0, + ) -> torch.Tensor: + r"""Pass the input through the decoder layers in turn. + + Args: + tgt: the sequence to the decoder (required). + memory: the sequence from the last layer of the encoder (required). + tgt_mask: the mask for the tgt sequence (optional). + memory_mask: the mask for the memory sequence (optional). + tgt_key_padding_mask: the mask for the tgt keys per batch (optional). + memory_key_padding_mask: the mask for the memory keys per batch (optional). + + Shape: + tgt: (S, N, E). + tgt_mask: (S, S). + tgt_key_padding_mask: (N, S). + + """ + output = tgt + + for i, mod in enumerate(self.layers): + output = mod( + output, + memory, + tgt_mask=tgt_mask, + memory_mask=memory_mask, + tgt_key_padding_mask=tgt_key_padding_mask, + memory_key_padding_mask=memory_key_padding_mask, + warmup=warmup, + ) + + return output + + +class PositionalEncoding(nn.Module): + """This class implements the positional encoding + proposed in the following paper: + + - Attention Is All You Need: https://arxiv.org/pdf/1706.03762.pdf + + PE(pos, 2i) = sin(pos / (10000^(2i/d_modle)) + PE(pos, 2i+1) = cos(pos / (10000^(2i/d_modle)) + + Note:: + + 1 / (10000^(2i/d_model)) = exp(-log(10000^(2i/d_model))) + = exp(-1* 2i / d_model * log(100000)) + = exp(2i * -(log(10000) / d_model)) + """ + + def __init__(self, d_model: int, dropout: float = 0.1) -> None: + """ + Args: + d_model: + Embedding dimension. + dropout: + Dropout probability to be applied to the output of this module. + """ + super().__init__() + self.d_model = d_model + self.xscale = math.sqrt(self.d_model) + self.dropout = nn.Dropout(p=dropout) + # not doing: self.pe = None because of errors thrown by torchscript + self.pe = torch.zeros(1, 0, self.d_model, dtype=torch.float32) + + def extend_pe(self, x: torch.Tensor) -> None: + """Extend the time t in the positional encoding if required. + + The shape of `self.pe` is (1, T1, d_model). The shape of the input x + is (N, T, d_model). If T > T1, then we change the shape of self.pe + to (N, T, d_model). Otherwise, nothing is done. + + Args: + x: + It is a tensor of shape (N, T, C). + Returns: + Return None. 
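+
+        Example (illustrative; ``self.pe`` only ever grows)::
+
+            >>> import torch
+            >>> pos_enc = PositionalEncoding(d_model=256)
+            >>> pos_enc.extend_pe(torch.zeros(4, 50, 256))
+            >>> pos_enc.pe.shape
+            torch.Size([1, 50, 256])
+            >>> pos_enc.extend_pe(torch.zeros(4, 30, 256))  # shorter input
+            >>> pos_enc.pe.shape                            # unchanged
+            torch.Size([1, 50, 256])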
+ """ + if self.pe is not None: + if self.pe.size(1) >= x.size(1): + self.pe = self.pe.to(dtype=x.dtype, device=x.device) + return + pe = torch.zeros(x.size(1), self.d_model, dtype=torch.float32) + position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1) + div_term = torch.exp( + torch.arange(0, self.d_model, 2, dtype=torch.float32) + * -(math.log(10000.0) / self.d_model) + ) + pe[:, 0::2] = torch.sin(position * div_term) + pe[:, 1::2] = torch.cos(position * div_term) + pe = pe.unsqueeze(0) + # Now pe is of shape (1, T, d_model), where T is x.size(1) + self.pe = pe.to(device=x.device, dtype=x.dtype) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """ + Add positional encoding. + + Args: + x: + Its shape is (N, T, C) + + Returns: + Return a tensor of shape (N, T, C) + """ + self.extend_pe(x) + x = x * self.xscale + self.pe[:, : x.size(1), :] + return self.dropout(x) + + +class Noam(object): + """ + Implements Noam optimizer. + + Proposed in + "Attention Is All You Need", https://arxiv.org/pdf/1706.03762.pdf + + Modified from + https://github.com/espnet/espnet/blob/master/espnet/nets/pytorch_backend/transformer/optimizer.py # noqa + + Args: + params: + iterable of parameters to optimize or dicts defining parameter groups + model_size: + attention dimension of the transformer model + factor: + learning rate factor + warm_step: + warmup steps + """ + + def __init__( + self, + params, + model_size: int = 256, + factor: float = 10.0, + warm_step: int = 25000, + weight_decay=0, + ) -> None: + """Construct an Noam object.""" + self.optimizer = torch.optim.Adam( + params, lr=0, betas=(0.9, 0.98), eps=1e-9, weight_decay=weight_decay + ) + self._step = 0 + self.warmup = warm_step + self.factor = factor + self.model_size = model_size + self._rate = 0 + + @property + def param_groups(self): + """Return param_groups.""" + return self.optimizer.param_groups + + def step(self): + """Update parameters and rate.""" + self._step += 1 + rate = self.rate() + for p in self.optimizer.param_groups: + p["lr"] = rate + self._rate = rate + self.optimizer.step() + + def rate(self, step=None): + """Implement `lrate` above.""" + if step is None: + step = self._step + return ( + self.factor + * self.model_size ** (-0.5) + * min(step ** (-0.5), step * self.warmup ** (-1.5)) + ) + + def zero_grad(self): + """Reset gradient.""" + self.optimizer.zero_grad() + + def state_dict(self): + """Return state_dict.""" + return { + "_step": self._step, + "warmup": self.warmup, + "factor": self.factor, + "model_size": self.model_size, + "_rate": self._rate, + "optimizer": self.optimizer.state_dict(), + } + + def load_state_dict(self, state_dict): + """Load state_dict.""" + for key, value in state_dict.items(): + if key == "optimizer": + self.optimizer.load_state_dict(state_dict["optimizer"]) + else: + setattr(self, key, value) + + +def encoder_padding_mask( + max_len: int, supervisions: Optional[Supervisions] = None +) -> Optional[torch.Tensor]: + """Make mask tensor containing indexes of padded part. + + TODO:: + This function **assumes** that the model uses + a subsampling factor of 4. We should remove that + assumption later. + + Args: + max_len: + Maximum length of input features. + CAUTION: It is the length after subsampling. + supervisions: + Supervision in lhotse format. 
+ See https://github.com/lhotse-speech/lhotse/blob/master/lhotse/dataset/speech_recognition.py#L32 # noqa + (CAUTION: It contains length information, i.e., start and number of + frames, before subsampling) + + Returns: + Tensor: Mask tensor of dimension (batch_size, input_length), + True denote the masked indices. + """ + if supervisions is None: + return None + + supervision_segments = torch.stack( + ( + supervisions["sequence_idx"], + supervisions["start_frame"], + supervisions["num_frames"], + ), + 1, + ).to(torch.int32) + + lengths = [ + 0 for _ in range(int(supervision_segments[:, 0].max().item()) + 1) + ] + for idx in range(supervision_segments.size(0)): + # Note: TorchScript doesn't allow to unpack tensors as tuples + sequence_idx = supervision_segments[idx, 0].item() + start_frame = supervision_segments[idx, 1].item() + num_frames = supervision_segments[idx, 2].item() + lengths[sequence_idx] = start_frame + num_frames + + lengths = [((i - 1) // 2 - 1) // 2 for i in lengths] + bs = int(len(lengths)) + seq_range = torch.arange(0, max_len, dtype=torch.int64) + seq_range_expand = seq_range.unsqueeze(0).expand(bs, max_len) + # Note: TorchScript doesn't implement Tensor.new() + seq_length_expand = torch.tensor( + lengths, device=seq_range_expand.device, dtype=seq_range_expand.dtype + ).unsqueeze(-1) + mask = seq_range_expand >= seq_length_expand + + return mask + + +def decoder_padding_mask( + ys_pad: torch.Tensor, ignore_id: int = -1 +) -> torch.Tensor: + """Generate a length mask for input. + + The masked position are filled with True, + Unmasked positions are filled with False. + + Args: + ys_pad: + padded tensor of dimension (batch_size, input_length). + ignore_id: + the ignored number (the padding number) in ys_pad + + Returns: + Tensor: + a bool tensor of the same shape as the input tensor. + """ + ys_mask = ys_pad == ignore_id + return ys_mask + + +def generate_square_subsequent_mask(sz: int) -> torch.Tensor: + """Generate a square mask for the sequence. The masked positions are + filled with float('-inf'). Unmasked positions are filled with float(0.0). + The mask can be used for masked self-attention. + + For instance, if sz is 3, it returns:: + + tensor([[0., -inf, -inf], + [0., 0., -inf], + [0., 0., 0]]) + + Args: + sz: mask size + + Returns: + A square mask of dimension (sz, sz) + """ + mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1) + mask = ( + mask.float() + .masked_fill(mask == 0, float("-inf")) + .masked_fill(mask == 1, float(0.0)) + ) + return mask + + +def add_sos(token_ids: List[List[int]], sos_id: int) -> List[List[int]]: + """Prepend sos_id to each utterance. + + Args: + token_ids: + A list-of-list of token IDs. Each sublist contains + token IDs (e.g., word piece IDs) of an utterance. + sos_id: + The ID of the SOS token. + + Return: + Return a new list-of-list, where each sublist starts + with SOS ID. + """ + return [[sos_id] + utt for utt in token_ids] + + +def add_eos(token_ids: List[List[int]], eos_id: int) -> List[List[int]]: + """Append eos_id to each utterance. + + Args: + token_ids: + A list-of-list of token IDs. Each sublist contains + token IDs (e.g., word piece IDs) of an utterance. + eos_id: + The ID of the EOS token. + + Return: + Return a new list-of-list, where each sublist ends + with EOS ID. 
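+
+    Example (the IDs are arbitrary)::
+
+        >>> add_eos([[1, 2], [3]], eos_id=500)
+        [[1, 2, 500], [3, 500]]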
+ """ + return [utt + [eos_id] for utt in token_ids] + + +def tolist(t: torch.Tensor) -> List[int]: + """Used by jit""" + return torch.jit.annotate(List[int], t.tolist()) diff --git a/egs/librispeech/ASR/pruned_transducer_stateless2/scaling.py b/egs/librispeech/ASR/pruned_transducer_stateless2/scaling.py index c190be626..26a8cca44 100644 --- a/egs/librispeech/ASR/pruned_transducer_stateless2/scaling.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless2/scaling.py @@ -643,7 +643,8 @@ class ScaledEmbedding(nn.Module): ) def extra_repr(self) -> str: - s = "{num_embeddings}, {embedding_dim}, scale={scale}" + # s = "{num_embeddings}, {embedding_dim}, scale={scale}" + s = "{num_embeddings}, {embedding_dim}" if self.padding_idx is not None: s += ", padding_idx={padding_idx}" if self.scale_grad_by_freq is not False: From d99796898cc369123dfdea8a0f660fe174a33c35 Mon Sep 17 00:00:00 2001 From: Fangjun Kuang Date: Mon, 25 Jul 2022 12:06:40 +0800 Subject: [PATCH 10/38] Update doc to add a link to Nadira Povey's YouTube channel. (#492) * Update doc to add a link to Nadira Povey's YouTube channel. * fix a typo --- .github/workflows/build-doc.yml | 65 ++++++++++++++++++ README.md | 4 ++ docs/requirements.txt | 1 + docs/source/conf.py | 3 +- docs/source/huggingface/index.rst | 13 ++++ .../huggingface/pic/hugging-face-sherpa-2.png | Bin 0 -> 466071 bytes .../huggingface/pic/hugging-face-sherpa-3.png | Bin 0 -> 401685 bytes .../huggingface/pic/hugging-face-sherpa.png | Bin 0 -> 436271 bytes docs/source/huggingface/pretrained-models.rst | 17 +++++ docs/source/huggingface/spaces.rst | 65 ++++++++++++++++++ docs/source/index.rst | 1 + docs/source/installation/index.rst | 16 +++++ .../recipes/librispeech/conformer_ctc.rst | 11 +++ .../recipes/librispeech/tdnn_lstm_ctc.rst | 10 +++ 14 files changed, 205 insertions(+), 1 deletion(-) create mode 100644 .github/workflows/build-doc.yml create mode 100644 docs/source/huggingface/index.rst create mode 100644 docs/source/huggingface/pic/hugging-face-sherpa-2.png create mode 100644 docs/source/huggingface/pic/hugging-face-sherpa-3.png create mode 100644 docs/source/huggingface/pic/hugging-face-sherpa.png create mode 100644 docs/source/huggingface/pretrained-models.rst create mode 100644 docs/source/huggingface/spaces.rst diff --git a/.github/workflows/build-doc.yml b/.github/workflows/build-doc.yml new file mode 100644 index 000000000..dd0969f51 --- /dev/null +++ b/.github/workflows/build-doc.yml @@ -0,0 +1,65 @@ +# Copyright 2022 Xiaomi Corp. (author: Fangjun Kuang) + +# See ../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# refer to https://github.com/actions/starter-workflows/pull/47/files + +# You can access it at https://k2-fsa.github.io/icefall/ +name: Generate doc +on: + push: + branches: + - master + - doc + pull_request: + types: [labeled] + +jobs: + build-doc: + if: github.event.label.name == 'doc' || github.event_name == 'push' + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest] + python-version: ["3.8"] + steps: + # refer to https://github.com/actions/checkout + - uses: actions/checkout@v2 + with: + fetch-depth: 0 + + - name: Setup Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + + - name: Display Python version + run: python -c "import sys; print(sys.version)" + + - name: Build doc + shell: bash + run: | + cd docs + python3 -m pip install -r ./requirements.txt + make html + touch build/html/.nojekyll + + - name: Deploy + uses: peaceiris/actions-gh-pages@v3 + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + publish_dir: ./docs/build/html + publish_branch: gh-pages diff --git a/README.md b/README.md index be922c191..27398e712 100644 --- a/README.md +++ b/README.md @@ -10,6 +10,10 @@ using . You can use to deploy models trained with icefall. +You can try pre-trained models from within your browser without the need +to download or install anything by visiting +See for more details. + ## Installation Please refer to diff --git a/docs/requirements.txt b/docs/requirements.txt index 74640391e..925a31089 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,2 +1,3 @@ sphinx_rtd_theme sphinx +sphinxcontrib-youtube==1.1.0 diff --git a/docs/source/conf.py b/docs/source/conf.py index 88522ff27..afac002d4 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -32,8 +32,9 @@ release = "0.1" # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ - "sphinx_rtd_theme", "sphinx.ext.todo", + "sphinx_rtd_theme", + "sphinxcontrib.youtube", ] # Add any paths that contain templates here, relative to this directory. diff --git a/docs/source/huggingface/index.rst b/docs/source/huggingface/index.rst new file mode 100644 index 000000000..bd731793b --- /dev/null +++ b/docs/source/huggingface/index.rst @@ -0,0 +1,13 @@ +Huggingface +=========== + +This section describes how to find pre-trained models. +It also demonstrates how to try them from within your browser +without installing anything by using +`Huggingface spaces `_. + +.. toctree:: + :maxdepth: 2 + + pretrained-models + spaces diff --git a/docs/source/huggingface/pic/hugging-face-sherpa-2.png b/docs/source/huggingface/pic/hugging-face-sherpa-2.png new file mode 100644 index 0000000000000000000000000000000000000000..3b47bd51b9a16fa791a5f610e6b09b31183afc8c GIT binary patch literal 466071 zcmbq)19WB0@^2=|gcD6{Ow5UG+qNe*Pt1vJPHfwl7$>%En=jw>cmMDI-d*eNwf5=W zt=hY)y1VLEeM019MB!ktU_d}X;Kap*6hJ^=JU~Dof}kKiN-}@=dxL;{(KizmloJ;e zB#^VWF)_0=1_2QZNl1cJixWroxu+^7uHysG3s@71!QzvEqVe-l;#U&*Dn?FE3XaO@ zUbL|&j6&s3nRB2H!VXL1jL^4+082US<{Ao@;4KL1{*<(zwZCYyJ~}==E_Fu*>m!4$ zOFAO#^l30|Z)Cg2U)@kaK|PNQ1k!{LKN(EBgO70gcfZId^4C}1-BTx@q9-a^N?qQ_ zchyPsP!v#*Xr@6-fj|NT`)?qwF?^&zNFNN)+qt-FV@fnZmgrs(LRnS2#MGn`Ho-B? 
z_?V&*SMuR&X7iNQMe*R4KZOFU^qme`s-&f)45)$COMEcA{yj?1C%5@*Y#8)(U-0V) zLn;-Ungp=TFN_E(8~|2)?xq;yl1#jU^xaU#v3i-VjmGX%pY8gq~zP*mGaK2s<2*8s$Mg9PF~0nURl@YEoL{L&9j}D_Y48|+*~#? z=|2?=KVmKR;3~xiKDW#;} zWjt)5NLl96!Bu&X!P}^#6S1&ZO>te3?5g}znD!+Ibh*Cppc!BBx8Af3sVO4Q=2Po! z;%nfH)}1_L>x#L&_+Ty=h2w@cCAIOgb*R@7$?yonSSw-GFs>)XRth2d)pvc8U}$`J=|*j-pujt0os z{ZLdYsLpw}o=|Z%x1%)lmB@Kh)w$6{YQABv?(0d-21^yU3jj;<>RZG4L<|=5Po+m? z_H=~K{ac)c&2OSVrVFIGcoWNd1k}K@UC!4~Ho82h(Unbt9;fmplQ`-<3_D?|nrzlP z%p;qsn2d1?m02eDW)@cCQCkyM6ry6hqtTJ;6b?#lSSQ!LPYbH_bXyuU!H%YyX#ZNgdc8fx6iYRQ|jK;a9-;vLK{vaem&LuA7-Yd$>&1xN`$jVA?U~Z+rVU&;Vf)H zgdVt}KSBud&&?fB9^+{yMqP%vksz}Ffz0m);TodpV_=`RT}Kj`8H`IHv(reDy9T&f za_RD+;j-K3aSRkeV)!4lS}rn>ayF`5D-;c&NK`Yq*Y2ckMBgD(VpB2mzFr@0y}QqM;>YZ!`kGCXIBHoe=jhQFI$0YuT7 zjU85hd-!y2k?X&*m)LFBqPX|@KQW;I06)41>b&QeN<5EoXDp2t0q-b1g@-86o3#sq zv+4Tl;VuU|QIbW67xFU)NYl^tA8{&_ClU#KO`EV#)`CWv(xFTaX;l@)n*5XE?c2E) z9Ne>mhPuayTeezi!)D=ARh20+o>6W_=k#WPjZ8)vk6IcS&Pp|@_|mY-6l8IE%`oL? z3w|G_0pOega=f^$Asnp$$?~Sw>Z03e%{`XFs9L^OC$si?RC-8~c>-oP zk%`OW5irmrrO|AkFZ}i8z`r%fGz(V{Nh+txm;i%XtLZ1&C2w>~_*|ZuBG&CpT)u3#6LHJk5j$pi|`xiI2~!m+9$_Cz1#BJdL5z_IoAP4!D-9|oU$%kg$Z1TxvIrJrpbBUv+BVvcw7 zdcO;ns+2Pz*En$?CTiAM^53rkDz)vYD)Lb~%jIah-mj0Uz72*%zf>!ALWR^90FmtuhEWfB$NffqH;3mb zz(wOEZ4sAshsIDI_ZxDWw-d&CghpP~%hq2s%Uwpm^W+=oUbf)n`&g{AkmtLz+O*X~ zfJ?OPgbbo_c{wKXZ=21#89n>7`UL0EgR3?7YZ~FN)~hAV+&3439BNj+bfjvmW$W3b z9^o+<*95go?m;gDrQ06@bVzvVcs?(L$6dm%O}*!R-BG+w23thQGmAzd>*N?p@1Gkt zjN%pfIN?xC8={$tM{P;HK`_hSU~Z6eT| zxQlu2{nuLwUdFE`wlYHmNOy*Z7~iOiEx)}M^6NV$-?$IR>_bEcK;qq$Wr) zP-o)|h}E4RvCtl;@Jn;1aISys-`# zq4ef+%}w#ZdZn!bdx&&NI&O8g?5brchBOn@uPo!BH*$Bp}c9n zyFP{G*bc_;&1K!I^emqPt^s2o;qDyxd(iJb@RBY<^&a!aX_V1WsiE8h|2bCM4%I|a zH_JS-K{<$>!MiYz4RZkOEW-A4kSX_g>!e1bS@{g%#q$3+kV+2osc26BO`I-s{;>Yp zS@Yw-Q?g9dz3dB**>ph!y!|505)g%PwnGpSp~wo0&ap2CfFT4{b%8VYcr`wJOM8b- zrz3^UYWbUXwqgv{=6b#P_yWfd|D805*?dLF>ax&oTV&7E^vtz-D=n)siZP73rVRHK z8$G38>LHyqm3Nb0e7*;qF^U0q_P!(0-_8Rx_p`G3fI%c!=xuv@e(4#lmw z6))})Mfd)jWx0L1*DSxERBvCXjBVAlNi%oBHagIz2A_Z@soInK7!c!*(l)sX{|NKC@ zuTYBZkI*OBdM9`@G^s-u3GNWVLM_FTo=oQ|(Wuh-Q96GN%DpAwb^OJBBiOB98{7Ip zztM|&I&Zl`wd5PnA>M4?uhx7ropL@|GLTuXz4Tc@JL$|bjaOX$eoVSJN!4bADI53O z&D==h;xpD1?M!G)nD)W}r#k9&k$FtH>gEsiQrVwmX5cnudkyZ2H?texF z5^FAh1=UgS3+%Iwl_Y9ui?YFd{D`Pmoo2G$XpajWfnoxiauj!Kwd0;NQR=n3>u85e zCBU3&-Ik!AWiq)du>>5UTc8_-8(qdB=830t)=(3DyyGmW8UZi#{&I6Hz$C%wx(b~> z3QDrQWhX!8+iS8?fY6$FQU4#HzPT+=lP}MdQ!js1k**EHT9GWl-JQ?C70_-;`j12Q_6Mt90E%dpMu`uXu)%So*A0@^>|2!hx&47@IucIqyTYBQ~xJ# zs}LzfG_=j^>vamgdZmm2+ZI4{UUiIn37LJwTO=+y1@2n}Ie-i~c#O1Wq` zs6Tqn_vdKJc`3I34F}EOEj;BF^xrj?dY-t#jb=OU14L*U zT~93a?vd&3{7l-QE>f*(s^{eR+sF0Dq}}W2T$ei zjn_{~_0fi_xqzpdbVx>Mfidjy0&bLaZ9;t4^lrR%PaWn5d?Yn3iK&;5ZWO^lx|#r6 z>uD&VSsMPu!0$0x))+coudx$TfbNf6I#kx7u%FRYJQuCafmkSO(_gsuiz4m^f(~&U z;T?`b2De@alUCzT{@N&5=+{rb$mr2C!$HAvX4H(S^Dv|gx= z`#(i!)V%57qNvynE0Vb34sK2DpCAqx`M@`|;2U_)5~B_p{gS8}bv%Ua43haBFL)j; zA!ZRxu<&tjsu1LS;QO%ooy2oaO^320+&gYKgsbiQ@QEFGoTWtIqO;#v1qrr*EIZ&;@2LIl+)o}PR_BLxVT@tlgsLJ%0>ga=8(#WH!Sb3Y#}#|H8(S`p;p95^_!#x z1>(FSY@}oZQum>1OgE!udUZFYDXVPlYny&sjzUP9u!Wjk(lrBZKK+pbRHEG9i>AjSW)X9&SgPl>F~IcmM!mf=4$Er4i6TZ&i7coKCjj^b=*dR zVfyfX2!`s4FLAI;rcJ>rrA?sWXER7&C2#Qb+>R+RI?slq(Cd#80j#G`r|>Jf|1(YU zUk-Fd{KL)f8b#2exvLX2vy1Vqd&t;Plk*!hls+(sT;!a<4cb70IYNnYjWXfc@TA(* z8t5C^OA2dr1f#W_A6QY-fS5CZyD;$O#cOUyQ?E`B_h!al5(QE{4aapOE|0V;m=_;C z8O%5+I9NEMUeXszRQI=v)$qz@iji{CW-X3x-}Q@#^<9s*3m7F|9KtE(Nr|pADV?qR zEDlG%@V}noj2T^*dh|<|v)S`{afse-?3v;4+!R1T%swRJ?p=P{c?X&zup*=33mRCB z2j8)=SPQ*(gn%i5nY_iFtBv5t$=@0`#(!k7cihjn6gsqo*kbVW#b$Af$8E;T>n>@u z<25kPH{AU72CIDR-QkOd$&8Gz`L7j)hrnSy*d+}mO}99*iZUewUui7qu?mR1yxKXg 
zP_nhPUgJjj2{M_fsKU6ORy+Yb88u!GEWqmn)vS!(u@*tFEQqPiqS)E%w2F~6A#nYn zXa-Mcq(xR8RWih$&EfvCAH9N)@)tWBIM`nyoip!XJcW@0mRR68kVHZ``pWX2+5P#f zrD3PnT=HJx%O0zLkL7Isr|lR9ELaMV{zfnm14u7Gjue|G`=d_nS3azDvV zL71cM>1p+HN#)rl>x86Ue6~LVMf)K3AY9^|Z)^%)0{+~UK*1`Yg|E#O5;mXY$r<${ z7LpDUCvpZ|*`57-ruTb5JBBRDBU`E7Pvco&0eDc! zxYdl};<*m91STx>ooo6XI`S;$Y8e3IKG_Z#+aG@C#~wptCX{s9R&u2<+Qffw%) zIWpOAuH`jpe?$>hw%>5mT4vUq%l%HO5f=M<*OFf^?C!t4_JHrdy*Bn#lY6dYHcJ38 zlAHl=xKbt|8>S{pq1#Ft?p)NI#jM zS~dJc@^viN?mU4RuNY*(?x!=^LRvbvm3)b|(b+z_aPirCOT|Baw_}V2l^Ee)ikEI~ z7eWtdxT5jz?KVCtCU443Hnd4=eg^>U1Zn7~3WI<~N4W`nfjONA0#xsQ&MbWO&r}1e zQ!0T1qpmaTJ97!)DE}Q)(h!VR<4k2nsF!E(TGXCYcf@XaEjx2Lf9lu@W#6bX8CD}K z%`$eb@rpVWWUJ58cuA<{7L;mWxTsS`9E!LXih<^p;a&O&#ze&Jhy z@Wpz|P|-;E3#Yi|{)l+9GMHKSl}tr3C1Ht*TwmQ4 z3w1oqag)+Q;?X#;%+)@;ua3MzekV&d^}c!p!N-p!3#gI#n=gtfw8MOa`$|{vI*JYI zd9h6wfoHZ`Zz|GY(kUZ5aeRd)&qz5i;RbL@_pJIQ@RQ8)xIa|rck#QOttI0koLr8z za5Nfi!XLEbI??oP%;TM#Ix&H+BdnrDoX}-+9UV~XcLH9YnGh-azke=iKQ-Q3Ll)qX z+QVLWmAxNeu*V?1d-SqDZ5Z2Za3dlSf-gg3?f_mDzMu=g3MK)#rjA~cmHEB|7@IO`+*WE-wG#&EwAgV~2#CqJ ztklwelU|lm!M+Y!SxvmSQ|jDk@IV)y=f))`4t$BR_%wB3<<9Z8Ur#n^)$V$hPcx;2 z_gTVvAAt~KN~h7XOt)-!czE^#6t@H>`0`0k%gKo@`ZD5qSiqsj#4lS^8Dqe_wU>Vc zVrOAoI7HwE^z;f;Fy`AM<|;iD6T*j2S!mgG7YGRrQJFZh^rNve!ws2aKzpdvr0ufS zu;j)%Qv39VJ#fA);C5QN)u5QoKNI9Gb!$MpVd-QN5ncG$b)gk)_n=^rQ!aq z9_CT{9Emy7hnT}pk}Y2Kqba|9vHY5gq$qpyiCDA@XRJf2K@t)?rtIn(En7T{o;?mF zW8#qeJ!__?wd|V5W}J!!N4*7e&#Paetv5>9ysln7F0wZd2eC@A$v!*2|Nqirrxb=? zMU1cXbb|OTm}vOKn&wLnp=+5Za7Zw!`Al%RDP73r&sgA2>50=2-au6!9;L=X+sg3^ z(I@yOMVopW*7@>W@YJv}aiaMd0iK%vRFrd;-MPJeW$0kF#`tWcIE1s@d*};dPBRVw zrutaMjRr{Vkj=r*St?v)uWi$ySuFv%1wXd_NPPbu;eYe8u#1iJG?L}|_Pl-caxy<* z$Ab+34k|5}Ow)4#YiK4*&%;#J(y_@wqyS3|h`g|Sq#`PbTpLpHm72*hEb{SI8kVL* zA%}@UnPVS#Ox=GbVt;tyMVvHFc#A?fJ3}oPNMo{>K`~S`n|nzx%uoz{U`Fh^Fv-hL zJ_3gU%?7j&A5vT1YRvZwCznRJEs&8WuLE{RPc~tt$u!A zdovM?w%@|8%n=V%8J*6hg*So*A7XB;`fI3BD1icgiOcW7Q*^xyn^Md~V%n_di>**G9raS#nukGhDtlc|fUMkWM zA85v_aK*#NgGBhxz)c{`2|L4{7Ge&5H{8efgljcj^<9p$iH~bMV2bO9(L)D>k5F->F7kaNt<%c+ zWNMif8?Xt2tb|2mPpoM?lY5FyV(TbC{z4-UIb<+Uz?&51O zgy2sw&;tl+nat-1!=}HJW>_pI6UknD>`2rh9ki7#T;Z}PTVmR$YrOHztcElnA7&@@ zZfkdZGoG{bl(<5nGe?8YBW~Jv=dCGIc3UF!!*Of2s3R#PTiI3`XA2Rz@a?{RTKp|e zS!neu-fTHv8*h?by$4yik#dkaJmwB(iyh4H2v89eNXcZ}E;=k=>pgsw{s>*P^#RaQ7j- z^_ST!Moa-Tnc43CNLG%_^TsxGVFHR@txFinYc&&o8?cW~S*Ksc(bakkF`tM#b$PB@ z05|I(EBsjFj}E~vsqBBDxSH9e8ah?nb{Jq8LM4*m@6xrv;eAV%F;CcI#2&&eGe3&v z*DL!B^`|#FK%-zd8Kn{Rp~8=ik`K9PPa7NsQF^~6DT)N8Mg0J=?GpZz@92F3hnLTVTSGl>M$X@CaYu1t}g`ZhO4Kg0o97fBUu=6+CK9zjWg^Vo7qNuzr_OK`v~ z`{-1w42;pJW~|$Oe%d;v*+so8ARA`=>z}FKCIUZa{uOjw%K~`sljnT79%sS9R2?=# z$=_mpH0}3$eX2{?Ml@-pJiEIP{wY%|uy_ZV?(x^T-4DSZngzZH2JeQgB2?~0sQXI= z(M~l*k;u}^ppT)CO*ZX0fROM)V2+uB!Xa;;ODF=)wKb)W5eF-3X zgwpBC)CfqYqb1p#*hd)QWG_l&gIWQ4VYAW4VXfZ-e^pm;1_Slrn_KNUuf|KQOXhd9udjnsCI@)vK8j(^ zSdt_3JPu_Xf&~!Q$?a`864HNiEU4KmRhAXtdU-J<4mvUYv88{_=J%3~kr@cufP7_q zwAPs}{Ejhilu{?gw4f9d>b$Fnz}!B9;RXFS9*HxN4x#z0&ZEkvKg&yNWvWiNO+F5NM#??nY=A z_`X~bb10L5Q3`=}e_Z<@NQX`zB`q*$V%-1jPy~*(Hq+NAuo)OkO>Kt=VQx%K!xtFo z8tYn#kq>(HiD!hqC4)^B28-@+x*vr<+v{ z5qX2SHbJNb)9)CP75oasgJ`Wi-EmhDHc&lbqLe38Qc zH{_|gHf(G=vHwrf-%`hFByH)d%1bo?yJ_v9Zb8E{nOex`mjg>QOaQ*GT!7vB>}ru5 zU1N<^HMQA=KJpHnM{Tg14Lw9QQTnrPdydZK$0mr0P#cH-YUO21i3M0T?^64$O~sMBshhsh7*ln{K?(4i9bPU7}bb5-`w+3 z(=|GS&c4a+fKU33iDK*$gsud0o_)~HYYL~0TqC<_r@4<|J}*>FZvrF_&A>c+|gfp92E zJMcpdG$6bYTiOpUzf#fl>x|)g#|dBsSPeqzuN1&7+kT9fdSi;p2UyXPyYH7gLSL%G z{NMECv_9;8{-4wceOW?Oz{Q zGu7+WuRmT7lz%+mS&hk07WGmQsq~W?{nZ~#Oo%N~USMHUD_Xr%d#_&R^SwH2u_s9@ z%obe+^#lq+UJEJ}5;-dj-1(X^Z0SkOD$d1SgmX;{e2eqF-zFY(yJ;=w&5rhu<)@+z 
zdA3DkGF0~7gj}DN56lk~=zN>Mn2g5r30}bPKNXeP{t`Rc@S}y}wvj2L1GP9IU_R~Xv4g+HgEu0t*+6o+pXK=(YtU{}{@J{{ zpl^4p>n1vGQH%mt>7)ODK2u}=r>XqI|2G`!vLuS;KmS|-bhy5}$~$>q&5u-v9-pE{ zrb7t>s9vB}=JpvoBQyMTHeu(jVSH)A`?-{6G(CH#G3~m!f$~MKaqIQx_G~kC*RnA# zz^Uw0E~7dO18;-!?!rn^g0aznY6`*wqS8|(9iu~EiTk|%+?!&K#%+HCv^DC604b*# z(NWq5a*5ea<)7cbw&-RBjg{7m7J{Bh)?eWo9Qw3Kb~G?A7Lt4GnJ zG!FB!*JXnoS)L!5MQX6`vnFV6m3l2w2>5MGcF%y99dYlRP1^o!bN02L-{Ds(Zz^iK zxjO))yqB1SgUrcQ3W-6vmpsmYrK2pV9$}8t$p7(KL$!q@Au?|WMTT}AyJm&s{ob{Y zcA4wigef8@OmAAuYa*TV8YYqc-AskLl9AD7a|l_d`P702?_!kJ+*n1sLcaTXs>3{` z{eFSzD(DAeE@1>usm0R7!z6NUf3{~I^bcqV*oNS`+eZ)3VgT~8&AcUp(P=5fg=$ zzDj2m*D_sqK|Q_;cg))n}MgQY*wLO19a9!BSrqC=EP!y_%pJ zw#xiLC8@2#h(vlE;6+JDvku+aNr0$2D;EcaTH(Y7$1Ad)OdFRda*UPg3J7LI-Nj~T zCKv9TX|9_Vo{o}`7q)eKTnW?y_`iMPI{NsaP(&eYVk;!P6wG!EJF3fAPk7SUGV~aC_23Uy~-BN`Msi{BX8E$a?L}n|{OliiTsK$Ml=pTtU!0K>7lH}6 zvgd@ls!k{u>aNy8Dn>tOI4)`Y4hCBMN1Btr1%9x#`*Sqt#8Msy(^}KXI^}FU&EG}* ze>ZvRt|3mO^ChB@3Kohu;_YISwuIj3{;VKcSTmnWwR%I1y4Qz>_K(+F9O}}$p+zY7 zIHpe|QPHWxeUy4<@ZDM=xKwp=SO9!K@qkmmbxfFDjwqqVg)CNOYT2h`ph~a{f1oYo;VYLZWDb znH0j?HN2Jlv;A?49qTmFmXF0%wVe7-?03*4X&l4c`RD_?O1zykpdl^+{cYlWBaRi! z7`BHsf3%QhnvFhDXy|^VD>M*C%fCv(huyldN|a1n@$7ZYCMAc4(qw9W?Y6V4KA%nl@m$|M6$Ol5>UAVn4 zX%&Rs%Gy4RZW=mdtrw*2-4AeVC>F^&>CTmWL{oqLR;)E;##_{XQyJ0vn|+L)%u!ub zGI-P9A06X(5%%vfH&RF}r%mKlfYS4VUSte8@=vZ7eYE#^pR6f;or#l#M9~hOf2Pqz zK7Yg!LXrTYcZYiB)KK%d<%|?9+|W0kKbtgq*x`k=c!i5uhs{Sm&|E$&lDBsP?VM3= z3nyH(U0uC6;|+w|Pr4W4?Qx}VZVtXF4lHjs^;B{Txgr-j+W(}n;?KAw*eFMJ1MLe9 zrA@1M2aL3*$s~?;{`%xjfK4TO8f=v;eHz5+^?3Ry-;F+$$Nr1MLy`DaLu_-5G6=zYdeP!t@CnI zWti;65uW7zoY8H?@3=#)D(Il=;55y>9n0MiSQZiuP4F8HNB12tqs0sm`7qDB@h$1>e4=Fi%;3$^eVgxw zb#s+8nwYF(EB`g9v*=969icm(mfVoswU>HXT~p6Y)Oyy+&Mu1oKXzFSVYy1r!Mz_v zY!0g6YUX>?p1oi3fn}WS#B;t##mhT4aF=!j{?te{I^ftKC7Q(Sqs4jC&w`Zm5sfqo z`P>>xhDuQ%C$^Dq(c4b^bA^^Bm8@R$)Sz8CuI`a;SyIb zH74WlVjnlQ#`TM6UGzd$ITWhJ1cz#A9tdR=?r~6KWNXQz0y(YiILs%M?1!S{AOpTf z9vZ|0g@m80PoSjr-!2sMuQ3EQ?jr^7@M`1p_7lb9%!{XLs2sG9WnE9MhCd8tZ(Bz~D{OV(?@UYS z`YR)%$X}?bYsV|ZK1&QEYJ6Wb@5DRB%#eHDU_BgeV)q*RYrJ_8z49CCf;w&>f9l~s z>J!r1&utkRB!>dje=uXqI#5)!HVcS1^HdM9(H#_|rgaYyi3Ffw zX3vN(f}YUSy*X8zY7m3auFLr_&c=!N^?1&+z~xR?Qulb_4^tF6O$v~AU8F`9da)8| z2bnORH)V$*urRzu-_-esZ=|0M8Z`$S7F5;sO4Ckie-Bq-zC*51+`=?U8nWM+?!2xJ zk@NTkymn62g2BtA?7^URSe3NUH$>-&aKcrDfSw;ra1sZsgDr{H#$XjniQHpui}WYU zoqkd8&M)x|e8U45bJDyqZD)uG087|+`GfjXSIVv{@+Y-L-zIk3YA(s|C7ktqT|6aE z5qA^m*t~hDPqn20N8g=>=#E|4Nn&OH-lWN(TEnY!g@53B&+5M^CkGT4gG>E+kKX)9 zadu7y?Kir3X&IjmY6U!L0uB!j9e>aO>%&F8oe7JwVoGec%zf1Ic789boK=uH67Ow2EIYR}ixwM&@&aN*9Je66)tg8~cl@a(?Hc64463?n_~Mox`(w74pC zr@C>xfe%QG$TnY8blB2_qKfOiD6w#l8X-1?TG2SIZy%~)wq4{0WaCmwzw6&jhf1I9 zUJr0c-dIw~xm%OhHbG?SRjP>ziC9M`76>z3nqTJ+JT5)hX_5~M4`%YnfDQSM*rN7R!TjBPHIJ0q3-2d=f;Vfc!q4Xm5>p4kyE}^36HdXzC|wms<&w)nl65 zUWsEi3J|q`ke2`=_h0-*G4QBGPY*(t@$Gbd;9_NQdZ%e-`_Tq(nEzHU~Q_K!0D^AVN;Ef)2#^8?-&thBUX!uX|nYBD~sX! 
z3Rm9;_Y|~@%Q0B@E5EQc187XY(%wcT05-z(XBHS4i%6YLP8=fW`48jub=lgTr7J7n zB>!lm?dByR%~S(j9d%z=st+8wtud3zHeFhZNfp*GKL6c~J*>H9dujM|j#Rez{xf#M z8q{AWqeSU4kh$8ekioO&ruwl@E$p5|J#i*Z_>B{@#Fc1w%oDAHGc?BU>FAS$;!A1P z{W)htLdq0^nAW~jOqtjC;ptR9)AB`C!GsRgZUbuw*Fv6}MqAbWNb-Lc)GnWgc_$6O zU={N4bqIyMom3xw5>HbUK89xG{uqquHw>;u+~3P9u~*nMwR=o8`DXprC(9+>)zbd$ z#J1w)p-R|8)m)5Y+y~&KE|0?eshD_(={GXFkPJ(CSDfW&H%f>1H~9s-%`tN}M|G5x zrjRryIKX#kM0dV)ru;-Chmz{#=BA4Pdy&7M(BxMvLoL&2>TJk2gYHa?8zBU(z)pt~ z;@wV%&3Z|C$DdlLi~7M(o99>s9n=!GMsC{=4?XpNdR9G-YWHYs8C1x$-fx)YKR$s>WJGzlFYUpcpbNFUQ;6MqoqO2Gezp} zdWI&aximEY`q2Oj15-7UdJnjz$Jg#pjqVm-LO$_-*3o5ANar@fKOvcjOds-uP7>MW zxr*osrw*r2MDUstBm!yKFc3s|l@@NF2kw1d_*sLB^xypcY(qu%1x_}d7X%TdL!2w` zS(^z#-#$ahnrw}+a)sI#L`fyA^wM#3ev7q@DSU|{iC zJB1~`f3E=7RrweMe8&2}Yj+(-DILq8II+i8!=9L|SS6F=d-1OV(XJUD;rg3i{X(ZD znU%RXow0rffnAOiu-4`(7xg!e`_E?`c{X_{I(i|O+9Am_R70PQ0)KFuNnEw) zWQ$){<0_t?4)HtPNzqtdQ%X&6-9REd%@NJABl8mb^>Z_Cz#|1e+{)RGw2>|6qxbxD zCBdnVdANYQPw8&V(3PJrDKSZ`ETdVqFJ+wWVS^wb~KIr z^)C?)pvHdZdsjLvV;{{%*E$DA7)DlRyttf4RtXi-dE`V;8&CmnaB1yrddutQcV#!< zm%;}zM5Hbm<;YKwS|24(SM+_vYYR$q{oHFmaNDk2H9cGh2Chif<>J6e@CWP9N!Mgt zXT9s4vkRoH{#KCQ*#OI!AO3c^cqu+T&w9@cszf8!_dMNcKFpWe%wIMmE&J@oJso+)ffipWunrp6fF0U-lYd*zf6{1ckUs<>K64fTw-*4G} zDE6a+B1gz?No9FT#`RK^iJqA|L6)&Rb9YPoDS}VSvd2-F&Ll1m1GJR%JCTY~840kC z4rMlTe8Aif)Im@y(L8-grkH@oxvS9oAIQc22ty)`f-!n=ay3{)!{uqH?!<+}E*}Em zH5+z809nm>`GV_s@63S#KvQ z0L=uNtn;H>Br7F`tjjh=4EtleH>6^d3MH2S@k|)5Xi)^j z$zemnfXpa^SouoKVavc|NOpWjRTB4W7LW+wY~b32noMmpi^v`tTh%DY(K3yPw$r9 zl+1^gvIpBDN`_Cgds1B`9SzPn~ZBGv98H&pb#2r5gcI8>F8kx+KMZ3|Tq?~1N zFAO?}q@^jd)*$5-LIPZU`UAAc%w20H@FQ&Ed*pQSu=be`yD0y4hk1GlhQwLuELlZQ zOvG*H>E(3pFR7oP)D+%ERlAO(EQK!rI@cgs^~G2{=nrk1Ae${w_ql1ExSu@8Za$gU zSVs*-wHseHbHVm8?un+Dd}v~w)to|ApN903{R?wuwT1P0kCc;LjB7DmZ)#ZH5NoIU zdtA9{uyTgRm9%psdjF3*QBq0TU@A?4WThcPuif|M?3*-18J7qyr?>WFK{p~G>@$Jb ze766TbGUmTFnGBuPF#)B>)JTd*+dH>0-JN`eg})f0eHHkO1(*s4uslAEat?q$yDrU z>Q)>Ew5d9qc{FB>PGY>amS`10Kc}9j{X5lF4|)l6lEdc(KNNVntqRL+M)pAVP@YTx zKK)!?C7c@0J19^ZZaz>Lzm%+Ph`myL0FI>`T4d$+Olph+A{0l?SU1t}869X>Zj9jJ zGVpP0M_!F(cNkr{-|{(E-w(+hO)Oz&%;@m!`>+ug$glVtKT#FRKAD0n+;G4Ebm>z8# zo_K^=qP|rVF{zTLOpdisykOBr|3}=E6We8WDif-(B0p6H@-}umjr-&sY%zO8&-kHj zCxN0SYcmcp6;iz>#LFQzlTy`^*z#)S1ykx~iJ~V&fK&DETXjfTGt7pnAy+8$1Ail6 zioulxTSCi??=;2+UH~${=qnXf5Fm$b{(cMIAuL7DkFUwP>ETSNV8?|H6gGQ6b%H>wts*%=%e<5GolpIXt+(c0=Ft2bcbp zZg8(qvgRj)w%qKi635EZIei5NPGheIos6E2E;5`k z_FDMz85w}uc=EFm0ivP_7#N?#L=X`_Qjo(2|73xcxI8L^{Ro!Np63cR|Gt2;FP9{h zO|(>F5np=Kf^>~Q%J+-8>sG}i8D1}}mKng@Ec(n!WUw{e@vpprjAAq9-s9DS->l~{ zn5|;UI}rnPt?Rm$gh1zX`G5laF0C* z=fckdTA9=+*?DZe@}h`)s50fmHJMSDp36S}xP08&`*uH~edyXd%0Mp7k-Veo1+m@5 zvTKL8)e_bF4Xf;DMLnsnEn-v8=w+V>*8?K8p(w@sb7T&E-8UjnZmqDzQ^v2d5f=z&ur#B{JO-?~>ACfB9-D*SWHvry>rn^VD)Bx+he`i({( zbqz24>WG)x%KLi7>Kv)zbuJ&6pWJrQ$_*)|cf*S3cSK3(izGke;tIt1evRt2Ug@`B z4ld9^=4DVMpbphg^0q|`k)&1nD>|$ihFzWe7Y*T=y%WABD5VwCoy3KMSD*L$6*1;? 
zvH5g%RW>#`*_MJ6pg@;}jkl{h#WIS|n)eG|O4gR4qH>EQK;Jyn0(wi7^|1_W&Kn6u z!F|8#xJVOqwyihA_{$UKm>z#WJtW%5i5m2H{R&8u%zT7H8#4~@0Exj;7fGpnW9ira zK-hODW?c7I7*T-Y{_j(($Bp*1emY;0rxh;lfySKu+vm|WCHamP&HDWq(?|!7U;Lkm z6|>OEB%GL3ODkK$2eT#r8cweq9GIzT>&a~c0I`aK+;LA?8&baKBA0FF8G4{N!x9l5 zEfi9rGpez}Y`;(Hmr*psoj3H7B^NO-f2;Iz4^T1ZgnTcN7M{Dq?7|AW%xFmGGGaaN zan&!j!_EsTA5U$EqO_dPP^M>^S2@1H3ACeoI`%iBGcCUiy+Iu;m$njQ3XEF3Y!@1dr{LALQ&V&HX-xLRYR%m<3q`mQQ2& zdB$959`{Qfth~7R<(2FFv++~K^j(W-InVZ*Li$ZI-Nh@HQLutd^?v?LcGMpytS;d6 zEiARe{8k!_ws<2fXz@0+7xngvR*)EmUQyCCWM%*I2klmz?+bTeS4)xE!Ak;#e~?4r z*Fd1h47$)Q;v^(zo-vip^~L9+8wl;iEiC^$opzaPl`_TqDzC|FTCl_Mg?q+A90{OV ztt*qFdz#5h#C6Od_C>LS(`wH2=@uK@MTgN#OFAYjd1`!?k<7dI=LD~Uq zL9_1Hdx7#;<;w_Gl>On&>g$@0H-0pr@Q~SX3T}ZkROjP3q^PpmY?pn%Y#rk9UEJ~`9TYt8+)kPF#28hDQ<%T)YpbhCulV^ zG&E~L%&!+WNC(Nh9r=2IiqlN$4KXrTTSh56AMWJzwiXXR#sql31xXJxm?Cy2RBLyI zc8OM-gr4Z_&%OQ#QCQA1Jd2G{hQJD70D4XG6!8r}Y!rzoW}hH(yg^=*=|)=c_FeuxgPfe`3eidUH1VO8v8#e`?7qe`dcA7zt7tJmdx836p* z7U#-O;m`%)h|+vzdjR8E$m|*?g%wZV%IEG}!OE(N*VaSO=U4dQ4f}6>?<;CEDy3eq z0LY;K6%T38`^M`);h!arvHswLHKTGJ`_#MZh@0)U-!KVtd>%_ajfwD^H)KHPk-wAo zDb!;;mV{r$**T%T*#tsp(CNwc`kKO#q*V)-`XTle_9AEE-FOOsG#->7O1v|5z zEUe!DBzW(dLu(J(tGtkE2A<%4KQ+IHOP0!MM@PC2CZCF?){yuoTy^7@?r_A>N??>v zsP=h8x3=QFR?NB1p$3XH`b6QzpL0>xb5IJqD4h*xH;*4A2jA=7&+c=@pwqmxF6cVC-JgXsxL+UAI{7_ zi9OldQ4DK=SvLg9LI}#f++TVT!{!Jjba_K2=$&SkZR9K!%#otJ7&ZqS4drjvs4`R@N zhmq7cR~jxU|1I8l(}^|}MyTPguC8&@iPzVBBe(m0nm;)1uFU6ibK)z_$X{-tBdZu$ z43&p=_*v88@)3`^-F*Q}25I7hAw>%zB{v-{k%gtHyvRq6IBs9KBURdOt^VoVi~_Pf z^rmMuwmkPysV5|o=ny$~F^r)t^{#h|nv5UmrVOfRXTj|Jtko4Z;nPly{tufT$$E&w zy;u}mv&+yLsiQ?r19dkL;SQkuE5+t3g2RHJr!`4w^T2h`mcivHmsR~+ zd9bidbo*^%VA>0=Y!95iu9W}5c~8ibM>|l!R%V5hohpHOg_QI-u4^`J&}%XK)YKT> z-$Z2u>PB=z&tgp*tT>B|8M22L$9so z@j3C|ynW8u3S9cO?qK$c!41N5Vw^&;TDQ?MZJHhTJ%Vno36Gy40{@}m3Zmk%(xG4WxHk-2B?Abx@lqT6Z7_MY1+)zotUEsz`zS!4^8 zK5jL6h!l+poHkq$$w8%t44AIxQpKrsxGl_nzltLP>(FaFdtD8t%#)c4ZiGgs15y|jWuySx-$*QVtRS}z^Ypt0_5c~9yc97VJ% zt19d8!>UD2tx|c|!R1cK$plB{mclOT6|_>`X))+wbeQ)quL^k|QK!Nqm(N@92ZYg6 znRz-$XQt5N-I0wD{Re@|(r{SOixFk2MQ?L+8qv;@`B(Ifn&5129hN2@DO>1@0|}_< z7tux*kvhsSU z);?1=Oj5Bl`A+J8iWJC-N{`W7GV8NN5AqY)ut*_oegBv;GeZ}9FS%ff1AL@{33t;Yv8)RpT#=qsxFq2YB*b`OU_m9X1D1_1JKhcEAynUIynk#`i_ydT+3=Y8j|E2kOCT z<8H_Vyte#8ehhKtA{!0N{#R-l7YGdhu)A;PBMLY9=D~CJ4YdH-llh=IHOJ-2Ulnol z-HG7Xky_T&=`WlwR@ry&edNqdgn{QjeGiW};EV>Cwa^Bba3PY%j8fCdZ{;ITDgD?2 z;QdeRBQauqT3^$a!uGtg2S24w1*BiCRJV_b4yG~EdpydQk3EK6h+?V{;SGi%<*iac zDvPc0Kz&5y%^u)nWqnMPK1wP&YU)iWF(}Mo9ra-o%4 zh~2XHbq`H-m(Ku?f3e$lLy0_@0YHm3+kJxhl{%9};^s}1*J`O}OSsutQvYj$wZ7?3 zL0Qag{{m^iA0|py|GdGAKkr89o3h6(v7}N=(l}~ zJFHeHA~MJljb}qr(@>Ki^lDR6`StxE1Ws)zuao5Ew6eZ1e0p0cQck`Y5l?Km?qN{t zrAfNTvl|?8^MI#|+Um^E>ZcBeBJkLNxvYY_ZQz)tz8I=hRFKZi{1lJZ{fMKUd{&%6 zb|yz(b(uvQJmw)_dDCetOT-^L7wZlPduRk|n8{sqB)a=O;F376>n$8Ma#WSWayhK} zwy2)(g^V5kn(irJ^-~^EecCUNDzycp$pJHeToNcrKRoaiJ`237>!D{~afLKs6I7Vu z&-qT?0B7%6gf$iT4M2QuyzeTJ($bD;KYzYMX{T~Kf4Cg37x`l_X205ckdy&1i@Dx| zcklS}!;jU+yOhS`)nUGc*P{i8de!+I8py)83G`^UZv7w0ItO{(d_DBxfJu|N=YtD& zE%(zWyD4K}7jUTd9A-seaV5ikM)D(5N!KMilTrL9mH#eyITsj4FomW&2!tUpuS#g4 zF@_f&)c!laZ}6{&WDz2FTsGI5T;Cq?87kfNDPjV@t8pR&P>_*VE1#`rj~8 z&6 zznOEtB6M2Y|1Uv~rEH#*z`*~Gt4hD?NAfOji`BYD6_JqS*1$$%n1H$^MDq`+%3?<0 zgtSisY)S%_#R!?_5r>b3#EowDAHVA>+9c`*&+!x!lfNc7fUburpW6%M5=8vr&&Dhj zCSkE`Qm3?2CSQXczgwP!%0u}&Ctu56qRvxo-wU7T$rh_c>yq4;`f=uh?y!6Hy?v-R z8;C0W%LN{F8`@*y1}MC_6SRM6<)?dl=pi8fG4a?xGxEPkd&{UeyDe)p0zrdI@Bkr5 zkdWZ+1b26LcMlfaf(9vqySoQ>3U_xYprCNQ&)e_Ux6kR@=k^)n`&pw#J)`#C&t5Xu zTyw3cPtY8)pddwM#U8gxauFw9{CULI{@8}Z7eyhnVou^E-0SlS>HgJOMWQ?E=GH-l 
ztlH&>F1!yWD?Hr|OSqwfuWU-Ao8OE+aMWKszTo0<@eX=oduuv;C*ba+{>b!Eb{zpO z<#F5i!nPQA9lDLlPVySdxm_XOI8OZYi;FGW^^%HqCm=8J?IP0R9a^EwCHG&E`6hOE ztJsz9uZ~AYXIviVb7G1H35a2XN*`Ow$+tWVuA;42;F!>Z9~_YtU&+1O_fq#+vRiJP zxcvF1_1o&;HFe40B<^$h@6+~s*-Jm-h-?H>k6g`$meV4kZvLE*DQ3t9QGXfVV6xTA zt0P#G|CN|s3!fj)1~|G^ z#2dPs3sZ!hQ5|TL_pPJn)+PINIiYZ^@P05S;)jtl3-p3eku2dqG3}nU;b^&ZtX&=E z(I0UJH*@-=KgApj3yJQv2IzDglctH$7JzKktbcVp{wgrPUSiTpIvDvJ-7{(R|(7)b~bjP zcOQcs`j?q|KrMb5igsBA9y}Pn`NV#9P|-|pp4)W=Q3X7ciihxUwg-{81n}y^Jdw#a zjtHJ^+8}TkA)e*T`PthoD~h0CJp)Vw90HO-Qhkj=m6P9^piI=g2l=a)$XRMsA-0U4 zNv(w?UkQKG*=7OmO1Jp(zPH zu!tP7C<+nW#U`f$I(`r(6Ck4S6Q~RJ$WmxJu@o6MWOb6Gr^o)Odc6y-VnG&X&J6ch zKUrxh`FzGZzYu=1G&;_=_&tzO#7LCRq;u7dI0F}~6d3m&3Yo78{4!G|?6tHf*}4Bo zo=9x;jWrzL2rcOS7q5+{lz!#t?yWjcmwa!G-Jq?s7?m4|(`;;!^@T9$-Oi}D`x_%!TD4NMh zP+~sXa5|B%*S=x)6?eMG4J*ob9mB=1^E-0Aviz=IOVQE(HEq3(4mIg)jy-qVwXp-I9tI)23Q?`QZlQ+?nS*&dLsS z>iLtC9D2N5JJ^_m-n6aH-Nj_%P%F8W%M&=7N$svc&?5l)f9JX>j;s&T= z@dy3cck>-~gvc0Oo)qN)Srp6U{dc3Yo%99U4HX4joUevUG&en0IrQ$g`@#iU$HJZ@ zLUwJJA43Y~@A2q-YoiF9um!b4SPZ%z=}S5F%Ip?ve~s}SS=UATK_kN?UKn+WZ0#iL zY@7>mohEzG;Z&8^IBb3Ne15#9_17uX^X)Z(GYDXwG4s~q=(!C2dW{S#<@wF1Q5{rH z$T>8Vw}8Fxj3ycvR~0uBz@qkAm)Vn`^>S8A+whwXf+4c|1QIOeKeYh9^uWbw)SBRU zXY;k9qw;RRttd0hIk`K9S^+2@n|R??LB6btsH#yA4I1{J_0o6+C=!zi*EH@BnERQB z%kj4oOYRX-Fc}7kj5k#9$$uN?n2?uqvz5T_{x~QW)>r$GxpVlA5=@iSjSdMR^Hf(6 zUAjNnLn4j;jkooNGFKSgNbo2xx!C0awlhDElxa^EH~<%q_e~!`p;7}9(%@ded!1ea z%4ifPK-%O7GL5gC%FtJ61d+xU_U6QWC`_&2mabasJi)wfN?3Bs)7R;p9^TJB*CjBI z`AP$phwY6%#+f^Bko2#*eytC!rb4;a(irFMHxDS~5_qtt2r}^gYotA5o;k6zv?MMA zR78nDD`DzSfE&!Qf1=QFfDh?A*?Q_GbhWp!+@RA#7+a)51M6?&+mwA z-!NPOCE;WM-qwo7+eo*YV+}zi>2mr~fB~1SiOA(XpwD%^?BtU8Mvo$$yQ`VqbkX(H zedyjYsh?3x+mp5Kxxqy)v8dKpNyo!y?nfemd!PIl$JkfZg2bQTh1G(P856Bbws101GmW_F4j?r1>c z#fKs#=Ts*=;OvAf6IxKd#+AvMGEC@n-w^rg&OwPP@$3P~mG{S9Nc0i$7TzRDOIXjM zfM%wOSdAJapZJNkq8F5s+3Dm;e~g9DV|q_dV{!`W<`SQD(20i>vswCn?rOF=1>K_L zE8LEC5E(yA*u0TH|E(2ZmDc9&sBM57z-xDeHKcb>sdA$IO~GJC85^&2Q+DsMaJG~5 z8Fn*2> zpp5{&`5-@SnuoIfOz}RF;Oh<5-gDz|^qHrC7tdZRpk2HF%M7Go$WfAfwSvmB`9tFw zbEG^i@TnU}9@`QAN*)dVP9EQ2?()Z==ZD5B`qtwYlq=CZl+ao3$CIHPBp`I;CY5QO zu|?F}@{Ng(QR$6q^5?-OVw%BQ-QU4}rCa4Ol@Lj|Aw`8{%HN!{Oe1+KL`f6%{JZ1z z!TbsevM;@Eqx1_fW>~hfXPvvKNCxA&nNaQTkEKuw&6Q|L*!)j3v;2L~hcqsz83>(N zshp3+UhDZ{3)_q^xCi zP2XR1if4{r^N-A=IUn@mPeS8moo()`4e`9Pc_A#x#t-Z0|55NF3{W$un)1>Xo@ZO) z0T9V~JEKNd_as1L!-|H1Au8B`01^zP?n0L|`hvo@L3=tW#h3$3Xl*I(L|bJMerK1l zo`|yf_QPK$fvr~E@z3|Ci;pNIQAnV#UTL+^uX6PEMvxZ;JLkdKodKL)>e)3!c8 zA!r*!DIjG_BciL8s~=!F689_-aTqfyz7GJ?#utqDlX0J#n*jq#;*=B zUj=cDq)j``I8m%C+O9nD-6Hp(8XqVohu!);oP4Cq7_^Jv0_VaRC_y*?qAb0MRn z*3;9{=^=xdu}}iMFM`WazAsDn@El~JMrV~ghWmtr%c)0q?%uSM+S>VG@>g%C&+T!I zy=H?cNtt$?V$@5MeqtwQ>Ucmmas;dhy5>j9MiSf%dT4>op40Mx9-?MAK#VgaP5lh@6emBrn zV*6lf_A=z#e60gE!z%V99$ z4x$dTLfhNzZM2<6XKXw8y)jv5#Gy!br5rJF@AF9q0w91od37jX%<0l_!uyqhPQbaN zAjk#CAsS|S+-aaK)M&W`J3&6<>AM|FCe~M1KCR(?(#|$l{_M%j7Ck!WQZtXb9CBuC zEGdaHWM*W&ILB7XvohNN$cy}vv@QorwOXL>(mp8mc;YcL z?SW-{bWsLLcXJYj>r1pZn%QQY-|-jEf!(o1Kg&jK=hG_*N;{F06C+5k5=i|c7sJlw9_SX zx*IJS>)7{7da~`yL=%9+|DTvAP02sw;B~bhfw9TjOiQO_JKEje7wesYI;79dF5IDd zo~;YoWu5idTU&%9^%)z4SAQBDMri1C>U+wOMRV(N0z1;ov}_p%o{!ZQJz+FX^-Hc0 zgU6y_DLchNnSF(Zp|_0+?Tmr0k!{JA$Pq-9$<6uXSJvD%SRoB3stuKKPUn&f!+Txy zL$o^>tMt;qbnyJG7LgVtKIWCtskT}}aimcVoo_8tbXiaZWciVPcNiq>_U*KP|G@OT zeDmP|izOz}>VTK!3JHbkhROWhhtS8*WFXG_=HFh|(^~<_;0w&DOH=tIjDX9NUjCl_;{PH%Pbm4`JxrN`ZE;L9aad%h~^i+kZROlg*NI6MJ5ctnQU?f|Fii%+G z69}VYr4l&;RnN4drx2p(jIx4*EfOb3M{8laByk*$*Z${27Vi&r2eTm*?@c$I4E3nf zJ2?268+g5M>@SJ&mh69zSLpHY@E8I;7?vshq+awsK;BS3Aj+|89>d>C{2hk4Da-ST 
z`QyGE1_L52LZ`)5Mbvyr`6qt>7?8p(F|b=X^u{~3ArpaWB+LI^{Mnr34IWItraOMx zQjwI1$ZC9tXElcvEcyeWAe$=MJg4o`T!j;K8m#1G8!ZIdec;F5MfN7)z)l*on)%hc z0uP+M_sDjUNyI|x*KcR)N1;kZD+GH0mF7x0y~@9S>?Xw#Z$!=H#*FG_I%`wGcYL3S z=5pVNwi=l1yI)Jeo`x4qP$rU9IQ96WD&dR%``{X*5(>OZ+uMuRLwfC~qSN=Fumrw~ z4|VNKPbx6C%@MiPtbpJ#A6d=~W62Ka&>I3tk6p&AJI|NFRWMMzpHUVJwEgi$1eEc8 zf^n@pO2x-%_38Z`%jpzp`AeoNp#J{G0QESUC&(?N0X(hjCg;@LuG>X*6GG-E>38a} z9_sDQ?C!q#*eqG2{zkZz0F~(N%aTZ)BW`Iu5u$9ed7fB+MF?>p>`iVAax~3P`4)iB z^0-^60S&v;(lB-+58<67EgfM4`R-cab`b{f0zMwwC-7u1Snw_)5(500%`~e`8zP#;&=`X}y6sC0dM!RHt=}eCk zg2Vo70;hli7Lt>B?fixf%+rbf{6>$0Zldq~BwnpY#NXFXX4Eo-zO0}P-$kSI8)tgC zrcM}VxH!?c0Yu zWoM&|Qz48tfd|9n_gofJ^-;r9S-=`y*ZiJeqrmx9e$6^3pKrPO8Ijo7r;bUwtE+Dp zW%?3Sg*ICzl-12m+7qbRXl$F)5c6X^F*yIuE2^TuFAZw(Izm#&*L#gLfVJvfL7=35$S$5w6bI_$_oN8l6&z z2xr}lCC06O>WP%-*K=0oZDPTEU*?MJykL)m)?`c2RPV>jx$1Y#!6ozaOhPM^+ifa~ zYQU?J$CpWD9&_Jyp{DDC?Mkth4rv{o-AkSG0c2XuY?)a0z3IuvdG}hK4ib}fRTu7s zpcWUk*2HON+~27?sTZe1;+d|+F3A6mn42UL zA@=M^2bXu}Q?9KH`f{p0w$`7)=$TqbX4&B`3cTMAUQl3=$Y)%tD1ttJ5L8(8)R2XOu8+ z7Z);~t3EqdgQ6^^;G?h@0IINadul}{!jD!yV_I)bG#3~c2eL{dd+9HJiSKCPR~Go(a(dv`bScfk!u5*Ff_5*g8?f()8@MqNWi6t?5M=c63EW1roS%|4jbyv9tg zKte(af@Oc#E9`H5yX#2)g7+s$<8Qvhb#z#VyZ;k%bPBu~9CR6=RM5;Jfj?rfk%DjE zBil$0;&)PC5H~YUm~1-?eL9<{MnxxJj$XdrpiFJenIGK;fwDsP4x1SaQpVz93T`9C+c8;`?jtA^(BAK zTNPUdGp6~Ogqoss(J0aB+e37kdYOvmKMw=9N6}RYtagl-jc2Nseq*=3wWyVJHS^HE zlG@w&>o!7jmkS3-+c`PfFH((yay;*PW2TtkBh{9kxZQAQ_QG=!{vo26)opt<t^hgrS#6$^H%olviVUl@adeK=06f%m9PvD{m<)0`s(W2? zI@T{h9V(8$>Ylh-tDTR|7w1{E{x#^d_nz+9eT!-cw1opbWUl4f;<4oRj9f)sbX?lY z1b&^FQ%g=gxuKNlw(xc%9IiYm zYi++AJ-)2(csz97-66dCSknu26<%U1D`Mw2Un9eUGn>-&4xY6Sbu$WpIte^?Cxq5e zddUcPA@GTqVt71VwX_Vp(nCAhl|R8yV=2Hvtup;=%>wr|loyb;jC7t`x%R*jawil< zxzfB`59-52wmMCR7IIo$;n#O_LB@THQ8zpRxV2igE%Bjyy~zSRhgq2Q)acMq^!baX zafALwrmaC&9dnRfh`aeM|1+JO1`@GpF1Ob?*WR*ILIZ=l>BWbepp3hDj;)KYi34Y$ z{^H|D70+tuQVew=a)`K@CRsTuLL9`1RB`Xwxb7h}hsxbCBtcBU_yeV-)|Zqee+@HV zJSb3PUscNqkp>KCt6NtD^-s>_#uHTCK2i_&pLcQju*#r6wmf7J84x77keKJ2t(g6F zVKg>Xr6ca!sP==3HrcOF*Zd9DEO?x6jrkBqW0Piw%B1b|4PS40UuU=-g^F#sT%?wW zWAhkm=E?EjCIC~OOHb0k=f-6j2)%ca=#OLT^5S-FXGv*l8r$6wkVMLgT$F{Hf?Js5 z?g%&$pL2PNS~~svVLcM+?&CuZR@X1l5vVq&`FI?i_Kzm?oORelKdH(pV#n=n8rQ45 z?idd3HV*=9Y$MJek&#glnaeG%RUUwxD!-$E1JbQ4kja_vHn)B{g(VJXdVQEOfug$N zc)j8Fg`RPhjA7kB%%oS$AFZreI{s0ADb(b2V>8aE!$nl1bA*`2=d_D@bJR}hhYiwNwt=97#iM-Xh(cL$nf#ynpaM)xe< zR9a9)>wcmLF%lQza1Ih}kiyvZKD(t)z82L%H|nhOWt7tv7oBphG<2c5j6KUmD{NA%8w~n$IgNC7g+|_dvu-j3;9O(EwTkK_e zIx$|2pAaAr;nY&0|D{@Iss4ft>mQ4#s%_Z=>F^TBiFYgpvEG z6w`JUZG{f9@W%(~8sT_UX|D5fCetR^|| z;7z>GYPXEwj-^v;Pu}4iUsk;#2#eKio0q!EI@Eo|O!*Uti10=7Wt`eW9kC+rHah3& zfWm9T#aFpMlu<L{tM3uk+>fHS&(``H7lF9Ou`b^WUypuYM7ZQUPs z6DJX~zcH-%pIr%mvx%n3f#NGQ)!3n~!D$26K=-XUH7EF!NUgUvrC0E%;n<-QGGW zVUFsk^%%;bbqrIGK1BDC?c!F}N~SD{|I+R2qZ${KV0E7Ou8Px1p|H}RveY-$JzS4* zDY%UPZ4m%B=C8Y2=@&0fnQC|UekD{nljn)d$j7)sE@Oft7pPcqk}(9!rm{8W--%U0 zj57FnKF?3tVgW)8@O7pT?C0+I9Madaj6wm*}7rhN}v|Fi5hTT$+ z5v5DZe6X0HPETBR--@zSPO0&{J19upGvm?fg$-|S8>{4ltwx#*+OPj&7659n@?o3` za>+S>lwOB4pbs9H8sE)2HT{#llNwYbMta9v3gGJCe#~z{=6Be#7 z*FitO>p8KJh0{NrJ0LH)-QkYsA`W9E)*PJs@~omZUENsogN~$5dF(p)OM9P`hL+~o zX*EYiZL&ng4@ z9>!5r;(O2Kn@+8yY>7~YrK@z^lqp-{G{7_SCW@Zn>j@IjQ;a^Clvg3wk-8$2 zIA6XU8G0m$Dc`1{Q$DWuW0f*1GPzsFE<7qcqP08FtG#QYK0@SED578wQRRnm>@eO< zmD%h?Fcn1efO^?=a#|BedtA93K+J;H&4KsJz+|(k<-eMh}zL3Fp?kJ-sn3z~X zTU|~?D`wk&$yTUwhcaBkh0LUkc%ZG41-%suNM>q%X^{*doD5xmILHHrSK88oD;=h* zO~U1@#}V$v#>Q0dgP_%z58ca|LG2OY;kX5ep!IKiXZJAIk3waXxwZy^Y?HdX#Auw0KNi*2X-ge+968w{Fbck zh(t0A_7z&5r;fH_>tMj|-}_q5=|}BvZzr7c>oW*wWQ4uKmVNpNF(c^6ASNa4oJ*{; 
zCuEb78dm4X=;xm&fwVFE3%gU_y1c#NX7oGUdbjly$zDv}Q;+@KtNP#*f*qUd-VUru zwX|HRm1K3CHuHXIlzd0Yqk}xVLCUmqZ;(Dk%#q_}H6>9P*PC=5l$w%aZr1gMEUVHL z#!G!}=ffOQ@0PE=zH0Dty6I5^v|J=J!u@4s>>Gs&K}gkOCb<0DVNd=KEyO%H+d_L#=n0#Pv^H7@-$%ghJ4T201-K zpV56&_;*Y{d;p_z5QL8v0*$CEaI9r03A%G9AAVQ0m7pY7S7v8I8O!s)D7Aju=996W z;RKtvDs(@4HJlY>k2r~kADvrkRh|cEc}#pAw`K46{_F8Uxp>H_ecR0B{ZptQ{nZ0g z@SC$=p(_0TyR5bQHAP{(tkeucBSzqiOorh+hEfmIin8C)4B76!n44o5iz!;NCiL4# z8@KjJSA{_^JR4prl0#k|if}F}r;A_Rnu-xA_Q@F2N=to=f%{Hq+}fR!Yq^;rEN1AL zbv$M`uk6ZNVOn2ir+(^n6i~r@e^&;eauqw#)NRpLhpFaWO;4^dp&Mhstx>)ZF-{Rsy@o*PBVtkfPaBNFK>tWQZAY8 zel)oQ6#Z%Vy;r-q%$X6leG!b!p&1*k<-1X_F;*{}zNOr^0;;D6f#`hl;QY^IQC6?P z;m~tNxZFk8&;3O7TB~rGAQqCb#2Us8NYX1 zO>SxJwBV_CbD*oSO68v^+D)-#wpkClEHiqhDWR^yw-Ewby(02L-W;OU^W3P%w?7oR z4duiwSf8urDB9{aW(&&8*Ifqs6CnVwc z$-5z6o=1++04fQ~I>iV<`4t0W#xaX^I2lQ@1`i~&$w96=rQGXF*rHYJCPApztb(l? zNli*BF}w9y5a&O8J=DT8q*cc6{J`BFrisNjoCI+B&~1W@)xHkCQq(uhU`6O4F`CED z#( zptI8-&o5lhc2q65+Oo1ak?1uXS$zs@qqiBD|37 zF-n%5z$-&7IZez$iJ3mL3v@)JrKHS35@)(1Ms*>Tuybb+cn1c z3mIFXhC41;6opOvkLLHcSdgom8TjYcfY9k>Q$7B zb&#vhD-?W&T!PF|d3dOCcnI>(AOR%`ed-%)Nuz`+7Q0u&NsKu_{Y?0{=ljdo=I=w= zswM3@ZrRE-j~_@mY#%tT#(vqV$ejNJ`TMVqQVeL^cZGfT{tP$y7T05M>jPaIfCI>K zIaowj!yFWI(l+OJ6!qLLIQ*_DO*5b{*p^V9wozl%2|H#KbE1wO{ zr@LYyt3HOu1qf}V$8<5j4Sj!+1h68m7vJpYf8gN`u^&Ao;5vuW57lkoVW9U_@8{V_ z<7TXuz(>6C!3X{B=##aFoPCXGf5i28m^MIUTo&W|t%Fy$jC0^qg~-cW%>D zN6#DPvVi1~{fuZ&7d6N|fHFKI!m`{-*oLLbPm3d95vY$YMITzvx(F;8B>u*{4S0qC z4c(_kWzq;On;+xIti|~h)~M(UOw+Id09&%Pp@FYdC!*IWUB{E8IXtY zPhweHUY;%{N`U!bkygvex}LNl4*c^woYbz2i=CVqPa*qp3(pZhx#&)f4IddO}1dXWB5&fB9} zN>H0u7q+32L>+|05@s`}FhSOVVDb$~oJ=YMlp#y*>;4N_vi~I^QvH5SQz$QM`3hCy z#mb$bX1t07&=6btbsfvs7V!WL&H>}Qp@pOWnlZgR|Wnun2aM}av{FBqDTRNy6ZVWt@N~v`9GJB zmD{aYRctjRca~HYWki|D51m+_os=sp+*9*(CC&9HnQNklpZexv zd40^w|CB{N7A22|$$ps|oEeI7wVx9+2_!`Vz;w&(v#tIqqZr zrv_Pp9Qp_#t$U=iBDuv@d7S$)JV@-XalJh)=Mu*JUrRtWw6uf?$Rc-+Jothg8?P>E z5s^banYGh59Mk2<@vn8t8W$I_qHv)zqU0s95|3rf3EyNSd~R>YT-4#T?Lm0GMu)M1 z5&*Ug<Gm&BS(}mo=58cO>$vD{;vO)M5Pni>0%;uP&!3bVe5QJZnqM;zq@Lc6d z6YBQqK=VCpiVzBZJE&J6YFP0ZF)u!D0{~_0bnO~FS_NX z*^*A_zsBk;cs3_Gn!n+%er$pN7<|%l&Br>y3CWE&zp;Pvre2R;qVe?-nLOj1Wov4h1Db`_^6G8JHxhaLU(-w6^ty{?k7PuIn$TzK>T&;4vZ( zt)lZdzTbc^-%VIAaw0!{jwKIFH2Nvf7md*l+kNaZoZx%uC2=`xr*wW|L2Sh!PQ2v3 zbg9hptY%cdc7)VV3^oL-(FV1puod6{NCKekA=HPIA^R&QHOZPGi=Q1=0aH`~$;+`9 z=16sAjGjp+2n~eue@`7`7QBxQ%XUBtOAx>%M&|Lqa|_LRhX!A2Q!*s;KqiuDb=$N& zl=#Vqu!z0Wj5Q*A%*UA71YdblG;m4L)qrYLCskNR5o$4SL_BPXcBVCE;3?(ArnGRy zN=(N&M|)LTtlA}~++XxULEW7OB|xJQH)%XvlwODCV;yhrdDC|%1cT+Y_}#8|0Y1Y? 
z47KJH6kJa?TeJ)eG8ub~c(KlH0RgY5a871Q@Gti#y33?(SOWc^7h%aYqqKDe$ziA_SlJNPJb`x*4>^n=>2bCJ(-u$`{5&TNG$n4-^QG z&`%MQ_{oZUmS{_3?6%r#jjl64<-By2_MwC?Y@ctw$%FVf)9hZ-qne!lUdf@o%x1Cl z5#tiI?+(WmRgPC0ypF^eT2z(YA@1Y-z!=r6ch4-^Q#T$$VJ>yNQ`2kky6_nr3_ofs zOn}vCCLCwu>6-koR+i8Jv8z5m37hzVgNwUy|MM6j|2A;Cbw%FB0DrKUU+%UzF2uEY zxwZDFUmHAktiJ@S(<`EeyTX0x9mcf587_%+RhL#A|X1Z#xZ!EJk{@M5Li>*zAAg}~ zfBarSFPgmENRQ|1Glqs9BMxz~Nu+F+8n!U@8B#OMjXbp5i4Kt0`<8Y(0I{TrWft$Q%blxj4&IOk6^6{GTTv6QGs+$dyw99c~@H6zEab> zmN`5ogR?9tF<(j-FVw%qhePs#zG<+W{1T+DRx4ZOgn&spMoPM~w!Q}hPwCb*XJ zHm~V#B=&Ey7y%>f z0q}4EgI2?Mc}5FO^4dd;VVqeA+HsNa{ng=p##Q9_{MOH>|DF$Eyt)q;K#ka_g=x!_W)r!xW|&nSV}VQ_|3;ZoRHS9x&-P zZzL!e{*#=TC<-9!X6T`Lbh+$2TohCeIPvjvd>Un^cMo)REYE?f%$dD444WBUox2n_ zLF<@|57TkV4(Th`s7$vM8lwll%Z^aLQ&-GRn#*7FN~nKD)LM%~ z4?*P(g>!rc3AXIENoScXC=YiX_ux$=VBpJcFfU6O)8MKqD+z)Ehi<@er|;>?J{Fg% zkox4rQ_$p_!1dNg<=!ROKks$l8Y zr;+s7jKrtVEt_jT#%{M%cbNxLA=xrN9y&gE(X!iV2I<~u={3M=tr62ecky4yD=Ux$ zphz19|E%cnPb}?!{NR6w{e2ueKu!X1tPp-U2wdhauQh-a!x~Mr5E1OMfgf}q2t6GTOY^zt89|1Uql z$-Yl->*(zZ-G6T%FsX>hBJ)?ElG-mSMg^qDZm)1#8st~2nJlh{P7CR|0}XS!Xi&3d zXI+0(?VW@ou9{W>B5=IIfmO zmJaiXlKaj<*MckpQT$@M(zlR29+z=u=l$*&b^&xlMMU?HP;gKSjajF+r0MCkc*kR0 z|Bxg!j8HXOMDKjWWn`(@&8gjE$riP_O{Xc5#LgyiXV=Bx3BAX7 zHG`X{+-$^&Yo{&UyY!S*osL>eH(C&zDj6P*s4~VoSaj{|dA%)5JVSp|YN}xFOd*Wk1S`7JMcMRHs2WbQPX~lrg&g1`=XOt@${Y)In{>w16(VnIr9e&59$GPffOnvtC{V}C?sgS&aJ8E^(>UVt zuz*(2>vxybETpf8S!jx$nMb_ZrYe$oe~9s|<6o@!pUmV}s_%OdRX~6e$?GkN^-4(p znG)b9Kc1!xks|rO4u9eN-u}Sin+lc3HqFq7Am0kQ(VWzCLlFlxBv#?B92y?BFbb?G z+Al>6qCE}Uv8P@gdjeY36_+Y4$xJy^TYXTz?N4%`;-e^i^ldxf~wZa5^e zv+(r`?XNq27*^f|?}(KdIXpbPu5h!Kj{Gi<-O@XW_bww|`t@1E$LD2tGd7SUko7{{ zS<)H?TiYe=4zO74@B1~8qM4fqPI_cW2YUZpjQc-&VgI<$?-=HcaO(?S#d<)aJNAPd z!2$w64+jATn+2cogxQV1a9K>OAX6IOSTxRv_O<;L3gigC0r!W~r;F|Il_8Xfo^YnEGdXv}7%+FNlI< zRks9w>9jX|v6*YVFa~SSCI1z&<;fLM!lk}?8RW)PQJ71I=XY{S8`RM=-Mp%>uy(C_ zCHKEAi16q45l;3GEBRL3>Aap|;-Zr8uJgS@jg5ug)BLqCoJd0vz^;81|Bj0dogVaC zY$+h6_>cvOvyW;2B=28XKU*mklX6Lv=?TmA{g_+#&*$;qqrSgxq^Q7dI(j2EIR%fl#}&`ItG25; zBJp=%JsvMef&A#N%SqzzN7|lQ4+a(Wt!!4loZbnaNPq}89-B7gd|4Nxj0KY#v$x;B z*#5VlJLDg1vrLQHJ>Mz>KoX0uT~FphzE-^YI@q~W7!t-XC~u$OYTBx&m(BU;UIc-b%tn8Y=&)d5grs-leRw?Z1Ux z{r|Q&M1L`A3|s<_UzT>zfTg$v=Wb74Je1-nOGp`v3K` zbpdjDB?Z|dT@U{+t9}t%liYzb)FhK3lObq1(>XCY`xmnaK##v1KVpK}Np?EFZ_Y@j z-(Q7AF0JzjT^**D@a8r9ufbxX_XeYFmfw7ReajLtCer7|m)<2cp|`XOob%QR{?e%? 
zjMvezyX~yqDPy9=e(AHjfl$P7{VeKM zufOc!`X9>J4t>JJQ|BK}Z?9Ix8wdmj18tG&HLCvOi5%-l302?ihQ&`O*Z_S_jo8!8 zxbZq7s;fh(uoX{v>3g)l3IdoCgE)A&@XSCnU@0U7PW@C_M?nyck8G&sOOf@GDxG#m z60#a&L3ZS2IZ5<3EQ>e9?$rHk2L~I@-~OAfE(pt8GYoUoqYuChNi#UXjW*iXdF?tR z88LZ@qXOqqY@|+X-{ntLRAz$m{=M)B5(9@(&CA0hE!1!t zgP?XLfp{bFC$1yR^_h!mUzcxVNm*U%%(jp_zg zP&Uo`8BqYbbY5sg`xvzGGQ_rl68C3XnidfuVR28qYJi9n$YBZS#s9oZFsE>0vs4_d zqx=3AwD^03?pMFHO8;D|{^EI^9FWVV68RS&S{4@-WgQ+8?%ZM@LEq2;H|SqRG5__= zsQ-&r*TcOE1A{C}ULK%56;-E?1~BlHSb)FyTkfP`wM!0>H)rxrUK?B^0$B*{mv06R zS^)98z*shakv_bavvz~3hdcf7SZ;CGtK#xf=iu-7iubfHpH|862qZSbBygtabUfT; z_E!UcdS2&E`-`*A%Z30A*EwuIxzV=B2rxcqhG9i{^tnV1wc3n*O(L4RB)$D$zGVXwt96515>Pj2A$c2ZRqwf6Xeh9GD^Km%H|MH>#wWa?$inxF49~(u) zKC7ytLfdI3EiFCH<3ku6)#nlmt3S3PDe)W-{&Kwm7@=!r)6#;u$Fhng4TeCA;i2~J ze=|J&7ysg(%s)0DAYc*55wS%w9UL40q-5v(+=5D$Wp|;>O4@a&&%2~~0G1c6vm^j> z;V#LWaW6ge7)D7Vv3vaXPkG}1ZuS1#7F9lDAkxy(7BMg}6|}bUP+dgE#1w>vA||g- z(#v1e9$WotBDKH!>#)mh-rt%VB}4A&OYD-#=(%05sQ#Te<$v;vf5-Ck&xkagW;Qkq ziAhOx6Vz~-6B843wcM_AlE2MIYP9AGAwyBOsxD=B^_E5|;4x5xw~lRH?a@+kc|oiG z!r634!vE#A z-p#sbRz2I9thLwsNK``RuisPs>o@7&_~y@(*dMB!(&FOmt}Y=V02A&Jp?a9!6lr5Q zX=Mi6l3e;Q0K9j=^OXT|OCCe)*I7rNnn1(gkZvVJeIbeG?=Ae-oBRL!jsEk8bu4d+ zQZEGsOaM}Il$4gt2F@J$?OT`QI*mf}Ww!Dh!Ed1@3!ua{g1+WHmzA#cJ)5QSjIjo- z)Z@jOsoSU7x#N*x{RQTwM4SK1zd{B46+`@u9j7NUKSeAQK1%W%R1~KrryAhOAGe>( zMzV`Qi^AS<9`I=V-`#JaKiDq;mKB|;v9UY=y3*3oNqXF{Amc_>2a^#={#IJ-ns1%4h^jy ztFo4J zQYCZNzsdXGX2Za|qt($BT3^{t|5FR#m($*(;8!yrsX>Xbi-DlCb~Y=hCx&%Qi`CD= zy(DHeh3h~rF9>WyygYx3&DfLp}kcmb^2n0tF6@e+-)`j^{bg zIlpoL)_oAacF89A=}3ZSPFTjOtAc#5G$onYTbQSr6d97Wf}CVep!*;G+_Go)&$i_F ze2FVq*eT_il>7e^PyNlZD2xAAE-+U1^9$~L6TRystMg2jDe3s;^J%Hv$nDOmz-C>$ zuMAMVA5KqCkG^TztPvI#-sL+w4C`&&9pYc>w}Sa$H(JwM zET5*2U)5pdr;WdF0)1};aP`5D8yc#rd+xiie%IzUy{P5qi7uoV*XAQXvuSnt4TV^2 zmzCNwC&vQE%SlC$?*yw3@w42oK=WOhle=!3QuN>(WMV$z*I~|`5W07>nxEnGq32YG z>h}{(fX2I^wZP!u%2Tla6M7e1we_jbHI~d(pkKXve!VttRE`#h-nvz^XXCAVRX*qT zxpig9jcyv<$jHflloR2hFgWCj)zl$om!TycL5x6US9ngNt@?M!w&D$R zVnR3Ll6ra;$;LX-x$L&L-}O5zG${9lar}XME2IlIm)Sskd=8FxN2QdGrWUwd=qR*V z)^j7YCNA#$wWzqawVR8_g^Fj)OK*8=tpd~WUjNB=LSNAwFK`{JhAYgLU^-vAwPm{& zcj12+bjS%Z+>w&}lbLR40$Z!4wKZer?%k=!Pu7^KTH=yo>|yq?`)_-*ugE&q^}S>} ztUj(2UhJjK<|NOS^L#&*&75g6Xa-ohqY^)8!=1s%Xsm^ z+sCJSzv0K_)WnG$Ova8`p5Ng4xuKGPyns>P7_^B(XX6JI1=G+O?e?h9sNEaZ3#Pk{ z&T$HtKaR2N-5v6B60jV}jz36VAEe3L*#xcU1aH`L{?r<`tA)l{=dv^X_;YCV&r2zhVp%ZyBwq9(lLL&`eknNV1^^9q-r^_)9S>^-Ob?vr7I1 z&|LR^uH6jxYnLX?o58aD2vVwZSUVJWSwc9+968b!QEWAJn1WQdd-hB8&e5i#W#sFu zl=X6QA;zfmM6)tLBgi!cIo&ZnpRJLnALv$yn%P76rnBWfJelirHk6p) z7U019nSM^JILT9oCDbEMsGa}m%#kCJM%RI1vtz??*u9>T)sK2f8+GK{q;=ho?n!sG z*Pr_luw!Ypts-N5)a+9E>YZ~9H4Q#d6#wzT=FwiaY}ZUUkDwm);8*DL`)Bstg9D?^ zYMw2QO>f_P&kxM)W#}ifan3OjW81xAek>o+F*PX*-MVetdkVk#?TPlgckk|4p^=ka zePh3p&n0B%gB{U;|^3(u!^2D_gMyVUvG3AolA1IzsZ1H%eOF#y;HZyu?zje!0H1zVecG(*RZ69SHe%3yi~n?g%%GKDEa1ZP@m+#0fi^&IZ4u z@+;L7ad=K-ZPK9K04{B2yz@oiWRo^Z4-WC~p*0to!JVzSA? 
zn~&fTp1Pr++G+F{10D2A?Zd+;jfW?W{`ljn6TjS5_H1~$EO*ap4WrkH)%<&jJ#p#T z>2B=`;K9%!YzcxjnqkT=K_mv11*+n}IyyQJ6@!>L%yc3;a<8zXh4V2Xm)#1=4# zN?ey>cA3OIN1tFK?-0jcIIgdHjggVABF~+^sE^|Tt!ES?LjvzX|NbdX{RH=3IW?=0 z91ET;6rM|9jI4_=QBg#C=TK&(gNX$<8&nS8`0jf#@fB;7|M7<|VR>q*rObCp=+kEL zYST`Qj_um{v+|BAN}TuGP`WLWk(RUaN=l}{WzC39*&|ZBsu1BGZtT&H8Lx4d>S79R zw4+O91PUW$uwC-W_&_#vlw_mTHkzRrlK5Std;7X{ryIFWeA@^QmtTX4J?6Sz!iir$ zoiS8Q=(*l`x3c=U@++-LPQC^qs)66Syt3vYk|j&93HLy2GdU$lX7I)QI!BbYE;?}9 z!{ddguJGn+k}Sc~w{ZZ;Yf*Z95XsUPxEBvJl!O^3QG2gS9jJ=dAJ9TjiD5BbS(CA= zi|ONR=PY zT^B;LRpu(&gY2~@waFahJ>(okkUg7-MVNW?z>fzLIWM$B|JR8Chvo25EYdWrtfC>k zlDg4G9IOFx_s`+_j8$)JC*z=9xQuar>4aBOSz+n~za!8a(}t(ym2tbG$LhDVl5%a0kukjjfwvDL+4Q;tKdV`cd6GM>CwmHI8y`aonCCF z5`yDe+1DY4K{5)332sH>5$9wM)u|vj;a>Ol4Mudi#|vt4LQlG(4907qzEB1wMVOh3 zCyo1cn>W9!i8j?^oy(cN1PNl+yC2e4Knw?K1pY^7GS*V69(A40uWz7&OF|;aC=;UA zOPBsPOck_@F{2g#@EQU=&hUFsD4cdCB=~k;=(^)Bz97ml)lmlI!&7L`3Su%d@|gU99sbbMd$mln>pnt$qfP{h`aY8ivvf zsv4ozu>>x9{RurePC;}I$HyFX#Pr6n*4W(xmc5MC_S&jsZ^k&f`l-9czHTr2GAF*| z@uneki=)J**@VPI69NZ@w1!vNV$TmLLl6i2r6%wb&6YAO4ob-HGff7jkaMgmU+tcZ zRa%~?G+H*eAwETa7A>?*u#}k{lX4v5g2o;!t0CS&EAeNW`g2AD%p0VGdZlYAZ==xF z50C>1jIbzBQUR<^6~6bpJAM;NS~yObWjk@l6A^ph%~Cp(9VIuN1PPoTsNX(72a9-p zGBiGN`c1!P;4(}=WgUfwC+v*C(=B`AeS4g^qH~!wdF3kge#Xss8Acj>C|1y>FK{jv z2H$N26+ha#PzoWOP$fIWvjf%@FqAcxW8peLxtYuyN6TgIAnsV<- z51n7*E;#8i0i&?09v=%Z??Q5W_16b~FoW=GE>>2(NM8*{FxT0i0Q$ZzclyLAkZ{5@ z``spC*Dv@fzXGkW23`m6N2wR<^1GmAXPI}*LCEWU+i4mY;rS_uRFq=C>=CCnwBHF; zi_AT5UvLhKgWX3m1;8YzV4H!f)Vc_i>-n*2C8BR{?uQ^>iEW{4XLT6~^yJ(IzFVgQ zpGA@o^!tS*%g6r!X58ff7QYZaQMmU6n@q?YVBhvUDEaX^C{Vay5jnPl&5vC!{O4+KyK07BA**9YPB3x4p!wHG& zfr8UO8@0pBsiu1cPIK?l2nqTbR(%>bV=Cp)`a|nx!@U^(47I{E6r9JPJ^L`|lyqP7 zG-ql&0I)f{xfmp^zS416pH-wWU3<~IUH+58yB(9-D-87h&E5VX;zax4_v!hPSC3;J zxOAz{Z5&-L4j#Q1pn`Q}WM7r)% ztuNr)tBD1Wp^(XtuqJ95^L~Gpa4wV(sY(76``estFtLPQq|P6-7h?KLk;0zBXlzu& zhFM-sTZ1u0M;KbU`;l;*AZQJ*4xoOd(FJ`H_Z&u4BXkbnXAY_)hwbH9&&{Tloy%O+ z!QD9Rj3eyg^Ysy>vtujZL$ZV!8)5#6Y=9dcOtW)`1$pQStcs=^nIPn>Wo2!VqB~}_ zB4p~s9A95B`g~^I|2}UgBI|sSXFf8)i~#bR_ZI+dz>haD-w^m|{XbF!L!_oR663Nl zHsOG}na>TYfbGYGI0EOl zzVTW2uDiFCs;`pcZJF%&&S=;F06wCVD8D8aY33~VF&sSgy(`y1$q|2}9Ev|E}E&Do}y|yurUI17wO^lurP;DGM0Anp2`Q@ zb6#CxLjfb_?VW5D$=nX3TgxYoWu|hpkZdCDjf9FBcm|4`Ez5iJ!>DX^8JDk==F)_3 zas+==(5XX3BiUW{_P>F>+BGCV>P2HU{X7bB7l= zW`{aLkDe;>NMZB>ZW(wshS0{z3g1v&Leb~@;7ru@d6z4>u7WkQE=g3|f5)hwm1bUH z*0}zY!aPx%)NZ?nHb>o{T&9kbF6+b9*p>ba;M93lmfEq|3d~=W())8$M}8$dHzlKdlGt zK$WX;MAk#W2q~Ftm#y_4Yhs? 
ziB~j*@atvtnH-u%0kbJiEdwZ7s!K7AgpC?vVu>q`XBRP?~iozV6R0H1G|{OnBa~` z!H3?KX1GA?G?6jL8AG~}oZr#+uA0F_&A^83OK`k*63%zsxWZi7Sipad%7HjM1>z8o zwO7M)xcJuz9T7eR%BRv{8y^$-(Q!RX*8~#Cf;F6#4zp`<{J74c;~lAwFqH#Uw1fEB zc`0D%_t2bD`G`6jNaN2pBfNrI;iy3wMWP=4CNo6I{;c?pBvYxb@#5MDT0KtRDgPxj z`))aPwI5}>p5Wa&T2;eHT+U#10>-z-gJuAmP;{4Zl)5OWlo0r-yf4c1=j#hcq?-aq zvn*R}Jv(G*bzTkXDC#n8xPSkYVOofk|4Hi@zwFYvVrC?gZ@a0tjy}$LYey2{|FvUv z7#ZxzA;*B)z6SY%dQ<{RO~RHk72THVp1?TR$&#_sWAYGJ7sd(p)z(j7 zFRN*yaD*Oaraws~*knh?QLLuk*Dzr~oYHSaUxhLCE_?JjW{cpr;)Er66*FDyxw(7~ zvZPj;H1ZSaa=B*Ppt(JT5uuf=&#k2TNr180s={c*WRg)+8&TjJnC#`6Ge|j$IL8C9 zdI}Gg&E@r!L!~%nLQ{P!TO-I63r`Pp%!_N=IjOC#sj~K;1G_I^ig#4m^P20jxJDI+ zu9+}!Rp9r*#K5Fi&v$29D~@@kWmG>^In0#%^h;oY<=EJx+*dBwuI(m!wBemRRK$xe z6YnMBRetGZJcz;Pt*{=tK~eA{KPn&x*5%r6k(57Nth)tC!!|9f+-m{$>aTsSq$B57~Z4(~Ptr538y)W3k>Zs^{>;XOLe^ z8F%l7Hhr27bsmSA*o+U>M4>iNj0J+}q#wZ&v}lbjgl*v*#cCEyv|wNFMqJ)YLo zdG(@2!4Rw#_n)8BHt5}N0q{)A-YB}%)-iLDyeiL025!}hSM{|(e1FV8S8G!HH`K?6 z<%h+^!4hv@tXzWsIBXRBKFuKh2(|e+&ci2ITZEh6tk_xg#HOx*5|oLFo0#k27q^l5 zUal_2J(w)bKvsnDD2zzNVFUjYDE5o~y7UpbC!EA`>S<|Qt1WOa2iYYam;W_j|J@i; z9{~WlKXI`6=*8Kja_F)F06Bz^!kT^$yNSPK2z^C};+!s9Oca>~UK(_)ycHgi8RNN8 zUl>!EK`^_wOt1vuS);s9T=b33+W8MQ3U*F*l>4uFlkwlTcnSyY^<%P&=Zd}Cw@4(5 zk{@&H&aLs65>NE+zsnqUB+-_miHfvUa0`CWw7hL1>Zc`GA**Js z11I=C{nool!(ieMRUs=Z%|8RtIEZ<7%kcZ>Klr6R{x0~SxkF&ffK#yst}Wor>Za!g zqtU9OcfLyP2^rnGgm?P8E#I2fs`+$NP`_K}pY-J7nm*urcN{+H68c^6s^HRMC}hwJ z?_>X<)rL+pET$5SG6dlCLu8x(eQJP2o&D6@`NM%E(+36PvO(c2Kq6VkFIOtRH8X*4~{! z`s)`HwV!V5uYzmt%U-U_>>Eu*OM4*^+M7g6Gi8fV@If2fEp7$fmdJAAc znnNgcSJ;|>_1V4@DIm%MDpkp^`6AQi7wUgVlT9(#`$6yf=a{?A%&s%04)tqxRwSbH zsKw|(J;t`~0m3Zkr>cVk_w1H0K^Dpx2FlY;9t6*8TlVwL^yUUnDT`Trd46xL`6&m< zHGqbyG&M5^DjmB)s{>M*=May%buXa;Q1S>p@?h_@o!FAzbXfEz?4RCr^sjH~)n4yE z{XvD4(J;_unH9*BwVY}WkJ4jM;CGDhA{cv1gCpDECsEyxG9$Vo;<%7+?}CZb*xX|dk}~hN5KSKBdiikZw|VyBx+QEzr6r%7bGzqp z?NSV%)v_;!Z^K{BMld!*5%?++iB##B#283oAT`CrTjhgiOO#Pw{21J!Yi$Wwza@#h zbNt~yEMGUW3(;6@rLH__?S9VUKU=*&`(gk)Uh=4yQ}1}WBJg;-M%DCf;7}$LR=lGu zBRlq>gwqB-N-&eXO`$(7Ve9tnoDUYt|8i6-9HQLo9REN(U7ieMs5qGFQDm~^u)_&4 zGE(*v-=7SUq7CFP1u6R{t~_mw+dbj$ed!Dqgo2+xuy*!8cl%Ic7|HN$cUpQMx19Rj z$rG{)HmZUtHHp|D$sXXFR(lKWKH@RznQWF9B4_VXjP8wcb@8p34a#DeFWP+6>UV9X zwAw^yy#}R-uh5MVGe>Ia#7&^KC{8F}k&BizpC|MQ=WbMj4CIR0jd0Jv^YsS0*{lPx_?EkjNpYh!u zhv3WW9#xX>qtsk|VnivaFXqL=+$#4C&A{oi14yTtS3>Vc?70>gFtVhN2EN|m`&GY~ zWVs&Jm=izqXHh(N>tJn2(8M{`zVGMWnH^u!xd4l0{5bEU!mciA7j_)R_}45UC-m8K zI6%H={n|fk{TZ3oJ<>BpW-S%s!|O~pEx|5--p4wldF;RUxhrAVF;jNUY^K(3)r_}s zgKxlo86p(AC;rKv>??2=CyZrE`2R-Nznkd|ujcjHD*sPN#sD}R-FB+Uymw}CQg$vS z-wP;pxtQz|5v1z_Af1KZW=`JKt!W#6tWa7GmfL7~>!4f$D?V?N$!SvOWuO* zYdCo?BF$F+p_|yZagrwLf9NIvz-fg^iA(+Jp&#$S&o}?q=Pc&8mnt`2BK5oW>7M}n zwYl*ZODNxow7LB4`p@~>c2WLTYvVueoM?ODt-$uC#-uIzlQ%q_Kj<}!9<=njr>43~ zh**96V9Lb4|4sii-WC9tIPGkGv|K~y<<-p6BJlW zLCd0lvoA(e4?6zu*nDXZ+jIJ}Yq|f~wS@fh&V6$&|1OH(T+52OZ?5GJr~J*e{CikZ z{>`3V*wcPmTTE4lKZ?5HAtz~`(zSUa(zg=sI>-9f7&joPU;alD1TixYbNq^pU zeZh5$|M*tY7qypuE9sm6zf{sMf3Q6J_1v8I)Md*k%qw9TEG38y%nTO8sKE8OP4^5f zQtPX=#t~iMh4LGG@a%kFi`%P3`&yoS7u3INk6nN-k> z8x<*h?oCqjTknqAbN;ajh$G`PbzE*HI>2~g+R5Smm!?!X=S3TSfWhb59XrCXo0M!i zspbn;vFAU*SX|M5KCd`_xBCuZN!{@K6X%iq#yszVN~h5ALK7PoO$+PM`XR9Z^Ew-M z0#?_fLbBCVMbYmPC(yNnrZNc(TU9qf%luozZ92VBXwn7sF83-JaWIUI5cV_6--T3e z508jQeq?$mgI-%SSWu$#tPN_R9n_dul8T>cPoXnp7B+Nc#(rr%idnSv=yS__?1>=6 zuD^Fa1TWt<$?__TpC~~-r*o~-)`^i_+Z0GgxP;d$LS}-Vzikl*&!pp0RWNYRDsEcY z+>-;~f)C;BR;Dap92{e;`?xG{$nQ#bji!xzB!pE{0WD?Qmd&=AVj@Ltz#Qx}SYbYeaeH-j8y#-37TL{r$igqN$I- zoilCNbgWj%#0+Ri26|jGN?Z5CJ z(eZvcFzb(Q;7;7S+=4~v|BMcpSUxyP_U+h$=e=B3)8;%EQ4T$2X&V$&ERsOnHJt3k 
zLn*zhl<9nhzwa8*`EK^PrL}Ls(a44@YF@d!=u#c71)5J;Cv6LhgCqw2{VC<=)jl4W zjCG%xX-Ilc=m_{JfbClsDUpKq>(1t8b@C0B;!m-=VMwS?ZIqJ>DaEO_zM2F~KGX;H zh|K3*2^G?(X&EQHPn&VPw}ni-@MD}(2>8ypKtl2KI%(rnTDrMYZoTHXEz#;c6z%gv zW8iU=R~akBn@VQO(6b7AifL$fgz;RrWy48T9jO4Dyq-Xsf;%{+-QG-KC79gi)bZ}v zE>yhRtDju)gF}OUEpooyfK4R{xDy9I8WB+;m|@<)T*#wZkZKY*D%!2|^=!RMUf&PP zxCvCxTf{+W-0-Z)hB8i1LFYx?EP~)F76#)@sWj=a^&Tj~uLqnj^$*DtO}Pr~OrwO8hTMUUC9+xB6~p>CW(98z_#y0khv zB#!P+8rf25w3nb`=h5P0&{2g}wt=iR zbKR&0d*ZmVXVpoxtc}l27m0C#UuWsMrUd5_w%{@?Gdt=4WhV7e*!4l8PFoBYcb+wS zEsgq-TnpG4*TQiNtU^3djsvh9q~kXHz~Q+T_H0E@)>M#C>H_QHkvO^9XchG2^w#fe z%Fg64a!fD{XR(!$_cc5Bp%I-^GUmim)-4<&*KwAaG{6YLb66^1uj|oxo`M~mW=@NY zLS-WEA<9?<+Ry|*+|7i`n3A#7C~{_Pqg8maCcP=R$`me20y9xk4p#x@19 zO&{}1{5+MXQq((C$BP*=WOt&oum~6grp#Q{=LZ0zDvnq?Mlt$VMq&y5u@rc6J60h~ zD-cPd!)u*ak+9iC&vN^R&C%D#9-WDsCgcj5x=d8$!e~Y_d)^{*TY7?sd|IXnp?x|d zImTANXvnyy+Mi`zEx(gYH7FqlmQt2!Q39cJ4a@a+)iRR07(vt)4FcfOYS?}qP9@&V zwGvS{tS@-l-F`1caXj<=Yyru1x4q>!d}K8=wOR*zsA?v$kf7yCgJ63yeXk9f|&9Yok;VGlEpgZ0EWrz=g#lfO|JBol7o20kRsxgtD+gB7_OfHP``Jh_IATH1V5{br=1t+(DHa7#tS-4;*x;GaWQ9UXV3mq5 zFs@8sjFqrglLjqqHIjBfHC$7pPOy3wbIXhyK=__-&{17+uT+R)qL zQ)Xvig@^e;FFgCzvYC@GdVZJf_WbIG3Ec*Mw!LnH9;F%tLb;(H^s9im`JPdY{>@xl z$ExwCT8dxwrmR--TG=+ zg@=cUfSsntZr2_!Ce1j7)mPtBhCn)|pBv#4*6z74b84V8)e*9sF@OOl!4tUcsYWCx z75sCTN0x#*)f-j5{T_w@>rVF`NVcYw>SRDYODj!D;6Jn~Qd``A_zgGJ&bK@Jb%e-A z5pgBE?EEUSuy&SR< z5H@(?WA5gw2Z~G933Nx}W#83ag!fH=QYxeov~c!x7|k5pvsa9xQS)sOzh(g<9EI*AwjF~S(*ha94GOx!6HoKP&&M?Vzd)q`3_sJ{>q>4-= zaZ1}WbO)>y0mhNKBb}!3A?M3G4&4zUkP}cn|rNgfc6L@JYLiZmBJ6nXqFUaP`tt5E-Ka0VDb>M z8vUK8OK6!3C9h4ft|*xj%|#NI?fpAbNzcWQ7|>+FfX1cnja{X`^PT$ zJBUT+2VNGe&RYr4A|0VtAFYl)=B(|~hBsdhW;mv?1Ewa(mo;X*hWCdmIo6^^8iFf2 z3ZhWpXfUHP_lo=k^x!)+8 z+5?{XfO~wQmiL0~haWsh4uy_MZ<`Dlmn=+Hvi0u>Cu$3x93%O5xg8w9PsnOB<0}X` z`77a&pz-^(w+%JuoI#`H71agPD3{TI%B1*z&^XeZ0T{NG?3QXJZ5x2Dq;*2(Tw(V`U9>l^7MewuOmD5H zSj%ZIXtVHFTn&sVtoW)ig;w8SKwe;U`{*){2KK3&88Rzi%P<_|N@0OkA|Qn&k1Q4H z-jo0&Pt^002O8)>;&8+P?gtflnb5EtE%_LwUBY`YO1ep@Yy(7s52SBjL22h;2_^7a z=@@JbkL$RZ6kyO;Qv5a>7dY7G6rWZ2f|mf0i4Z!i)qJd}QqFr zLtyZD*{Brl{W0mCfX8Km#me^Xv}xH!&>Aa;0*oEhLjET+Q<<0tN@ziZ7W(I|!S!^i zva?iPRWuUE0o~1FYoRH`;7>emoa-ngC-((Cv%SbasbkcuUPYhH@?1^=LWF1jOs)$b zzra2#zbqDwL;({*OK*Y~MS43X0~lF;$#mQ6XHLHS(6pWAlO}Id)&Rl-6l&E^h&v}r zpZH(@IcQLhvX|0;nnF~^Q>7{g`z3Tx8}tV<8MTb#8`Ms@BT01*PVR@YG5Z|dW~YD) z=@pT@yRZBpJ{qB`;$7M(-ZsU(p)rrs#u(4ZW(C7!x8dd>9M-LRyznGWH$&&KPIx(# zj#u<}FAy>3jN$7x;Q;a~UVB=Z&cK-gpT5f14<@3{FktQo{#_>^MS-dax(}?dUaKn) z$M9@?7gB46kPmV(SB1|A3JSZf)Ozz&rNYdvDCl=uvkr=V^+Q6 z{wiZKCJ9#zX#d`c-LNV+*R`9z`%}8 z#H#l*%}}OB(Jtp8rY#Lm#V`SbwesC#Jw1$>HeW)5kC0@qtEx9Q>^L~}$izRO(~~hZ z?V}F@oVkw<0prK!A%;dgDqGsp!uH}{?MYt+?+N6RX*Lx6%ghgi#`~JxPvLr94M5Bn z9U7=)NnvW)UA8SOh6*MpoZlbmlz)-GU<4*nT1*5ixsLlGSh0Z6_aj140{}&z<1E^)5%K@3yC+Q-g`BNW`(Z58Evt z>&@9M-;eo-_q>2WTmDe5`dd}&0eM;Kpo}47Bs_=3xWjo-?dnQba)yT(8Z<&q2Ly5TFz=EUEsyGeLb=YU-f0{*UUUl{j5p*Amu-!O>#$ zsN3sB~DFYGKWQUKzL_^SpkEVV{pXCg`bdjoWbp?3-O!nGGmibp{ z)uL*J17n5wsZY}^TM(Ww@Hw$nHrkDPP9JaMb7@**oAu_b`*PWZ3RKNajFHy?QQhrv zf>hZ3B-2Oxai``|+wd75c3oRAW$BzX4}=cqEj9_bcs4nBb8yd7TY@UEK;-Ln4Iua9 z>^8KgODB+y*+f4m3GacAD$qc(w&!Nv1C)WNUvXWD@aYd1ZxLVA3R4ffFf2ws&()y< z(Hg3iCYh!fqX~I6H_%>tGZEd>q5_j@E3*X>8cl%H7`f&X0+8ZJ&t9cQj+pu=W+fKx zuIN8ZR01|Ui|Eh~Q)=bk*az8up z)j9;vN=3W_j~`XTo#Y_69U`;OybIjGdx*_ytu?`=>#+b`(b6mJk2NCHXJ1Ei36 z2nU+9_91B81F4Sy>=2{E@mzaT64oLrZOG7%YUoeQmju`d^|#qT8SQf%UVo`+(80m- zt1j;F@s*-My$KF*3tHs>KF3sP+5j3W-nR zD0FzU;(#Q4=thSYsIOQa5~haxF4pp5W@p?FU@w$=0t4?&=hxQwn)d-Clu#Ai3C;&% zP!fE70ND547H*oJZ&yyqo`aIrgNE_KE^1k#MONStK%fYO8aiXW{3}fKXyqof!`L$P 
z4HuVc#h|Oul!mPS!OVf_P^FXhvm-@=Q}P$#mc&!x7#>fnskzzP+H1~9C3$-o)-s>R z@zF|A;(toJ?AjAR?6L<9_#mdn>HMF_o?YlcE??Z1AWSl8pQ(brE#QvlLOYQZe2rob zqf(>6=us_Z<)g9zJ%F}rxL9lS%MDnN!@m?#)|oV6g{ z_+Cr{;LgP$Hk%0iUTN9@fF$&5ao$gp9dbS4F#X?QzeVUq7*Tur<+7%UlCd>u77>MH z4?k#S)oQ3bXgCFy76d8fio!tNM!E(e;7BoDaCh2D%CIb#(a!4m&c^-F-cU)eefvTu zG73a#B=qEforV;C;;|!Xhfz$Ozk7kK_RcsZs-lhgRgDO90ID_gL~Vpw4s<&L#hSID z6NK|vfId;p4q&6QfDr&?UlyzV1h6CiL^43lH$mcC6bbQ4kcV!sX$2xj29!RoXkUu2 zD9CZmWo@V&N$tS-l>n7Bkm@rCXkc6vEOuVZ&Dlg}G5R}J0Bk%3(y$JM6H(7Mk@2IF zHRo#Q)e1=qERAnWjnZMzFJ))d_=k=c?6pYDnk#G{&hL+m0t`z_aQ}<_3*zq;=R|=? z#D6(dy!gv55L`b`w2OF^DDDOde=8ci{8b!aHVu_$K0HuH+hNc<@#e!IEf~m!*D5BQ z2AJ{o1J|Xz7Rpz_T2!S`G1&VIARgR-$HIr$zN6v>_N@V>evmT<+<}@#sg-LUcWp0+ zpV6Yn;NtsU^B_Ib15I8}fn2bugtixDwa$^Mi70I^@?MZ@Oa!?9X=zuRFDqXwb+!7O zwRH(A8r>p~8buzg?x}8OpBD~4$E0HHVP+lr^f3ZGtO=~%ndPspKf(i;%;WvKedytF z{%2%n>#7_0Gt!nI6$vqE?aA#<9?<2D@~`j!@7q1|t-0p()*X|vFjn?-ETxuGpJS+G zi!fAG3d(fkl3K)yI65f3V$Mh+M8!=DZs(TMhwAc|C+h@<30{%(m|s zgaxP|+c!^Du!_hbkC7?uoF+%9cPux+56YjyiZX=wgH!Zh$ylS~#)vVNP0#iI1k?sp=SITx zefPAxCRUmS2n((Iow`HjMhx~kn;cIkYR-g*5T&OX1?B@0edg^=`FO{&LLjZK>=Wt; zT@449Uc5YYh_gGi^x8E9VZ;i4MU<#w{^V89-sTpNf2KQ*wgzgpj`Dm==Zu__5TiFU-~ zKy6cg8|6oJ)oS@((6iiC&X64Db0ds(i$?*F4t`jX3Pp}`HNYhq0cAKK!R1tgjjYnV z1CZT$CnDb%pk;LF;RRk1Kyk@Khc+9N78z;a)mrZ}*#VGO15r(lyEj0_j=eHd0{F-R zbag(#<#+(yy%->ime3}&S$~+Rig(8}dS%OUa;DQTzVMni_HuvQk;A986wZ0U=t5$ zTe$qGk}+sovZxl~Y4FZAcj{p!?P)_&k6$-pl;_!+B}#j&B>%z9PzY>R!>a-u3jqt+?SQQaNfgE;UG$c$d1pWqg)05q|86sxF;tZYG7 zd-*NbuI+DVcvvYXgE~YEejo3SPx3OBjS8g#d-uGw=94yM?#(q+0qTeNJ$4TxKyx4S zE`2yoTZiGCFWNl%Mq&)e9k-ECQK(K`#8f!rinu&wD%DKHspb=tSpYDJ7p57h3ptt3 zC_H)qjBphQqI%SA$GcNSETpnog^)l)m8V^FU$SIj!wUQa>#g0rU%km3&J>GNh+g(I z*VV>tf1(uN3U(nw@m;S>?y#{?L>jyVI6IT#@tG3Rc742#-t5O|f9HIUseVb)7IeVN zWA*Yj5<_h?XS3Ho$ zm@sUjmXiT~!?u9%*_cc6%tkPI@39bny+X3hbRgdCVL?0GWz9H- zp{V^j)-vopyHFHNRLZ$N)}*+Q9v{mmwniT#vU>=7)a=WiGvop@tQvg*I#3ZqRuw7D9{>4G{_ZbjiZ6V{ z|3Gqc3ZOK6_!eNbTlmnR6@PGIyfgeW7v5hOWO%QWO9!jhbu1{Gh;K-}QBHl!gOFtw zK0Elr%K1n8{Ntkmf3sL_ensXjfVarGFJ|cBmq=Hj1Y--X7@7}B!&cJ7k4inb4*u=> z3gw$Bzx=Ab^J)017fDIs5gLpj;Y&7}Ugz(tdqZsN?qx|^M|Q5dxnlQw?UTop<%>aE z=#UiCMtp8DHUp~}*I);U!Omo-CEEE|;_N+xtPH-KWG`;#J`Ip1e=e@Y44=LA4^Fxr z04JUAzP+~&f)XH6(SiT@ohXh)TSyWke+c*jiH2NHfD>w7NxuBBcqqz8CW?|h(*SRz z#K2jr+zOhxQ+lR_J9R?t|``iC358kFc8PKb%YI*9*BES4_*e>F+nK%ig;=XE$ zruw-WLEKi*$m*Wr!FS%3mt|Wp;|rE6TDQZGB2JPX5p3W9Om-n2yD~fam>GG^j-tB` zPA6xZE+hl$E&tJfnx?R8yvx##S-h<^l{Jbw^wO=oWg(6PXvT_HjwSS%^IM0E+YMe%_pB;0sUFGb12)|p6wJ}hvuCBqHkbi}BuC3N=VWxLM>il=7t@ZQ^wQ&JR<0hc*IxB3L>EOhvP(~tRfT8CZpgf|OiM--BblFlqyY>d9JLoHcN z=a=zCHUIcwbqe5TJhKfT(|QDuX@NGzyp2;Z!JRVtW8VNYzf^i4hs&!(ib8G|DGJ%) zo1%PEly8c%Fg62R);C4@=25=Nqe%S}tOuJ(1@3Ip#{aDGXlA@CM#?64N0uJ%WnM8= z-zxOY>Joc-7IM3S=X<%PIUmF13Tc~lq2Y_TBB}F$8(My2Q3&&oZs@x53SHsbFj_j) zH#s)8qkk~_yw6pgpV<;U>r@ow0Z1PQGZ)+{zO5Mm-CDjxVMAbF#l@~XT7 z$ukRdV%}rzwSkN?5^_yU-K{<)bApMINiV6uxnx-K5|F2)0*A`e?l1Jh<-xmOdIehg z(BidR$9z`LccU-BvY$bJD**cK)J+#*w+tqn3?2t1W zF=@L=GEZ7tIWFXRP8R5*tIl}yqkc!d2vG|n1q~WOTS_l1kgr%{brOiBM@5&zTp%rm zXG2$h{BaQ`*775}Cd)&RC%j-Y&FnG+Gme#IQ-iyTdOl@YPDRFm==Y&jeSz}Qa7Dkd zA@8wa^9g`d`M-+t$400c&xZ~0MZ*Rl91%e=yLteM*+8Neqm4wCh_QQwzjhY7+Xn?h z??VR5uGEjrF)o4zE;Qs8x*B8CQa3y%;;i?qFYXmzRA-;gZ@`<6i*3LkA)}hoBCM6c zZ^RYhvypjz#GdWktn(syaIAfJ&|{y3o>#Azz*kVzHH5VF7HScK4vmw!oz7%N#?t_w zkDM-EITUf98{eSqDCle89-|#S| z`2J|=06*ApmT*iepkk+0k#MxdC|^S(cUS06$68^TMc_f0JVqZp`*EAo72g8E$ZrFv zlZNg(uLKExx251xLBiF0TO}4bJmfNwu`~M0pki)S-fGs%b~GirVJ27&Vy z@`s})aCnHI+05hwU~j@&TMl{mdJS{!ujhal(b`$LH9%(C_TJYW#AM;lefp1Mar6Y& zw3PI8^ZPM6Wu3#;(8RdbObEWWl!GtHR4jgaS9+nQh)aDJj3$+2V7p759C6rxj^<)J 
z?v)2OKB()j3yyJsqS6Rz(m;3cVVChh)B{CqeMinu_xWtX4(ZOK%I=pQ7OE{2>rnR` zfY=>K#5a66cstp{O3xxnfVQbGs$^ICVk}nz`*41CKQNIsvForZFgV{y?Ii45 zUf>Z=>-I8?`1OQq@TjG|Ma$6#wn<<=o)&r8001%mY%t!jItVVa;~TIzE72M6q;XPH z=ah<`P0Kw&2bY*>a>L)mr2DT9T-%#j&cY&de_HArLTvJDQ)%kE0hHaRgzxRdj(Jaw z1kBHH+sh0gB3cl8--v*c=^#v7!o<2>Y9N{BMn34)?d`a>7GFleD8?vBzeS(S%e!-r z@A>zQuFLceiR`|e0M;KjJhEB^Hcyrf@89?*in`C`-neQyT9#}A`v4-(Bw2FLa2u7- zPxc}mcp9r)Frc4&sc0e)^N~c!D-O~qFtaSCWyF$Eh~*6(bmvR8c?%G?{-ZId*r%AC zcW0Q@$sSGkt^Q}2O>9=};|X1r%OT}YpaW((Oin<*|9y+deluZ0Kg?3IzvE-Igjb%j ztwjvYHO~07q6X{F+UHwj|Q3r*R)G3OcG9F=rgm&M(}(2Rv*i!1lqtzz-8B8 zY^o%YXKhJm^j;m$b1JY;rwAEQKTQ0Nd$v(>q;*3P`|U@4wA;RZ&vQTiLkz#MV*MdD*GHM> z&+u*_`Un;}?*I%g62b)f8j{r7PcT36I z3+E~RE>qu3(Bk}YmCm>dZ%0W438Pbr?@zaccPC5U-JcV5Z0yY0Q<{73`dao%mV+)& z`MKL$l`7gMGkSx3@j=X35*;eSE?)h>X=Fhg>tcZrje#9pR~-GUJR8n=Uto(zbhoPg zT9=qwVxXnS?9`TsVjvVLH8>=pL+=lCQvuvhI-lnx*x!3XC(m#_E%YLd;NZugu<`1k zvMsNc@17{IiBp1CHUW8-S88ez7(v;i8n~{;nigs@@US7>RJO6%l)*Eyd3Gt0yam`3 zJZVU($lY#Bv-=m{T zxz+v!sq?sP$$)Ql3*c}Hl#e>}%z|bqR6p3>?h1i*Tq;~WL$iCLe|+t|GeqxtDq|gl zpl=YaJ^D@FL8e6?@bjy^kz@-e4&PE_y2J9xybYopt3 zC`cCnWpqEg>Muny+aEG@!w@ZTNL)@2nF^6qS*}%5V9Km>vycskw(p%L-Y_4s+aWSK zZAWE6lL7}eE`Q?Rzeke&Rz4WNw$}kM>1B+C@kFdmMg%&-^tsr z77co49W{haOC5Cj8Rq%&0*;~2f823iSQ9ESrw>Ku_>J@TH*5Hy_yA|<{bJw+>1j#5 zIZK2-pK4vIaYOA2B3eGRxzw*Brp|9sd~laO`DXG2#bXfK88D?vs9|2!*rTk>}EKR}58^{r2M( z;6;()pB(c3tozM9M&BiN=08^r=5HOuZqTb5%C~Qhq1|*eQoH+a3@R5UtbQ*sV>W4m zXv?$&V9z3(e=%LTcJsqJ99v~#7p@M~%~KzVMUql!?uVm!w@hyuU%$FAIG20WI(s!# z>25r>E9MgyC?J@8s`ZSU2_%Tc!ZUGGhRP%nW@ayeP*Z%n9R?`dmD zUVJL)SY>5(eS&86v;UN?L`hc|C_F!aW$ZocG*AOrn-8eCyQ#f!s&yXI(Qfo*1azA> zfWY_$8shgCV?5I2`lCHhNwX07H#ha=@ZLXkWZ0YkF%@Y1nX&LYP5j?}0m;(58+jwM z03;zi=NcVLFf1Nv^|VPCkNPT{X~5Tp4aSqL1t^VK@h~+SPjtB~+n8C``h$W* zLh=9v<4G9+RTzk)XQlKLLg6PgZ{*F$6p88Q;4LKLCp+b7P54+`vpqn88}J}2I@$7U z3TT>ccwt(Sr*-8M(KrEggJ(952CfUo(pI9lI(mM@0)9t)I*avIJV53Lyj^SS z(-GMRSx1`|v%qYpJ1A!P@AN6fZKr+6RO+1vNNoP~!~o5#2TCb=6FkE2Zej@o-9 zLd>)yLa#5wEf4QbLEYb+bm~B&*gw>cxwMC9RPWEDpG#sdDTp=%0%WbiQFtiJ+5^Ch zJ}M_-egu|DpnLg-6SkSaF6s=E<$^Qs9dtxfwnf*DJrC=o^0w2nwiX&d5%E2svsUck zDtYJSYhVUbom7EoTDM50a%-u<_H4skY;qQ)O@{Xz-2oCW$8Xunb5*UN_T%zd#KJ;D z?5`d(5P%{a&bJMeg=al1dDH3}@nAv6$tJDtu3nBn8t82QNp%uBYM%mZ$)6YsI3%%7 zQobVVdS*W*jax&SWs)O1Wq`HGw4!e&>vPN17O&4F| zqY#ZIZw$lLfMW%I!qr|PdaC!gO4_{x3d!zckl$)vys_{fieRJPb>qv;mDOy-drAvI z{1-iHU||19F$T2E)CxKa%UesLLb#%fvv1zuoj6ANPp+>x43tk95Q273p;zA)Z0c_w zZQ<5C<(RKCT;6HQ_vE{vCuHWgCcw1#FBnwcO9mYbL0cFr9S~lGET*$+S6&(R@S3{4 z-~@BjtGmWiejH|*ukZ9IC3wvb-0aDo8t+@uMRr5wU|R_Sg@b7Y?N@=#?5Dk|Cr(Df z{Kwy3=WgAsbie=8fZkQ3mXvp@Gv9*VIf-qcpQ#sMak`n`DH!Hs<+1LKbCbakwd}cE zb>8li#_xWm(RQ;i+duG$&Z})c1-~?5UeF@;A?Al=Z|tt(_OQ&!0pGBvp4lg@Lfo^# zb05HEy1ZNAPh=hdK?+@!H~WtuefcLy^QYeE{}q5b{+-=TssHcF#_wdCJ^ox^qYnfx zWpZDDyT1R_x2E3vL^NA=hn<05iKKSz*Im2HV84#reu+XcZ1ak-538ylaT+5?Ds)yA z*w0<;ti`(S%Aav?)v&dQKTUnoLav$5SPI)7^RCofmt)c5*bOtW9=xDYj0uVEsQG-1m- zY_+?Ghs&FJVj?8t;9Z+!)tev0mOKa*4y@RO*4<;$%}I*-emT-cCQy`I4eykn6d>k9{?u5Mege#j2Ejy0%_L?&F zv2RT`nYt$X!G=Rg`+Uq4@OAD!{1gW5NS>~6oik&sMwi^sYzhZTEC%jsdqi?yse`4APxBhe#CARoG&d7JaGFbTdd95ZM z-}eM5;K-K!5`FisTw#2GYbx3eXH%5n@YK9Wt|MykdWz}E&@R}CU-F`0pqJr(QuPI{ zN$6^tUBFsE!7g#f6Nw!$QFcb6@DB@oL}=hjf%SUW=3~y=M21EMq|E6c(($3v5T(}-ejI(W z7rSLPcWZ4%Ddbq*ckS1Oz=)3y2PCUHCenJjEZh7{pEmrSj(*2fu|%=*xe9WMbZ<-I z`T0z0lWp7vAQzibgFYNRGY`QbSuu?2SlXH9tkK@bHEGEN4lz%>LoM);7W*km>075O zXQ^k2wlxSZ@t$*}NVETo$G2aCPpooCo^>?Dwv0Z?_-*oE)S z(9y*1_xNp*wj|!H0H-8r>&b);z=-g?mkakMJI4Si-p2Kvt4N|ch!+CgYoX3OWyKQc zNF+lzzKZ8Q%>u@KZN`_+eK=OD%d7g=DIQM+OBWh^Rsa+Dap_zy#)3~I*Jx12KA?^Z z1z6688*s%dqO{gvYbU{cOL)=!M3T3qGy$x^8%%^bbSDKDIrj1}r~4$?96>*43oTQ9 
zI|eRi!272WLq|`&9B*VoUKri7gn>EN*>KpgU7kJ=i!qr7UG2nx4NeiUPw;)|cyf`? z__yvsQ=>O7zX>&^I?y7{#LWC5=fK7JWbXDC*J%Yd{$5QpNI0r5T;wG5w6p%FrKVvx zqmlMBVy1thGu{oWzevvKtrT`;_-DVYiMZ9sXxh+nb-2SJ!qnijb!qSH-{iEq3MSVTT#D4l%zmi{2U& zdN5h%hK_qMcLeu9lS1GZlCbG8myxa24@G9rYI*RIgX@crrlC-u+Tw3H7p*zrKKqi$ z^?{HZ;F5VM@`CuaOY3n?Q?^M%td`5CXNA+&(}gM&B&L9ca-tNV=f9jjQ2y^lMg3Pm z)iZjv=4H4SVmi>~7I@k>OSI6|SHJP*o1t$Heo|jMQk}$-1xw$x5Xb+X321W?a~-LYQklb9LpZ&xMfXaZa%n6 z-3Xo%k8Js7H>YDbwgFS}yRQWuHlnv)F?^--6fJRI$z%7w#G!g*+p${tYP(Z0M<8(R z${{f9QYN6|)z~Ol@MlNbyohrf$4|!{Era?ki^U`cpj&&+_QOAmL;c}@?4=Si1C=oR z&4$`voOsCekLFH^Z1jH@m;8T9p&_K@r5e0e_i?(6`tUVy|1BfmW`IU~`IU>3d{$^$ zrAAnm5kWV_=B!Y#IgN*k3j`p3A4(mvS1f6Tu_GEBN-{Rn z%Zzzmu33>*&aU9564Qkyu9e=@dNZJ2Qy5W8u@t|Ni++u>gG>7a)WQ<|ga8D00jKxI<~pZy>U^h`ywtqc+%6rsF1W+_@Yy4os@fQq zIUL)8JGg3{h~lu4IxSxU4`s4>w`hp^6Vlrg0GBMysq2JKQN1znF+1XDUs;$oM}Gcf znQ@)tyUb^jBTR{wF}H)EzAsKVyLkS~t_+_^yKbACAKi>Ytoc-Mo?(WTXN({CCewdA zF@D_tMGCc@@8#aUQ88!PElpO|8jFB%-vj5nPtQ?qadE8%-3Pm#q*7N?iXiqt z{XI{*kL(3Fxf@q^8D8?6`Y@ML@X1hzk z-zu>1>jYh*mjY-mLU3w85^5|jz&t&c zTC}=Bmstr}R9gP;Yut7pgO4E%gFWN54zy2AcWSDwJ_i0k+O+U6?+>G*hG2`f@xqR) zO(3^{x8Q>U_)+1UYz?}t(SH5;_%~OR?8QrLjYmMz$`_!il#Q<0?CN7}F7jXCqPRi~uT|Id49RZGUs( z11h<))aKhe`kpZ94 z;J>8hp<(KO{9pf<#>Wp%!+$5vW*Yn$Io3#iFk>nH{P;0bh=z-TE(v>0A!HO0`#J6l z@2z^OFliXVS*hMb^}WlHOqK>$#;r;!ILAHRu*4B(k!1ozz&e}TN|LB5(ByP+**=1$K zh|d)cQ4xnuNmK3^_FOxy0B~KNg@#0z^>GreiVQ&FoL{(<7PsDgarh+T*|#df7lA|k zYe%vl*ZY%y3fDw7nkdkgL$r?@tc|L2cS`%%8 zIvc6fz-RcL$ka+R)k0wsI2rmVrL8h7A^;X~pF|c#kZH=1BR1{S&AM3Lq+)jwPkQt; zEKIPi=1`P|BMdU1Gv*e3p@%C$1GtN5mYZ9wVSPg>g*P;ZVdB+Uo}tKX);$?CKr!_D19J312)B<3~6 z*MPT%4G4}PAXYAH*hIts6(&Gf+-6QR{1Yn>I)akOKcVxl&)%>Dw%+XWK6P~!P=GDt|3ul+*gN47wvktfNb0gQGZR+h8=TcW2L?=mA`r> zWSHEn+(fAVkf;C7YgrVHMt0-E#*rD@EjkH*thVpFYHdEpD_fJIort$}EO=w>z7@FK zrxHG!_!Fg!hfH^5cq%Q`1H&~&_VPIB$~5aAL7Zp~pHXcYZ=Am&BsE9fj~M^rEZ9Gr z#bKH&;f(*Fb=Nu@iqt4y*KftTK#4^l$2rfYrG#tVlAW*~ggQw7P_kzh^y~6>cukyx@9&`s|*g%6==Khe{~BKxuL%U zxzNB9*lyGPvcZ?Lo??uw>pgB-oAVudc;JesnCxItpIe5F*(Si?=J?acxC=}nAnBcDWs~?s9ed(ESUK8B5>n`|&%{=i>ATR+N*h^l@typT4MF)3Q zAW*Nj+iKD{xeKx7bk5lJx{niR*lA_VZaMPe2j@Wder==Q-&))Z70Cqw$ItB-3R?>A z@QuX&W~K1u2*E!zOJ0qxWuPFXzm|#29P)O`k*~`RODQ~sGJJc`g%ftq8YXrkWA0wP zc=RxuxJu`1CdK1{VNcsq337{DX=C^gw#k`UK5oG16wlDvWt=~4xOJkGd?51AadS7` zcYIz#tFWcn&Nfw1X58U8{ke3?Z)LxI-sl@OM&lT?a?f8@;C-imQK!2*|3~?^#olN3 z#Db)2xq_bBX;2Za?l#deYE$h|Y9ve_iBqL70wbH8%uzk4qj8&3S+Z_eJ(_Kuw0Ph@ zQ)T9ex01@t%0Aj}v^bpkf_mpUOwviGvkJvA3SznI(C*+K>FW{hI%lMuQc=;Mby)(1 zyrh;IaG(C|_XuZ`IMaJHgltv!Sr$RkoJc^8^(@!ylDjkX(cs# z-3?bK?O!|WzHh~y!5@6M@jLlR103>}a2$Yt_@t8lM~|F@Uy|mInC5Y<$-T&uFzQ}) zRb2l_3VYWQsgcBCR`C(yJh5Uo2?_Qr+>vCr>iH%(G%8n2v(f=aH4q{FNUwB zh0n4j3s)W*Hi7f(^}GrtnyQrQ6kDXG%UXY7Ci{2c`rigffIiApt#*TZ#NpjOt(5hb z2_f%al~n04#xM^>TW@tTU%2zQ?jpJ`H85fZLPGG=H-KwaJm6g5m_bC@%%M6I;er{e zAeU_Mp&e0ex{5*ub`FG9Bd*JRRdn=Eb!O(GUubdI#P5I^pg0RMEnbexzwDC_C&kRS z>9QN_3r5GkDgBarCvY6i?vfbL?6b{m1yrM`?Dxjdn%d8w{;L4}3&_3Xz#5HE;W+$m z@)y|sh`H6EwerCBJ1DM!s!Fn+Rm%m7<@CtK zpg5-rjN@l-2}zgIYs<)Q&w=~nj#3ZnAnx!W4)YbRW;x&VFlii%y2sqjo5Q>Kuv5nm z-;!AUytpf84i+MHr*;s^5=tgwVPUvrHqz4JVKOtfpU#~e<5m`EB;)n>^urC8$7&XM zr~%k#Bar3iY2Pr$4j4uASc3&)c1Hd9u&^gdgvu{>ko5rJ9;Xp!9STNcNuKzJ(I zUaU?IzKd>GWp1*~SH1c+s}EJ0JTE)h8$m`qYZK+ghvXCA4J7-#baeX?nX!e|x4HiS zz{qntDexC^>V?9F^&{y0-)eN?QF~lIJZp%uDo*CH#Kc5`0WpX}XB zIYJ5Xbdu^sHVJSGi>RiIhFx=dY@boi3Y_<;*1x~?r2Cyj48@GH2D^aZ48L(R)f_pd4g(!*_WJD8WRYy z*@KK;Lw5X1Ce8Q56<&kU%H>&9rAjjwXYld7#dm|mvUl(wiue-DiR8|XAC=os1*XBj zW>VijVGR6&KT&0*6W%?fp_o8l)I_9fn~<)Lx1Ym`hsjq>f4*F#zHwzUyRm)`=2;Xl zhndHOxl?13CkJzj@S^V*_Vcu8^PPw5mQUOI9w0{w9!R3cDZqXkO|iHWWl05Y1uC{9 
zYkJ$Jf)+<_L!|EzFmx?(45;!Wc)o_K=ptXBr15Vff%x zD;9UIbtcg6Fr=e7zI}35&gZN!&R#1=0zQ1O3bjMl@8VSXZ?uifNnJon|Mx2hR{hR8Qi?~ z+aE!2RetpL+R0xp6C(RrsfQTooWK`vZ%p;rA2pN`Ct(DYB4ghzXIb3y=P{^HDI7G- zS6ezS*J~j(0dEy}9HyFanSX{u>Krf(_C+9l=`QzsEL`{p9a27NDV6#*zGvOw7V5$N z`YnSo|9GfdTn2;ID=Ht2)|+@|o7#^FjC9ngSwCaa_&0kNJK9 zo_dv%`72<)&p=H%w9U1t$>BAX7!(s5gpN{nd+M}m|GMDHnZVXDDu1`!vRMoUAqGCG zE%I6h#DlYQ$(FqL`D4k17zjef6pqV~Hcz{1f#oc7!!|DpD_!%EgWII+4T#rVLV1#m z4XuX{iRnd^h%Svvg*JB^7F4NBl9WvN6>Yncrg3`@JLFA15K?n`d)^zf;r+P;sDQGy!%^w}M)| zyn*`EGqUJ+I~`9)aQr4${K<@~8&tXE7F>Zt9W_J<^Wb|oLPe-+chs=CKsByx@O2SXoR-u(Ns%A!s^=As!x)>`v}mWpR-P{_sl`=4vhK&VY@g zj#3muT`}9wG-tB;|9F1k`k#s+l-$Yy=&{Iw zD0$2hdb7q-8{)OeN?2=(q2kDYF|NPi$-NwUC+sNRgJ*f>>?%R($d?YlFewiAFR)yS zQPB@M4m{&mEvWo6pm0+L=oiC+?=!W6lhe4l`jh}ZPeJz}^}jMW|2}f-JKI4#Ihk0X zq!kYotC&4{PE}LbsP}`@sT}r73c;S^Vge=^&EfOy_E$r>sOoX2D@@l~)lvd%5VKs} zpJ-fu!LYD4_!i-43Y+!zmqsPogNO#cODr}U1e9toi(YSu_9${LEeTJz)z3=K&hAc0 z2K6?#vGv2Z63BK=Sg#mB#gW(2Pe#E->to5NfEw~2;%s&NkK^g7y{=nNM}^q#WbY)+ z8%Chtg`A_ddT9yu`#(86-UOMrO&Ha);)MMW1zOcyOJ?`~1O%Ko`NXNZ@k!%ae>ztH zB>Xs2d_T)^Kqz34+l#aMKj68#g_=VF>>qgkBsd3Vq9Qo`Y}jX|Nb}S&YVFfg8L~Sv z-39hbdl5)9`-JuDIUWGIOXMzPI~0<`%V(?EMx+HLRt2?8*S}l!mtyF#cT|#utB;ih z-VjRGkS|0xd)pVtav=y-J!E}GXN`AB(Pv}|~WV|HaJ#@s))WnFpi?>TLi zd`sBwDO?)kNs_)3_9X%;>*GNy1%7W8-S#D8LBArop2nwed)g(;)EY#Ti zsABs8M7X9%MF&2;3!H4w*w5=qki8XC=d&l@-%T-u>oYNTd{6|YZ&gb6X)eyepxINz zI%lW*jm(PJ`X|}Qt<8fB1%pie#045)IfiO5;NpD;nyfoETSUtYyD+hDcTNaCm@BIO zp}_?;;DA)l;wx6HmD(q zxi?fJJrC)uBReRa|2j`uv{bjL{6s9$Gq7@^vMpjWz>8cZ<0N}yb)9NUWTpXE!xcr+ zYJIg)t>eQ0v73Hx>bFYIJ>)%uSLKvu@%Z8@SNKjmzcsJA2XTlfPUwTaWYtH&E{?Yb|Ww4+ZO{bvI_^&7&p{X`d3fK%M1*HTD$w7#T^;1NQ}(Hp7_ge zdWF40c-?YI-N*HG&>=*t5?>IdmVZk~R-DJ~^Q%l}8ge=Hk1Ku=-y(IU8XmH5nc85B-c9s&s!Gv@D?{sum40c|9o%neILQ zkaXPF-+g|!q#|dl281evVH%ND{Y7~q`Oqui#Q?~#uaA8Ha>M``){LL;K}%wphDXBi zEk^r3*M*4THVYesAmi2o?Q-aagZA2!cx_6RMdkCiGK}e`J8V4%)w!Gp6Pn6Iz;CDg z<(PdI|8LpoC6JBgRUS;wNzm+GzvZnkzMNT|HmDk2&gZupp#FGEmYPu$>l1*EOUZ`LSP1jq+`@{te0ui)xR<#3 zRPdKEQwO0=_D=#?U3}xf^DXwiRNjPD>38{0>23}bAAC9wJ2}Cxl5(bFnMSqHVWaSy zKH%Il>;z~t`VJ{UetbI;#_M3Oz`9v-rKz4m=h+XBPItxwm;zAC&JV{M-1Rp-gpEv* znsDr=0-QrJvD;aVGU3M&PrA5e2D~;S^P&tZVKH-nhqOQAFt5t^4^Y>9SNs;+Km`ez z!JZD*vex!xhg^7>uv~l|ovae0*7EOr&Em*py~cFqI-W_fPBI+H#XWK9o6C!YcFNra zJ_;wU_qdICJ32Y;yt>PFE*-~(X|gI@aGXrkwDH8B;&=CD4W9D_iZhL3sVt$Ud7Dmv8O zQM)VzJDuGycMikMk(>mZq0595#?zcY9ZbH2Z{^oA_l+Y7ZJ+ubsdt(UcC9nNa(c1) z-iv+Bj*tmoYd*m)1*g%>K~MH?Omk_^bS|z(xeU4{70-hcwBMO^oPLYhY{r83&Mm~g zm6};snl6f|=dt4p8X?#5fT(G-*ttrLT4WENym^#xXy@8tQBn)E_?n-e|7S%vz~Y#X z@H;+ku=|mSLr+ZnYUEM26psD4Ly44vRnb%D0M6Di<5vH;zyvMGYntZW6k2Xg=k;<` z*P%uyDfO|fN}tzsq!&$}_P+ck0R}*1AOG?L!uS6NKY(xXe&)BrFx9PDBIPfU?_yiL z0xVqLWtJ5EAIjc3tjeux`&L0ZrIZ#>q(Qp7Q@Rn5?#=~LQUXeMcXuxk5RmR}kgi4N zg7;$Yt+)Gm-uFJf?@x|Hak;KJ<~7H8p5r&0o4>)RO1)-pqh!}qXvOq%AN4+9z;;j( zg}h+qqKa>a{S=X!%^TA0p%X!0LTCYVBnezETFs6#c9UsnY^WcjIZ!<%>&h?nd_=yi z?W1=>@7dWcvc-ltoj+55UonP+5^QAVNlNac?;08_c$5NE^neWiaQa`SeeC6iH_q*Q zf?ABgr*se`?ExXY`B@)+gh|tkL%!1SJ^EP6)4I$^?w_;H!{0JX60omcnMXjyXVVpL z+MRFVjG@-(rzdqp2lHxQ@erDz!P;mpZXP*EnsqATbzQ3V!BK+`KvmWUuu z+xQbd`}#;N5Q6s@>eN_`n?nN*xhGHSMQGX97;jn;`VhmIT&sZ-BbJ$PJ|4GtPch+i z{j0t1Hevo@lQeJ1?g(f;QK7aPsowaA!HVND0FXJ$rEk3*WfVt}n-eGES3-0Cc7VU0 zy)~I+GmiN>rCL&(nAnj2{@7yu(y-P3<5v|$+=oInX*8=v(n4a#;mG+kn4H^mL{lA1F z)Q!)C+?~g#c=~n4od_$R6$Ysi<^dbm{Kv{uEV1)EAs4*Q4TWTvpI@Y4%jiF$^((P~Xli8X(hyZP+h%YhX7qrcwF^fVVpOU<$d?xi(O=6Xz(0DfPo+q=23_3O zvCUfj^(fv}YX9>%1sq{7*L4&Q5i;l8XNTx~hoZSZjS};8^Kq80b@ok;p3a1xik++v z$RZm@@T)ye0IbfxyqUkGv7p3g7ow*wEHoNLQo8`oab)euLTw_V8vwbz7QBFHj=uAiO z?yBNaFiB;7q0>@zs9;T-UpL@nDDKT|e_@^X 
zoo4t02>;Na`FuKD28l1KgZ4w-4m`##X4~N@fKvpP?6+`DrR=X}a+a%|(?-u%cZ# z5Mv@Vo`8rm#+s;a2kXe`Z)pDBOe94=>McG!3nwkuNzmL6S!P}j|6C0}R3X2!dpQS! z&-WTbn}idEm}Pu<@moq2K=>|0S}Qk!}(stiXmCWb=Dq@r<*L zhNh%P1dcU4Q3V>5^c~Bmwduz>Li&6bZsxKqzo(yoNC8fD?jwe1RON_ zp%L*3-L%~3G6e;6+iM?x-R;rysu3xF zUuE9oD9v~iQD}i)V{>?LsgTrtofpC<)a}KiPv`f;zUwC@*D?Im<1yge8XLiaZf6(j z%uAO+TU$o{CFDY-^cM?&kf%8G;~dBO4?3;$qIMYymj=_J(DL5gQIdJNV72s~YnH_U z*Zm5j1a~Zqwmf!nn!qUaG0km_sPpIx`?m~S8`>^WczWDP!X|M;C%b{))W4y zwHbMbNYGJB{-!|`l3?GhFzdHDO}Xxba!5=!>r6>3)1fK~aSI3kE0Ur5L~`cSN#WS{ z0td78MA>l>&dq$cNly5a3TuQlH85|3i$)-vpC|Tp>YUrtF zF{ntrF9$xhz>dV1hxqFt&fWzJ&Xs=3G3@6L?n91Pp2U}T7!zjGs&PIhJr(3PW?x_O z2-O}8&pmrps=?(~+PITk*HyDyr_i3A&^TZIcwyV;9(z&~+kZQi(Fm=fbrPuTl2^jdEA6QTzaJ!B5C5HnCG(UK^Gm;VbW4 z@6UrV8q1&*SvrsuO1k5@yH7N3vTVAu*zgMImfuuvoz@^NEtb0lNrGo%3Y^2FqNxnU zz~SW1tKv$`qMOppWY|?E{;8~@GR;k@N``>D0!o2>h3+b95zta2cl^D z2{OR8&A#%@TVld1d1-PxWqf1}LB?&H&|dD4=pS;IzF%HzrEEfj>i znxWaiW5nrrT^R*eexrHJC)K15<`*F5KSW>FSgAjjvE7?S>`mpR(Me$rEJIggiidQJ ztX=B}0II&iB#kghQ4SlBvXMr+x0R{*k-o$Y3B3}5e12MCD{_?89X@un#$VvVu2vLr zQOZK=KD0W2dAtjvcKo2~Yl7_2?hfJ05UN`rjA5qzPO%xr5`P9HWH7HY0X=_X#>7a= z43ro>b(EQ7VcM14iI3^=9<$9vlix7=?)R%*z?}3;v=v7kR5&eP=&Wm2E0HR4^{uR? z^9ae!C61@qTpEOX5Z{Wo{*H=QRBvNBg9HKktU76h=M~kcp7*`1xbhNuxZ3e%Usm(e zMvtc=M7o~atoVHHk+?E~9Gz5(OCeL@iDK07WuL`W(&lWyGB>)1kNcZ9JZVAt(cNbd z@G-XQ&bV1`8h_agGAzJwTey>0lt_s9@cr8& z7kvbc1B%P&u!3m-M>r>GQ8nm zhMLG$oU*0gvpd!v4Fua#37Ra*6tgCtDk`_iu{t=7pFmnT^p9zY5{GEDw^F(DC83(ikn^O0PIo@d@aNhB>SZ+qb@}^y zs2(mbBgR9eR{6sVPqC~5mujMEb)zvdttOjuW|84N+SFN0e9w-s-~@5$b#fM4Em5i> z900lf?d;go3z~tpPQZ~ED>Em3KXP2-4)LM!aYsE^im`6kcGr!j&y%zxg2CqdVLe=& zWvtpFg9uGUAyyWFB+$!~&pc#L{%!zC6A&p>2nH?KG1f~eo;hWfG#iX4({RJa@ z)mys>ZT9D~czJET^=Z7Tp@^+(#Uqm)gx%o#Nlnq~HioCHoW9XvWMm-KQ@{`rV|TaasxzfUWnb zPsgJo6h9%;GXmFPgKr`-+3OC6L3OL0#8t6LprT+yc&a>xI?dhwquWnTMM^nHLmIpN z_EFua84NAAVw_PLHUUdwVj>uOvjwjfymn<(Ia7~Lh`(9glw-;2#oy$1;$(?Y-=6PX z^_lk3lm(vg+J(F9PLuWbQB1|<6-_n^Ajg)X5u9|NjprJHe5WMx{O=f8;EJZe&uj6+ z?Hj51rf`1X)h2sqE0b`JHng9u7Lc@2elIklo|;N-bGc0ag7MYreTakpA)X%Pjs=_L zq@;N2=bvF!V&K?oPv*{8L@bk0&2=Aaw9m$}L`K$PL-*N>Qv#>quqk3;h3luDwBEiL z=P9aa10ivR8SFML6vmTgYoS_BrmnPp?>#1yUpWc{$E3if0ew#vP7(Xgs>HnSrMV7_ZE|p}}oM0oWiI=iiw%LCsHU2g(eU#-$0RyKPOHf?G!GonC$zm()AuL_ zLq5XY0qaiJZNBow1iaSxR<0{JDFfKdiR&^ESZm_wq^@$GS=*3zPQvDzaO*r~gJQ{> zyQ&XI<M&(S|(>(ftf?*Z;1p`;sv2NC%_4Tlf34_tO`>xacLw|@N6G7*7q z?0AOE3Ga6^u88cDZQ5})Q};AfZ9Ag8P?cVo4u7g9#P;-wg^!i7Z7coh58;d5oRM=% zg&&6;7A$AwDkCS9nR62s^KzR-N>$HTi`96GQDEHw(R{LRSX@|dK&Hw0aKvy-olP@1 z2>sWJO0aWV13TpW_a5NHMX01N~8KaT|Y3b8rx?MbtRQ#8{DXnk5dyFNz z>8DzHzr&R4rG7~j>E>0VbVyIS*acrs8$Fruncp?g_k=R?(Zr7(E`o^i2-K|7eCB6D zWoyD39=_}|J5ItG^L9k2`%7Vklh&n9Oh8;7x1GyPtY(hQ`|#G5-D!_cfx(`Oa^~c% zNF_#}eTb8XHs2_Vemlz9$dR=EVR(Nt#$4PREolbdMYu+S?4R^`?B=(@o6Qm-o659@ zr;ucK2>-?Q|CjnIkc9NOLTm+0R;VY2+tuI|mdH7IKVOETu|$faZd@eFm9SKu zOUlE?Ujx*qB-9AZd5MXDagrc8=@?qEDV?|oXV3D`uJE){wT!#mP_w}_)!c`Qr18v7 z{e6o3mG1Q!m-(k6JxK6%=}8Zojy~;fwWGYKu0wX!HytU>J74_R!b0ZDV|x!iExW$x z(mK>^??*C{R-B7PrQ1u}yGquqIn43y7&8RCwe4JjvMm0)TcyAwO;5IBb?^0;E5Y5w zmAwfJ3l$g)RV1KGuipGjyym>-06C>q_B-1NY|nO@r_D;Ftod4DJClXQn*;n%@ zXm8OIO@d}ZoKCN`xJ?gEM`BF~9Tc7!*~)!e>jwuMDjIY$vn)bfLmp$HBxg@ypE{w) zZG4>jCsMEOcS?3CQ)Uvm7x()h^d%V(bWu0+?&eLT71k1S1Me2n_Eb9WbJ&P(OqsW$ zY8cx$7u`?7H2ra;y~z^oR9nv=Rb_H+7+f!e`1h8^5j)x|%j1LbTFQ-E4oo062vE{X z;n`-d476_O4T-k`ASgQm=wE^Zwf&7JPmg!X4^D7P)9aSp^fuW8KIST*ex<%Xm$`jR z(ym92!uHHt;u2{7%z!4)UE^wWNMT?HNBRCu-~l;%t%o z7^{nf-&fxA`DY7c#6{k=g;d?eYdBP5CP4fduGu+~toFarfvG@Q$~N)BuzVRE&ZlT}*Xq#?>Fh_!+XrdlON< zNH>u5Rp9CIChFX4vK~iHf?lGV#`mhee2161#(18qcU>=62V7pNd+l8!HmbV83?^Si zmRmv4Mebc*><5J2cojd~88^WG~M 
zXhH@EAzve#2&V$_j#BYFEmO|9V>hvPIyak_T-;sdQn;MN4i*237c-^(7v)D9sm&yN zL46QzO9V041Ov6jSPX7u3Olra1_+<@A-4(2yr(WVBC%>uR{75szMU{BHu8}pb`QK zMxUk)8ntlxY>^g$g^qg?;xQLoQd#Rl*^qued9+89Y8WGpue_|_-QV$&zxGkm=T8Ld zR&K|4fV@sGjW+TErDm%^_;vh#yGZAB$LnkUE%+HFT4=C(mj-x8zbtNP$@T|8h3VtE z=~8i^`#L>Doea7%&p?MCvGED&;d4f1qJ{NAc#3}Ll6<}gjkMQ`Vt)4=qNhTVn|RzC zi2iinaPqke93C*IMkj$inQ{~T)TTAzHKo50ZbTZ-91`LnNVgOwS8i>hIhFifwY4fg zEiLzC5{O3WM}X6YYfDm~d@=)(Cc7L3g`WV9kO~F!&$nI^6WnM$uHG|i8}MnH@+hj6B9cLE-rC`0p1cuM%)S;m|5PMJ4aU~6g6 zV*H}ROP=qne&cdSJ_=JZ5RAFY;^+21K9dF=f3 zARN7h6t(Z=_pY<{p3|UEqsw@#Oi{G6g*htg9$vWQC+=hb&P>=ciJ0REhw8SZ1 z<1eQIb2MdHv%3b{B1CkW-K7ob+IY$3v}DJ{!CyTnr^&^-gwG31_f7>v5Uv}xr}>jQ zHZZf1xab3aaVV50pO(=nJ%!<4mkeQYgnt#z!g<2w`F%da!du!ODbHA*?+7DI(dio) zWs`BxNbblwVxL3yh>3M)Z6i45=@*D6l6;v&kIta(h58`!n3y6@o`G9l_b)#Q^!ZCXd5kY> znL4gb(XpH9>13}SBF)jU=Q=yo|Fn|-F@_{|VSV?;I>*#D`B+>lFP=rw=~DBny~MU2 zK4^c~#g6zzdpkmd#Eqygn^j@YV#P7Qpw*C?`^F|y!XYLfcTtif z;ZQ%>zv@RfOkR9lT=(0&4`@=yN41-4;5h_tP>%uX(4EAb19A%D|C`or4%y-)XQ;oA}K}N|F zCB)%47Y&eseeP3n?;qBme2m{w^Sl>oM$FKikjg0#jZz>PyR|LT$l_84OKIq7?E!Ar zfx~PHK>}3BL)}G5z$gUv>QcXB!@ewJ*{^FfCUweyF7K{(rY=MUV#d(zbS>jnVY&Dk zxIZMY=-eIs5kYS1H+qh+Tm}>^Zc(16EB9OLxh_*G(z#~nq3!`k z@QbfkoJPmtZ1wO!`EFHR0f;gUZ$igpz_4+`sXzC~JyK}2O;WEhJrgsho`wjvSe)3} zS(v*=u#`!E5a2Kuv1$tpKKBoz4k|Wy&3wyQ%00h(&1k9;Z7Qz75ElU96DxRje3x=MVyu9k60P~T9TNaygTfePB|&xZT< z_0;rN<^%c=^~5>caGfmr8%mf7QcR;K@>K=B2fkcIN;~rId2{f$NZ_+p*MV3;mm}Ua z@F>O4r|Y87u2SD9;j4dRzfK>_V=3=b6#cEO{3-~6#+Mkt%ui|y*F{E3d*9>Q7hStR zAG@^zf9S#kNBFwfV>4EHbvv`#e&BT?hg0cb=mor*P7Le22GC7wRr@MkD)+lUc&&%A z3mGIgx2@q^#27jio_CzI%XnwbZJRdKo;uqh89r_Hg_a&hepntHKJnI){t$!YsiASg zWMMD=3TnOFY@`NV(sxJdoU^DPKpqFxULaqeRxxbt+ME~o#2ps<`1xZ(9peS)4O=WK zyrV;TAQ;yldJ4HbSQc6PfnmY==|*7&-ulOnso{D?Es&16Qd>=54WyDFDVgEXgf26K zQw1yaejok?q%i)fn!6Mav|PY26?OK>&`7=Ekb9EkdW|)|*-?A@xvFuZqu8eU1k$LH zYY);v$&?LZ|N1QYo7sL>T)sYdCz@QGZ9T@WvUJmTX6?+~U)Q>I+%3*>I#KlXT9rEP!l)$N`I(+x*tdo$oJOY;v@kp zTw`z6H^<-~M>Hg+)aLdvh*eG3u0aJAmvxrcv*qHBerMF8JO;hHQo1`<8B&|H<%{xS zt7fWh>|+L^JMZK`0^j5_+f|t1MHsPXJf*&Livd<%S57q7s7NybQx(=j=r+KWC#~X| z+^|h?{q1ub{x_fd)iCqZw6FN$a!rSm z0&BO(xMi6dC$T$exyxASNvk>LTkEzfJTd|9hR%?!cST+V(B+DzXtBq2UCDD?<8^h% z)FGeHD0#Me3x-uEXyQ;kz8UbcE0QDf@!uU1o6k{=+hVp+V#O5vZmoVPqXGyNln>#U z@7%tw*E_nPt-l>zw39G6UG?QKqn-Yz)sCI+OY0nt)BO$7rYn&Xjmb(MFxh_iec@s3 z=wn*Xf92{@u8BITV}I1?2>2Q9xiYdTsJd^K!^na@e7ZF7a9MKWo+#863~X67s<8ig z4BU9svf40sjM3INP_Q8E4=78~%z7XAZv`-go^$i}3@wwjUgEXokz`UwH{pM=o^FmB z{EWa(m*@}I+HiVF<$!=`!T5&FXdVOn97aC!k4PafYDkIkL9c&k()(uLWq%#hjlDEg z9O%e8E1qB20V88ar<9pQCQe0xhMigJKS(|;0O^}VR#xnHkxzvHwJW=*QdiIkzjgvQ zjv-k{jmGXdH-XT-bw@9!{>ZDLJ<; zMh6a;JjO0aZeS250_PCo{)u@^uVR9teBearl*9dG4*}V6{aML;*U3WfQ7w~^eqs+n zA9xXlG-B?&V$nwLxf}R8_lLt41YP;$N;Q5$S6c(4Uq9;%iNqV@YJ6|^`i&4_3l6|Nu z-mZvsGvw7HuOE_q&a6NZIF=_rCatwZVd~3~KqA)N$jje+rEBFru+9ZJz3SRGTXI=? 
zNM7gj8-F_n0!2mb$<^0R18 zkx=6I3k70TB13MR9%)XxyUmXObO0%8y^4SSPkn-5eGfuK=Z9x-X#4G-VI~?iM66L< ze$M0fu@Ye(+)+iZYN_aFP;b`cB-G3dE!E)*^-{ycH=nR6@osIlt2lO!d*`-fS=_0- zm6o2?{)DV4kJGiBkjp$0H=^wkUy!d_vru8!=ayaHoT3tpQQ)l(q;San55x@0iXhFZ zO@jIKyiK(kfWI-eJHQ|G=5YmndtlNNrJ&4fXPdGe+kk|HXR30lBv!x#_eQzX<;rB8 zJ(hN33^V(&c0gn7G8&17{jodxMMj+Kj`7{J>fXhJDTLAGT(K8b7 z{*IX_tzJ(_K!iEjj4CYCVjZ9AbhC>R#zq@MtJAktHOT{fw@-CnE#8eB5LX+B~2z`f+#c{cw>-9>_=NcRr?f^9XTgu2g~fm0TTj@ zUYQ+tN{;-6v%do5{6p%d-?ekDILRG45wEk18-{g8)#}1?o{dCAXYM!eXmcePN_7Mh znDvBiTDhN(o!>$Z_+4qOJPey~Q!6=^im^5lqPDkL8v15-vA?@v{&YY(3tdkk{x#Xj z`Csi*KTfJwm$$oztp%CpmTRFXd4NS#hV;_eBvZvYjI++kSs3EZXmOyp-$Jx2!#;93 zfJ$}*LB#*}mi&(c$NFqJ&P5PR9#m6yFcG*|bs=p&QX4211ldA}I-sO`=xq8CK(zQ& zj`2|`0B&M};ikg6znss}W~C{)QWRx-Fu@JOM|kSU1O5aVG)ORhI@EfXsiq5(-oNnN zP3d`%5rdFU@My#xNT0UP_<*_1hVZpb9KKL$wo$f6<8Tv~qUPQRqXK$AYzg z8g+=vXX5F()7irXvKrU(22NB;{3*qdKJiJvF~DRqq}zz_NcQJZea*V|y83eszy9o| zTbZ3|oqKthdftW?3c+xS7nhr2=zt6pZG^Y*-b{rR7WKQ@*hOt1;hztL-!+ohc-Iq1 z=Ir1OU{>pNAi*sPtT>rOnXIQc4Mia(N zXl(|+HeV;6ZJ4wdm9<)del(f)u=J+V9R*HWOxjFNVeYVaoj&rFm}&0Hnn_KAzU5!- zYI$iNp3rY-mZ}9P1;CP)Z7~M#UCn#JUnPf}{bw;>u>X60Z?nzXDE#+3f;GMaRd-J@ zyX))N!L?bsDAG2XhXT~_5nR}zfhb8XuSZLI=!6=q%c584wKt7XpI6M7u?~TQ7ruM$ zh_{NcCj^3c+FQ$hw)&9cnmaSk4hfWx%I^P_)%wpsp+C?S{JGY}ev|^)2Mdu6(qz1< zhg`<63>s%;E zy44b|&ox3j>$YCVGECxc7j!I=Fi5}57yg1c?gWqnmXFU1<~WM9c=wz$p{Nn=M#}fk zo00s&=5hBnvDGw(9_}#np`Fqp@s^V3zj}VQGudT5c*M@vTs)Ruf*U^D zX-6|-1s1nRB|C4wTS8_>@1)$Lai-HaD7eCFAx2+B5l z&Sd-+TyB)amcUrakJHGFJ@Ec|PjohmTJ`eGE4Vr6+GmRdBLo0z45)kyy-ilkjM&@M zUb#LR_r$}H1Re;X^9?QzozwM93&q?wQ2LI>O4YVRXlJtZO0!y-$JZ|T)Dqb%P~3a3 z=Xrd6y3MABh^=9)B&5gzBfuzH(34JA88;m=^@6rN+iewaw_9vsXu+ek+|r(s}mL^Ea!Fv zVwIipVDZsosOn~JKwv}62hYr*nsvLoo8ftw-B`?-t$z>2i0nkbXg5FL1xXk3it;G`JUAKRmRYiNKej%cw+r zTa8WzZ>UrJhe*AAO9nIlfo)r4hVxPJau_+#WeK$hqmpciZv z7FB+lz}pmr=4K49aonrXr5c~{x86Co-YH-{wIz}zW{-Yr+{{?qJQneg==9xTAQzND zc~_AZ{p|0z#evt;wAX=DYk{M{5c!9MxBT_{W5NNX-elOm^C*H~U_h}q>IT0EWe5UA z=tl02u3-Zbr^QvIEX@hFsI2BA8pyA}?cb8t$p(@xf+bQ3?TAPX1{_|6yo-^_DdDP= zb#?3v>^<}@mu6U1*ly+<9jA)s++^i_%B^#=QS)t8?nHOIObwp;dRo|WFfp-v1iQ7n zlD99v@AFkfdj~pph9I;_{V>Fq!LlsXqP}&7sM9lNY-J5dq3<;NmOtN5jW`&?+sMFe?#Y z{|;}fTHo2Sbe*oYRE?-C5YzaC^5XO@Ww_#h`!H6+{3sr46RIzuJsS#N~00RZT^q zABYqUMZ4I8`r- zt2IKSm;0N=L8I1|2pr~wFn@eT;Dc#F_SFZ6NRxsP+@EFdYLqR*(D-OkdG0$Jc-igqz73-ea&cXBV*8a^&=;__T zK28Qdv(G)wNy~69AvlreUFU=M5-8U=Jqp~aMk=^%6K^sD8fn#goLwM~q&}!C#1!G* zSgge-eW4*N9W=aTJ(?Mcwtyy|FckZyu)F>!V=4TCFZ&_GSv(Zus(9w6wdKY1BI`^-FGKbkk4H!a=K?5$Zbqn_7FR{-b2pmsM z`Jt!GR$kkzvjuyfpU;nt(k@>*J`f}nUlcs(|M^}{pgq#lo4cEiYWMxuy`E;c=CkRs zH4Q%x$ZvSn%G1Onpr#X8WQNY41MaSMT;_A9e9EX~_^$fOX8RabSD~E}om;Ov530a= z1zXfGtS-?SRgSN9JdZ?YyG9JVqc?|!8&J?2c^hj-477^B2j)qu6_XQUQG}cOUg@d$ z$4?Tuj}4h8>CCa`wFew#>y6)XAA&?2#ywEI@;)FnzZ;ZqKbmj7_n@KiKPOayJJhC* zK@SLQ6);n^R=t5A=w(?aa_*_DX+#kT9g3xua+!B7dPb-xiIF~-SQiZuWEzEvBAKyW z2X+IzCCZ&1UGKIUn~3vSHW_PWlHKrkhXr4Lf#y=X)9}wUcRYADZfHg*d3dW1x;*QQ z?-5ug_w~7TdrnKWfXR_UB`_KEWQhNIx@@6b6ptOuTm}9~3k1V`+!GBpg)Zg=5y`LZ z&dyl!H-cPMtZT}QCHL3Y-g}i9o(@E40jFh4tyeRvhQA&zoCZ~3&MZ+ z?uQvn0E$1Ht$3yXF)-?{H@1va?r$;sSCR!7u2uYJUdmv<$m1*{VaVqYeY;%A*qadQ z?WG^#DVK4&`~usFISNeZ|M_|Pvj5}Y+Fk-AxJa^B%_IUkz)Y8MKJD~&di&Ocf*;wF zADgsNR8Pt$!RvfG{|Kcy;xq9iWj~B*HR0D*07cOpETUw&hK%##DHU!TW$#O#4+N}l z*1G0SB|e}5ts3)BFh&0E^X?9+V4!Uaq5WezOTZGa;6ep&PaO+(R7LS8i!y{v0{v5r z>wjt}iH(G@W0xOu4#a;&=|98B614Qgmh~o^v{U6jW8B5tzXcYd({nR=6Fffs2CDJ) z!^dE5^T*g5oZ!Dk5H4Ns5=@bX(rDd3Q(UqLQ*{Vjy?vFhQzWKe+oJkJD(LM%OBq-` z^lqC2=TkB`+TI(JWE^#Gj;cTPs^hba&pI#~KK_KOzbR}p3H0`7SoF8ry5o7%h}Gpy zUV-fWla_?J(;eg_u18hLpcb7zG36FZ#2W)>Qs>x)lo!LdzHj*boGbWSqYfC*i?JcM zuao%khpgp5+{`R?jrLY{(TA 
z;Y~^7-GhLNAHyi?UA?0U=N5>zhkO3AQp*b{m0nQgri1xJi<$@)23^F9#i>y+DfxMs zR$0a*_p5os2eK&9JCgUpdlCLdS%K$rr?kOF9<%A)BtgJ6jzreMx%)GAgXh~b^zRU^ z|1JE`y=G`Oc)@+dA&&)SfiNyoLN7(^UrwL z*)tB+Hp&*)H+nT0i#C)KysF={y2cZb9f_MfR)AjX1BfYux&C&00ZhlMa{4X>2Ho3r z{Q;J6U3VqGPGDAMrJ=9!9gmH{2BZT5RnU)o6F$&ZQ!3gRqOseCs(ST9)buVD4FTU> ze-{FyH?v~FP2tfY&mcqY{TJ;jTFhCikm`S1!+3y>Hr6?-cCjOzRE}wEK6-i;JN3eW z$Uly?NCn}9qHT8x7+OhW^03SF;)NqCRH2pzum#m{2=yp%#$9spk_-hdPmHNE`Dm@m zc$&Ut!3rhBDLolHm-_L7>gY}%426nizx9eExmt-oNAQ8JJw~Sw^{E-$PYM8k+jJVS z|6W1#0FQV?z8E~NxQMfy;}0Ld?>i)Ms98_EfvsQGC7%#wN($SZFBW63Tw<@$xRoi> zh;UwK(u(QsHS4eMScG3(PU$);08SD4cHV4uhkN`0 z6%!b?s$YbPk0i~xY#~()Bk-#SJz<&$(k8SvR$tA-83CAdtLH>lIc<>d^g|>>gXOOr z2NVceE1)nN^P?`xU^XtuIIm1=t~)UU1M2Y_r_G$H12o8sUd*f(%HkgGA4XIj@Qdam zS4Ua!qYEydcwSZV%8c;qWL~EuI9Va_SD?-9E*7Y$>&5f`U?JxtEmH!VM*0%%D#^09 z{HBfsLz{drt^?M@&a#*a4^bFC8b29R9oQXvg{Nl;5IFkzN7cFBHdyh`wvrYoZp!PiU63J=_2oGIc1qRdt*=8du3fwEcJ zXM*MGWyVI-+FOGnyOBi^dl%YAz?}%;@tVksOhJ5$HUgSc$a zy^}K265@YxVhR9v751h>m1akIm7J(aO%*k@B*%d4N?JD$-eqtjeWamN&STC;ElNZ%ZMd&!mC zvgljnl-K2!Z5Y^fT6cps%zuBECETQ!(TKcBv2JpG&we-?ekGBscKoyII1+=<>gLtx?un2rt|0 z`hIT2md9zQ`h3GLZeuxxSF1L?ki-~%Q3aitN65Q~@xzpMRFr*z4|5ZHeEE~js|eDv zwG+aey_P>dcVrk)P#i(jFKc@ecnOU9Vkc%J0IKWOmp&Eh*w-A(M@gjBb9IN@G0-Qy zsZi=h)!@qHYuS-g%JO~FZVjNS4!vHf@F84dl#pgUBVKP}W7kQ1C9P;BH5c$^_u~$; zQE&7+kL4<4YD|Or4>Pk|WlIrgcC@W3P~Fp3fU}6B1Z`WHt9n`1_A-&_GM+6T=?Vxl z0U2O&GRw;r5d3y;v6_Oo-RRpnisnj2D*x&uVKqLqYFC z5#S~y?{^QHc}UKPhp%hf6&kYiEjM6ftYo>BJ=(a57F%OD^LkyzC0$(w-@gkzhtvQh+Vn>v`S{M>hI zh`kx9Gz|`VYd2sS+PM>$yVLD4*f8|LfF$dBxdoerrN@;6fX6g<#>@+_Z@IiyDckKN zcP?~CT7IW3m?Hii3{)bY`p-zHbl(&3N*3ivC<9n*i02G;rHCTWKhOw36r6rUYJmtC zswzSN24b4>CRU4run8K`6GHJa|G^)NWi7eTo0cgUZPPbP8W3FQ(-bug4Tk^q(%@Kn z2>nI1G@i^Derj+Cf%wF2ar4j1dZ+QNTavbq@-*0Q`+yOOjE>wyNPp~J^0dEh`XiO( zgZ=7@wpIOWx~5?~DFpM2?`fIbB%58pWIqAd-CD#ujRqD6+T4(w&zL9gtpco{EcJRUue%Z-s z>jz@vnE8e;fb&i%5e^I|ndcW1d4~t;zta=al3XT=QO~ zG~Eqg<471%4yFV0XJH&F$+&Q!Y}MNzGVlMUYr;mPt{1UNDYuecEXrnmI+v>^MAn_I z3={jDU|G@^`HFeHl@0x6a(aSse)#-Ap)jT7|3xhXR*z2Lt~XWeBhD!0ZP z`6SD*{exVc`EMXNyimgDRs_6O!2350Zk^$_ZcPZ8cZq22kEGGTLOAA5g(>Azc~e>3Ex0fv0()5Lvg9QSmq z&6ZQ(f)-8Zl};v`#d*h{Ki&ZcV5mE%zc(Oug!SZ#yr|D_0YBRz2pY z;oj>@&ws!uy$R&pX9tV12hD424~*MF@{`bO;5MiMopPZ$4~UP4+4RSQSgQog|77R> zX`GS||M^}W*+9VH-Ojk7!gRqzt&wn{LhXm?w^uJ=eaSF>IuR_J527?-#AReEfU+QX+bF$72g& z1W;Rj z*E=E@o`YJ|zV!8^en5??;>2W^}ILIxj8=H|4X({5fR|( z7JiVX{O6%G`wsoX5)dD2p}((Tap{_N6w-AT{$CZ2!S9m|bRT-c6%U^j?{Pyo7@^1g z*S`siYe+eqRig~Cvsai*&zrV0f@=G4ipKs1ccs>CD z+?w2lhnz_1_o!|E=DknBc;n)-h|?cYy0#&X*qz=B8Zc!sD#8i2bsOuIdrOqcEsw?fNWYTme*PkojG zhI{0u;$tL@|9DWpp6QE+uP()e!^kB-AOz#fE{AA&pV%Mk69W%eo}B^mNdNKuBqe@x z5mo+OgRn0t9R8+ zZ2MOL-<~7@G+oO!q1F1+Y8(c>FFcla6_mdpB&_J~k8>9CU$%to2cW4605nAafTjh5 zc|dYxmmMsV=6TIYbhYwk|L!Mq=rOZAuVC1YS{?weDv#~?{J$RPvM1%2aZuw)O4(j3 zh6Eb$Kq=vkX8wHezOP8F>fgJCu>M|uq#VD?w^ZZ*$5Ioexn!xhJL*njvD$c|nJ1Di z@qgHR%c!c_u5DNmq@+tlx|9~A8|jpmmR7pEL%O?>F6r*>lJ4%#O>f@CrQ&rx_dD+U z`~EzC-g^v(W4niI?Q^Yj#xdtS&N(lS*E1Dm1OXD34qa(?-M_A`j^wY^Vdn2vAqxrI zua3xYD&lYd4}uU_oh!HBpI$BarhiuV>Ho*-07cMl1C9@+v%p7fCEw?}ZLcH#sMnvJ zMIBZh08(tYFvMrIv8PQ!pnFyL{c>^wUCWlFXHY_~{(L9)u%Fid4p!vcn|QgB3^WMG zG|AkPH!#f`K#26=HWTOF(Y9QVbg6*VFs#!Q!tbAY&^m5M$gmnoEcLzgGT_ZP!*p5g zdWWt4&A)MP-@m0%EB188hl_xop}%c##jAgG|N7_ISYeW(lj^)Mszl^PY$xeNNr1F= z??YHr*#a&$i&ucAtQNPW0_De}Wjs`BBr^Z9xF$Ps{G4mc4F;7dJAx1~$W>Me(xH++ z+rf2wmv3pylg{5P~N(7+Pbk^h>7*6lN zIKKPFZLAeWya1^t%;ylOfP7h>C9%JpTyn|*x=*34g+e|2((jb1+es)lpr!+YnI(g~ z_|>poRtd9hI)4Y8KFPqwF6+I)$K&Wh5%BYFSb5dfzVPS+F`h5=8Y^hEv!r`R=J-XCV1yX=z&slueA10F=iug*~6Z-QEk7Q(2qyPKZ= 
zpiCiUf8P4cKi~R40tTQb+=j&ju`(;kPd51m`Dg|g_+lhn9yk(|?HG%y^s46b*2_2s zlN^IsN=3Sn5b)f6>j2f0ZjpZ#k4wJ47TP8Ol7mUd1T;MBpWU7B=dcWaJsA`+hkH=6 zZqWygDi8iQ7Vr|)mLGT{l!x4&5 zpi|8SNJs?U?Iai&3b3*8Hktzy;`geCIq$n=-)g;fz=!;KHGVzn?-3nuv;NH~?lV}ega_(sgw z$d`Z8tLZNvjLv-VzxjDIASCr5oKgXRzS;npf>??4-z4|v(}j@33MEs^;bP`QAp7_M zzb>^D*7y6tfrk~sxCbKSh#s=M57(OFKkSG2zd>$Ez(53C^zL;L03n%kR%?bu{_Va$ zeYcyeF!z;Z&T)}Yh9|cyVAO)(qP73Lcu($Je8A$$nmtcP{d4V;2dHmC_)ntKKN^+u zrEHv}2r()ELK}V3jS-%Y{;;9HMIc=SVmM1Q(wm>01TvKn7|tSFMO^xz$iQ$GsQZog zYsHWJ12tYf;6q!qe-n?90UHm^Us8>_h+Ur`SrDV0aev>%yWK?SI&_AKjpghCOA9cTzsd@zb9CJH8NKhdDqwwzzBb)P+(s-BvPotx^V zKKJE{of0+=yCjS4p0!)|gnXC{D1=W23$*+oi}#8obQ6MP4kB98)cieb^D$}{EJQ-< z8XtWT>cb>zLWh!C z4{L#b9Gj(0K7{TRhaQ^4QQT^z4}tEOsk@qC>dR2y6#~qpH$!?(jHcVN5Jj;jty%tvw*FG6K zjL&|Y;(lGDNs~*A43FVBW$2HktJ>n!w7YZfHO1YpkHHm*; z|L{b_k2y!yv`dSmiIIIDb(#^Yh>1Zwyc8B5SBt8I&rjvuf3Ky9hkN}^EZjT(WKw=@ zeaT(Z__#+<$vH3V-l%Ab)6lI3^$N^B;og->!77o8p$u_V*_=v-c>6Z&_w}y7KA49+ zO5QRrQT+G@)rZe@g&CEa^lxL66Y!f@R3zR#q2T$wmPLI0lL#@WxKzn-Lb`G?>nd?^ zSD@!vKpi@DaVFLabD*u|{$qU5>mm55*J7AL+|;MLl5R22g@0eq{p$mZu7$?}t=ol5#ie z=?kdAMSmRee^$Lxf^z-{*#Uji%MhIuo){m?9+br`7C`*_`d>G4OCWC2t4T1#l=p)q zL|J4G^7Quw=4AeocYHD!Q^aVr{w9$JNn?iTVkePt%iJ7E_YvFk#W9vaOIf66AfoXS z&EMkxI%_uNF?}W2 z(%$y4yTl=Akj&n_zPg`C9u)5tq;^tR4!6wd7iqpsP#a=p4(x}+*tpQ>7LxH(!uR`C zhHV^=+Hj%l39!AoAz-__l5XE1qY?dimY)yxU-kX3`uQB;_F(7@+y_U9Qw;_sbkzqQ8q2PYcgXOKi_?fJ$^pO^4j8IF^P*4meW zNSjGm^(P2@@%ka~9JCVpp=g_SO#cXVri0 z#icj^hA!uv^EqH$A9cRHe*ADx*i}3L=^hvMf7JQ~gkDoaF2`m0;4BFw#)Wi&qud8! z0#r5oVWz~Te?rX_Zog?(u>-;;@M>y{*8lh=;t;%hT#dKarWD4F4B%UQ)_}ha2cIPd z@Sbr6yaE4j=|NuTY^Pg5$sEmr_1!gja}m%qNirS~GDmFn6ZjyeXMb-!r3N01nwOl%e`PU zGHM0>LPsf8$Uc7w2KN7oU~oI)Vf8u+#z<$85|=b)EYd@0MSrB{wGJQsiItf{QY#Qg zYGI1Fsv@33*j@t0)RDsuSMxWdOsXiQ=I}K z2vJ$;Z_Oe=b1sg|`5?2aqc6DKYIH=}1eUFvN!FA6y~#ffLde3sXe{h?PKQ4NAZjVz z0|*Y`2BQFg{BAmc;a70rw#2dT0>{?WC}k+Dk>$3M4=+ZpZNW>n%CNJ8K@ne{pU z&C~)zo-C?v4Cu#y$CG~WZdL1VGzF`q{#OK}?cA)m&*s0z@NYu@Ut@gBB7dbt&e09_?t%G0ySKZTVCT>nrp~2sbC{BJ)6} zyjJ2vF3FD#z@UB(NQ;-hKo|Eu>R$kARV+sa4RvZk8o=?zztP5DdAQ#=-cQEG{jkV9 zF!SkqZ$@$1@A?zfA?VjygN+xtHRYYIyswh%wTtaPuZ3@W-8Ezu)ZGwTb@OjvimY(a zI-Y8m?>2bLU5^LK?QKVjpH?p1rmRzc%(b4>fF3JSV<=b8ak`!ITV;ZbVX~`BOxcc?=AUZ06n3h0I`{S|tKK8I#@aC#z%MnTiiA$x26ca7;1V@-?~c+fx~;oN^o++tWES zd)r>oYBJZm_0jg9rt%Z=@F>uc+)`^A+hmlL2;4Ry{YL0jEm`|`AOOs~!pt@8TmHA) zWjqjUFe2{(N>&u3BM6^wE#&4Fk!jZVnBwq(fg_fs&S% z!H-?DB&-^sNq3HeJiF%ZyL@HwazpurQpE^1jw=K?NzPmMH|7d^p(mr^K?>?lx8LKq zP9%r~#IvZ;9Wg(YcE+h6R&O8a$t6D~665vZ$daT)MqlRs&;ZdJdv`r|&XtR;&vgEb zqr|abS}S<2eqnZs7r_2hl;%dk?t>Rfp1@xp*Gn=|s~WX;Zh(QiNN3H0$>g>uG>YbA za_fsv`n})qSQHgmHP`8#u^kv7tz!4F?`;zG<~~7sok+*F?w>tad*JkW-6w04WfW{* z5JR3WGkKQg;cDN+-fNlALStm|nR>_wU zQZJ;h?hwaOEUPIg#EQNY5`&hCkA>2RL!JO3p+ySHN2osmjgJt3@O+wL5`Kq?DP1sa z43a2p(q`MEo_wH6bVq?tyT1dQzw)=x(xc-V_E>JqqshC`x9E;F_8xS-Rh5n9g}(Vp zm*iHW-kAE=dlmGJE=rX=stWbRQ{LRi;0*eH^8;g1k@{<}V6Ng0HC*Zsev1x=Zc>TF zJPvkh`o?&sg0z=R*<0-xV*|-9w;ALrsx#=&W~~uNHjqi8llD@FirhW82F}f7>{~pa zyi7a3kmZ;W!Gn(2UgIsVz+k(8`BhajzUh%inofy+d}j0dqHj)MkniwvecNU%;Zk(W zxb`6T>U=w*mQ0b4A#IVKW5^~TCdK(;iA=6upTx@el75(exhwKJSP}gkt8+`+a&tr7 zS+2EHO#b3S3mz?!!C1aquKb$5i){8n5eqQ<_}~d1+eqM9Zl1uI!E>`5Cl%1%b=9j@ zkY6vgbbU>Q0*T1(Cp^aMtqACE8xb$yM~Byt-uS5Pxt=u24N4W}+WI;%4|A@&A8H~D zf~;Nlq~2dK*DJYXdsPM+i#W~AzQhYh&(dS9t*?@Vn7Oa(809e7Q9s0p?OuR_wP$45 zCH?veh#CKivI)5VL&WhR;VS&LB-ve3!Q{g>*c(>{=-f*m+6sLpy>YwI=jmD4&@gT5 z&bqNXf)=$bA#{2W{G}8#_k{%}MI+XrdYP-nn$}?UUd6%yB?6weH@U(kPv?04t;kqI z@pobAMi4@-l|L0kQ;K8G^6LvqeC=R{#zi!UAUR2Py|zRQYEDTU%SABmc#5N1>T_%0 z&3gNc2BhwT7)S-u9^SzU$PC|WTzK}Av{A+a+y_s& 
z)t!x0iCT7?4X~}|7^1WE+CnLmZ%ecHX@X=#c81udEp(%kJWwHH$&x-u)Sv8o!-8t& zhecFf_Ap>O-JNd^T<&Cm0ZFpJto5TX9J7mW`WuO&x#yf0lWg+!Jq+7XAmus@QOLH? zO3-tBuCX^{I9Z(I^Gy69c7UDFopG=^X3xk{o-b?!XI>#b&%vjTbS-hdTw^+$!ZKJ3 zc{x;B+M+_J?>bp(5$pzcqU7lWn(wt`EPo;Pie4zu{`d$sQ{6$UT;D-7*;A_k4{44~ zaIbs=w;($o=Py}kQkUcQo z8yizldFidl->mmoz#;z_nQxy5Zv_TPBb^?o?Co3q1h(qS$QySo+ z1w7%JF9Lj9`{)jlt(LXELF3uAq~p+~)Ycr<9qV$<9peinvO6^tKQ=i;PT4Q+++RQM zEo{hPwcOVONd?Q|Ziw?PaDkvD7|Noj<7eK<*#=c|*tR^mN%)e6YR-At=btOMK(IbJ zdh|YwT1!25v?0m+2J(sGjZ{L?R*6q&rtqgc0D8>lUMMpEiK^520h?M@^cL;+SKKH+ zlQ-!-|0(k@wJUk|ndgyOkxOCp&8$T*$0&jKtLGQTXf(x*mmr5TfX|%Uip0j(zIXY< zd8)A-GMTxP;fe$t#1{2I=Tv%wE$a1>epGtrVaYApo-k}u41UL~hIdP^?iD@Jpk8vJ zTs)JBybdCZ&|br}F_?JXMES5<8%!(-xkv(2{p1IC|I_b*DKNcCUvsj0-$l{i%Adn} z67}Hfw;>i-nm_BcayjX@$xUDP$XE@JG=S&4wr*5ZRXoysyEN;3ejB{qi8qnsfyCKl z>t;?m4DAnKtO^O~iT?OI$>4Upv8yxeF}LYF-5jU$sO$)3+kJOIDq4(Y{tu)M`56=! zhI4gpx$0TmA5Lcu8uI{p>#*$txtxA-N-BK&w>y`MBfTrfb7Atn*Ac>au4L1NRtqy4 z-g{?f*<$I%S0j=;W0ebcDLuGqq(Ce2v$ivnl@9g3a`S_FkfmJSP_fvt)AbyZ{@fKp zNAI??Qo=G)H)OKQ&1DwQ5R&1m9)OWAOLcD8aGa$-k2a8BrW#K;Uj{ZP+-h72bceBJ z^aWinAzi(6Cn@^jjWp@PMMNzUiNZ@>hSou~|81Ct4#iO-hE<_oxr(x6k$&uB%bES& zbm181^=C=DtCywqn3DXkoBq$AXcW?K1xTsHH(c)vmKQdFacsH2j66M*hk1;{e#22@ zC?7#CD1yD_9A)eh3YVczA-pmDHP*lRdKn{fCw^B0Dp}(|QRhnVU!^2{ zr>xHrU6bEa_B^AAanC*VR1ijY;=8iR)$+Wi)aw2tnC75HnR2!Zj&*ih!FnC?(bxWcl z=Lp8qm!j9Gug3wr;%{|wvibn)k0Su5XE$w{&-hEX(4Rn<^~&TJ$zJ3z1~s;?=rp5tAe@)x3?bAqf89nH)6~9KT|i8 zLES!1)z7F3^2E)rqYm<(4)=o z9;?2z+Zgp+qYZ`L`{eCEz$GYGy)VM>d2^C?k7TRp9Z6fxN3~O6ri{E^f_>Oa)*^2{ ztZC}%a~>J%H4zNCN(rb90^cONeG;pg#UJY%iLE9wWUp)j`0SJ^by%%BAkM--%i7+6 zAI@bhGg1<|d9>70C0sgIN>gV@#&r7~XpE`G85Kco_!w|!xCmIN80lDEdU4jP$qyliOB*>!|72#j+xIdsQz z6czVMP6o_B@<1MWt@~QD?VQStZXS({^>ijN9%(>Hfj*wV8|*gBrJAYAl}?wdO<6Zs ze}9>0E{OMyR6IGl$dhXOQT&9OQ*5-P^htbLZ`^Z#`G^60F1Y$MShvP!ETr&qp+eFX<27>CDyd zK0YhQ#1VDX3J#Z=XUsT4rkJp@FVZRJbL@Zc4klY{hdq}1%GlY?=mWzJyu zU3Y`TxQRSg26BfMEagKMtzpx%QcT&wF*`OCryR+m#oeudD3$NW&P64eg>anZ zGIhGT7#ZZ4sahY+F{{^?ZLH~>@xQUz_|cgxwL&(?2tn@K>Vz`UG=J+I56ZKYgMvD6 z(P+so9vQWuX$TwQTATL1VaQX{W+e^qI292_P`E*$r68HC!?@hFoJ_}smjUa zXd#+dLpHt(tDGyfaE?Vo;MNy$7tbtQMU_pW1YaewAP2~nZucQ?AcbOMMQ@ zUsoC_QWQj+t5^%nHSQ;e6(*RE)Pubn2Ek)47!+)D9p0>*6R*xpbCsNlRhAF>uZ44) z0X06A1f}>5k>5GnqVE7P$XNM{a1!({n`P+m3#jw`2cRAj2-bG+wTy&q?nz}?5l3fl zXD}Gc2tghmfJWj&*pa9A-i_`?CfEniqqVe7BlB18C#f{Bpk8MHaJx5?Ds6s^zD1_( zkE0t#Gv+>gQtV^!MR3x2<-3jSPCPvO3t(~<15e7?AwUoZdvpq6Bq+lu3n1-8BkN6I zs38)tqyJFi<-OI~^#O19jYvu`KYPp@A5>q*j-1#mnyMb+A%+~mV+%*4*WDsRali=7 zCdBt{+87O&H1$&_ahXxJH+VcWBYruQCiNgO&tmW$wD<_BiXfbXLg`EBF47F zT(|Ho(C~3oBJLiGQOgG?q^}hUbZmjU!Ps`SN2{J!Y5g88ZErmSm`5eTshY_=bGOD} zd-Jc_N39cYZzTsFuk71VA(lc(e*}-wgp?U|H_W-*w!{^gX;Ms8n0dr`L4HJLe$W1u zQ$gOr(+lf8EHd$TmA4B%ooladHxOwu&t6tWQw~r;x@9F;Cis?K)A0%LIO(?G)F*1T z{)&0ndRusAMsgBgCpscKfUuc|(RL?6+IXTf)`l~EH)VrJVz=)4-5ylzm(#q?Y}8W? 
zMqD_~!$CYwV;~_t8hXPC;R1y!3EnCL=r>O^^N~1jh_~ua9krkf5R1SoCd?GvvsW9p|4PwKV4~TnA#eU5vrB> z+|7#4fq5VAj+wNgSoR&eTpC^#L`ST;E4EcY3-$3klaaAnyqcbtw}331_xVj=I=+LD zqT$;F(;Cdr2|xy{93sMDTl?5^iDosz49ZS$)T)7Ri!9#uF*MP=X15(QlrzstBwQ#> zJ(YCq8p_yc;>GitJUbo?bl>ObiVo_}zI{8EfP9{FjVVcUsuAR*RzfpDb<+{He!Y)2 zZ%{dDYP4rproQ{JUQLwfxpx&Rp{Pqyy~nagQt1G5dvf~FLCP^4Nd@Af7KLMz2W^pw zp4C?ShGKDZZC;YC{8_b=r?96%L$8N^t%Osb_apEQGGE69_sXZeNDRUQ6}Tze%-)O> zEmffuX5>X?YQ*n-77wYX$#H-YSNXmC^`Q5!q(%kKPmmDu?LUA7pIR@ssMu&d`hrZP6-Z_$Fe#%;I4Nt_8XYnDs=18gQ1&W`($ z*l((iRQ%gE5}ayYGNcE0lim3+_t6mZ)%lng-OXzC@=|RCl%Wpipmm@?zJx)hnw?%E zwTb{~`ebN)THv7Wg{%C9A%F6EC~9h*JF9ARb3Kpbf*EeAd=l<(({9@<;s92#eFoaH=4$ zi7vQlq9bayXxRgUZ+PtQqNwXl+`HQMdtW0#Ap0Oma;@>5`2!;!vjYuM+vYO7_Stre z+Tn@WEn7UuTOu)Nb;gZ$?+cXp*T7pZ((guEI7-oSUTHwzAvhm zk7il@Bqu^+6@;uy*<8CeDO}oDo>2jkhYPM-JS+kAQEJNFfZ!)@DvsXrb8tvVvL1be z8XT=ec)16Av7{)!&y}q(`qpm;VgKN`_{sVU1%tuVkLF+}s%<<5oR zuN`QJ^(Tk&s?zFt3nLa3ZAi@@H{L~_S(Key$3b4vhZWr9#?53=A_v0;A&L#yk>++wy?lqT z@gI}U-cdc8`I3cmGy-2iMts+6p&aA-3~c{xUT&xC(DO;&zWU-+d)U;39L*u$x{G2k z-^U<~R2rYOOvzX4Ypp?hi5N-#_)R76pEqaOW_mDVxIc9%;#aF!9N{Ee-I~hyo@+U# z13R^3h5#YAh4$o(?v&8pwEnOXs>}M$-L+7-S2@tghmWA5w3TVW*+gyey^l6DrPK^~ zykS0U-1(a;%+L)l?_?1_%N8*4F9(gU5J=lIWG!MvE~U6QF2y7~0h!?*a#p2b9`V6i z--#<6!h#NC;E`OdT`Q%143YV|D2^Dle3En}aF^`dBi~K-uw2au2hFJYkl;<@-kB8g z7ozSqJkv16sz0UjWVd$pR`SBkufsPY$Cd;t6suN^BSOl-$`w(8ddvVI8YL7@auOx+cJx_2n`Spe9p$`nw1Q6Nu z59ovR`jt~T)CZV=?rhOU*bquSW7bnTdV?KA!JHY!*zD6cUk_`{GyK#N%IqfJ>IZX8 zx^b|5N+{P~Xr=7M0ctJAE)mh&&bnE_!|I=A)?Zl8y`R=5*jDLl@aFA$MzaJC8Q_3t zls=Q3Juy2FtU6ndN9U`vzWCt4Y+@F)EwfMR z1QUaD$~Y^O#ggv&;yKK@{dHUvrO|BXiesjKt#KKkdB{P)0R?6Wu?LbEH$*ibpoA*= z2;*MAYadOd$P?`RyFNz$sr12ojaCPx8LQ!0P+2BDq3+<7*X5~o7)FFUtU|MohPs0X z5}js_vP`Daa%&08hheJxlB+LYJH9(}r%irQW;At;Y^!2w$6(Rb$eu|h=Gv7lb-QgX zUrM9)^>D@4d;gfdm*ZFNk+gr*>^=KI(z+|S6J&VWmnO=7M7+F$?AX(5Y767_yc^Fj zG@9zdaW8OeI#1QDQ;Z9#5#i>mcWrOscY{=wWmvvR2q@bU4~6%T%K6Kvvax)^V8x;f zwUs@TAK(mb{+lH%RKo96J;m)$e0tvz=Z$0~f0sIo$Fv2zV-@XJRxl>fdGv%^aKCJX z?oV;FPMC8U!En4f@x5&XUn$Gwj(3$cy1*L8n|-MLOVO^Vd(2^dG7w9|$D>Jbm2FSF zym=rUx(%=G?H1-n0`(-ryRHNaY(5IS=tUz+k1v$-PJ$z}fOKhOfX2C-+|UcgW6PmD z+EE9MON1LPRG6s4@1AyF5CoPHfx^$ms(_}l*eUF=dAJjzK)vka%SpACNzgFtTN=K4 z@TZBrtw_-T8;$GtkD`%38v10_ry@H(4J644cA=R6ZZ^w>SJatvhwG^Y8RGi87r^PE zsl)rd^+`E#ll+XkLL<k6hDI(*b zOlLh!)Ll?vRzX6?tMj}WRb%+6Ldb#Sbew~=vUo!{YhLnP8UH#!QJJQ~s>-C&Dn}z<} z86!l2!?*^GmBH!v;L;>A4aW}*_Mg?uz0cg@BNF?K5(X`&^2v;VvMe!KlUYIng4W-t zc?$Q`ygaH_`v+>?U$sEqh`*I=k6QYEa{vk7QHPU~AnUn=MW4Pl40VQenf&HWOr=0* zr~w_s<0>cW0Ld!o@T`<$OV}QTd&wr#eem|2BEx=OesKv^tz%Jq2!oILQLx9M(l*ke z>G;*U+B&UaQxpZaqx+h`!no)b@3WJ3pe4I#&2_X85-O>Wk&giZn7622K{IE5-3Wf5 z{Xh}nOV*%T`dwq*5M88Cuk0agut~Iv99rh6U#(uYbOz|{HHfR5PtAIYgSAp_Vmo8T z!<~xR?e(VnNe^2%6L?}o0jP2GOE(BaeVt z5t%65CyNvc{&G*mfP1P~wCJBM1jc5A*QSbWit-C7(q+gd-MF6;KBLrV>>W^M-=5Ox zVp*&+bp{{0fs)qYYWGY0S@nZ_xM~teWnnhfcgWkxLU<%nNd8l(1V^D5_^7bugT5xjxu=v3M}n!*6h*x()1Aa^(ev%snL zV)6?K1F3Rf-B(M7%o3Z9w~%o!q1vopfE(PIJprCbukCGngVSwgphNvO9981*U@6o77ORL?lQ4)7jKDJHg3Zq!I?sB}u1ciwn% z<_1c+2~`7OcOu6z%|58UyR{L~1b+{8xY1K}uogGW9c93-? 
z0+Qy!<>n}Bo9T0e{d#q@*bmSwU0+?d+(LHeGT_{l7)Jn2ZhH^VFK~m(c>f?weqq45 zrKb0nJZN|7uNsu$ALnM+Na+_sMUeCJ+?^RdNyJ6Hf>YFC@cu1(2_Cc29&Fem^M zdbY=}5hY)_zx$Y44A4xQr~m3Zf&rxmTw$(t5kJ`$ z*1xJPKY@Z8pUu1gtVKlk>4*E3Do9w8yY4FVTVGYcn3o4)MI;!^+FOu@=Xn_o#iLY%jk+O21kJi>G ztW!=c=mL(Q2=eqKSE&o8*q3UP+i{3xa+$4|M43{HRbR->d;BjwjqvzxyU_w}?cK=b zxI0UY40t;{MX>pb0K)Bg8zQnc)2OJO7W))|qI$*1r$*^90o}f-R5O^+a9{45%OmA0 zUe?Q99qZz{w^1yJXe|lJqLct3;aoNkai4DS2q9I=lNJ;@(2msUF^zHrGll6$igq(I zQR*HCdB^6oObbe33|Ij<-a0fgfpunpAzrx zZ|Taj-UrnOp^(AJAVGPJ8DN(8Nk@?V{?jEkcNb$1Ef6#ZP%Fs-kT0zO3YkXh*W@t< zIg@u>Y~?23a^&go_p@R;XBqU|$;(YrV{j)VRRmRe))Ou^ zVez;Ks`!SPoi{vV^?!t27wycs^as1T&@MDqC~1X5XjnTwCC?5_C<0Je`4#~RxadNf44@2CazwLBGFSm-B=f@ua z%NCkB+*kS0>V8!BSmIotRW1Z}CIAD(?L-L)aoQ{^&l}|S>d%u%l~n+C1{ zWb!>XrF>lUjR!mgqm9 zke^$g3K%tS;1H2Mr*Tu$7)Ru*L>>N)cAU1CYzNjU-`rn=g&4mTjh227efnXIQZkii z>?|RtCpODjoqlsEK!_7>IwK6Ad8p zZ1`9%I9}IdjRZDZ3mSVgER) zE>J*pa5iRa5uafBB zk*kdg6tlm^vo-$?)CyVNWvgS|vk#|#=yHkJS+A?R+=|bQ7Su9%dxc=RtGQI&ed|Q$ z^f8C(vk78frqwwYzfMD8L#m@`4JWH}r;eka4}zQZ_I4_xDcFc<^-5%ddWrDj#Rk1< z7hx=LlnGxKIG6JFNe0Uw!q{sF-~!fa9=JJQvkor8s4r+Z~_e4CI zR;*vON|xiI3#|iO2K#;tAcSgx*6@H1XbxNPeXZEXmBX5YObU$#VrK_)!m7?&P@3b0 z{6*-MizmQvAUd_LhQ)EJhgG4sXVR<60OLf36ewAYHb^)yw$L?voPI$lrXWOmT*99V z^eL9RkE|o-*mP-jVh!!2Xo^ibG4n2uz z$OMSZS#Kkjlf|}InGk{<51(D{!qddRri3+c(2C^TI+ot9jNGl{7+3ld9E1Qq+w?iz z>X_1*QloA^Az7AzO>PsIw76Lr6J#TK+aK-s%~JhJc7DA z@{BjHZ?oF`fpHRP0RA)+dTV*CVW@U^5Vj8nVb&HH{iPeK6)K%Alad`spq8>Us*6|Y zoyR0t;H(dF?gIpJFS-lvfC^S#hd4R%++aLshfe?;#xAqz!joWfmrmIYco?ySPe`jk9;D)r8y^wjuu*z|SDmnl5Qcc&xvR;q#S8Iz?17ZK*}9ROj@ zF=w06STC%l_4W1=99^a8lp!QnlcY@ytW zV{&W_va$PgLk(nd>@P=DyBl^gw&!Gw;3WSSYCFZk9mTu-a)fQARCnskVDf`Q?Nr2x zYtpGwwSJ-7dPtzE@>7zYL$E{?=aiYU;WL_Xn`fH^)|65$u_~OqB8*(N>vp#U5{2$a zW~6g0A2p13kfeIT>lWdV(oO1b8(2K`AlR|fv6GxZjk(In?!l;GV>0qoLe>S{Jq^_s zTxY|1CUKt~oWm%c68=gnl&X~hxdh}SAgy34FZQ=y0oC`pJ(Vx8;~o!b1<9Z1j)2)S zzYnA_=T8!LrT@lVQ00&-EvP=FTs-LIrbp&7mUn7OvmXhhW}jC`2thX!dNMBD9m=@d zBm&)}UeZJg(YtiS@b;=Nw==OL6I?{Qm#c$_oM*dc0#gpMtgr(F3GKcFttp5`OfCnC ztIhMqly8IGNk&7}TYeUs#usipu{eEdkau->!|jkT-b&2^*{QGiuUdK?%35a*98;|4 zb@Z%`VLNlfCD|n+$;*Rt((aN0M@IMvrBJ|m=9n$E3m$?Gh6CBSYF`8jMeoDKWwiR3 z+yoP?uxS#}{w7WYJAuTupNDhv20#j9KWg{r)peNHX>S@wcGQ(7%c-ot6@-6eg8feE zyW{Q`3)}WN`Z`VmC%W9O<_eSG@+zogb?39d+^Li%kLQrp&pT0D;F435(x_hLsja=V z61`r}?9z5p+y<&{p3O4Pc~3g5*i9@dY@N0*2)5f+dSl<^fQ6ZoeH1?7sddsy$av9i zU=c?JwBE-(IGH#E$nFZVp43-y(){YmH#~f|$T{!xjlE@ch|wIUc}MVrJFOKbE9<2- zyR|73cj01TIG0|$zzqwZ8~LD?TYP%sUzn@x@QhXt+uXrsMR>YEPYRYZLu}&t?Mg7$ zm`>z&3~_~xYB$l0w!7r@lQ8XNUfceTI>^VGQa%DGseeiRYYI7K0`VGxDV1H1U`{#Y zd7`SQ{ejh_$Xu5mTmHLxlOxNK;MN57A*VO0@!dUEXPw$#%w{p|k#=VkRqc=LNLlvB z3iL#ap=fHDW390fUE1@P%d-DoU1F;f7-H&7L=jIKx(IoA5#D-%^n$1LNM#xdw<6VUJ@^MKQ2b}tJ% z+W@ZAI3<%4kY7O0%@qCo#oy&dw2U`qNZbJvb&wSt-2zm-XAEA$bqx0dbKDrMK7Z#x zU4|<1PRS?mLd-@Zvzs436{2 z{3a|9E{Yc67+H^e_nHi?nT+p=ZR~-)oiSFp7~fZWb(u4P!SLmKdLVm8^Ha6V_mGZ5 zZr9l1zscoOjcG1SRzILNVs2&cAYnPL5nipq5*R&3GBlpQanWx}lo>s~R){0?>uJ@i zv2s_SJS~-xs~T3}o3lY5d$SGVXA+Mu9}Gj>ukabaMlE znxgKdCk7`8n;>PY{p2@jGFwY8GZkfkExm|y*yD$V9ZDNC<21F#fl7G%wV&-J5907l zl1cFmBVP;*@{Sk|*E^oi#)Pl#(&Ldd9es{b@F5N+PB6=Ft!H8vj*VW6G?ok=MH%-qGskQM85f=cHPoI0_m$fPyXgnsCkmGa!9C8A!Nw z&3cE9GgP*iDZtdwWEjxvyLhHt-xC`)1+(+0#IS;Mp1-%DQq)+F^nT^|>6?tb#hJsj zzax^xELR5{iS?@(1`}Tph-0B`ahmC9fMOq0hU!Iod`coOi8?sexhu$QYd3t2L zx3E3~=p>vfabE&-gK7I5sBB3mTB&6x0HYvspwQ!!r#3X-sy(%9P}m@LwmCa4i?{p? 
zCL8%M43qF1Q7>+;ya7^_1s3&5l%!ytI<0Qw?!i!|udg60Bn&Kpyw?Odq%x!y{A2ut z9iVD7ZMPA&JvJeq$3`}A+LqL%2hVA=MB>SI$oEkKzINPTkoPeE^)kh(zswkl)Q^E?OoCG(>iL11dqf5&Slt0iIA9*x%vdQ9 z4LEMpyE{u=#`(c&4&gU%+%pJ)p%UVeL*8-k@^gSv3)We)cXIX*v`h*Zc47b}_Rm-| z9Bp31m$^1!jy97T5>cwN_n%r7hwH~1C*oJRb3f*-#NJVu&|IVYAf9_ZfA`U9i=oTZ zBjmCBQ`j6x<1p&a1`gvV@LaoQ&xxOU-NFxWC<8MOlxe)3s;&SlIeI!}^CCFb$HQh2 zI5d!rV>64wC0HONsx`9<9(apnHR)@Ex}!R%~B&QVb9^?vOem_tme>cNxci7W?b zD}XE|W@oM5;bvc+n~hyI1eMwyd|IwQG-L(`@dDBo#v_ZCZBp~SExxDg*=AZW?X51r zo89J_vo74+Sr>9jqH_-A0{Ae=6+5GX!D+$b7>mC5k=%C3dqi2To5M7FS$~+N$iW82 z>5UD%Jab$i;?kfY=Tcx3sn@Rka-wTx75O#ku*9YkwB2&jlSeb|m?6(z)?+&!UItiU zjS)=9RCTL^S%E~0@h_Q!ngQ(REfo4obG`je&zuC@=NhV_PRE@P&w~I8zJ!Zok%&gM zDJ)Pg+}Lth^USdbgbf%ZwXZ&zvl(%6S{Y0#dNcOX9yn(rfFn6 zL!)u=Irsk|JypSBFnxYx*^#~X7Q@Qzyv<^OQ}qRyrXh6^K^bBiCRLmbn1#g0aklHf zj@;&juq`hKv|P+<86JC1V_gK5%#3?>&)5}iA1O#P;7Y81-Z9p$fKHhc=y3Ll1B>k> zKxWkD3LM}7$N5_5oHH)hh1YeVx|7>WX9WeH*^5l253iX0db(UauQqzF(0a@Q(y@t% zV6!3Oo@I0NEmAUj=Adc>=Ef|MW2wqqKP%CCRBIiZ!G87~6ip$_VN+dRlEq|;Nx-)e zNfsU)r%@E$1~{a8n}CEx)rriY5w+BE?}4Ib-2nC{r&X@5D0|yT(0I1oE#49pJ})n+ z$ML2Y8|L%wOii5MX)V*zpJOR+MR?bo))>Vc2|Q!l)U&m2k_N?}CCuU7=ifFoCh#_{ zj2yC4t(uV?3VWe8kT!-=N-H5q~*8d@`XmwxmRP4MbnB*(t=5 z>$}jsY6iyf>Z?6#3ToP{DEIYhsW zCu^=)@;`-{HZCpxlaJqb*r!q|#M+{;x^yJ!c3T|flWgHvA)CZ9e=S23K%mvc5+;Hm zx->Fbx`kF_BIqggA{5eU%rEB`54%e|=X|%!bMqCN)V8xN1`MQm#xN$TqsRD_Y*|o` zGN~^35-7h1IVrf3$@*XdrJ>YUM=Go2Xq`=$?k5BC(dzbYtjdj>Q6UA3GIg8Z>Twl0 zD?`r7i-V2GQDXO41_tyFR-o9`+KBQMuOsz`b4$q_o8G!`9m7|B?4~ZhnIXfhp#Cf z8wKyU+d`CHm!z8+6~8()s%ImCaV1Ht=PP*io^8r#wXpzSmG#nj95+h2{NNkDtO(|A zhTGl=1zfXU2`kCUhQ44lf~|h*q@`C{FlC+uGx#~t&8a$B2qzF|vO9_>9&!il6yw}X zkloemr(q(M?+?)zC8h_b6J2=PUVWXW+9i*5f*~MC6%b;GI^=m0bt*9)O>1p>z{eFN zdKf$KyuG>!o4hWS6rOJ22emUmFKGbZeD(mcV9y7~eNNyx%_z5TDHFkAx!*qor+ji? ziL?ZB`dOR}wto$bOW#*K^3gu3sFH7@FseEKL3@jKUKylnLZ9?Dr% zhWT&W=On~F`uCs!%g#2u(j&Q)BPrQwNe4<&-_Q zOrWT?d3I|W20q-{OzW}l^wTE;R3SB&sh8j}>Ljo0iY+|+{zC;WguCIsvz)q;-`OvU zo7=MB&w$eXPeA#P4kz`?tRaV~W^-48UzzyKr{Iu1d4`JOoy}}UL%sF#V!j(QVc){e zSCbl;^_Q$6Vc1I{4B8qQ*Zipa)_yW|D- zjobk7OL=u}I^#=$7iKJac4qoEj=t%a)y>7Xy(P@;FSwJ&$L7MWvUpl*N<_8ALEo3; zF^_lZ@r@{U9By(|r(XG8ni0|_62(Bq$jxvD7q!UDhx4?W)b#jklYRsFVw^06X; z5s>$KMKZ%koFZ0ZV=rNBgh$&-Gs}nnI=JdC};kXvC9W(3i<1;d0hq&Ep z!I~zEpPlnK2l0kY80fwLd>z*x2;Q@Hz$kNnaVyjT$WNsOu^->{7z6D0S``|JB}1Q% zY;);;X~1BZ8>I6*Rulv;N`CpQD?*P4UEKLYyp%t7YHQXNG#m8Ld@`ZO{NAKYL3*=Y z7k7%FYG}5WH7FRx`T$?eKF8AEIF7VTH0?#6vFV*5#<=M( z39$c;*~ zm*Q#+wYk4(kDH>6nHGMCMcp*W`1w&`YQwC(#7KR6bdAr1Z_Z_Obyf?P$2aOi*d5+_ zRO{!*hzNVDc>~F-73Y^JdtyMNJo%CB`ox&vPM6}y8aY+qd*W7xT|?gkzTL34s^{Au zBz<=nF!U4GtEkdr{0&UjQfbxp>2TpswR`<6279`1cK>l{~Sj^;| z)J>chfyyg)L2aqW5+8xkskSQ>IfP-M1r(Z^hpNoOu#&6gEBp}xuH?E8JZxz~9a0j` z%MI~^lFGI~Q}7D4RoapmVP^}jh^F5Ov|*Q;E0?W6NB<1DISAT^cXVDFu3@Q{7!U%% z)To6-s#8a>hphLgVHONIjkR=1Fq zxAi|Eo>}tzD#{+d{4^%v>wl!DPuNbOzsA$r7HQXj(2KhGhGyC2-jGx10~<17tjpQ; z+Ic^1++Ab69woC43H^gauHCqvej}mjws~#7LPlr6TU;UgC$jPtZ`K&03_t;4!9y?`i#BY=TC_{S-`e8&nuS0Sz2IZUH=6Lw&)3P`0-oeB?1l%MVC)xB`z0pzrdH1)A zV*Qpm35gjaKlRvywg~A*2TXBhUnigp#QC<_-?|7rxavc0ky3rGY_Te5OnC7s(9GZ1 zcSnVl3BP4hpxc+P6*-WK&W|_p(U=`-(f>WX@uEh(BcsITc(`bta-B?J!wkmw4Ud4N z(g(BSu2YRT4h;lNOYdxbIyVXNEst2WFMsDadHS{cP?DSEN*Q$xC}V?M^R^S`gpzE3 z+#`mR-S>*)L18z>N$;n0!^jfVe#vP{rBD|KU0i60bFS0;{qL{fw=9=3gEVv;b}ynTYo}75e>j0@F^x)f?T=vc5GNMDJ%l zjVY4_5zls#uzuuNBBB;9h!&3>ok$3j1WcLS1beCA>gLf2{;DB=T+0%@?Hbsrr9lj% zs_%!*Fz3#*8AW5YG6AQM`x|i5-!T@sFQ6BjTVNPSi&(6zsy?S~EzGSM80(qAn;shc zP~z`4_YQ4|R+BoH!CZCGcQP+WT~JZS;+MD^TQg374QqKwy;ToKqTaZe7f8KJ)>px$+)T0|5}AWk2gvLAsCDPu6-p za)+#ZIv>a|BUY-@Hg|#(%t$XPQrwn9IJ=MZ)ydjY3iy4V1+r~H}<+uvw5*D 
z0ZpIp@?6}Lg}KA?B;J7(wlNyf*y6gSx9t5R`|25aoxTRHwL1%9r{_qU0nGk>8}Sf5 zddFIUeYQ*CNnJ@i!iwEhZc26}4a~;P@KH_(y1}Hy%KHPVy@;a$o*A(>N_$18HJt`x zJ9-Qa%8k{E6%8Nx-TC^z^=X+VzU_`EqJTY3)5o6vgO+Stf#to9pywxE+ynC~?fFW7 zD#@>ML;*0vQMq9%)kNV<(CrR+3|u*8QT5IFqoldD3X3esOCPi3z)mtItoyH&{<_nn z0=^ZhstmR~z6su^({>SB|E-gS+Aw?&?VK<&rv3InLa6o5`1rjS0cwd=>AuW+>sePC zSI1R_O05>t>j{pWWDiALiVhwV?78|hxI2vFEd6K*n@+@)m_cXqm%ElJ6`Q}TBiK_| zemQ6VasT&q?-q3eMia#w%=Njv-#H%p@Y{1Cy5=K-=U-!4Tkc`;iT@)oTq-l{wvav$ zE(3^XFbX8Zfe@Yfu zR!+liQeR*+pFK6xC(y_iU=*aJ73qkLh&bVS)l_JY_)j`tw11!ryNQP1a}Vf+|2Y$b%2SYAE~R^aO^u*!0xekwXC*vdt@fcqo#|$nR5X#=d;JW zynurYstF*w9iP)P2(_1*`RYgSv!em~$bK9%`@q_p4-A;PVtrWFgpFS%ws?FA|pixfy zXm$7wt55b_Q%#zpn^{L_80yXCg7(k+?q|OGtR$#5R823(lxLJNAD@KkKeHo4c}O$AWr8pJpqf$I0A==|6Vh%82J1%6WUk{ zA82(pob@KLT{Ji5vKKW}*q+O7-pp&{+H}#L*iaT(M)s=;MIeC z=Jb07>!A*TBq{({0Hfq-{*9rBlBa zJ29vJs`6#&(iaBKDMc!8!oL{{>OMZ_hNVxeaEpS!G~+4AU44q3z#*c}y~#cL#W?bf zXDREu0$B`#j>TKOmpY-!j*|MoIAkdTNv9vDUbZe^NsB03v^ePmpb;>Xq^(w&+`BCb zqC2IDYSL(9LDLnvBl}rCGdDOIo>*WqWB&5>M#R>%ciGY9j?0O)=G&Vxnx%ro=NfEcVmnVbvy`Rqy4w93P3?O;iuBn%L#)VM=s@dBjB1w%J zHV>3`8f$$fXLvUu8^?sD6{2-yH$$o29G%%3WZnoic7P%6B=qH@!N&oa5Oo@dznF%w zI-F?kv(L}D9*>TIaC&&^UYsv|Cj?Yg#V+;DeRy|RA4E5ZWD26@yaD*C)w+F=;{+n1k7MjS>p5f0CnDl zYeA9^M}%<{70qStL(*XMV&lUM{#+&Bj^Yl_=D*L!DJBxsJYIz++@M;y6*+G6Xb`uy z41Owd1thHtd`GxBKLNnMFOsN#JNs7*hPS8E+U|~R0k2uSqZLE(|E8TWVLn6%c~sqm zcE8`klSwZocgm*I-@2^SwC_8e9n%N5K+A^BfMZt98HfxGdfY2=IPk=0Douwtj!EZT z0#1k4cKa+{N%L(jAQq{0wAWBeQlscjtOaDf3^JC1i!OOn{Qb`W5AEB-n8QBsW+m@9 zknd!YqyO=#Zcq~ENicxCbh#&q+hnAq!{--0Vq?)}PFuZjgHp9< zy`D7LfC&Cpb_InLGYO=E0JoAyqO{4v7xOi&z^-c!-!)Ks6bMG1%2kqVl<4p=w`N&2 zoHqsh*nVz+$g~qydrTO`UP8zufFms8#0li&u?gi z9KnP|J55UzkTtz&5VYEHg!I+Bc+G-k=^`zJWLvVwA_#X&*#4cip>N2v_nstS_A1cM zYT>tI_F=_NN^V9g@!#)7043!Oqw%@GZO7{{2EC1QXi@;Yc;agc$o>O#z_H2m=sJJR zt|1R}JWs&#W}kzlj`hE&uVZ{uXX5w*Et5|88+UHQ3_U9(4bheBAfu<4+fL~f`fy9% za6ggrgoJ12_bFh7j=GK!%6(9iaEUjX^I~__+mO=FP6q40PYgwpSa`E+afV57kssl8ax!M_ zmwx)#v(?igIlJpR_ihYf)q>0K7*V#}-$#9gjJ58gZB= zv*tw>EVJDW7#qG%OfRX+Ju>&a4`4lQIw^Cir8m5s05?g24&DZ#~c2H zc?eZyHTiI{@(w1KDY47d^xU4C6&|vE11?$;H9xb%zpdjq`uVcDz|f)zi@rec_QKUM zKX1WJ3Vm*Ny0EacP9P)L<%sp68pa^qFeZ#fLYXfmEtNes@OCUlTrB@jP>?(Tqm+(=iDCuLJj3e=j?uyM7 zvIxyEs?j&uC{E4Cb5gf>8MvoikJv69j^@tSl1ClA`kvqJNc|4+GV{+zy?<(xSE>xja2Jne5V~l$fbr_=UIZ~o{0@Kz`4au!suWz68x(!9 zhx>|bZ4Y<9XumCDYZNq1joo}D{r(ywQ@nf7a*a{D!AY!vv4Uuv(x<&k10u$pRQ5ww zE1R!Qy$3#eAGreqdNGgN2mBal!^;kC$WioyeE=Kd`A#Lu$hXqJWk^`+@{AuN9P9Hy zLjUZ}QmZ$hv3Cac`(~e>uq!q|7>w)}bRW2BNLEttCFSB)h=wD%bPCv1k}bMZybkd@ zEAj3^J*VI509Mf78^k|_DRMy6;7`a>;`+38SXgaU{m9G0tZ)elM$hGBvBU_UvUc!8 z5cDmS{dcoPm|n!^ACupt_qmRML-&b9{QJ3kr20a?Z_rE4LvUXbFs<(-W2gT60w+3; zGe<#(bsgq}00$+R2ANjVbKgs=)Pr9SYmC%Ewi+1PU#d2Hxkp^zFCT(zPNG-*u3?I_ zz8;Ur*B()w{~26paa`u@y`sii3b;K@MEB$waqs4#9O7XNuA@BWdrDF1+17bGfF(-< zuw+5NIMo3vW@tI55Fp4I85%9#ZowD)mnt#7LV5$OOA@H7c`xc7EO%f(O>g_c@=2~~ zjTDPUYp!7g)K{;xq;RO6hTuLV6GrcSDC&BijikJoA(t@GDwyt`{FGN|3=elBWh1r% zx_T=e7P*`Qlg^Y5>*=B|Y{=oeKggO+mtgugIdFr&xe3LDv7er8wv=Qimqt3yV;%b! 
z7Cs0zC?9q}kKYY&Rb& z0v`8Zv(R|l$vVyz^SA@+-@+bXpR~s_R#Q%e%bvE%s1uGU(r4*J{yaKJ>GK_ui3=>4SA&6a$$NnL>rvHQ~?-zFpM76K2lLtr@Q z2kS+~97l3gyb`~jSf|pJHtsf5o*sHStOwC1lhM83k9@Fpw zR25V{n%XEw=oXq2jfSwtRDzN*c{B%1+dzLRm?kj?3?_bVncXh!i1qPyd3RG>XE?2Z zxnoO;(#5K*GL2Wb-rQjL`dU18vGKAF8JMp3X zY((19Ta;Kg&ZopD2Kr4w)8`=9?H1ne4z~LDrXF@Jdj=u^Od#2rtDzP$h!s9w19`W5RpRb*pLu%as%bJe= zvzbAqGK_9%qjMWL_v%M}epYzyQCrz&18ypAIHocs7KD|%F{*icOlTGls;r6bc6yCT z?&bMmPHG*~57(E~D81O)hR1488sz6?ja2!JoUZ^`1rFd#fgi&h&I^1VqQC$LJ~R z!^ze)T`i^^)HyfRgr;DjA?I6s`b8C5X*r+$MfT~${=?6#b>#u?;UzmakjLfKAE{6M zaPgkdyJboc_Em1g>b=8toC6%!u|Q7fE00q-dIcZUtOYpVJYxqSv}_^AS@HKSpQSBK zE(2f=jmMJ2&(XKST4zxbkt84mQJ9P8-`t4HNj-RX&&9V<<(d!J&}gBlaSFoIy|Eq+ z0@u~l+I2d3AA5_;@DXe;%(X=$JZP`zI-; zaKN|W8d5BsR05C)d%{~{=3yniq5Kk4lPFoqEH`bh|Dk55cZ0|qQ=w~!A zQI(K70PFw3bKTT(u&aXkQMgJq4o(7$y2N7iyU~`7MX9m87k19piJQHq&CDtPA8*wx zQN7v###LO^R;g5o5-+^!Zy9*lW#oq00|b2J3k))`ktBx2`P1g0nx7U zzF>flEQ(MnaLM8Aky=`ZRE9rNNW9-ri?d-VO9&V(_M+D0hXt2(jK+`+S6AiQE4Sg#K24*KgzB_2C`|yS0_{^i#^S9YJSkFPpuMWT zyozN@iOGQPkI{zT>-*Gu_pht<|;5O;RWFr{s3m&L z!+&$k@%Q+EBnoyPKEryurb`KLy1Mnk&JOd8F~EcP^TZ$X;)aCSrF6Q zxJN+t5_B9lTf<2N58jVV;Hnz`p6*?Cr+pBZ7kkGGyU6ho`7odT-fOV?e1)WU^Me=r@Q~T-vIy}V7Zx0zheU)qjOc@RwpeV4pGQvS- ztP9|0@tRN0o=;K;C_zT;Zp@m)1Cq?;Jz-zF`|ar@UzWgJvbd*sc2cvDe1eAnHNkP* z{CcN*@J=}sV)^7hh z4=XLyF^2iayKUy@E?xkoaEsT4SkUM-@T8<^_%e=JV}cOw(K@satwEP6R`@S0=0EIp zPc;U>5_Ex%Vc>uEov<9|hZJHp#cf+)2X6kWF#W0>^Q*J(7FI=gPP?KO1%AhVH(*ot z{-P4?#ky=C=Tf`vc-;NtNzAy=W8@Sh6BLF_Y#mLgYy2#XA0y`+ttDRNQ$QYL+`BT ziKp`)_NkEu0Gz>fa4I)(6?YMzyl>=$TBMiD@vDGGy`Dzxb)rQ|rtk2aIwiuj)JvxJ z003!7z(r1#6<1zBKXoNmx}*(E%XYr%TjnrISm@}fp<1HrS}^4;6b2HGnpZ_5P4UNo zI9Vteg8Vn<`z8jv0?;5e{5;L6OXqE-=`Q9CFlmEV5!PcFxz7SE_`J^%z^!H%2Po2> zOugRYWhZ#fB_#qnVvl5`Q|6CD+U`ep^Bp=ljA01-oxfhz#LM_zX2`E$_)|y?jVlv9 zh1z?Gkn5hnNpIF)vCF@PFl*QB1`}c(_?{PRQ9rEI$-h0PfoY|v>K3!bJEhK*JDqh<*CEkdPS;P4G*F}LWI7d(?=&Y3gK^?;P) zA#*zY`_(_u7l{}cN}A8{A-#jGp!>k>swq}Q)5;C{ z9U|_l;0M9SHd2?5wrk+5t?Zp#atVjE+5coN4hldKeVW$fkYlf{f$7FF3K8EdbRPi0 z%YqrwWrdH;`JoUd?z}#KhB^W6-REiCHpl~d zS#rJ=5r*5E6DNAnfgq6^u4lm(cX9t><{-OklpQeEKN*acx|_^r^!rK$gvvMeLzC%h z<}~mn?maYlHVHh$b|vkN#Nv^Us$RU`QDieHiag%hY$*9i@tOgLSjkYT^lZn3(^WU zO4Z2OJ}p!}&3gJpt?1E9HpIbHcf*}Ln7Vk5Q8ee-sL4?Ay(NSh7RwxMdJmGc4UOcw z%b?suKXhU09T-QyWA>)_1)`ST%M=&=7Li3iw)8%%LSI*X2jH}#h{tDnkJsbo&=!Tfj&Ei{}h|qJ|vKuI|L#oR-101D*b5!$h*YO#&Vphgja-D3a%NEImy|0bE$T zzhzLhchBu)*5(hX1F2o|D^!H0e;GKBy`pA4Czd??{AW2RSb5byoEkwtXV{wib0{d! 
zJx+Vh?eSuS&FZ1y7DM842U4(e#0h6!nTE{&niL=ZZCFNDJ|Cbzuqad-VzypNj{0YdXEa#@T!c3*Ace`OE17%3;TSw_Gb&n`Mzwkyto=6=_AtFc49z3@bCn`I{A;$v=XtG&?ifX!$1tj!B87 z5tRi5T79cHSD2y)etzn*o&OH~d|EqT%af7n+Z;MuV^njq;=W`z-KeLPbxwlxiwApm z;@oV&>cw~JD+&DGAAcD0O0b*%9$!l=SUBs)fKWua7W%dr;p3cPPFI89&hECzlu^KQ zQFRiwQI6%McaZy^)sjaIJK$+$O=}xaJD+7wj-9;AeDM&Li0w#P$gV{?&w$^kvFDvW zrH%N)O|^{$`)1NBCnu*@u{nEK?W4EZDgeFnmq3UM**)P>=;iv=zGqj-Y@xMA1s*** z*QuFeUun~ZuAVkw8#8lK5`-hwLfelb^otYvLTg5sMOy$pV`}-e>O~qy6Y&4BGpCWvdR9Gm@i|X!$%31B~2a zzrAj>WhNC3u9%qc+^w2C3JfjmCy*~XbXCul_s-3T9-59W-BkOutY!0K7Cq?m2OJ?* zs`h2~6EfY5hY@t`_Fvuyz97>;*)SdZ!S0fe^9ePtXIJUe7q3{T9oE$vEW4}%;v~9u zbAf(RD=s!?2<)5pu}y!Ld$Kn)UCWwrBl67BP zFfqQj7KsBslZS?H@^hWiZ=> zZ)DpGdMxc9e8Jy1pVzb3g$L#EOCo*kD&Y21FkL3DajLf2MtKKlg0I_+!ev4{JrpwT z`<(52J%(~Jcv%0{X2%|n!XkJ5{LPokTfyxKMm6{NaV+RsD1}O;;hlnB()-v!1}o^h zg=n|f5cw9;tgemt75eIAB@2dxAa1*Ic$D3+&2?Y3WXHkQ!-Vkmcu6Y;6Cban^44lb zVOoI2pAR(PK-1h{wne_xCsC#TaImCsb!*hlz&xK;eLNf3=2|UBsR2tbOs#||BD{R} zXrYUDERrf3U>B8d=VMmpe{9^gyJJ30-xHT;2H6lN*xtwrrei{CY6+qOtV(wMR8DT3 zX&-LuOT906HPuH79oMsOm29<;21^n=^b4ML6bxn()K7*gj*uYRn~)IS;>Sx#$C8xA zatuTFQ#`TiHATxns%BbnhMnGFZT@93?sBKGeWJEq2Ep*QF@_QP2Ot(b$WHs7>S>~zXZiYzkq5 z@0^{H#<7+^`&ZBo`Ccxe;P`R}Ux}h7dG#1HTJ@V=v!h|!N1f3$1ifurqrFF*r}8_v zV1m8p%8?;nc!Dy-ERF=wvyA|VBf)EIda_&=qxm3_CsC)`TD-nkA19I(zhV z(r=xlLNhN|bU&+&b zvj^rHHmH;w5=zePshl8z+3}QR1mMG&_{XI{E`2t;<%T!I^#$P?%YL-*ErP}s9~|e{Y%=2b{^kCW{LSl?l967u z8Pu|z4?<~sbLk!@>N~Mk-PwgXj|D+P6$BZF0+rKy7!b1@eY#vqhIkS=gg^Pe8GOxD zn*_ZvhXGIL2+!BMM(++{p~K!{6Bsx_CJ=#}*cf4$WLDx3ys46I3!r(Lfji-fA7sV) z_AT_l>f8cc=GFq+76nGNTY#ta^B>q!B6tv3{+TxIsIc7Z0bbc^TJCx~Dd`=|Zeur= zPVbL0R|223Tv^rNC$)P!o8`bz=Xj`DwVxMGKKauc$(Q4Io5?dg&zaeBBoJM-qRMA5 zKuGW9ElNUb;h=T;zIQ7jjp!RewRY1GNK>glxhRi*a}DvP__ zQd>;w>r;Iz8-a(ZaSI!IFv7ccgdsBz;AM}1`)#d8p!<>bVuu$twux4jhH))*p*12Z z0lR1pEWhEJ|8pbYR3raVB!bKGH$|7?96Nm0oX6>^WcSMjG4>(L=}bhROXaKhvxWfR zEx1T~fDHZW+Zi=1`5zhhF*nVfZ3fzLHiHXcvF#blmj0P4wX~CMbSR28!4c8Hmxn_K z#9fYuiizJP6U`F?F?hJRMJFJ{UY>fS<%e_Q=4-Don~|#K6>Yrb&nhn^vfv znUKC#yW61@-?C>2N6Qayt0{+92f^Es?WaGK6I2dY*Qr&y7tD`>V}&B5KOfq{2_M(| zFYSW5Le3B#MLu6Xe7j^EpY&uE;=2)eNUGV_<8|O&gRFDPGC?TTCRh0;@5IM%E5E(I z1(M*Fnp;|y0=k+WYGd^$T?|X4jbN+Or_H$am(KDE2*Kjm{aL`44PAsZi^a-EhKPqX zzsIV!)B}F)e6YxTw+QwJ6hr*}Zf-*Zi{G;xkZ6^mrVFl|1$~Kxyvd^ z6alwHA0L*8k{|n5Yc$g89y4IJ!g)O@{+VU{v2nt78VHX1d}?eYoT|867TQjJQ%FE4 zkN;qz7Oxzfd6xlMo2*mpt9T2HDOEg?UmO!6Q=lUc2)Y>0rB!&6=s0TOic;U#VUg$? 
zQIO3moxo=BIXyw9^C#QAy>@#dMF^FrMWg?J*m~=rIHP9WHv&O|J3)gJ+=C7hTn2Xu z?(Q~(;O;IHg1fsVxKD70;K3PmfC0{Y`|N%0J-2Sv`g7{7npakLKmF@xaVG|ezq*4m z%tiISH6>s3%k|yaXCoNgF`|xnr-@c%HJPnZxxmIYXXh!T`iKy@<#K$R>QY0{zWMjd6x%6hw}KKY*|l^({F3g-Ut?WT`Jr{5p$ zj@p6bty66Jhx%F1+@dSq-K#gs%$GY=f6}D?mEd-{&cq=cq4mlg)-gu&?tuGH)r0#~ z&@H{<-J@EoiYF7zQp(KKB?$#mgwGl`A8kg!k}-dS;?pW{-RNXpQsr3*)to%GpK zpbq=G#K4?|D(i48sg!fieTi=sVIZ+ppbAMp7yElY2%-fMk{|T!dy_}BO(=A3J6kfH z0u(DBdZpIFl8FO|&?gU1zSJ~CG*Rv5l^ArUia*T`(|ZP{PFpd!&(|x*3yuhC2=_Jiy0mwXVdkDY!@K&7B7NvsrD|tPMF3VLww= zug0LYP^hv>;%QF*aJB=BI;cm$oxgI%h#CRyqR)6h>zk%lNmmn(!;_7rb=3x@e%@wS z=EV&FaVii{fqm=eMAdw9OB&xJIaM!zv)vRs_myk!Nd*tTUSk!r?SsWI96-UqyP0xP z`?<0L8jAqL&ipBwZQI)R$|V%GTnaf=2y{MfalEZi;IW#dPGqUZ46&CA7qO#os28=H zOW}1~@gQvZI{oJ8R}KDGXSt|9AcS*>%-zP#qrphb4 zGDu0L{Mc@`W)-efM}VM>0CUygoA#^yOX!Iv^;*)m%Q7m|9tZ%?^5;k?>oS`rEI9NX zclNY+8~k)}Ggbn5LE{Zf)5EWCkDE~EA4tkf&yuz`EU#IjH(cF0f$nLLrgee-KU?O7yfgMC# z-4DTb%sJSalb1QN8@7ovAAabsUKI8|tCBNZ*z#N5sIO-b|48MyPy}glT&<9J*%kw` zy&N*?cRx6%v5hoS2z$;+6Zi`zKxlCyPf6!CjROv)ubh~0LRjsTqj_Zi5@NK(?t-l8 z2!skDKQM5781fkPHW1N&E*J&aOy781cGqU2n-Xvh>OMg{36Fz3Z(R)bV4Lef-^*mr z9wCZ!FgxO*)#QB|QG#3W2`Y&&O+4QL7(kbzGC4^$75!Bi)K>9U#U}QaDmYmnQ6L;i z@BzDrXo;IVVmJbz(zpqBltAYjLR@b5pBnN=O1z;-qSw++{{)hK*%2X1M6>C zZ3WLa@QzCl87BI`>_(my1C;dc&=W@-g(s>9H-$~np2Q|7=oW=Ps%@wXT7VTQ=_n() z?g@{*5&&ukR}_dILy9`IPr}F3gH_9nc9vHXIPE3DT*kJu=Sw{yV!}W%fqi>J*a zu2F+2L%wJOleT=QZEJV(%QR0+hFaf5!lztVJK@>l3s2y9Fq!wClEBOp3~p;#%EjTf zvK!br5oL^s>vra)(`^2*fPsEo%xf0q(A9Q-fm1WSL67`I^e~;wsam$mk-km~X?^K7 z-^Ei;Lho~5!K~CIpHQ^1;}PK<5-O@^mE7O?i`?qW^n4-75H2;vB+~Vf@!34IuNQND zcHfj%Mt6O(vf8Vbhlh6B1p}u1{e6=;sE)dnwEOnBbhh0`MOosm%+HO2lL*#&IKo=~ zN%Uw{*fD+bh;nU=paSW};Rfd-Y&zh4wlksW940kLxOJyx_FgzL)5GRVt4pV?n^S!# zIfRG!rb`jfN9hdY4ZU|?<1y_@F0cE8#74ky`1by!=|(`{ z#kOM)cbnw}f6YfY`7gybU+=45SFbX_Nsz^)o!hPd@d7|G-rt|zqEb=2oOZAi+_t~O zbR2n%akfWU14eavs}8qXsmtXw^tOIZfwkMhzA7MajSpBPlFiPYA@Lv~unOzb(3{Zg zZs(%#w^X#?qXMEWKiZsL0Hq#`OPcLb1*P25=gz)->&GyY)u5Zu<+9}EwRuujaj*Ws zZ<+0QM-LCK_u7ujHuW53w(b@FKmhvr#w>aM=)hzURo85nW4HSG*<(4hu%GQa#PuhtnIE#0+Bxz2TQ(v_;WrIUhjI6UJRIgd@l`tfm!;E_!jaL`tVF|A zqsd{#o?6?CscyzWS4MAwHOLGD5(4*`Bj`7hQ0hXD{pRhVi~ z@7YTJT&78l*J-HRYV8_>B6oQb%=hoF*V1K=Q6>XoPrE!?Sdq%gQ+FvxBKAn=sxC&L z**Id&B5~C2+}u?{&}Xsm}Dql$0fUfz)jT=~c2V{zMQGFloSh z@H+Xl>F6$v=*F<|*5rNb&Su?*)yjh?lYiq<;TQ${js{o1WQCp(H5$C{E^3T;U>ed3 z%cY?`u5^?QQZN7{dX=aIt{X=9>rUb#DJH&OXjFJL*T=hc6U8XYZEIUMSrNPoU9s1g z7qkC~Qxl>ZIv)<2i6*S#^l|z3=c8v8F3P*h5H-R7+-rT-f?QOZOq#S9?=tb{iBI9#0Vp)+BdRiwDJT!1`(@rl48xTc zd&O$~7IltO7qkXz2qn))WF*3}UEBU|#Hm&VVy`NVyM;VYSJDVkk1v$kKCjd{g}5C? 
z2QqB0X%IPCxv~dbMwKUs2V;-@aPh~S`5XN7$QDV{FN;yyapJeJf-cN2^M+vVg%^rn z1R>?I%GA16PAjyf%-a~G>&05c~VfUziY>;=_>Sfc}DCrs-r7c-*yRg49gd` zC@Zb1_4iLo5HO$9+tl}nqE8|7H&(NHqFUk9<|Y|$q{Ym6SjN-LEveD#Hl9AdSNxzp zq@HsVRR^95*KPRubRo)oLhP=@NADHL@U1t)7u%n+bzA*Q^D|-gQnm=>Iz=`iyT*F6 zu7QCwjC2*7*o9i54~t4@q6n5SCj|OIy&O#_R;g84^YC}U(O}XpL3M$g|HSqEWpmP6 z=ry&^?XJBj0q5PxdjDOMGIC@Db?{F9H*??U^@NwU6Iu6McB9U+*Sx#PZwppn1@8u~ zh}iT$5g$iIp|3TBEpyYi><$Wu-xBBM+H1e)wDKE51sALGDg+?7tUjJJXh-R6 z;U=%`jHYSis$iI{B9!NEMZTu>rRL>6 zSD{qMgT_jFGrz%}!T>OcGNF?1-d5o`XSfs#{Ns=8SyAKWi;A@g*s7*83r0$zsP^Ko zQtt1^XDj0g$o;vU`wDt>AhWdfbs-uA*L+s`ik1#eX72>^NM}zXkE+e ze|uu)_LZT*()>I23yr`)?YOx+?44#(c>S{vCAbXr{SH?J<=Vlg6*q1?z0;IoEc{k( zQ=Tkr4XnMZyX^queJd*s8HP^J1KNy!6fS{;DASXptT4E4Ror^(71CvHt_Y?5b|Tcl_nf8UiTZid z_aJXfn0m%U(8Q~Z?OX6{l?Kx>Wg(AHOy!akpeBKeem#TmxjK!h~j<{fai?!6y{ zvf!&Z(1HWt2zdYI83BZ}Rcu@Pt-W3mIQV#ZH@fAF^7=?9p(i@%Olh09-T&Y!B?{~$ z)^*w+3bb;r^u!S@IEyM~0J(`IifIWkC~EX9`u3G#7e)XQCGHK)bf8Cmk^$IXw@W)2 zdseM9*0wAZgp*~Ok5Ce=J)h5Z(}DFl$;_Zh+X~O|&j_Crs$rI%>W+3s%S-PC_LxkO z50q+fjOeVsEQHgAPPgz`F;9+w>JhY%-~KJ0RYa?Gww8z|U0H||m@|FK!LT%hHQjiE z5$!a9K2IMSqr7cBbC%cS+jMAPj zW&gCPY^TX~pfEMX>EYIS!xe;VF&95;rI9ugzR)@5DJ;MjX4b3!MV@S-%&qdKE5GlY z&-LT@Pnr+1;JObUr2XZ`#9} zmtw)RfS5CFJkJcDKWXuTF~7lva!wNLZ{<;c)i6?&N;b+|UOZ9YGYTdXo`U1{_%O>) zvgUJw3|&*wn0k_6Q3-QbOLJ}GxQo=R?`T4 zrE-`{DNIvTth(#z5$FW^^l#^fT_@Bl5X(ud#0~%{ZU+xcGSMPTK=)xZ7Ds`)-|Ozi zd}mC1K~K8N&Y`ZYH0L+xfN!HE!t?hqNNa6RSy_swvwI}eQ!ZT5fIIyAa6&FOVt7|{o{$DrgY@8l(yK(jdg@xOck@top zyZOgOh<{6$m4kBxTFx{!XKQ_0uXb*~&6 z{(phGW-h+_6M9E`9TIO9-{kW_8-ZXkWW(r?U)%_o&Z78aS~fzL>4@#wp?-P&Ch%_4 zx&Rb`q&hXbt3%6$@u3S1>cS zoO_&pL$K(UFtg$}z< zgnE*5Bm$aW&!X)xW+&B zh~ppA+uPd&TOUk6BP9P=Z4z62=#ZFzwsdRHxo~m>k4wWDb|%SOCL@s4k)m>%-N?=l z)ECkGB6^$Y+FJGhG8XzTIFkr*5Q`Qs?YO^HQmR11{i3Y{%`IX_h2>ny05D)#- z{)4^zdS}KPpKS_O+jV{Po*6o#w>sz6L;>P#&$_^~SRE{u=hMUGc6BC@w!agB*70o2 zwiQQ7)mz!JsZ|cuhu=E{PcU7EzB~=b`2onutTnUOn1giU`>sa@^uu{AT$vrluHWCS z4*3z$CEu7p%n?uPa48xu=rKp77w#8^56Eb!8`bkVM0YEld9>h8i)@?=M(|E7@f`1`e`Gu>&Y%K(g=`fOEB{w3?{e=m0H+4=k%>)jqG4#LzoRaw)D- zJm$zKa8kiW)#d0};jxJnuFjft@D}E$pY6zF18t8tizbjhPFFi6H1o7f^7{YKSzW=; zRZ(DMyj8ScBy>X|I#s$77k=-yuwr#SItR?&BSZ!CfQ*I|t#%}HHy8k}j2(Wp1rA!o zWBC5L|6x6d!(HL1tctn>dHTH$kA2_%pbT7TsUtQ1Wh4cw;z5-JpY5JN+k7>;Yy#XW zaZ+?OmLI)__MI^j4C@!8y~jVMj&Tc+ejb>?+#Ja|kvBfbSn|>_1Ki)YE5UZ{k@i%_ z4&9p`#tWNnk2O-$(q%(r`HR9u`$oE>8S{kGRn&S1)YGgMHJ&<+4`73>n;o7O4Sozp zS5N4de-9EG_>~u`{)^viD2`06Zor;=`W7E}leIE`W!W`%VFTZ+zXStEELSK7p92PS9gx$AZ)dGtUNHS>0B> zEI~ozZFOMzb2r2Dw+O%{_M5j_A9g!!=gd<Y$)(nL@|@<}$D>EA z-c9j{Y2$}$iX+3nK>WemK@U?uck2w!iu6M$TT$H?WY~jdA1i^*mlx22qdHNDaEF#&UN^u5uFqKOSn|CBG43AH`RcQ$QBgYc=U&-Ycjha}_L2SziN=f{5O zytY{-JR8>ve*G-7=;^NeGJ_iCYV?DBk)>1EPWV6IdTQDm1HjXs?xNE?vFd+0Hu6{e zB|LZlN8Bx(yNfMakKy~**!ILr#Ry-Qt9J#Lr``^&WT7L!LzJnG)EE)HDN`B zb^&6)J+$19)w6rLSuCB2(+*{B4hixAOOe_yR!{$yW>+-m#Rk9_L7>Pj5dML(WGF#a94hi^3Bpi!#eA zwFLcLpS!Cxg&i5Vk$U>sZ416wDO27D%cs3?x8p6OrL{Pj53(8D^5Dnb=TIqD&&ap$ystMKNkjPVc?Drq7r2m*E0GtpPsHAM^``-mTLbK7}Nfs;k5p8ei znh*A%c(x$kR8{$A`om%^W(PH7Z;Dek5-h7%d>3-{)akVD#~TppPBnLWL%J^3^(Dk7 zRD#N$yC_~`3GdL@4@ysn?6kNAGBWVR1r&^+z4?f7a2ttB>u`(A^ciAiMvHNj#L;!p zr9YgIwB{jLiDrP$gL_bj4bZ&PstU1Kn+hLiWEz&#n|&ac&i;8Kt34Xt$2}qUYVJ@$ z3LaX)CYQ)L=Xi1jxw62WeP1(0rHC_e05SRT4)AxOoR`k#y82)^n^Pu>I>$at+dQjf zp*jlOg1RRc;nmH*$9EOMLko39&<7&SRy|`qb;p;jn*oYsdd1}Lw#yjc>HJT|&wrY} z)rSV##B>FRG-u8A2dVv5O_Y8KA+7aaDAFd}f+zHNna!rNxSAWvF2o(YNsT_Y?yF)$ zfLu~0If(+AoCV>wjwBxWfFLn?u8%AIvl>Zhu6f==g3be{(5(awQSF{){!~EVipSMc zJ?A!Ke%ug{#5pf)g+IU3MS}EK^wi2&GVBl|H}>f@(pQ||YRu2wpUa|xfzLz?$98uYgJV@BNk+-A_6ss}#2`XE31ZthsV_bYjr!zA?%y1p zXZK;YA#FnoF-!O6P5Hv3#%QgYs1Vns<=7qyImu 
zwVeEGz4R;!*dFqB=EC}1rz2+q*@nM0bV@NT7(b}T3R{lQ_Ad8MNIl6@E^1Yl870U{ z;9NHIF-;L|l$3xXqi-izk6@xEzcK;_6=&>Zwx-Dt_AkA(Xa)c`WSmDWf2UWmieZa% zjrKz_ASevsmvx39FNR{eQuQ~LfeiVgviT=R-PtHqLFeL^_5qmlM}7e0c{}x_N&XB> z4x_~epWlKE<%P9{;67T62N_7dXj&PjT<*}-uiT{}^qCM=^n?hQN|-qYhnCQ}ID=F* zVlwt`(u5Y~W?5BthEQb<_L?`g(dOg#varTWHf6DeTuf^>fWv%c(f#V!cv4+TZv)QI zKBU~Pj3cNPYJvRW8M0ZcXER%LIclMT55@liG>$l8OaZKC!J<6NzGshsyRa3&)KQn7 zzm!M@Z(gB;k@`wez*1dP2gx}dD!Xbc%bNOkPh2aV{5gHAB40i>_uCHoHgpkcX3y7!i#2~2f(&4gr3)R>>8iuyvy}b#_cx`JzqXGtADY9jT92x|LPaAZRha5J(p^?EhpOZ zTa5fc)Cg!sbzgaRx2PrHrSo)Cf+vAl8+xm{WlSAFegVyY-h07&X7 zx;Mx$$_-%m90cwl=2M*I7KW>7TH;Ox(oNJ^bMwIUD)dIg+og`XylB||2-d68N?3mw z>Erh0R0&yLv;V~m9zep_|EvogHx>VJDeH9g5^r$xri(0dc z!`G|~#}>fE>JfojGK^;%AMGp=id=@TPo4Uwt=d9j$9`B!z>(r(^wLy2(VyX0kTUv| z&94S64#mL<7@Ehb*@!-!z_!|63wpkZP@ZAB4c?j zT)5N$S9j^UxF)#W{Qs}1{*OLGB+*AWqm ze9WyHONu*XWF27>s;RbA`%h{39et~# z`FQfoS3;=+f$l3@W3)U0#P&sWYaIV;EM(Ma;a#&1&NMpDL}<9MMWLW1Jal`r65oW6 z@ojmFbpmD8pFd?^9J#FrSi){dDbAPbcX%J4?&cZ9@aD?Rv-c|Qh?_$@chPoCv=6i> zw9z8o^E>WI!$bAiYIZnr7X3Lvw@N8hbyR_fPI25WgFkwNjHPa(j09zK3qtH>y4x9~ z2W@v;3$x&uxnRD_xW)YN<#Gj?CwVyb;3bUYmJkp}H}npZl`tmm@U+_etIJMF9Eoo| zv2@tZialMkvZ=ZiH}*_FC+Bj=x&^|d%NUchLR=VtxU2IO^QfU7{n z)aaI>y?R=B;r3rp$|ROJ+X!9|4}3nq^QFdEun5l(d5w-j z;eSV~=w{WLX3Gad4s@V_E&c!P(fW5r@~=7rXj=|!3E^-*n6iXOPVc|PK3A3j8tqf? zYa`4LEpkmi!F`_wez}Y`2thqhk6h8B!$+ZIWJhCPl*=f*;;xI0 zCo5bN>7ldQ*27Fhr6nE=uh=(pz6b-0xu8NR{h5EuXDX{EIM$FR|RM0FZkG z0kF|8Zp`k^jB}F-pWT*~MN%ZLuG|FKQ<(H7sA=i*W`K<9SRC#m6{;@d_T{(p*lie4 zeEfN+(V`rD;PZ!2!@qnO-!bA<8WsadY{d~@0^^V(z8(}Smmn8N4XQD0PZnxy{iMTR z_D!rmUwFJP-+!OkZllUp&%)@ZNVOo@)ILa8rvkCP9qjzOt#=+$Jqk`E_;3a}V0|cH zrjG>QxA}yOjD`Xmqe}&9S>}NZB9C9GHhVS6@wqeyg@MnZ{yuA~1h@o9YSORv=nYN^ z=vE#Sc`2Sb5-RxH(pJ_3sI;mzQq-sDXBQz@KkRGd3dYa>o?9rfPbx!-r2zS6VO!#M zk838M0$qdHiMFj52Ng$3{cM8wOUVVlkcEr=>@$e<(@x^8H>Fc)0bgQi9cx~!B=@sV z=xq9?DA+5*HG{+8zeI&c^6F$l!quN1DVrjV2%zq>Wd*5S*2G~(^KmO(@N8-Q?)v|9 zg00@z#j&rK--M0|wXJ0!Ds|JXx$O|hSP6Row@?_Y+}=6?Zo?9=@f~P`*-0|HP<@=n z>+;QUp}P87D9`oKM#TI>RR*t--ky1$v)XE>*LlG|jR`}5^M6}8U6+irad_&SC-Q;E zKj}=uBrG=>hNSp(@fy<6kZHCcq2!h@k?m&0ez zAdxkJ>u&YL@3>*J7>FFHL9ybksU<1=44eJ(W5zQyW=HVkh=l*4yc8Wr2^t)-23#J;<@h>aSPg!-V9chyefBT4P9NzO7 zLMo}r3 zgb3Yi2WFQ#NZ7GHogsAt)5+GegYwKs_OqL!ZRqTZCpZW_N1L|VMWkHgJ?H=wX@9#_U3HueApF4f5XFrnBqW z<##1~rVi{oMjLV2Gfm5bWeuBGot-q`r4v%gK05u=|A(Zb^AV(Nx;GlhC6!s0E|sMQ zKTRJ_?6MC_5$VJ#jt68bS#~`Zs+A&+L zFQM`u20lmAHlwCQBZQ7qf;YnrQmnpGa+-R*$2}&DU&?9MHS{dNNLUaXWK|Vkf{&&& zgLHq;B>}xUdIy@Tl&_<4>ci|3eH)D0sln}Tdsx%xK{slYH{eD;`x+1Pna2m|K}$He`|4G~ z&xaM5yfac0Q{{;>S}%~IXR?|ZdNgzu>%yNNZ$UngM| zWDv;J=3M5vXXu@&ShZN<<6UkX$Xo5FyPv~LPR{bYrwV2o(Cr2(-)JP$l*e`lh1Zv{ z@QTFpJ5Cng+XYd7E2;?O~vg|H1 zz4~99fVII{yU!UMI+3E873y~G7vo+NH2&zUc4RPReJR7_#qu^Nfl5lSQd=AC#;L}j zK_Epz5biG+BR#%U5YMV3$|tYYcsX(?4`b$d1NXTdt#BMKuvQ`_byG*&J)#3rVSywK<}5gmLJhY-kA zCw!GVF5IWq+OJ0jWHoc-Ss%!}&NypNZ_s5v`-?(W9|oeZr;&+`|5ayy!d`$?4dm{> z{ztPw_`>eZ@{HgZ|0PJDP%Q3NtJm%(a@?ALGal+IGe$ci+XpI-{{9YG?}_-= z2Na#FYwva-Gv={0B6c3j$*JwWtU%vmx7*(rzPY|k8jCdH>jE;{lZGSS$-rpJjJVM9N zm!8iV^w&|%2qu_`>}kD(dNlCdU^#Ak8!-h@@g0>un@+yIlC*?f0KZhy7^e|T+;)sC zMVOX-dI$v81a`KbVuxQec7VzJ9-Sh*HH^^%_GoUd&GAS0!1+lMcy`9gK6aFRc18NF z4~$@9j{2{L$&6bE%qP<~ytgNIX^6kq*pX{t63{N7Ac_hIdxMAi?bZLRZyZR}a1V97 zj|YbNv-46diUfpDmI`?XC2?eWtVeK1n)|1OLtuspsNJ|&W>NacoG`LoK_k-Fm2Cv+<`uLqu|u;Q%CrUi&;6A{XJ>Ig0MLA*0c5J#2K zlq#xdzm3WiM+2k{Lm1ehCHB8oqUS7aaV&|`XTHx1HnA6`9`NhI#t-U0qVk2h$nYHS z_&N=G`ZdQEuq514B2U;D;E#hr4^iAc&or6x3FK3(~4tVy(o5t7&BLXdB7iO09{#}&v~N#5)Htm}PVmg;5Skfvw-CwLS|@PYRS z1wCI>L07;*eS(FWF0wy-jpfaZqaZnbX(XPcA`?A-Xy4Br=F2+#^3zcrHM7@U*yzN`-uv?%^#?Ck76XD$^U9!6a~#(uF1T{fj3xyy!v$$D1$ 
z?@+z4JQ|xXrf0c`HrT%Mt@kJxAJ(c6r1;xtMtoQC$>_M+lldaOw z5)+XJePQ?Xdqz;xH<}NO`swpB`!xbqs#DvWS~~KKU}N*2z{4Mz6e(;S4`$JJ`AVe$ z&_ZA*u^B2LZpDj=zh{4u5L&wK6ZdrONy;kRbFD)qjo6Df10F>KIUmiNW8Mm0uiX^Z zKrOwy7jS{uy^W}a=ShM8nhK=47HZ!UUSpq$19C%5X_r{hW1{birJfl~6w&&C-hUEA zE=eZu=ML$Wu$f=ZgwLlD&X?7nVi=X7WwsepV#nk3 zIGNz3e)2`Gm+ef^=rvS!%3qDT59s^(WxMH+{ptWiID*ch4ApdC)}#<7LzmC9>38g? zB})uafQ3PK&NS}@Haut}s;PDwcE+RG(+Pn2Jw*sWP8*tOyU3ooW6-tBA+VLRQ40Te zx_HNwmNr2-(DQU_->vK0Ou%9JwB)}{Pb<>f%t-l{-$A!_z~WeMBr_}P&`L2>nue1* znH`>*IDnVE^l3EVnolo6mD;M z*8Y9Sn6B0O>`D8jYQ26MS=CGyNp<`8AGr);KW6z6ZxeMtKOqx)j*c#^e4%Sip_@4k zQU0_*yrkk%g8sgR0S4u@3fK{J)`sq2c)@Yi=B@ul`jyk;t)onmz5) z!ks-g7{Ly4_V+M^H*9J!0A0~61{EC>@>9;kD3W#f%w1kg!xNvH|S=Zl-q>6_@YtlKEl4?VBxUJZF$wyq^MI z&B|1xm5dC7f2qm%T^MF+2lKoqX0)i_{@r>Icw8}x9ey&OB0Z|3Lr71y^Cv9fp8o%_ zLCelQr8SsgUw{ZF?d|PH8#H-i2gLzS4=LC7T#5F^3>|x6X!4is-T<(_mo3yJXqude zU9rNs4|3#|9C*7Ikm!d4LPw^XEfKr=Wo;?w{u2P z>L%SJVZWSLP)>qNfBezXEW@W;PMPW2?hIe z+uj+al;zS!g?@vE-Q+^g;;9@IBfp72&uv^8 zJ$m5r`b9wz0f_8qtR{v(Ttp%iY=qzK2cKYd`IB+{-o{A(b&Q|F`WM&zdp2Jp4&Sj5 z8{N-&3w*&UXAV?>@&}q4$B6EZm$YrA6BT!Pxc(GOmWubm1y<9okHB&|KHOZrq*?XgGS#?b%@~|e+^c5^pd3g%|`Rkf{3zK~B z^?_-f4WAuM`m@wBHJVF;VVv0#<-+5dP8BqXJviSr-B3F#Mdo7QQ4^)6WSRF1cJhA$ ziqL-t6mF1z95*OWLOI18CWNK2*cAEM{fhj0KVp@s!xCsW&5#v+>s0b5I>76fo}_8P z9>Jv9IVberD6^8tw@W@%ZNsU%w?v@yuq#oT?Cw{LYKDzdsY^AC#pD|a$->@+GKgcp zA7wuRj#>83jV-`0t@V9Ml*v z*ULH!ow03_r)%MY(1~~YM<+%W;ZpK&+g6Ab|NMGQ?N8vc^A!qw7N}HW6|hsI!o6B` zirpbTTodGiBDDW?3DpDcbs7x|ZVu`Fv!HFqgh(e5q}$MjUryiAXbvJ-H9oymEqM>f zVAmT1u-53DI(G(EJtUcW#(rV-?ECfhr|SO`R4iv4e{l_0tP9tU?2nMsUWRi=-7^oM;wC^h4-d@>`Xq~iNv zFJc5~nQ-W#@gAHMnt^47z7G?--=!r|@@yv0_t_IVa42onfX2h7I-NU@q6EemKNGYU zi6ec!^xDffLDLNVoIH`w=ec!|cG6y><9s~&x}KJjh(4cf; zf*!`(5?8#Hr`F=RRSN$j&?lvbHWYiSt(YaAcyX0H@o1^FxhVp@y<6^r$la5)_>2dX?|TnGsPr{xp6$j{BF9QV zETyBD=g0FJ&M9@!aGUGgPwD7wy*zgrhr_L2Mlq54vHq}4&|}=dRXrW<_4?=r@8qWP zp4k1xK!g@WTDsfGt{QRRioL=h~hzIK7%GrJtq<9UpYqc3CTKT+orTEvZ zk{uOL7=I$`SfX5n4w7lv3HKkV_iS95{4-E8_pmpd_JhZLp)~brg*D?sqc1~97tZ{% zEwX5cfQU+Vk9^GU?yW{StQK$_ci|qW&lLzaVc)xqIkXc?4ilYLo2Co{)4U@9I}5uj z*!&K9wDC42NsDCEaALV~R|#@KFL(s7rwAEF;px)(y4E5cWvh2eTHn2fiq*XWP5+eM zKl#=&co-ZhMzL&A_H{krll)6UKaudk&KvHLILSU;)~vOd?T);0$xN)^Z0J%=*M!8q zZKdt`Si5HWJ;!DUO&K?9Gf4B>UukN+fzj|HfChKd6ouBPMnK zC*9?Q-veW>gP>_FWR%trgTY|@SI$8=YbcC~VE*&=D5-$qMe(kW5x)mT|i`^H*i1)1$t@cc#mkDb|GKGWEI53yJV_!}2g| z76;OI#d>*o3PMRPG^KR|x#L}}>La+sgV-8N6t)s=6Be1u|cQ46t=3N94Nj!(1u z0-gA6w`#IwjSNV6uwBBLzpYafPe^OpD5wcasCo>ZU{$2Oc%U%J-+!B^lmrS?8 ziK!Mnw)En>b?|=KAXVctqaGq&>8lY%Q`)Gj z({e?c{-UVtM&BAnf_H`LV@5xv#(Ylk#K?scvmrWqZo#(!d+@SBA@Alm{xT47pW0wS zS(=nv-m|@aInp7;>)$A=Sh({Y2is}?r<_)Q)g&b&FF5-g+>jw6pX?6 zuBzv*o9~!Szj1+?wHGnpU3Q1torZe9?!BqikG-3l6z)9br;&%v93YxN2B<-g1_JW> zKMv;ta6Hfaq>a{Wi3pB9CZBdG36U3H{|Ui2Wavf%c-H?f&b~4#u4c{p4Iv4VAi;ur za0njUT^g5Q!6is=hlD_I2p+5(Z`>tV6Fj)P1#P5pZS*_uow;|e%$;x6ti_M&v*@+D zs!r8od+(=U42XT?l5T?O=7?|~8HE3m4!+%GsVMfb;`jm>Bp{;MUDFc~V^9{ZY_ofF z^PrL{oa*A53AO~jUG_GNSZEchY^I#?6C1;VFfrj^U&|8lkkU=#-I&tvzEo}`6u(Yk z!_2}0zp;$7qZp!aE(>E1O&z>r&{QxE8@ zF$aNzoOVkGFepY{U{mh|C*s&3#=t!uNZQX<48ybu#b;F1SPkVE z-VYNKQpf^LDztlb#rQrOpHljt#SL_pziB(_z+AjejyAgeW=Y~3#y8%MAHe4@pLlRD zwio)7hBajb7guBeo)s6=-Zyjlr~~vxFq!HPl)wEgqg^rIo>|HD?=>H}ZuTdg_3Swn z8cj(xME+LVqN3{~gRCBF(P+NPUX?TASCqljNU~cs;gKVXu)x)DD46QKv}(_BHEu6o znFcB6=5Y^Kv(yi+AQ!o756e=MEoH4<$yx1G+2mJwt0j}hiiN&(dZjQVYb-BA+Aw}F zcBwA&+0_xu0AoZJg)*&P1!z_J@$NU?QAY^DmF=7x_!x#Ufd3p z%F3`e5m7M~v6ZZ|%pPj=BcQLY0gN}O1ns&^p>FKV%=FJNd#~ELLpBIBoOL*pq@il# zRdDs*b|Ou`(_+_eTX*F<4!8w zy1oD;cRppx)c)Sq2zQiXVPY}`LB|x&1k84O>@Y=_qE5e}iCeEsHTHBt)(Vw!Y806* 
zR+pW~%Cy1Ir?latHAc<#{aQfFB6|qSliUh%hwZc%Rk+XhL!niub}I=gAj_Bxky^X? z9efCm0#hH&?$!WU#Oq*DL%i*9Tbu~wol>#-5+gP&=VQPH;|LMfRZeX^D@bfZz^O4e zO6;UR@mEdIA%SZ9+d%fC0o+ZO!{0YyU%DZwMIUUIW)8=bO<-%*-1L($YjcuwHBa}z zxJR4zEZMbfm+DaULv{O-;$h)Zxm9rU&d`j_ozjTrp6f8w(Y&{rX!_@LPl`#NgR7WE z@{KZBQ6-k*{eqJ7yd&1m{$vD?z?%H-&DrP1kG2+QVwV2DZ(Ny*ZMpT`m5>G+w#OWlW!4vIaKB?EE;otY zpPq2F!X00mgX_9lH^htLGKN*_n@A35XE|lr?#@GA*{qGv8Rw++3Ac~>7Zbp$*045Z zDj7L7e!*&R*0(pBo#r(0}ifcLL1+^o|ENRr8; zV)zsfU)4Kr@0r5MaJKcmrH$(CiH&@RjX#snwIzZ<%<6Kc5t$&_?KN2O-w@g z`&D~7U^2!7Q8;=U0qrtGwEOp6f7JpQ<-E&Wqt1n~|ABiv))h**MX9KITz)X8{u-rc z#hXEa(MP20)-zwQCmtHPwGn!e8!KE~thn%uVk4X)p;D)ZEsBnOEHtDn#iV?C69RzV zRR5sAG(9*{J*83WY84{K^2@our9*KZ0y zf#DvAz{qph%|U*T@Mp~>SJ~n9XOv!QnInJF$Frd?rkb_(BE2*$?*k6w)?_rh`h8wX z94c|BdaBfaA2CAWviZx$yQ&J$$sVVt$i{QZ7m_scMOo0_;;Tb5+JIDAgp}2#%fn|j zkBxaA)Y`Rd@2jKLuL^aEqvLCwy2Hc@&{JNYYLDn^8b=H*pa4bO!MghK;qhg&|FUAE z7Z-A;m(o(3o@Euvk2S7Fq_ImOmXEl*T=w)n^1#47bZgexbTymDoS5v&o)iO`du->m zf3f7vq6_NrT=tf$$HF6f=l#x(M(x3g2Cn(tqdwm zm?PG`To`?-vHY8egCibs;Opb7FQx}1`}`b#{cw-%bQyu|Uo0f?n=^68A5>LOm}YMG zkUD9}Z2eh%s=#dTF7yf!Np3BZJEF^>JIWukcqEBRr*`*5EwWC6)0hG8+IY3Z()4#(!b}LJ)N!rCsDvB*cG-^=so^B z!Ao_^?PYc$H5$3wIF%p};YTy`!V=#czC389I5hX^!!^>Go`D6L2&TtncXv`WY;vVBY%|1@o`h)Cn`44bD7S@K^9e#+ z67fB(6q& z1}*Cv?+om){M+&qBfRdVJw(33LshqN)(LlwJT&9q3M_s%md4cC&sDx>ataf*43Gua z42-ioXW7?}bZA^$2;@o8WsVkA8BM(#!j}Ev*|}hSVKPzZbt%Tnb-$f_$LRdip}lU% zSVUmC5@QHl=TB@|@c>rz;or4ba+okNw#X(V5#(#1$+h<61MSMtGB~ zdt%5LDhofixxnwo6rCEF-813zMQ6=92|~HZ8!&!wnNzZ{!u7@5Rwz?#jh`py4K|ze zeQzYs)xa*<;7s-i@g$(0TERFX`Ir1lBY85vu(w)Ufg87gM5^GD%K)1lSiOF8^8)CK zOaGMo?!@ZaQARZyFyZq4sj{)eW)EJr)}HNoDfF0+Va)S?GJ31yW{nxxPR&V0t`jwPy z=6-M{HaoZW$B&-8G&9y10~?+7^3qN=c|8}o?tWx zUaeXqUhut}N(fByBb!mqRQ3(H(%IBU7YiikR1OJm!1Q(s{!a9`mBKG79z<4ZPyEmy zfnj@&7ppN=o;g@Sd+zrWNw&x+mYAX>8=KcM89f=;vo~4#P$jxw{4zjMWqqyI&QjMZ z$!YjcFoNR;ipT~RSN}PzJQbDsF$YTK9FRF=`QeE6`l6XKN~P&`T*+p;2>PSmU!OBl zG{1GWh(qB41!p_d^^N@`YQ>@-l)o{p(xHA1u&dQ8KCe8FdAtFAAx#$-F3b&$IhNH} zIx*IB;DU;Pw+wJJ2h}?+?H0fG*cjqQKXjlw9;c_lQm|UJeq;YCr!U~{dYqL|cvnH# zHe$O&6YXr1MQ4;(*C;nl#m|DqC`%=+58$^``u!p}&|?mNtBHzMqFd&HE{L_huR^A% zt3lNo)3f?IE^S z1E>H~2A{c1f7l~kQo>~dZx3FxXAU^UiVClqYP09`;38hQd|Gw4p9(x1lY^*T->WSF zK~2bwOJL_ZkW$Ojez=3qoA~v04FRF68kr|-Y*^iYMcAZ0eR!pHUTs@)B=aq1h1H83YAcfteq?M zKX-~_$oPno$&L7=6aXZLqTCR(pJh5)kq)&V0gaHyWyUq{q9yuc#O~h;ro>i(rL!#1 zl%>UIDx6Fd?b4)uul9$;^j|TW_MhiriqStD<_AvnQU)T5Y0SlfM5Ou`20u)fYU4fP zLZ`B&@@*z7@NP~#^_wo`x4mm}FE$+he9!uGcQGGx^W%0fTYAIj_+7SRen+7?Z4pZw zn?2p}uX@L_nliNh;mrm$gKDp$#3$UOe2zI4u-oyv!xJ65hR#d}e-S<~s#?cpsx{ad z9Xg0Yl}eS$Y5tW3KN&6R=~3f46R+7wx|A{sC+K{?-w!vS;MkoWd>TaumcKcva_Yj1 zCf?aO6A5g<6^f5f=Z7AXIWEah30T+L&q;EO`01xM?blba@_lgl?#@;-ce9Sis>mH5 zw!8^`+3lp_wA7MBcy}Bc**wV!%pXLgHQ-I35FM^U;`%moiO$SyE>HwGSKS5`)bEHM z{pRdFgEu%YB!r>Apf2+38GMhMPx{x=@11tV)W7Z~))GXi?(V*abY0R&OUEFA0)uCBf)D&HX6 z>y$(QS~p`JgtHF~c1cQPYy@ri9_ zd8h+aMAvI9Oyapc`g#0nUaxP3`}tVIVomRgfN@Bf3-Jblx3l;#Cat?zi_rP+Q<^v_ znEm?RE+rN}0Qo!dGg!Lty~tx4Yjig5PrvLC4mXLJ{bjb5blYf~k+QY7FyTKkOQ}!6 zx@ryNf-Mj=b5fr(p2csxiYPRQQRf2*mg%>Ct+!d>s37-dBMeA`+44=0A(#JeJP29ty6 zaqRyleT)-8k(F2TjYtpBOC_37x_o9QFk>O=1BjxrV}HsNqU_-iTed3)h^FY#Yj%V6 znX4EVzZ!rhK+J|!gXI`9bPWZaZOFO$MZ<4*3l+6@({Wq&4g0gA{zdCi`uKKu@snjP zFC6imLKhn+!a`YD*@TGPdM3%i%T;*%N! 
z&4}qt>87|8vyue%mLthcI%?DgWx?A>hsIVfN(X4 zxNWPQlj+oV$92TXQ50p!BY>VSFI<2B{Co!RfSYKD`?zszouv2o+!xyZ{KZGO6_5uN zp^n+-yW^CrKwlyUt(AoaA&87GDOxZ@8~sT!TajJgWkxN;B-=l*MN*-U6Q8K_a$hm0 z@)yqa{S8ks!WWZ#z?h5n?V_)i8>{bG^oDhZ;pd9S*75nL@7%49`AnLV5XZWT8@4tA zVtCH@aLqB8NMf5S`HI|qPU{Ck&EaV_GA>wR|Hix3wvHT^h%n527K*)c$&nP-u*>Qo zm-&+!n5?T@ zAqlFY(HiWHO|oA#GU`KHe;O;BBD&zUD|$x=a~E{6=xcCrypAPogB)WGRC34j{Yka( zq%`E|W08&K4TV|Hvh!*BJIP0Ws5JW8J8$wUfnAvzxFl-9mM7B=JX@!Xm~VJC^M28L1V@jN!UG>ndh|wc2&Et{S2z!&*ABv8+Ev zxQ-N$PSY$V0IqQ_GynTPCC|K=#G2JDdf-7Ufa5d;)+6%$+-5 z@HcfOeRc3>S{*R)J?Blb1io7kyiHJ%aq?}2y^quXAOyZGPdM@&hz7VjsL}_b9FD(T zUg2pjwoHY?C-9zvw*{X`dtLUks7j=csU}a6LvFMDSCO_&eaw4XIENY%mu^fQRJi;G z^tj$FgT_qV11V6tpz05D#_O*$ra~aq4}tg2;)53T^!vSwTxo%UKR^^=S2EIXJQTVZ z*bM62yhV91#qxnPZ$PhP`yoQP&=2>($Y(BJ(IQtDKh||(&No*-3_26%UV^i;#s-oO z{`#qJv1e9P6}5u(E$_wBjQP6CA9dl~`4z2OEqr+#BKDQ+FrJ=O2PUV^h(D^D6b^kE zeMxKa5(0XRd1Wf$OyM>fT@g*cD=)5UOI+`HD0tZ9JV}s)ZlS9VA<=UpM`*kyn ztIeGC;wb}2*z46$VfxgtmM{^uOQB*&Ng9v95z+_$( zpEgqOZ^Djt2P%G`3)8P;AX-Lr;A#BVR~ze~MPvInCzec-ORGc~AdZpmMb!X2QuDH< zG*;yS>rWHxP~6#<;+uQ+Xcej^j`CkP6`Z4xPwXCk2gu^KbN?#S$)dyT<>+J5n_Le1 zvt~Nl+Gb;`Qr#4YY43uDv>NBwf#e@f4nf++?bPB8i(C;H$#)JPsyDl!g>y)v*<{iu z1c9j3gTt1E52)xOQCAB=pGW&t)F2Yb&HTdLfB;Jh75<0?aJ!7Vg=b~OlZZhwl!qNXkyC zO%ZW~-M4#0$lJ)OAXDuU_%aI>B0VO5cq`d;x$dl`@-wH2l}Ke=I>-4$%r2<^y8PO~ z$~>~56jN%nG25IOacN($)@rL2pCA{bfps~2dF28dh(zSomIf(aozjRkL_J*@O}+*% z#Va-t)#xHz`WH0@*Ls9^KQ7(iFI3BEzYo~B^`FsGd_X?i6}V3YE(_mIqh$?jj=hrQ36Y(brAuOHc*8%&>xG?|z!U zfxrHXr8T*4%3ML~{%El!=58PMB6bK}M~y8x<8GhfKP{k$@N#>|-8r9m zfMQ2KkA=|a`GKFr>L_$_juqFpr!tJeZ(XLx#-+W&gy4>pkrnK3pF(AG_zr)h=ziIBX7opKcdUL zK>PYCH}+2e=q?N?44R+!)+!yo%3Bd2L)igDk=#zXSdeSl7rP(I3zTLjpvY5~gGKND zl8nU(2IUN@DTjnNS4b!%k!qOLiV&3i_$gU#ht2n!(GWO%YyaGq7!{V9}uS?;zFxtdW_2XMRujNS@%BCurHVzH551jyw=QX$^#u8-8 zfF(HgtGGJR|K4ns2=U~Ql2;*17pHyk<3;U!mH9J~g*x`tN%(MPC)h++d(P?D!gFiR zS{+b6U*Y!?;p2*<@ zygFyH_GUkxPPr*qsX>Wm5NW^cm%=Zb3yzX0KR@+G5n?Xh#jX$^A@0P3W*DTHI5&=E zZk{xl1Zu()15|zmPXL62okVKrs3w`DND4#RV0Qfr^K^cvRsVE22EYyX1pGVpA4fD; z$d*}xc|?^$?1_c;)Lr7oBeC0~)^uxSz>7g(^a25Gv7!mra_;rHCOmuUQmFBuZNnp8 zHkLTI#ku&!{)(JmVB!a<_)LCfbW%Xyf$yA{bMu(#-3N^X=R?>jn0W^CUA=uD8M~UX zNE8tpqUU$?7c@C|wVrPLfsyVZNc00?0Iaea)1X`ga*wl zOb&dxdME>~+BXRdrH4+u^)fdG>arI8EUnJsa@P78_haHUi5r&B5#9=&BWi|E7$vdJ z(q=9tgd^GYTfUpX`qbvu)N6cWDGbZ2l3L4|5@(m#+`-@l)h+=zmc1kK> z1rRS9r66|yZfN8aI2bL-CRRhO4yZg@bxvaB=Fghm6ZLHexULy@fHnRAeJ)y z>^r9)tHj^nM!+Bo;}M!zru@d`{@Ik-xx(ag1r5-aRGG0>+xfJC;kg@W zyD`xi^yCwfeW&Pr$!}zTnR!L-ZCjblW9l)^$@?i!ru#6_dcpxco*$NH=UQhKbuVmZ zw+rtl@4IL8&-5F>nA6EuHSFP{n_jQxg`Q!$SUn_2kk-)qHGjEoh%PwwA^&)B3Qrck zHkG1?6cu3Q7{J`{tiA1P31TgaAKX_;t%{0{gm#kXj)nycF$Hz!$gb!(d=LT|HxW2P z!fsI%LL-heFGToRGV5=loiwoCV@=0c*4IUXnC>nnKc=Ocb)MMuMFsx`vrs(mQcLjr` z-I^kA7R$Z)96ayoCy`rKKD_;whtMLYHe;C#l9}?y?mz7C``%)ETpt#n7G{`y9oMvQ z!Q2%+H4?lR=LRbQ54i&X_50dwAHU*zCv4Ahd3w*l%hNy5ZL9!H zxqaP!{i%V}$yg$LADg$C@AHP!;*aoU|A)u@Yeyybbh!S;-bhPUl-i58o3UkI)O7=1 zVK9q6S8 zU(gf%d%7JV4h|FNh~MEc_!Mop)15>@FN2IE_qVn-G}gO%D3I^w5-6u_ZFl?*daAQZ zyhmfBwIQL3V8cL~M3&zpEre_ui!FOT^Kya1sVyzSX@imu3V*gLIu{rZFf%;VjQQ}i zO@ycyUSsIW^?4T*@0%VBL0|V6x@y76&Tdn~uplt~~gbJra&={gT;b0H3a! 
z(YzmTV-0=Tk*$z!=<(UvsAz-itwXnP-ywN(|v2uKH!?@;Rhyn;2`LnX&6KwTP4UF+?{Nl=x=5Df3s zU(1cpXc(W?{x z1BKeMmeU0d{_$#)wtP#mKsi%^A~*X`HM=3rs2Gilm0s#z+MG_kpIsw`9eO|a3bRCr z>1*9Ram6TI)Xdx%>lCB1(6FdYG>GL0!W|gP4Fc@~Hkc+$%{@02CMLM40_UF9zy{O% z3~vWhxszOp1c)q*)wf(NVa{D)xCnf>JFgQR(&fc3vUtu*??MbQ5nI6)TbbS5Bku3; z#H6w>`Iycd1!MC>AoyO!Il6<{)f~3TAuA;dtJ~-TTMzA`O_D;~+8bYXk6Za=11+4SqcKGIg^0QX|>B?P!Dzs}E zCX#U4j2HPdsWgvZBzcXvCh7)z6K*-Cpi(quYQ#7>z%6KW<$d+TH_;tk-y}Y+hoYW^ zNl+l9<4UNMLt&nv=HQbP2)}zdi9Yd1g@Sa3jBT^_xs4-f|L`+ERX?u8eIk{w$Ctta zBc&R|VhniMMP3(CsglP-!L2=MvNLXEY_t-PL>V>=5#th zxgp5*81IEb2Bi`%lM~U~ZM^G)8_JzUQ|qxtc!q;*?=Y<4{@mrXX=--R-EXP9yiDt& zsg}u|yUx5KdFib%JUgxH?y?05(LKJmZD?C+a?f9dgQY@Gt6>G4=Dps2zTJ?!_|P3q zNX8l{fuzDfV^ObundABC+X&(S=ZZ_hd&u-3!doGtr&JLHO@rF?KnC&#OD zn)1h_$X}<~Lmd%>KU{p4mDX5i7yE2q1 zG)#m(xg>OQ`}=y~UF{5GZOlmQRN_@ld4%XtsJ*o+I-6mpjBf-jBgDjxh~XJcFw1&M zFnwQ*D{im8h*XNOlyq^jp^0>G6>ESkfu^kWxlPBK#L5%KaT5my=c8l1$hJFzz-!vu zTMzx$Dqhx^^L0o(eVn=P+UlYOX;|lu&vU=iN)_zSOBnE%=XMyC6zY%pMD!_HI(h8e zbepMKn{TpSyW#5=rnKo36-d9QImdmZ=glKTXS0%$({u)vCHGZ-85$d7{>aiOKw{x- zU`VIo1&aTO$mQtQ7o?L&cFWFCY+a-eV78ZtR^go~wb?AH9zKZX?c?)WGg8doe#*QA zyZ)pAQ~T0cBM{1nUQnmFYNkG4ksb^N}!HWK1g3< zdw2;_qZgvKE0cuP=vu*SJolLjF5!qAGOTWjPOz|){v`QOU|FP8p-6~&P2oo@Bt`#P zry5~k=tokxh`4k(fhFq|o)g56<6E1?y=&zRh$%tqrLn@0qn$!)#fK4WR{by4+216u zWP;8dx)U2pzL8uuf1d}Re<~)83fwdPu2^(9Y=#{VEgao#uhgI5D-DM5E-@f<>A&&a zILx1pEjT{;8;f3dDOx*$fjPE3vz$y}$+2yK!_~0^WNUjFGT2|t;a;}8T`!#E%UnZR z;%H!UX{A)LH{esbj~uu5FQcViqR+SyyZs0f)($5UQJ~Sn{{8|_+2C@?m$Lh#Tj}Wt zrE071<;B2Re-exSZ|>MG;`YSe57XXqv=@*SmEDkYnKRqp^SWw^EB1p>_nmw#J*~ks6!um4SDIDa<8(9$%OVDE zup^peGoV>RyPhy((=l=}V+DupcaRasplE)?miq3|_p0%tE z`)=DPnvN913f|!c^j9UV_YDNy`Z3QoCq!J9=8roJ6nXX96*KlRgg-SJejuY~J+g2L z#AQW|jK*9)8g+3D{`eg(i=VcbMB*+W8!k%9ucRIX65yBAS2mnUp#CE%43Oizf9nsZ z*VkGNk&uW52#Xwd;q|M>QokpFiu5!A;Kk-Vc^_*(YrCnlpYCtg!q3k}bXFj}Gl@r< zF`*2Ptg|{mp#9x(fzfZlkfbDn#Or%K7??<%G1M8P5R>)Md4DivGe$(_?6}1Up4fDdFx^<(lxi0!9f;B7b3V+vQ4+Z1J<(=sE{vwm&f|S?`A#H!h zsrng^lX(qG=+Q)s@;>~~$NcFgt4F|tiXieVD^j#Zx18$uf(cUdg~(5ilZowizK6Y&AJ8(S^Oy4=`wSn22 zwqG_#9(&EfjD+Tau1fjP{Ko#TV7|oHRSh6lH01l)kM0_eSzcnTX1ET!FA(h%9LCAH z`~XNhh1w@TYj_a16)X=mr#jm02)*UHpCvJ^DYbK{A=ki>T+{q<(9uL-ZJoRQE#+MH zlH}`9iXRV;cvl5BCN*uh)$X&yQBmBZ{s@M+BR-Mkbb9aCJ&XZj1xpv~*_^%4*28-i z7ln0U!f%`Ygw*=zEvsNtUvNsoWf5c&Hbk?^w$=#5a#eBxp=fE z0t&M_xPkpGHimvE-J0m3Xt7Y3!#d+vDOA#W?M=aZP#0tm*|-Q)RQySb_yN99WbSaF zJ(J}|;&_-u86#~&Vt^i4!Cm93XupbVzy<5hU~ttJle>az7ji8dDFjtI+Vs&=Z>I{ zY3I5;;N@Tam4tXz8smG-^K=O~8`UF8n^^P0H(1O|O<~M>VVUt{-NwC}G+5aP;b;Dr zERho_gc_kPSny$f$41!Zytb0)zPCzt!px(M3l2(jKt;y-lkbpZ(R-5!Pe`}ejG}9?!Oz7HCM(Y zg%dE1Ki6dKIrp%ns?TztE#BhazZ|>)hYws9F;f=mP*&=APdTvc(= zsn^F2+DxzbN)b<4IQaQf0Kqqbv`%V##JB=OW|xy5s6aNFWKLV+UV^GJg+U{7cEJ@T zbXUzRhJ>WE*{MxwZ@NNrg@MBIGk3fv3UCBkV)Is~X6syu0%LTJ^5nULy`8*5+l&#% zU=iQ#{2h*>DlBrr#HYudZ|HE)_#UPH@o=$lxZO(`P|3>%76qDKzP1~y&UYZk|A%1h z5401W4|CDKM z#KAKrd(k8i15K?X{7caiH}`Jl_QdSmZiLugIxukZX1DRmS~B>Z#cL~bcF9NhDcSYP z6YG)jjuw*fGqhG6?t~WlU`cJ5yUiGZ?K-NyzgFa{_?$P|oZmUaEuA?Re9OYfn5A8! 
z(}&1h8IP zNZ1HKSZE$$no8u-At?!mDNu9Gm}u&8Wr&RfHuY-?dv^8Xtv4+#EfHgr-W0y1uuMR} zNQA!JYZcAM`QFjzxP<5WKJH*LrrX0^*NeuE zRT%Gop%VIre)}Q=wp@rTieDhY*HY`b@WXJfY$X&an#kwS@G{hh7B6FDDu4`&B`9)@ z7wv5?6b&6k+T}ss6EATdnlmpoq>#ApN%TEFj(V+UxR;hmekz)M&TW!axYftwb&!H^ z=bt~MZUU;d|GG90$VY58;uaruhi7h{~f9SBvGM5m-DSUx~ zbp|!p zh8^D7({r{BG#oR}?hdH%60I#x&_*E$oYozK2HK80|s*3q*n=Z{ru5m92Ss_er% zYpi{w_v;WsOEr2(A-YNqlPG&ehq;#=trSy3UWQ9GnO0X|jQulpQvr3`{F5WnrME$5 zX|iheB{mTbXIprI#b`Cq0~uId3FlE^VvHuu2{wvQ*{^?<yzxejMz97C*+%c3A#fv%V}aLnA|*im87daxBcO8m2mar)ga5jh z|HT*nk0)8zsM1GF9UwqF;Ma;q6uoyKO^qR8BxOxa4CL2X8q)$^<2C=5sU-zfBC}#{6=MZ%*bD!3C!sH-l?1dxj!y)a&)TOKYUecPlb)T zi}%Rcgkuea9F_fh1jgT~hWvH*f4MV`AmWTHjM8Ve1SE5TA8a0>P^qP$i36F57}@>U zCmkd6)-e=cKv^w$-!MGO@@|Kem;!s-v=UPlEkow;HPjW=akKm97zErQs!kALR5<}m zu>pNDl1%Y91t8g`?HO_S@j0)ZFI`A4KlVM7JLVm)4_%T)o@RfRW+D7>?9~{F zwnUZ>s*Va+9*}?#S075S{W++QKLeAhd(ONH)vJ8TmUi0WKuUX@4{nSZn|5RcpR^|J z)StV9%t!Z-O%(RNu!~3^s*i8b6M>|mB-V{F`Z2s+`1v;JKVNeHaxf@feuGC=ku8fv zi3HF9(Hxo2pPY!Zm?AM>)K#chty+WXOC0jBA6Jta`Vxc!C*$&x9VrA>>N3tSX-{1R zyRX4=>R$Hd5%Ticzk13TbJR7lo&^5(jj>U}s)yOF^b^$DFTPq1wckY6!0%$I5I6Vc zVKPHF-;NGS3Y4|LqO_z88Y^%sHH|~jse?g)`J$vllq@i^P@hEZN?IZq4`nFf({M~V zEK_c9DD|zjoILi_$K$&rf4uue+yM{g*cvxdY!z8k-_Zg43zE@E(5da9S$v5l`7Y^E=MawTi zRm&uwk7|}F1fMUmJkn7PgR9E&NRzzlo9BiGFso%3nwD9myu7$uLBIS{=A<~c zJl6@z*U3>lr)t_Nfjj%3IF7oJa1oyg+*ncd{i6fHpWhZ24PncR96YD>Ja6FM3m5s( z+T?xQ@r~F{NDarC9-9xw*9I$#%Pr*gzE(H0Pd_)Q0)C%hfB)|Fn-YG}^Vf>!%2~Ys z5F7oIqX>Lt1ZL;>kwH@0>&MC2+;0yuq30+sz5xN)i=(U$&Cge%Pal_1sfD87-fVxV zb!|7~?Rf2ft=?NAt8RwV@o{F=sAtsfyEvb3;t~LmOsrd@oYDk1YZJJ5ub;H)0W1@Fdz@yBlR_#8aB|Ua(JGuqXfDr-Sr*7tQT>*%O*qJB>~J#a^>ZSl;D{VsH?Be z`1~%Z$%E$&#sB!eeQuH$_r7)u87hsW?fM?1A}-l1SCs<7{mUWad5!|Z5MvlwmWMnf z$(}M?K)0f4{l89_U8Rf8$aK#${4O~{vbnGLh*@i?XLeIFQF*t*tP@BkVI_w`#*A^X1%i8YJb7yAxj)r(Y+=b)fXN}#Mw zH&3>sjE;{Bz7>4_zp%7+vEo`5bTYER3!*ort#MflfWe)o`&R#3wwSdd6`oIR8kfk0 zn3`WCKig#ZY}|0CL?448W`NWYVWvsEPxlCRN>ax}75~YALO|bLySllPc)grXR*VoRsZH#CwbF?;_(Vl?`BLp@P7Tj~+GydWI>kM2>ZD z=oav1dh^)K{%;)Ye>=c=j&d^ihKnYMM(>DeSu_r~T}dcDBiahoj~%VQ&%8hMcN+*u zeEYlpeR@bM)yr4^$yCB&qj=+P2kGHC-HLg}RrwT!3Wo!+2MA221V)Ir_D;siFByF{0>6_D6YDRn>g`!a}FLy&lFP@9TQlfs%i-pe&5F z%>HS7#R5;o!ntWVe8A(419F^l_mj9kH5K*mf&PAlWchzP|G)9nKQg}jrgpHpYKpcK zz(`?;z(c!I_!-w4a&ad}zilkmy9OaY+-syLARBIza?x#g` zcWri<;xb3XNq>=G!5p(@qv+gqqhA}@G07XNC^;==`-7kz9xk%BGhL%WbzM3vAz^@n z_Dy^%UX00_0FY6yH1_sm?U&E3UL<4YU~>=oiapAcHsy|T+bYJ&72knJqN#j}zj@3H zQ_{6ttl{U~3q(kewklmQ(T+Zeyl!e7d{t)L>$p^0ZUlDhETmOx_5)dh=eO(6&&B7r z_oL6>&v7D5(f>Vr|xEj?slsGiEyx)psYqsFE zYHnIS`C>a@V8ZzKG1$M_s{i4M%`4PmfDqo`K9&P>WaoCe(bSz+Jels%FR>GGLh95< z^LZ$p>LLE_!2aVe{NK+gRP^&V1OlPR5|;zEZgBiv#WHhRwyOx{a7WBPO>?TPdG{{5 zw;0^e@_z1k8oU1=a6tbqw)pR!lVClfzfD}?#Uv1MxOrz)Zao#N4b~Pa@rh^XTe%+A znSR)}*Pp2$d67(hL!pN9f8CtXQM6eMu_wXo#t&RQU;Z&w|F7ou-#y_WL!s8!*DqH)w5rO1I+zR$o}-otjQknszonZh z`!HK&@NTZ5WRFQq&qMh9GuQuU7yk!;=JQvXpSeexYTGEX&F-M{LCb13g@(<6dOM4; zVNF;gc5XS;lFFeJ@nj$-$zkK4Di8ne$N%ed9%U42rH6Yc7Z1;5-JkLpu(ArJFW+!< zYwlQ{ah)|?H;r{;LcX1pDW2`AQGv&tG7= z(!M8T+g&)=a~$$|3`l;24fp-(eEI+L)&IK}?LYkaVks(%!{yfgbtt>mSqp6Uy>Cn$v0Zjao;})GVL7ZJNduQXeDS1o|Sy5WYB54BbAo{sm}GrZy(*ePb5sA1xwnj}t4r2KgIj{T2X}XO z2)c214;F&EyCp$_ySoLq;1b;3T{jXO?#kQU=Z<^2`+VQ%^XL9rgFP6qcFkE;PgOlt z)mWF)$X*3ERY+xW z#%F@x)hXEi=L&2KDSL;9kN=T=Vn*^yM>&$B;c?1wYNHFkN+ER_TbY2h>G!JT>G|i1 zsz%N^p!#1OLivAy=Kri#|MdwxL}zeTsi;hNR63LW%>0d~>wPGi-^l6Gp%1=GbU`x; z`MUj~gAYAFa5v0xJpS8>|KI=k?)#s+0qS@75MYIrY~^-Of^&N3iy+B=LWKVs~O)8h5lYE)e;h#~lsa^Xv}C&$20BpN0Uf zT3};-u5hqtUjAR777({#4+!Drjv6ggtVuo3-Cu17p?woyVE$wKSH$|ks;f9St!#q4{KhKAztoI`&7_1n_& 
z@^TY829AUl=R=7K-5=#D8oGEXDPGn$sW&?`^mQlbSy4$^U;bqTGvWW&dtSW%@9!C& zy}db*nR_dAH&c7swH=p(lbIq<#9v>>AWij;6Y9#5GkOkH;Mi;MvRb@MOyc+0eyzJD z+EB-|TOOy*1@^4q^v3!ZX9r>_qj`y=9#RIQ%fJT&Un`2Ce}r#$lR!-%Q#ipwf{%ph zi(Gg-2G+>G3g7zL3oNqNv3reW?x1G2S2~?8E?Rq?*Jm{E`#!i9?d1P#6P1WW=4XS9 zenZ{`?=HGA{`zc~%4vyKU>Wlb)@CAGXf%~CR;TgfhcI|_D+NM+*ABTLiOwdcd3)>2 z6=qpuV`(XWG$UwSh<|$Ugrga4747V@gH<6B`B@>OMS9T|8Xbfyv^yiP8FZL1A>CAt z%XC%drTHf8PM4+-21szg*SvrJ&;bz<^<7Usj990rpMAV{9N!Nn{ckUgj1P^>wM88#jgp!>iZTq(@;71pe;94|59bFaYo}hIWN92|Xp+S%iEw`Lh{+9|Kx3@h1X^T^ z0BWLgGA%jZ!$BScCxyLV-tOrqA^$fR!p(%h|L`}2_x^qVT}_QGAc;X24|7)%CsQd8 ziM=FofkS25tO1HLOO&)$yh3cFD9f)3vMxy=^S>L!%;j9PacuvZng@0N-Yz}0>LsLI} zC%TE{Ne2iJ(50$Hd+Ks#_om#ynSjXO6pW`oq3ir!_*hT`@$9LhvXvdQ^lRJD?^zO9 zsWGl1?sK1L$Kf}bcYR#d31bWkdC)_2UPRAiBgI&0rD&PuLq(*~X#Yjn)d;#2DquJ=4!P)9#ke);_JopIDgieWsXK*Q~MuSaQkqaMpT`a4zOb=(}L7 z*7*|Afi?>aOHz7JjFwY7B=Rs5LaVNbd_8*l_7-dz zpNBt)-Yx_HS0)z+qS#01_Gx6MT`4`G*J{6Qh!RKn{*LV9Ts~N|=;6M~?>VZWy*UG$ z!Fwi=(8hFEz;LM?!R7n`^@p94V1~=yVcW(rba)kcq=+4*uZb;Nr8j;|8q2KalfAVU z)aoZ6o0f79)t0+lB!+LHS*CP`>?2=UA!oz8!bp7N5qhQ$3p5Sf6kOKIKewFDHFSI} z(Zh&e5ddA%4)O8Nl0i@0V#g3GvF|f{_9?wi$iXC-4icFwDaiw*D*U=7C#yJ`cSF!1 zlhb$S?X6ABY2z8eyv{}9cS+laBA16_;H{o&;5#OBw0eaGxtV|h+?!$|_=BYsxU<&y z%R1>$P9?;t{gEpByqphs*OQq_30*(1Bai#UJf6Sx<2+0i(qbT9_5T!m+5uZ7!-NE} z|0`7ll%@+J1kfMKsu7gH020Rq?Z=HCLCmaVsj9Z$p{JP6LV$(f@GwEpU%ad>&NO>F z%)<}p1TQ40K>K;eGZOF1K_s*q!~Q5k(Q%cH%LFQpXr$yLcZ~@Zqd`kI7RhwlB3{p7 z4%}Ies*>O$2mGTcbhj9A%EmvX0aJp;)yRPzRb0M8K%7MQ&?WgM^3|c^N6^E-J2y(A z4Fuz}cAM{EAA?t%vv;n#7!lH)RG~YbK9UsV?@p(>5a^;0&d~ z(StFPsoq8uybpSqavUHg;OdosdP^V{>yO5>ff~*M#@J51b@h`WXTPi%ZK3mLA>nVJ zh^uVhD=lv9c4k2p_7EtgD>)ha*LI5q@&^`ROl8-~N%9qhw?yVd5OrI9F>WI9$cC)d zi@H}hjwUO}i5z>;)E#D~%G$Gh6+aAV*P9<-i7G7=Tm>a$uy(5;BDD-5M~j62Wr{g0 zo-jov($O$`cDu{n`+`kcE&{It&zgAw`xuLiO~Ga78_MPsc^KcGH6$2vfEbb!JtNU_ zwp5lPgH0An_=^yDomA7^JtvXQ%o%AK7fwv1vM|EW;c1#31Mo9^&}23KfK7c3ORCaR za6}+4aA(x#(Wfyt+%M^nB+HumRYOjmE!yRLW!!(QMA3UH(5)OVkQ|Zq4u7V| zI$pMd)gyz?CB53S1=tl&-4rb#UAsbH?k~s+i4RSt@Fd}72;ztZjO}4+yfv1&^L)nX zN3-b|n|=yzz=~oeq^_{2b5eWv{X2n1iyZv%1%VlVm#FX4p(P!Gt|*qOrQBzoSpjeY^YSSgsz{CoST#yM;;$4X@s15^C+wmLA z4f3?{k7s}D+gTf~i8nd6*h5f9S*xwC*OKNm-|lysP;8--?+(`=#re~|XNYMwG`02r z(t5n*AjBZwcn8t=PAPzuF`YlfzWrXRVha=dWq;IAmiF*I;3qw>AM8oOsPjrY<^4+! 
zyt_!3U7dyz2QE_o^;zjE4+7SMWv16xK7kc#7WZ90DqjsT<$G!A!CFC~6|nEHJP~#h ztwnENuM&as>)2O@+n3#MrhLh0fVtdE_8ic&?Mqlx|P3g)v!WxM!n&Z{H0qhpG?3Ave;Q`T~@W%V9W66Ka>^z9V7g5 z7gVU2CGC;dzo4i-2E4-am}R0o7>3*^yz7r!R1&^|yopc%#Zoi3TzIGQG<>OSdQzG4 zX(jJ?N6@G;Zva)mYqVv_!oVk{VHAECKQOWCwk8F*A^d}rLtzYI!Jp%KGWH(`qaVezZuYOx7~u8;bVC0a zO=h*m3vvYXogHaHgI{gScsyFa+QlgymkAtHPqHaW@T{nOmnZ|reaSW&s!@@_7`u3g z4U&`?RQWM#+L$XXw7P7uURtOv6@%AXE# zvI&*2fCJWsK{jNn1cq?Z(GE9YetE`qxc^2Q8;n*A3tg>jc6r^*Vc$KIJyY3PiM4AM zoo%5&Kw$Ugare8Swcb64wO+V@+7@;9LyA)G0310gA|&Zqxw|*xyASg35M$rP_l4j& zM}Y2=}1fOnDkA=+DGMb3-)9Iv#agQY?5i8 z&6ZpeNp%rI=<{PBg0I$JUr_8;+SF~?NOD$q{0SQz=94Z%C<6h8VyYz%qBk)^uad~< z_9P9>r*xQqg1LXkzQx#oZgapQ3OeABy6Qn6w0RZKklG625`3#Kg8Rysyt_^okeEAU zec&i`;yuPJ&9{m$&^mrY1qgcm)`=QmZol*dId7s0pAPyy+*_9E1-F;eY^; zJeA1EoacFSw7B;16b~^gzG$=tzPcq$;13ecTkdf#A#F-`?6-DaFbR4eU9b5tjnI7J4$GTkg-vYNL><#wGy1P>=r@_L_nZ4XE8-jCox;{HCFqup^Jrt;-Od-uxsZCB`N z#qn+Qo3E7_VA-c$fo+@Rwc2u~pAX-Tn(DPNi~~r+m~7T&nn;xU)K|{{{EM-0>0aL= zhg&r8@9a2dz#nxeeiU80CXWR?tOyuDM=q>jD^LFm-h#GXwgAt;_hHGf{2NckXb#(Z zmc?>$5L0bHJcEeD9~#kB>9$@Z3Z||185qk>ej@dSmGngA6*+Yp7;~dl+-L*e|d5(k(85V|cMtGiS-!mRqsV>7U>OGXT*9~&nQHK1A@wV*X+k?Eg+>&_N!=wi8shqGm<%g&_0yqLN zyUse)e$>B?`Ti(_@Sm^`T0vMQZL=SuPNytOPl;7*a_aai^{;m~0QQMRjimZ<^F~Oi za#h5~1S3Cguvdwe;RF{JQVZVQ>roM(heBJH%xudNcOxW1Q^P zQQK#p8E&xdeZ|PA-w|2+Sz0rKpV}!HgzcGJ?cM{3krvQHYpJJtu(a+YVCpB*@8yck zeo-uIdn)2kTr4CLJMVn`VTh)U_y&l2jY%(NJF2YBJRt;Y_Gqmc3v-EI8TS!=arjSQ!wrVD&Mw@=Gnw4j1TI8jAS3BXO*8>MSP95^HB zHqx$MB?rL4FaP2B7mlYY0Y22ODBG!3cM0TZTxed&eLw{O=xBd>$gGp+=uk>aTJ;J5 zj3AP%3k;&(9qwn9ILpJR%V#k9eE$1Qg9#)3IFa2^ZNQdl-&DytTe7Y2Aub||O`^vs z3J@BzC1EPsoiMcXX*4Yu!`$a`hIdK9vIiY@s_Ug)jI2m{gx_?_vc3&r3=T+30RQ?= zcH+N3Ufu)mjgYkYjSJ1Z$+4~ON)TUwL;a&l%V^BJO4S*PDgaHchix371tGBIE#!&C z=M9_^sny%E!*drP;uEj-mbqxKJaQ`83f9XGd4T3>$iYoUlHF7emVY4UW4M_sU5Yc3~vExj|h&YE` zzjvw@tN0N$xeIRJ?^CIpqglnPqR!VBIE7TU5w<=GDAK83rDQ>`+xA=QPL}u2o6=cY zh+pV6D+&N-6}!}EBga_TlprGWvuYEVnX=jvHe9RCXg@d|n{hrhrr&x9tPpIJH+X$G zcF+(KMYf;IXVoW@x0)9QLvKHkK8?A#O#?Xt2~y;8?Z&UZSjj7X=R|W7ce|r$D%<@y zI*y^LI4f+>=KJb45woCt7rBCP(db%(jv3=GJe2ZswgoQ+*Alck>GDYPa;T}gC zFMTAqWOM{iLlwp4s%LLW)3|k?gO5LC2@G)VJg6J*%;#QTdcPDN$H?xCgw5Dk>-y|c zageH^sk4x4o9Qx_;(U;^rWUi3sqBh!vm3>HJDDk%6@H~zzaOu}n8>|TbxclF?is}w z^x6BxV_#y$nP}-^_1sT}g)^G#I!}-U<3jX-+h9F4miNKx<5E2s1}UsTj2zraXUOOL6Oo7a@uDVtqIK_%$a^=z(U@ zCa!pyx${YOd$?$_2|=~skOS>`I3J-;gM|FXXajzBoOQd1W`Hl;1pHLue5Ek&r2#9x zU{!`Qh%t-=Uq#|wT~zBA!bXl9O}o_C8o|`IA@I@HTJ@vzn0CjUdNht}lrNEKTAYL~ z1bU4A*@U8=_aw~Lt)FR{suC~n)(!f2bl)9{1i=F^nFyT=Uc_6Unp@b6H?s)v58XuPC%G?<Q8b$kQ4GY+}3u{Fb@qG5P~>4Bazl4d!+l(FUMNXqR46Z(Av=_H@lGjpqyEOFOcy130f4S_!QPTuk?aC!;-!v7@Gu>JJR_S}AVF8TxFviW1|C;slf2(?xX* zT;UJ4N8UpZ8g={cSjM_rdw7lq$;pM|uw%9-x^6#{UTe3wN^yr>{0XJGqFBXk_0WwO ziuf*`h3J@9@!=i?!$7N7IM&1p^XH?W3w0B6DO?fcqox7hi{dSL_+?rlAGpOEoO=hX!oe4C@{}Z!taL+7ErjHp~&1yndT` z*zcDP4PAZyU;$fsw~ro@PJB-nJ>~{tu29B!NYrsc4UzrdmAE6}a8XG3CdhK`)ZI>l z{V<_V_cQ$>hN;I@N`Ec-o?iLft}t=2T~}j|KT}O=N@sksX$41*{U&(HLhlWjs=+eD zc(G+L+y4^>tAwJUZ3l9xWk63Hk@^Ns{5Y9Z#}sBz_&@bT@?z)QP!MAh-lZmmD zAsw2Qoz7C>^BYGV=eijMgUpV9 z4~oy!N;(-170E!E;-c<13Oz!OrngVHw_zm<$G_Gph}qxFYHNP&Z(R3LA_!4&ahcKF zs0DITGbM7-mtZr~=s%%%=2DY7LyMn?RnTdS?68c$$`{7Auv8_KAsquSLlxo(oKpzQ zn=cje$k8xiZ;#}Fje*+U4(ZsJu&BVmW8OeBZWoo31?agpHeXxUu7x)QzcamF%;U>- zBEvHHRxIs&Sr9iSF*u~NZn#SvPC!u$Hmz`(IAa;|jceYfi3)G*x4DB86AkMls@fVxAJ!niD^zH3 zg;+;^woL?v8b&ciP}>u`?4QaJ7%mv0qmiNp;H2`N;@(k-!#HC4z;X$x&*zYvHSa=E zl%Xptw$VIm!htdFg7i9GN*Fopw>Zo_oP`LMGMF;>Cz@mM>Gp|GxjVwprOjGmzdF@@ zD{+e(ON6Gi)Ty=UZ9k1^taylxA*$V;QB^8Tvv1OvxqYa=Kiu~|_EO(9ND7l_%6aY! 
zefy=sa$J`%f})(K-8QNC%+bp3piYfhaek}`O_i7oeOG4hFk`mQxcDb8e&DCKx*2DlM}(a1S-F zBM%ObnLd@3;NlG8tbdm&0+Prd{-6+1VZy0GB7GjmrWB6>vxR)H32m!?noBen7$ZJu@p8zn*`29x{;{icNp8`VFRjpQkI%b8a>QZg*hhQ68Uwk zKWk!9V|!iJsWQL}fj$IjNvt&=3#K+2X&2-c(FtaF{V7@0??C4y%}MY&R_TgN6l}U0 zN)3l!@9@L7k}AdF3;uKuQ2G6qG=80VSSIL!de}Z}w@}rk3-V%NvN_k5>n(i-2(p!N zP#lJtoiQR-a1S>MmoLYJuR^zzH^GeI>MH2rwyeqP9;?lgEKT zKW`K==YmSvvRhq791og_(v&J>B?;wVKK|kQNQ`tgczR%Kv#d_B>Bs6IQh28%%%QVN z5bSlo?sA21e*$js#qUXFn!*hqPJu`HW!l_*K>1CK2?=l?dZ}o#%rAQ<#q1sw$<0pz zNAK#JZPa0VPq)i{WN%c50)GI4;G~pZwhpqw|40zvo%4i@1$}3szzmk?P#44)^p2}= z50uK^D{Z^mJ4?L7;nG z35z~a5oD^}erq@rL$P`F#>TG9bW-|M>EUsz!s-sQ!DeGE!dg! z+Rssp7>#UxxNr$k=ZaczET?YDVI#f$A(16@=E^!ph3ti~1!X!0_Jl>T@{uhOV9uEb zNR&n}!PC$%^8*q02tfeY>$!@o>+q77!8lt|OA2p+G`2v8I_7fcS{m$6kKj3u0x+ zDGf-)-wRCM#*UViZX480eoPdb)WGWR)mJ}L=nmb6+fFAE)5jYIS1Y0)_5|M@4lK|o zoYyT)Sq`n`RAxAm`-Sw z)S_NWkNg7oa($;|M()YLzL7)*evF<_omL&`79px1MfeF%xwZ31?g_iD91y7uTOwTG zxM8`xu?*s3HO|C)LwtQcMl*1%sXy#sXscK0#9ml%%_u6ZH zl)+6pYf&z*=UGMf zr1lYq$>Z00m(jZ{0i5UL0hrFy-)}2c9=~aGY+d&cHXO+Vrb`Dd4k|PDm zd7~LL@Uf&qX}>BYBJU#Dzz+lt+61JA>%WsYstgE~4RW>+VN4bw3iykgTW&&pLQsR+mC+`>2#U<5Ed|b7MmBZwIdfzl2-n7-f_D&h7Bj zH(YDi-fqo}b#+GG-2>Sokn#}pyl3cz6}y+Kmll6Rs9|GX;oc83kEz5kP(;?<*#Pbv zy1S7+s!AE++Lz@NgZ%!yG3WnDm#jqol!qIQYO6^RH2P>~csd?m;P5G_9@H(0`xEz6 z2g1%M84{lF_p_29b0cXW#n7?0+DiuxW$56L03Y9deK@g+H!S%L0PQ^?vf9&(h^t;d zb4juDoFifEtru>qabe<}P0o@mMzv*NB)3m=-)9*!v6-5Y{ftf@f?)Rq-35yoQN+$^ zHc~JM7y?n2=;SdrY9OThr(&ipYHS<>Y&x@&6qw4w5dpLIDSCK)t#Z57BUw{25p$zH zl7w@zoML{bF|rU8G8*DIreD>+ibBO6RHe$BbI6gc0=+uku@~R-k_)CZ)?+~Ygge3o zF@vw*LumZ_lH6Q7=)>(KD?F%=Mh^tO8{{Bm8t1KU4KXD64lX2o*`1lJg>^ZDt;XQ|;oy z$s%mK3b%^M9O66W!lPF=y6p+WN#q2-m{R)!aS=oUZz?UtL#o>4T%`zazrSW zH!>WvHSKXkDl_}{^vk5jjq`diYUn7mBh&-#4XyhvsQhe=dhBNTgXD~wxq&oQh^Ngw z6N>iwK!D>xzeDY1RE=0Yt?!bVFP1g)MT)|U=DZZ$>g`N7Z+uAZ6IxOWWyhl#+#1>g zcDyeodaqKnc!RA+Y-rpsaH7b8)RVMeU|+nuXtpbR+*>R$+repCpP-E-dNq^p47#*I z3!l?e3}pQxoZ;T#PN#T-%j=K6#dL?uH+pqhLYxwWV@o&gc8ULckP>E66!AfDt5+*Y&zn5Wi1k3BI>l#>;7#NWq z16HwUO{`s13il@&8n&x_+Y7$Zu+<+AA_7?QgLoS*P-j37q+warYYhl$w?zt=K!#a$ z){ZM_a)4kCw|8Uof{2*wOB5g6ER&2lPx~Zt2u)Y!i$04*l=htOmZ_SfrTxf+@k!)}M3z@(} zT}G>`s;o|C+0l18#teS>u3hs2+{P?UZR*mxQ&@Xom>IGMCq!wFuB)Lm)m6}41V_JO zve@=W(MYXPGovBfv`adM!VM%|zq{Q^ueC9)6a7zAZV?j#cD_GF4Gx+2@2G02l`=4D z-eC>maK-vlxORgFn$7Em+Z{R|4QN!Ae!H|OQ$qBGXl@merBN7Ug6pEFi2rLSeU2;eVsnKd<8mQhg)YvXQ5CWCF_^}_j zP&pUjG5A4neG}jTB7z;Lk8M|y`YLu8l5Z03(QAfo1Fy#4Av*AH;LTO8OSC`sEh~yz z7Ka6{{7%1fJrSm%J@9Qp27mx`%JV&rfg>eG*rPz{1>KeN^PM%c0Nam0i8g=adr;^)KA&IeoduEk#_);~nB;4>T+_+y!kCG@1?N zwOZx6BJk9zU#Wae0v}IL`5DdeJJoAW_Cqh#ES6XdM?aYUd~;9CL$Rp7+CK($q}V7; zT#KoTA~++TGlG14Q%;WT9y31kYNpI(+;Csr;_^eZ+8@KZmM`3!g_4mxftMZrreh^g zWhA}+D${VDZmWU)CAAWRbJ8`)`#b5cfk(+-t|pN-L@B>ew)TN}E40S$Eh?L>F;;{^)2N^bR?lrj)+kyb~7JuU&l584NA7M0?2 zm$U0P00paE#PZOuEpH&0ckrjUYQZfOk?6O!$I9gcG11TWQm9sdv*!Y;G9@C+nJ>~% zU<^F2imDtYqvNAe>IuS3bENZ}y=gR;T}rO^qJ2v!l8?hOlzt+)_=(?{`my%qmgBof z`KBbY4Le^2?ps+T!rXo0e_PnjztSsrNFjcSQ(ySaIY6O*NZcF9e zmJ$4a!C_o-uew(x{emlB9n7~|)}zLJaw-OL(@T?@nJ(Mh+V_IE;aP92hVxZgUNiZ< zRnMxR1vlc(ybwK_r0WkII@?uIZ&VKQ1hb%nU-p^9bwQb$DN>C-4Ksw71X6hWDxcF8Iq|o__2ptZjlS%;Z}}#wlH%xV`+t`KGgrGXgq2~d4}ax%iZr(X zwXZoXkK0EQZJsdpTXJ$=sS%!ft{d$!v2q(r$cQYfbjY_*gN*RrxQW%=Un}!}7EX91|q%*)*od2S{^7jhBWbmIVsRKFY zR3UpRXv=>~Iftc05U5XBI?r*srVqf^PNPW^DtnM1LIP#!9OZ6_+7tgFqhzn|jG;qM zJqpq)C8xbgs0!bvKgY;p8k61Uz@6tH1)Ge%_e%@wh2)pFRl-0jE`=!sB61y9*;4cXS=qlMrI~Yms-vknyL}}i-1eW z?yIb=aBbc95Q80zLaSA(FEzNU3dyc*^^25qCfRl`kA8h*tnvz@sjY)52;X-5T!XrY zHUfOgihYl;QBSIK(#e55C~b{EP7-2Z+)%*2gNqANR4)RI0xHYx`D+rF?HV1Y_=*a{ 
zmqct!)oD4VIrKO3Qz+M0f2S%6e$5qGIh_1~!D;u|u@_Usr8l6<68;IrX~Yh?1mcvr zIwV37RZAl(WL90N9OatsIj*`O_t}1?IOn?KG~DfXf7-J3!NuDOd>!dY1HsFSpc(lY zJY8dW3x-?7Z;weuSw*a3`jd9=Z9%VV3CS$kY~>kYUF?H)2j88k>}&i{x~F628?kXS z%+Y2q7)WzY7HUS0kXAnhLXPx0<`vJXGnS$@LR|&Ea_Uoy*|i2JofBu2IBp}sux^98 zw`MJ~{H)PFHk>aUdjF!EH<+0zg}(ljLmnR@_XMr%xVbf;m$+?4U~(cI-ryN9DtOFo z%e6M)-_a_!kL+9tf~jIo@MEo%SZ@$d#*^-ZhY{F^Fp(d(N6|bS-+(iRLj;SXtH!8h zPg1YVol(hcq8uJQz*q`Z8wn_&dRAf#|KhumsE=UDq4y%@Un{F>3;Y%jJ3XtZuWVi( z?0t%rpQP=CJ(*8iXL~McxwHS`b)Ib+$6GkuUNN<&^a`G9w715o7^@nev80% zVXcvJH}3Z$etzN|S2(V-!IuOjSFC3AD>tMbgGiPrk);evd73VLRq^c-1%ebo&(zw!3;SV%=P|qwH%VEaW>z zQ7y)d=;%MypddjiAvy8+1UI{ViA)c}$;|*b$_M%xy-NH_V|Dh%(uG?)(c-S}$QZFl zE)hq}8tGS!WI5$%hz-cs!enDmkT^hX0hck$#-+JMUd&@0)u^1_3#3t)>Qx5nvy2H? zs8B9Q<@JF$;CdK zeVU9gGN_G^C=42|KKW4k-rx|~r%jS0C{t=h^-|oQakQdG16jTem|qJ=lf#TerHgMM z(e63#Vh1Y6Y;V1(QFfLFx>i}(xzp_Ni0BHK8lox`==`(DLy zp*(xb9ZYKk)CRxD=ms>25gQ%DU0K=sXD{C4aHz2+smKs#ulELXIR0ier=yLG=5M)c zF}C_l&cun%92xhmY$9c;rcOMfGPFzDV72+FO`%t3_LgL8KkSZnzHVd$1&46SNs5RH zfL=a2N?r|zjCQ%72qE;M%#v|FiPyLo*)sD1>lyOFT_{W2SaDS~Gv6H<`9l$F2T(;H z;3@239->0|Hh-K-@u#Jxi`a!|tEM#D_QK7|(3CY1Az5gGT6LGLBHZj9n^vd44t2lx z4UBI4a7`^9X4#UI_p6@bXugP^_mO9ZKlKRa@@nl2h33O`9CJj>dwaO0+MGaCpPNL+ zgvgX)0uaM6J)vsz&wQ`%vCwQiv_hH zM4m1yEbYN~a5;BzAJA8|i?TjrBrYUJuTH*h^JfF}B`MmVr>_~A64V(x_BSGyksW%r zZw@cI1V!lv27~RNEW`*!1!$&8$8WLMmrH9D{!qCYJCs(!Bf{le4=xYi2E~Yq!wsm6 zV2=j%JG*Cr7<+Mzn?b4?M0ZIr%?JcVWG)Br+Mw!FCNA_XUHmH&gb#<t-c>QQ zLq8rM7k!!z_u%B$zaB0)su`r>=B?8cl`VSoTJWLHQzM#`9d$L%w!5t}G4|Y93|P=L zvXblxPN|{?F@&G^zKWN{NIxeLoi6_<2wYJspgorP?xmgC?I_+nu+k5NtMqvgi?{?P z>XAF{dv31n(zGQW8m%-@GPkG3VG)Hu3FD~F*88-sGUKN^2Qz($@8h$ZoG6Yd#44+0 zKXl7;gRXblxkz(lp$DVB2MeXIj?)7FdVvJEy|9iW=r(9}B>hE@| zVaY)F7JYT6^`$N}c2KR9G9Z{kghu%w+|{+MMe-bkVBg}-il?QQY;4vIYODrGix}}S zEQheF)Aq6%q~=|iJCtG}3b&a#gf-rM@s+)l8?hfI+>MhoB@F^ehTX*w>UUFMcLXam{+@X%wapi8W%eU~uK!4Zc#SW#>_3ka8$ zLt7l!gqoE)Hi?@iIWkZ`WM)lL5Dl5nlCuXO&Am-hZ3NZ&5E6R<$~$8BvVyEi8D07C zsdQ^`We)#DsMrmwcCuXpk$rE7RSAZ{(%Iu# z^QuZ3)R4mL7|MB?8f%g?HGj##u)!y49XfAqjwz5BuHUiItnRCW$kC` z!PqWu7`fT(Q>ah0O3RLZzGCjAQBVuA>NtvU!KBCRSwXff#!X@Y$r8GR28li2MNHSz zMaNUeV#}tv(JhOAAzS$+mbVHuxzc<%ltQ9+Jd3MEJJ5Rq-nai%szo$1#3>h@ah1G=u&Jh=Oi>6#iFI~Nah5uY*CE-XhA9VU)w2J;ruU4Q@= zorB9fXBE;>-bJ2?`F-?zyG!(?cs!GM)c&1P)4%ti$bQz%A8DsE%xMa8?*J;1#ZHY{ z=AUNVX!!WKtN5T69Q)qNBsB0>^FVg~995loBBUjCOY$cc{gbOJjSGAVo0&WskcB&SN08I)B;~Et8PQV$d z-~#D09wv->)N%lL@wM~rlHLXkca&8tF3A*?#Am$Z8dNP%?B~#Jl6EGt=<)d!c9&nw zJsyTtpU?i*#T20JDYX^(h-G+Gkpg}Bza=%*%K>@PGmsJc>9GXx0N(t8l_sV6Cd$2% z-=TAM3DcNQqps$S@iCCK%Z*|ZP%1q`Bx6)dP`{y`saIxpLK@b?+7-d3A0#Rv9+T^#@KZ_7|g46ccIz|xC#u3n+l)N)BZGtrUGOZ zrhTV&(uRW7Xnf!|i${uxkk5%JsN&ygk?t5dk-&$Ql26H6@`)v3u&-JO+TPKSXqwSx z40rTA10)T9KCd-&*>U`Wz?e4sVDVMo<5M>pU#--cV4jEPAq+VNCvYtD6Nav6xevNq&XIy~NhiDl`;=teATUefH2*t_P9DDB!1rkq0#d0p6aJ57@O*srB70Nmp~-fPT2{w>D4J&Xf=kJU=P) z8<|i%GT(41A5DBkaXE*%XQ%Y(W{w3~oR|`xYxNXzJ>L6}DelzCe6h3~(+(cVhC(8m zE{aC8u&n|y5yQcDo8Vw?f@0fe==qWLp96)zOsWJU=A=Q&019?{vM7hpGp&!q35yiJ zfY2RW9Zh4tjm2mwKV$lp1K`=JpZ=uiB!%tmcP^q2W|N~i3TuJ!nE4w0H9{&_(9%}{ zbF}(mM!lYYtWVMi`X;MG>?p4OF;uD+DH?s6e+_>G zQm;9_Vjt9IUq}P{u8O8cx%Ejf8;86;vu`p0`?Wq;O-r6wRaSNVVvbJ)bZq9U#l?%4 z%eg$QSOoF;+z5E}Rv0vt{9(8Q(ed!X1*pI{`e%PnVw-W_Ch8W!W8v>i0D86}MzVW? 
z{qZmdyC7~ndipD7e$0y$IBeGofM^KLfi97_Tf=ChzcwqQ!K+FU#Hy36at^#sB*goH>v7yJzTQy_d z(tT}d&Jw0W9Oy|om>=;gc8Oyi$+J*s_i1b?Kx4epX}L6FvydYE`qZ)bUVab-Jt?`` zL(#QqR&==&&>?|*rwo5eBWZv+Jkc;gC+C3b8BR$ zov7kF#wg+%%AY>HC&2hOy?j4*tY;&FV>FeAJT{o!IOY7B+!eRJO_j-yCY zsJ$_ov0ATLhPJxR&yVsQWq9h^nH#VeUqO+9OO+?dWb$Qee&a;yGZW4dz0N>ubIhS} zz8Y3C)$mUs=vRJiLQBGX=<>Qlaq|AtGPH;HO7lfZH6vVNIc9&Zr0fTpLt4r{t4FXK+KgUD|P3f-wL1qz1FI1k(<|PC-CT`$4j4Yy(Np_qvEfD~9yc(vN)X z1?06r`8YYGPYZQqRi?yX6jgkcgR$%jv0?jIiu&X`M$1{!pT=wc4J)`Mv3mx40YwR-g~UBlI>#A?bY z-TlJjo*GA*_mx@o8QpFPiXl2lLYT@mB}~)L{BfoKsLD=WoZp(-`Z2{)9sG9%+X=*{D*p%D`OZfE}QlO}B-N(52kBbK_Wh_`Lq?zF!J_uVway`_A0(+ zsuHM^kOp4W(D#SZ-e@iKl}yniJwl_3Qa9_=u0f(b%~^FO)bvEu_sTD@VV_N1`{Cym zG6Nu7&PVLlsFwP3+$)zBEEmt^fP~SD7%$N3{j_ng#Bn^(mPE4q(=2EVRKw~Np6uml zKgQvKBmcx98u9$!y8-@R4|0J5c#J2ws9`_fFS%{KIcXeGaZx!r;tz5-ODFO{2^wVb z%?c?(^FYTD?2!G*f!@cN!PQ<$$iu&`zG~wHqFy!6tZ-qGq;OYD)lTFB| z$gespiIB(f`$W_SZ)Xa9)Sk?QFnv&Mp* z{6uRBnc&S0)F@5`Ony4;9{rdxEWRH?-ZyEt58y~>pkUE&loU+)0JbW{_-fS}5EI*k zqAp)7)6MabBK8m6fiQ+~s6f}g<12ERuI7W*q#Cn)IQPLLJyk5S%0!}2y0kPAFYe@M z2Cwp$A_ru$iDU}%|A())3aYE?wzWfmU;#o1?vUUXG`PD4hv4q+vIri6TX1)GXJJ7S z+!yZdzHt7@`+mDl?Z3`HV7mj=qB_@RDwKkU-v)R_7Qb+XH-W`VBL7^$3RKSFi($Xu$vO# z_fDf7vP<_p80&7tvy1+6ujEp-58G5djvzM-6L4QT&NXjWeV=d5N2%B} zit>%pXn3;Fl4&`%((E4`Y8DJynu2l}No}!(XNR__%EqZ9hk>~kgqAzS z9M_uHwR`h;3wqRM;bMjHQ3V7QR!0_-=F}F+!DmLggHh*M3-s>?zVN?;1@ay_UC;mB z1vy6X5_l2Ul}wFY{S{|Hr~&sXyV!TpzI#3VFp1CS_0sqgHbl&WNDASsWXOhla*Dwd zBEDW|TiqEIr1lPBu*bhBHI$>#t=x-=a#L)lHJH4&E*XWIs-wqaum#PednS+G-`w%u zhV!1^4~={Rh)ZOT@<03Y-|b?CS@QrRpS@am?gncIysn;QAOSjKWOX681_f%umKfmKAy^?wIR$qV+~B@h#0Nt#*3fus|G;C{3;p8 zx{%fkuNs5c8HIc@2!Ec>t={d3r{v=Ydp~#uPS$hz1ZwI>!|Jvyb8SKT=Lhz$(~tdb zrvg&~2}lyjueI2wLnZ5$1Q9L6@{Oi^knb$(tZ?+4g_yJWn^ZW@Fh6E44q;45Q!oAf zp>a(irVJj7A&A)t;aOIg+mmvhkU~9_I6W%+{kp_&W8i%6;Q~J>jq(QvP%H5(KBk35>-Euo9_W?81qE*H_9h1PjFk-&IXhK!Y;ac&dO{q zaD6Y=Uy%Ld=V_19b8eHHclc8sjn2OlN%BGm0$)Fo>J1X>hyf|X*V?5|5mVbVym173 zQxSPc@GNz(q|#jI)bBD}=V{m^shbRIb1xK=n`XB9^Nud99I zZtY28A@eP2TDPnoYa^!)#R6|&K=Y3rT8Y8*EE#Y6<@x&OZDYw~yV9w@vA z*wU*Irgf?hGEna}2)Tkl{hkFdk9E>l3x5vK-~?b2Yh~Oy$v4Gx9i*}b*(U(5qJ;S5 zyCXug`)Gfr#(pL<0^a^OSVjUr=0l;*pU^4)w)B&iNQfhfv_)6WR)cZ^E4F1irA>A8 z*^m>H5-{Y?lh*k0Di5m;Qt`$PRNt?Wj zW@vHq`7!e4kqvTAXmj;?U;6@^{>6{DRcr8>tH4dY{I9AB+)n~Z4fjvNe`1Ai8S(it zOh+_7w%RoRm2IH#wcT`tyOfR`OAZS5Saoe|aQM_F>f{4&iU51249WcN%nqog3VHfw zYjD48En`p7^9KOcy~KANX=D|HHT7FuYN70YUU**US$An=hm?;|$Qzo(0D5j4$K)V` z^;21zHMxBG3af9j^YR^icAeXTtXGl3r4*vcMY3Km#RGq@iaw$KSln@gEWFa z$ziN)n{=m0ZG=6~Exu4QnGoeK5`w?irCq!Ke~m&wqXkxG*1jXGcNt^>hJTK!0HR0cq|6rk_Sa*ak~Tk+ zit_>m-x4QXy2W$-8Y%cAuP-yPKtfWvYu)_69qHAqe;7RKM95Noco7za_fTKDDI{@} zTAk9guNsh9nsi(enSLOcbP3AZaO)N&kv%M5sroe5d@k^Vb3^)kZ_#OK7<>Q}Ed;>U zKAB4J6E-R?uXgiy!yH;=q`Gx<#F*4fP$aionCcBFp#E06uRKbb`fEXTUr+&*C2l8A zg6}#rs)wWph6HF~)E1e^B*o}n>K1WN19cdyp7gUrbCE`CpAn@u^UYR`J8a5v8^wB= z#}$F1UP6fbLp8re9fYw*Tv^G+xUjwfAf)H90aYu}R`%}50$ir6H) zy&?G6zq#e|qB%!zGFlvsCMtq1be;D?Fgd&Z7vW>O0m)xJVuWiUq0Foh3plH|5$Vig z@AwK9G~aR~{94LNHTaUGZy=mDVFEL-M(Sg_{eccV@W=4*J)<@zE^yl>Wy zO@`k1&z3cZQPa8AIZRa90_0|sYNRpejDbwiAto;0_fFLZ*?U+m0 zr>)0bgpgw^;(aO#5Xhh(*6NPdkp6L?{t5lmoaH)Jd^aw)L>@B3k5UB`8F+QjC8yT< z>RiC8GBGOfER0EOIyQTzY^=+`d8w?3*A9?c;z7dAUzIPk)_x&inq1Qu4*5Luxdj?~ zz-h3YvBtmeHTbXRRbTucE2|wh9wU<0o-xoz;x>TKI8X^==~Y2R;Y0lbpdzAa4!~8) z?p&h0_P+HL`K&#A3}aH@vm{18sMmJ?TMQ{hAkUuF4$mneO|OD1bH~FzM+%Eg1)zrO z6SqBaYFdY>trtq&x}7+tC`%8FVFE%!|upBZ0Y zBK;Z|1ET5G{z4W;YwKJ7Sur*e%H?K-j*+Ri1N(n8vRL%%{;3i=RV3QjEFj$;oGq8X zLkSGh1JUAca>{jgKNMvJbJpj?S7lkV*s^tXUK{xbk!+(bN$R=k{yd*7_G*+Dl9u8& zghvEz^lvKHQbyF@>|!6=yvE1AU5b1u1s2fWtQS@YAerRG9i|^hMv_YV)p>W_?!1AI 
z>bb!SodN(pSLWfQ9kL%j;?6k${et;rQ5mhVMxE0qlPxx}@QySIZ|%3JL=%~4JP*wI zcpl-uxBn#*iM?QPu;2$;Qprny((lWzRGtx>|NfaN?|cOTL_px$SNj6!u=rn^l2$V| z@sw|bpGCh)eAAikrsZkjPS0eG^@MtVEMgMgq%@MSp!9aUl$PzOH8sYesdg~6(zf7t zIkb2qY(90mNL9@8{71%+?(HL}m$H)F02CE_#Wws%zsv}F5qCq#W5ok5v% z<1@f%8!ZIh!?%a5cTwxzL(=crmN_}1C_m=YagvkK&zAyoqTQn0$hutQTkXkzj@nWm zhd}%s5Sw>ATmSxlTDAWj>b8$2f0>SjL57_XAq_U#!Bmnk3NGilipci zdU&N7roYO+<*I3Hc3Ozk%{)ku%zOdx-pq&J{-SY@+X-rJ2Pihq`^8i;;CnH`XmLhD z7g~129hKp+X&qH9P_qb4J}FiGdsZTGP=4u8s%YG>-j)CKUjdQs784c4r)&<8d~{)P z16_O_?{NWBjU)GGK#i-j{cWnAAV~#yEe>2G8$7eUpt_;Nwxh>wX8u97Iu3mXj;J~F zaaNp)nX_>DZgrK9cW|k-60_3^6@`G4cc5z4ot56;6?>6y&IJ0O!WVId@9wSI9o}d9 z$TwFf;Q1VeTsMi4s}1W?Lbb;-E@|aPYSyWVZH721SW`G?qO=$ zEPC2ZmPkguzO5}b{HS1vOBs;;h>qaiJa0&E(htey5-)L209G&qu;^;5{#q`ZFm>o) zB)@-KA`5wjJa5+$d2j`NvhATrGC*ZIlxu3;L)7F3C{7!}atg|gx&iS>FLa35pmVA_ zL>*6Vpl>-W8VW?`WCYMT*^*o-DeU098rx1)|!Y>srQ^=c} zg@DX0ejsOVd>h8pQ8Vw1^%eIB2Qd|*<5ge!lUEjm^C6=4P7+VeT^$SHYux6``7b9_ z*hTm56sBI>4@G`Utc0x}?Y`LC2S>n+;JO*5!PU5@b3ZpGzdydg*8A5L)0clKIE!S2 zQvNi0(DWygS!=t#n*qJW9&=9cq9xy@edNxR#T)6$PjN*_%Ve~g=ev(C-X zPl2_6VO*A_luf_x%_5h(6J!DbUD~Rr!HCLhmVHZE%rylNo0cxpGB*aK+1Rx^( zN%9tR^d2yu6kBV<^R;`Xvqb)jP9)`iYBCCVND2r0zrNjFec-Rnttj`w&QJh4DBM*q zUm$Ded@yWN1URc~c7XMdUIMirf+2RZN_*Q6n38O6%ab}1lWl5ETSA;_ecOB*Di(mp zhSaeRjq1MLmfpO^0oFxJVwZD?*<>ed^uIm0ort1z|4di#FeeH=YG6_65Nc?69bGk= zcdo7lO7-pY9`<4^7+0~{EQ#tC!Ex%e>3;C$f7(?3N%G4y+1w3Snx)gTE)K|fa=>V~5G?t%Vro{534GtRYUGSW{p4kxBZgOtUrS?wW+whsriGDZb-;fNqyp z=snl%W?a#6cF4qy)MOCyCa(oE=DA$mdx6r@2Wh`BM~Nw}=%0PawfPs=j84Ngu6_1D z_lU|QLj+%x1lCKyf2k4H2+*ml3$tqDvP^`rQGfPGAWT)O7_b0<$EaFEA3G?*heYOy zY0-?Rzv`jLC?0;MhX1KVU#-1oD*cYzE!MP`TA~W<@60%UhjLjXre)TUTdwO(CrLf$ z@u|*z57|tu{6LZxl%l`(F^@EqW0f*gYK1m+qk$ZkPN?=YVs@ywPjjvi44~;YHGMQRM#N;EYStn3JfEe{9 z0H6mXvq*N1dnNx^tMFz|yazdvEqmab9Hu}_<>P%vK7I9P2LSAmKS#)0#q_BUT4)r- z*Eipkr?lM$gy_swK=|pd`!s62xDts-v0A-Tb3$j_)2d67ISsK%Hp@KJHLnnWt5D=s zc7yk-Y)ubm9Reon23(_pF$pjJm;!4$C<*N!ZCjrTg0o5ELu$E2s{YbmH@cx2 zBke36*?T!2;-RH*n!ovgfxwI0;8ydv6v#5{^nS@HGA19+Xq*Lk?=?FWA_U@}l?PV$ zu7!}u-?uN@`TP!N%B&y?TjeXBCyA?0WE0`%;_etdG`Dif%zr5(2E=z1sj@+I1YzCi zcPlOmHaQtSA+thxJ6x=kV@Hik!X?crnpx(l5adSQECG^F9A-mF^@7(;A4>eK^^E~o zqG-VLf1iG2NxY;P!<_-$Aet07;mi`edP!9>@`EVI1QsDTO}N=(%ESm3W0_4}I`PjcoYfOU^=K*7<@Z<~8 zvddr6R5-lPa1IFgOR6`tg^zIU9FoG55?NdbQi0SYZl@DY%-hk=&1{8=m0Zw0PVOWT z20oa+JIHhw9?QKEBSidpV~0TGhqjzX=~k_uOa85B;%stBD!$i{cb{&)X`eqW5TlM{ zT;-p4*hr`O^GiLe4$4C}G_EG)9Zf0K&FTOG=_)GGLJ^`16+A$B`*6CUprXO1cI(aI zS9~?iz!&q``O9iM686R*a?yqebi%;pAVrks3O1;Gi%#uZk6kgs+80`dB!}ZCd&L#& z`{yHITX%~F?+6`zZMrv)k@u^5)+vOgS;A0pCm*qe?L^0~FBR3J6hZcgj#`=#hHEzs zwU@wST5N1By{ySuowY-<3HHrPe4~Yk5m17=JBV`pZjjdB?^7T;*TCAa?5hCWoIJbs z+g~*oGWnD-5^uwDn_ztYDIasj>#-UfIroddkt{b1yQt7E3rs+W{%ykko*%07r#l=V zens$Kr^-O`>6yZFV!-m0{kc;#i$&c z7d0R&^&9N=VB%07L1hP}WHAX;77f5(X7&6(l&%{Dpip=U5Ze-n3tE#N{VCA$#j<;- zHC>h7KZ-B%|luJ!lmV}1*U&IW2E8%e8#7=zix5o2L&Ikj5Je675nBME}cHGl76*7o{f-v zR}Ak#;KaEO_wb#N!)E|jc)bLSf+XDKl6tW zk{%hRCho_(nD#wG>Oc5=tu9I>m$M#W4W2N{Wk!MPr#(OMT(JX2IOdAw)47G9-40_0 zeod-yFid-f6qAP@GV~WoCH9nX%Hd)C@@e~)BkEWk14PogM&dgY^$=~FvY`*dSJTFX z7M3+Ez#y^ZsYmKs9qgOVb9W8oM}ksTzbRGP!cE%u*-Z32xrPHxSvln_6^9VFX?K!w z-&D18K51V!J z_Na_5f%KTY3OsmigFS1>mlJfA3&03W@pT{N@KzL`LGiA7^DQ)p;7nPaA43cXnYf(H zk6)!K|EkEeB{ekySmd7VJLX{OA__-=LB9EhlO9u&y7W%&DcBVu3B;h0yDJ6K=_aI& z%ohS@tg=%CM|8?|zOO8LwFmc%601tkcm%lQ{(fl0%;vb(`RMJBlw8pzKEO0<{UzRP z@+5XVDEU`VHbvwYVZ1~i3jCdyVzlGwjzBJnEnuKebv6pm*;R`U={(B;K(f6dZ6E%x z#>DTiz?tKI)s-(-7PH9W>KFzaw^Z5wM2SksE6j!G1E7kE0P0`y2-$MzE8oW}63?w^ z&Uh3GcSO#A3f6xdidF+tJg2jdPa$j{Kg%`Wps-gsnB2awuvhu=V8aekaF}rRh35uN 
ze+=Oc=Pji6Xn~b~-Pz9f{F8{IoEAyX}F*_M~<4&aUW2|ia3*y3$ z!xFf)uY48J1?#MQ6baseA&Ek}_8URm%tt}ysjhRchF#}ADX0RJeRUM#cho>6oXHz4 zYvhxrZGK5qBw*gzzrjSRZ#MCogoz8AnEClavLBr12*U$Fh`Rg=^D^1?nuV;JtX~v< z&)oU5@|&=5*UdKY-1@8v{3iGE{#0OcxlriL$=5xtZ=#YARW{{~GMs9r*Wtdwb)wsl z=9Qv2kp;{vKRF<>yUFp%N=;R)8m~GCb#U^0|HyFO&78L=Zq^kacKz6aK%dkGt%bxG z5c{hKXg-ncHE@>2N@-OtJx_EF(^KB6X=Yrgoa+1Af!#h+PYB58_0y?ej78tn0x zW{qH;e{@QHd>cS6p*Q{l3`+TqC(& zDY&Px7p<`OT>6SV@OftH#dk6u_9PsDHe1|r(C1E`Q^-9J^kY2qb>g{S9~pK^_%FtmPp1bF z5ej9;0pJPeaoWph)uPb8ztQbu^(eI^?0a?PIXbGz#Iv)=r-0@n-a_5vEgxGmG1+o+ z3O^V8ZQUG^9X$V@Kz`$i5iRK|#>b=@T#p-}jir*{mL++Sx0HsRZg^^=z<7YCt)G=0xUF|0v~T|#quP^U<g~9Me?rKgtp>TgHm0eoTmavkq12!Xgd)o+A#oR-L zO?;2mzexye7cF|yhp(ToaA#kg1g~KHJRHoVV3xABznuFpH~=_tz1~I{fssD|n>A?S zI#xFwv^&Ul>-d)2eex0HrzH7cK=LqRQE9 z&}?J4b${9FPGrERWNKx$Rvw~;tRoD2zk1meS2I|LFv))+m6CkqbxkQ9yN32r*4pT_ zJJX#Jq(PP0qSz^jEuRsR>PfSH*0Rwv`)ci8EN1Q%P1OrT)DVUX3rLbKI$B6Pcs(M^ zK~vQD_pVw;htj0Ip%HCW^x&E{RS`(GFo!L&_spoeG5=DIIXP}l;v-}*%*G9j+Zhw+ zw(s@r+0)Iv9ne-oRH~xBQ_kqv31mUh`gT4CpQs)2Sy&qJlp01oAnN3PO{1&cM=BX3 zW+71pH&lCTvP}8qsDq{{ii2h+H6%D_t^4!B-~>+kEydRI#X8&w=6FsS10Kn_BN2Qt-m26%Bt57pq~rW zk2qahI_wyJpw#-pF+($Q8$v|Pvd>r&gOJ0m2RT2!PF8T-5`WZ;IVt?rG6~Rn*SU#D zmbzq6OA+*<_1Q#LO`p3D3e@QHd}B)bkKlo)P2wYylYAYOKTUQ321Om%HVtCMC^v3i z^UCKMpq3Q(63FZ`1n!f^HXhn~qRV6Ek@WO9ht49i1K2i`AisE^KoXfrjY+lpK)3NO zs&)~w#HQ>P#Q`3S`-~W_ zs$e6q+O_ZM+b`XX;mR6y=?+rn=J8<1_7AZV`P9xl#8X^>2-@iLxD$J>FIETRu20}S z!VvKnF?+o^r%DI%O(akB*0qK#$9!g*ezhB=wCylexqbgKS*c-b4d_{=q|WsL@+`jkp4A43kGB|u-i2>gZv&?m)qx2NphWq`&R~d5CRBzneOtnf z<>jGS7lz_kW%_aUDsOXdt)bzQSWeL-8$S=p#&%JOnKgR?&XsJwR9S`%vw=s#*GyJ^ z!9ellgxqc$^a)<8BYS|scXfg3rHi@&iKWpdBYWO7np>q_3K80eo+K$JyF5#&>Vj~G zybExzR%h9p^(Zx2lv86(a|i`jXh-QOAG*$x7?`Wgr-~7?)Km^{9+f9{Dw{vF^nZBs zde2y9xk{_)mzqs@?}oYSOdx=1Y06zD)cGcNPgffVJm^U-u~bURgV)MUUyZC7DKvG> zX8J0(k}J<$tDfQcO;m9*6FU1O+)3ndA)|?Dj@z;S`c@;55#!NEnbVt^j2~)V#UpJl zQ^xQ(_B*oyZ?)IVE)eKB{b=``y9bzhSs@4ig*cTri$is|nRb0Hl6^iE)|}m-MEtQi z?}i%KukR#Y)-~FdQcW$sNxDOiWj&tEm+$`4sY5p`BP3IG7NLx}>K^J)Lk5X9R8`SO z@%De6fZ<@>fJ`Qll75RlXtQ8puHDQvK*(82?OaIx7HC%0T z%ffA6k#xruyN*}#L^rZ}fX#==2%xQ3I$7eeWuBjbw~f2I)>uHAfU*zvadLRGY`G0) zHB(ke-O{$|P;p-H@3o8}9)TZ+`$l5mbYDmzmxmpCvVI4Wt8Ld&6nvp#AMcVa!!RnYkLckZDq;|q=1iH!tow3h)Eu!K-`F`xwtwUY9;P!zvfkI0^Mt@j|-BA&j9Q5 z>!nrIF-U>Q8KenB<&EHPp9$So>p zFjP$L7(^hSpu}Km=8ZB=29G$ivPAqXZG)x^(1^fiILHEvhFnc(d%tA;L{3Q=@FS{sX=O&6m~s@ADqjKko0&6>bRx1{LMJ_ z1)VCF(u&9(FDKd+lA<=)ZT6RE@Rj_S{c7ICtroP@2kD`6@Xg(N zSm)ez#cMt`VyNpDG76D^qqOsvd{o|2Qpk4oIRA}_phgRBR|PRTm>X-E3Cd_cP@hSd zLSs7TSKT4QyESdrN#f`9#pTVr@w7Wy8U!2P^0&cD-2N*iEncl}np+rzkMB)i!q5}F zG+I}Kul^S?MDWdR#8^52`q_}yagay*%IZqfATH9%t27$pJ}^L6@PYZZz&6Fa#2DFq zJ0b`B#RNqR*+W;A+9Kk5k`!rVUkR->h^G z&HFq6r?j)0_v@MKg>z{uDffXwwdy@{GFGmys*xTwwf1=}Cu_Kg2HN?Be1d*&*%(6V&-vc>) z_{5ep@}tzdhCS&Xgcg&*d51!GKdOH;x8B^*spiwZWfsR!w+{g*2N8HT^U{F^O^5@2 z#;;4*h;xzO5QA=wh^f@rwN#LgM03y?^{i+G>Em{nymx9~1Z| zz1_E^pV>Z=dWNvB)(t;`w)45Qu%oe3W+4JGJ~0-EOrK^pteQeA+PkKB-@&A&Pv2OB z#sZQ)>0C@Eb6(}Z>7tF1OIm4YHf)KZcnU_y|L9aG(z}uS$JA1@u#65wohTBxb%!1x zTU{DUt_bN~vqt|7?!arwyrOj?u}0gghff8yjnudcZ7JdxN4*G!74(1+EQ@5aBD7?Q zy@L^nF}n*|J!5=UZJ~a~zn`4&439hgs6@DzRtHfr9uoC_b&A85K2p-IN_{*D&1#zQxvj; zBmW2wKpx6D0!&aBpHTf9)cC3a&rwjYUXWq59n9@C6WE!1I@$R;9HU+GAt71t?fH!Y z!|Ia9CZ$6?H5TC|RTL@c9C}SMSvMZbfGH=)$aR*usnqV6Rz#i31a8em=QszytFvK; zPqYxo+ed{@SVsgwXV}E3dYprv`Hz>u207}DCIIb)^TV&_Wxvf);cAFXx1?&I937?b@j!uzC3cpnevIj^H+QQA0xQNlCJHvMZFGa|u%BaWtgs zS<}Lg-qxC6LW+GqMdtqb!ReDlgnZeK6h*^Yn|CNf3f}6xwEl}y!xBr0^LZ+@0EWfI1RAlRAO2@>;QO?3$BrKRFyD6=t9|2Hr+-Z zhX>2GEan}Cnq6wFQrFElH9SSn;bYfNT<-$UaH)lrm?AU|;slUWt2^ 
zX3^Q3n1Bw!WMEFpM2YD{H3OcRpGUah9L*S?K`&!ppIBOJ|Fp%>eLC_`uIE{bR;d7Y zD78i=MfpmI8s8lr3!GR?@6YB$)c3m)!lh19Q8iDxf7%+g%-c0i9TrG_bOnsEB3x;% zD^cm!Wv{%)*oU^Kp}(j{RLpzl!?pvQpJC%v4B<$2>=&S$t?35FpbTB`Qz-!gr5+%X z%j~l>@X|nG5_i~C{|UPM=b=H5@`AO(MkF=ZsLEsHo|Jj9$vEMrSq+Mla`C7k6_x>9$8wbe05=%Z2`9t?5;G>GeR1`mj2? z?Yt1_ZDwOT_vO)+K5-HD&n}9vK!2QurMVJ6Q)(c1j;bI#|NHEvVow|^dS`oMrzE7#K$YUYith9b(Es$nUf>oXpWFA#e^cY@cy~VZPA?i&`hX&@G0GDNUY#&B4Yo@#6j30UkO6pQ_DG9juZ1cqan@9(3W;OYuDu(w7Dk-5y3U zB7@SG(>+@xwOl_+;R2nT?1vrt$=(j^ufd0C!J{zN4Qa3Oj~raCE5m?{1L|$^uhQZ# zC#LR&OAz~Y&KUrTBmssuZtp(?b^j2s*c)D|&UycO<;x5?fj!&aB#u+;uXV?S)}VR0 z36CF8=6Z51#8wLsg5P^uoU8}5)bgz4ENU0+6tbU6DpV~VAazua->0ku4vu*Tm&KljNLOAj z;5uHNX|;Ij^WNDeJ@X-u@+E$7d`ufZh*aH@4XWNgU1?;V2Ei1#6tM85_wKAao>bL) zI9nSlE~QgH7}QFKmJ0p~H~-0#m!+I;ZM)CisxCCuPq62^N+?75$2Iw?iGqZ8;C-}%~mGYop z`be+c1i=awaiQ@yfT6xY?u}-~-?D?R=G?xuQ^$?!=%Zls?Uh(%g(*}_MPxNoR>vfv zP0h42LyJ4G(0S3WTo6C9UZbYnI(b2q=&8(GZ?>y~RAAA2B;=D?N4kQMw)%Q)5VO%o zje^Kfe$4~Lq6~HsFmu~rtIL-!Pe#9qclR~c_)heglh!$%f*Z<{4Fn<>#~+jYoAR^H zLVVw-KG#{<1gEB+-_N_0Nv;k);)@#!Ub)uhowx`Qp3_*Km+wg3Gb3SqzyuD~ODc^s zE$Y892J~xrEX>Yj_MKF?ETxwd>G;}B!M(jDc7C3_!p|nR*rL*~zt~1#B)5hu-QBp< zZ=o_TlU64@J^~8WUsONPY}^B?q8K`LBXISURNGoXeqZBp`r3xy_%mzKY4-X;^XxLf z^l-B3o9*x215=cAXrYNb^8&$hG(dWS!VRQBs8Qj6&Xp1(W3~G}$yilC*D+e`ouB|S zs%D4N0P-(jM%8Ofy4S{hx}8z~3Jai3jz1i>!Q6JU>Lu)rR!OSEIAJi3uf3yoJCG!u za^%Ntmcx(P2(wrv1L@R5fw70+N{h~MXJmsMlx0HL_>U@j2mqSt9q8&`H+dW!}yP8T0^W&qUhx2UgS+ki6~+g z8v6;R+g$H|mPkyvFLF@&RIJzJU6K?`)v{!(;H*A#LFxt2GzJ=ZE&5gf z<&$X+P*89@oSX1t&?F0|)cV$6rQ*ScghhM~>qLQtue5W8?*V5*appW0Dz5T4 zsr6pgklDTRmj$g(b=W?$SY3E*m+2&0$u@4AP?#>K|6GfdqS&-{VZEo$RBsR%RLZtW zQpg6LAji96oyKu2!uCx|UrN^F7`fy%93VH5mk#X?px3nVWBqRUB6#WuY(}dsc$NKm^XHOY>??hL^fI3ZQwyT5XAtBo*w`vDc#=O=p3&cU2%-i zkk%R6{z%L2j*&LYDf1l}+qxbRt4ls2D)$9k{32{C$nFEfVtOm-8nUkz(!mmM)6wb! 
zqD=ia#~E*5wV%VEfs$@ushf4yNPMbp&5qIqBbI;tX3}rZM}OxZ4LIM%%E%8sc~k=$ z^bdls|4%N;kdEtrhK&32-txQzLPkNh_Z572 z2#daC_7o=eZFKDscJOgRfsb>e*`|#&k2cmd%EN!&&!0$D^&h*SOvdOWRt)=p!%gu0 z{u=U(tQ-u_su*O0s~NZ;)0-ri3hIl9=eAxJ*5rXw9Z2T}eIxQI>_wFbii zvCRSFA)L&p9h^yPk>1^%OPZq1`oF4wlx^&COx0{0Okl?X3?m7JN;P_e1AU3 zA;OA><}i1n4%CFBij1funV%C_r*{_mrvT5TYx_CBK?EX~^jDXw>D1a>$Lw_Z>B~j?GLNYbb9O)ArWg)sW01QDrDZ9+l_meKELvRd-p5x?owSS z@upR%90K4eX8oo2PWEwsyUS1QbJrL9l&lD`hxn`6DTdQFZBOe;b5=Ew9CkiCOJtj1 zw;xhnxOKWiUoZi@W~W5pWv@*~lfVUw168wNi@J-D*K6+W3R%|=u#A}Vf$vZvvw>fo zj4*Zg%CBd~<$}DdFBU738U6ypc3TjFocWkSH$)lEtWb%moe-&yL5b)x1J?0dpou|-g;Rvqfs`3M$T@(p2Wv~J?Z!C%h`F5)I1{=W1m%gA7v?JR(ysr*B{9N zhT_YDcvTe|S@Q)p#!^lu{b4C09f1&|to4nBz{tcMq_c6hDA$m=5fQ(?Evvb~E38!w zzEF>X1TBtj4iqoMa0_`yGFYEu%p`tUO;+E3FFBB?{z`Y}JP>iU$8)RuD@At#)w5Qh zdWN;cIHo-=A)mQYXdVw-=E=2|FyD6b)FX$3kgof&mFh>$yz`F3Z>>jDoxIV({SJtv zYFC9EmxR`PMUISz2f$hQbiwICHXjEJDgdUvXLr#0Z>Iex52OJ5=NW<^-(>40e&*cA)6s3=j|y-vhl#$#@dxelV*j(4HDp=lee1B*M&boD3FrqMkOY`L-2`_Z_rEcFKAX_C zsCgJo)5FwN(~fp-HgWHzskSh0ojDQndJmy75u)MNI`}x9zi9rzol*~j0+RyZ5rWI( z(@PkG#@!>f^?z>AhjSr7`PQPQ{yC0!YUb&z)URecj-T0G)n2sYvwgcvs3&(7N-SqC z+6fiZ?|s>TH%L_)jD)KW2Ld5zwqu7&ZYg3~lJ5WeeF5LeH0qe31+D_N<19ooM zw?pk$OZ*VLd$O%K!@<*g0k&QAe*$f!Jpej-JJfvBD8j=Q6_A#e*NAa4K^!kxzug}D zG3&6v5_anQi%Mm;`}Sh<$>a7YoP{J{sDafBw%u<5UXcyXCu3@)Tq5zN<8a;yEM~ zu&g-7ZJ4vk@pMcYN+^V@sHoJ;IRVwzg1FeT)%vF3T@Ub}x`3k|q-F(pmm zj<1z!UfwV+n=b_O#D zhzmE8+|^1O(S&@nrf}Sq)dIX4({|X&t^%K4*x(cs_o#q%?Wrg zf8?M0HXt~#elhCwCjJEa8nB07$HU=Ox9x;-e1ChgJ`}4`{&y|_T}gq>BOICp2gkO} zd2T-x00s3iFYEJn=_W|szFD&6blo21o4br7PC8Y|indKOM>gxoZEX?4yuap|1g*jd zm+*izw|L;AcWwuV>89=5wiruo#hFvZYtDwBn5dkU__RYi^99GbKbz$reZx{yi^gpW+iUz&#|?b-TSg&0t(Y82cb-$} z?h!=&<$`+Z_gdf4CWivrX^1{>We1KiD5Mn=5_a%Rng2nPmi)&NzNK#AegvoZqD7!z z_B<3{oOxOUpPrdtzDuBYM`>Tq2J4QJ&JWvPU7OEh`*jtVX2-Tijtv3VMx{;s_ZNVY z!jSR{xflGR`$x*|7X3E;L)-Xl7x!=Uv-T(U9sF=^FA(j=ZJ3uoyjft~VzSIQGP^K6 zYrm0h-lSrkuF=%}$O`!8XMFcLY_s0ioy&6gF0}Ho>qmv70m{Kmf;ke(-WVW4MeWzi-f*}6~3g)`tX0s z1*fUN{4K^_GMo*V%fG{msN4Kmkp%1}8nEoa4{3wibb)a{QX|JcwdHPohyi<2S z7khh7bzkg*0>I(#JnWe9^rqV#K=8$*tQqu0YF%Spr6BaNIAE-LA)_mj>lSK4JD4h- zGwh9HTOz=p-M0UaKYMhG`yk9LYMpMC$=m!(=Lie&`GMI8teNTr_}f*O;!EHl*3d1a z(2KGJ%A~^Kkd8ZFnCen(jgy1MiJS96!2bQ$KnHpU4@um<$izt${F_qKQKAug53|1{0p(& zm5aL%SlX|a@t?2Gd&hl~q@MX#9~XcC^%LJ;q1LV#+{GKeh#&u2&F=5rQUJ8Woc*rr zLw4QKo4>mwZjTptE}!_7Cd+?Wa(40_6jD*`Y_1w`p()?~UJ>C?);xde@UMK*e_3*m zKA_5$l6JIL7~nwn9{)Rb6)JS*!&|@~{^vIQ@?XvpnhP==hOcLN02L3FU5(75Fd)rn z;6AGR{rbOfZhJx$`OYvC9VNf{M2zN9KCS;fB^c#6DE{c|zi@IdG*h_V2kKSwJ>2D{ z{0EE)P@Rh_Kl)$J<`-slrei-~OoLqVBmXw1|6)J??LPkhZcbw!FS@ub{-q1NYX*vg zHLw)MlQ!C}aiW-iH_mH|3jea1OKC1DKn;)Ve$K9Bz@J7w_7V=b1D}7<9q{eC1Ft{! 
zjQ~Q_U!IBrlfB4`gAhBR;XN&L<^O^vAt0xqdJr)5h{~fx&X)i~?po@0_m?}`<2{=&$|*3 z@s?LyZ#6~o#}B+TJ$gR<3!pWbb1z5t9wwlA{K~ifo5u#Koa2iXDWAia@IQW>zbE7d zC>{Q90{Y`RKt}Lv^_@A$0+Vq11L+Yqh|dEpye(MLI34suFET> zgWc|y?u20w{jx&k3I>C+y_!bQeRk7>%G@shetg@h6t|51` z;IZO*No^u%+WEY+;j<+yUCMupTmSDLLpn@>eb`L1_%C@;R+Z-q;G&Y+ zH6_{C!wzw=wkYj5@xVrH=bqXO0G?JG3z)4K&1nAK;)%mo*;fUIbL%E~;jX~Sv#FK}( zVIB)i%l5rVUy@Euqc*=Z8Vx4a`GHJ4Cu+BXBY_cCg4Msny zrrJO}B$I4?YzyY&)BYDJ;I~5`{1#;V=s_!7K#H9ISHgClCN#>9_^w^llm^q~RGC{0 zt%urHI+h%%-|#@sxA&^H{vIJRY9p8C=y5wE!<-yso*T5DJVkaYwVA4Y#1jwb0qPbd zcJw}6r-{NgO_e@&m6?gIhK19GH(qUNIHl8ex^+%h-86Tz!oe_Girc)d<;-;S^Tq)O zsC%g$Tn5U)T?zeJM)7xoUlhCqj1FwJisPS>xo~~C1kw2Y* z#;mLi%neFKaVgDzWhYim=WR{iE$LPl2aDWHu2%=ZjSGiM-WTy-^R27ZaRMf$gtLUZ zTs{XxZ)pUkp9Uz0z2lLH)%2Xp?kwONZ<>4rfb(BCjt_`kh*6h0(@G)j zVjF+uAJIKnpde*Lo4>|6?_R`gf_xfvs=pNg#nIkzI&_cc)TB(iE88TQrS*M%^ntt@)@>S$}#92tm@N~`r;1#j)b1`2G zDGxUv#U}^1{fK~9wEf48HlhbCDTFrQdjwlGu)IrTF4*^|MNAcN9EpjFtY3D6`S5p@ zQI?uT<>DoEw0Zkr?}XLN>L7Gm7aLjHuvLNZeHGI0K>pgY^7s687mz*-Td03zwL47L zJ@=Ma{X~LbH=>YBw{O4$Oou;MqA(W%cxgTBW@ALd?FeC!opGVKv@{tsts-O!ax4|v z-@&D;BzoQq4Qfj)eg7MisMo1p@(Alb?gVsC>5x*F-{f};`R-~b8=y-c#ei-IdJ-== zGDL&f2Dv+`S^%X3XW)XAd7&7ZCa97wOsuZqw|=DbniZFZxrV(=oC7)GlbvG9Jw}Fl z3LU&v7?Ts(W*vghtR|spxJ5glZQ5x$0_9;=VtdYtP;#!M##L5F#P3D5O^sWae)1;j zqHVoCY{NZ#q|Cok;=6+@3l+d{@#;J!If!3S?UHl)G}J9pDMCb&w;w0L{k%A>I)1|5 z1SYR{9YfX&rVi>1k$jTGmDcgh6|r+z?*jh z4H*IJQ%JA_sXojRUfofy6wlKvk|=kPHbh`O8shDxZG7@Xc&4Q&I*W0p&egH#=m@J0 zRoKpi9Psq2RD2nj%Wj027n*d#JTOo_m96*NG|IZGRgpF@KNhoGG@C>c0K|_fpt{HZ z4}0$&)nvBC4XZTij)*876;Y9*Gyx$f7LX=gItbF6^p+r?pddw&8X)vY37~XBKtw@m z=$!;adI=C9fe`o}yfbrW=Dm0HuJyftylc%rtgs;GdCorj?ETyQOP@a(;SG8nxNmaV zFH%aB;0Va_Tg6vUY%FSi%o+jy;q8*m8wu6c9zC}6Qcckir9E|H^$lM_YKa$0<6{kH=HAwa$9w*OXuzmtv_I^TMx@=o--+xYX{?hEo&_&4F$E0xh7 z>x#|lO)<$E87RF?1LJag*6vvxfMjd~*#h>-8G{7bSa|CJ$Uu9@Rr&=kJ{~O4moj4u zf0$DrW?z7=0B?3z_3NO^Q3Iluk_48HtO%zi4x{!t)cugm4Us1&}@U= zruV%}i0Q<$%#!G)d)=NUtJ=6{5yIC-<(W6x$(Bu*Neum#gIF`NiS^CDa{_(`Q<8Us znz{W|g;CVm0Ub|mdvDgnkLCga}EQ$2z9o_87{GE)>R41GWwnc0`x(FrzYK0;M_ zWaEsRRN9~4S8}!Pc3(l?ZTp2rzqZhZP201?$u+*pHKnt!t9i_q{$V^)1ds?f)#qH_ zkl4Y4t^-_zsFRG1#Pj2KDjC&<1A3-Yw7;pa==It|oO?)=m+TTfxF`V5EUByLkY9A5$8niM^+rZ}sKLq6nV)u*7{l zafmGdI_xU|^C@Ih0!o2YKYEst0Yvz|ZQL{6%y|iGB>t3bEUW_F&_=XNr6p|Euc^zD z6j+Ywc}qod5neO0R^u87Txza!uj$dOjIAfdtKDk~X!#s2XotkD#hPXvU^P{uOH0qA z^^Z?qtOLp7U-hFQAOLWw-Y*Uz?~QzFS+ApIJ)bOo?k#wr#bi{bwlZ`{eut=nvmQsL z*?rV~*I42}Ttz7J_zVsv*`#^}OI>)|CcYsx7w^B3<<0Jg-_#m?%ItY*sl0I1_U+@L zapuLlEQgS)+@f)!A(%BZKl0&ZgievYfy~tW`Stox&++{Pl+--D~#&RWfLUOckbcFhH2%i(x$zx##|j8b$GYv(4z^8_e3r$L{za)g0gv~EemUvPT!gmyAv8>{J=&fup;Cc`kOIc}iAm8`5zrezY@+p`t>gFn z*EhPW+68_ly~waJ*F}_MU)JHwQRkXYomUY$9IBqHVn=fhOCcttJ9IWv#sIK%PvJR7qLGl>|VGTL;f(YJL78A7+fD|1+d_jnxJZNheH-nnh~ z72=qVYOnkvWc+@&;k*VR)wbt;Zs_r|;4$3_seYOA%n>#-u*;=l;WFs_mRu)DF!u#4 zk_7bG;z@;Dq>rnyN;8J2ZO^%rif)YEVXS=Duf>*RpJI4JT|mwyvi-_@c1BjL>az!_ z#;*53NT65#mmLO9>u43hRHg<0XTniQWjl-XB5Gy$B8k~9-O$w41*5C|Iy-&JKzB8b z;zx3`hPv7U2+@K#1N!7KY0t&^@(LJjzx{r_m{YZDu?)uPQk%7v)sSv^51g9Hv!}I; zUDdhPiGoEb*`{EmCT2`X%2}o!>W%+B@RbSUxC-QBQB32jDICe&_ZN$qT-z782*Dp2Arg$9wF3(X`B}-q5U@ z6)`ip`;T%lkBFvGuR3$O=9m{r2E^E+0%bbfYxFV{Qcwi`ad_Wf7V>IN1;qs~5bxzKBCiah;z? 
zc%>;_bqzZ^A>Ci{R*#zzgERzpKcmx-&O>qf_wjX0mep9dom8{D6twWsTtXFK2MMVI z>(R@{oeUv#1$jNUu53rq6g#0hi@w;S54 zDNR_O&(-l$jZpUM0}yHy%cc;-2*Dfh&4T8RKC`Kc4E`z8-Q;l8-8%{O8_AE+h!j7I z%2KOhxicAd;7-8#v0@w7MhuTOl+Y2RaIn>bo4e{&q8fCRJlj0JEKQBh>e3GE6MK&3 zGK~Wr=^8YUnZC590)%ntYcsy-i&cw~@J#kz1<~auOZB|d4vF^Heto5Fv{aY23<4}!DwN!8qUAt^LCna1J(?& zF*3W5){W(rqbAGsVQUPl`{g~;keJO?iZ6ft!+;_s5xU$`6JA1P3GLup#YHC{fe zD%vVFe$DDBkd@sk>~^?$X~}WkzmZGwTxNfdxWaKz^`TVud9Q=3;XF6krgy=;?IR|n zF@Tg$y-;McV9X$ZLiK4;o6Y6K*xkJ$6y6#+p&pV z^P^vSZ+Ym6o_jDf0oSwccZI7r@V#ML05KAeF2H-U4dP?n#tG+h7HyeyM30( z#jV3v_=KJA3Xa*y%Gik-P_)XBiFlWF0DCUY_O`pu80uDBIN)6xP=vb>pgRob-H9=E z-T94Fn_^XnP{GX|26c8N?j(*h`K1{0x20YU24Ts;_M2jTn^|6IOq)gh_dR>^s|>mH z;kO2#=!Nu|r>h%2)~863p`sXF<9mz@_D#BicahCTK|~@aY&2Jx5g*?6dvDhCoA1Ol z3^wW9@@#gS-mDyo2H@J|4x1|3c%nCANp>BW3$oN{lXfnQv&_XK&V02rJFq!Z_Tr~7 zz8PkgvTy`Ok$rc-J>Hg%d$1aCCyKvw&Sh|Or49mc5Ly-*W}D;!B8E7AyE2q{t2Zg&GH)K1E86MA zXy<1W_c0BJphiV;C(FNPs-r}f&&!sLVuZq(!Vr_M0J9;ylNoZ9$v0q;ad61OA|!)V zZc9#;C5KK+DlsCB#AHAoPsUnNTrDzTYwE!fu%1~`OD7pBJ~QhZjRCB9 zKBa!bEJ@m-qCKfpVUOspCw|O7F7N3`#imz9K?Bf6T=2o8o}vj~+E7Y*j@`Z)>LmLJ z!z))HRfLK1$(t2^oIs<`{;h}mU_wl%jpzD?QYK#oeNBo*Sk6|_8(<)BFR}GSgH!Mg zK}6F6bfCz@CSZ0baDTV-8vD&2Iw(-8CSId#5e$1#^xe0c=Ccr3(P3^bb84dZ8N1_( z&)6?sU+}gxuZhR5!vRDwA)z(oAng+g1H44@u8vki>*s1th^YqbiruSZ);kkfyp`9c zUu=3dMAcJ0ReU3FQX@6kViuEoW{t@SpLWHI3ZFSRzDW@}!j~VSdlCyFDEd0|gn5+{ z52cJ&h=;&#qd>ujmx$@_WtqSlx~pQ{o<*d~^EOo#^SOZ60Lr&y(^fc)w~F$eaY#3Y zPen|rsQV3d5th72q{uTt z#*-~$0t6FJw-{CfFm<3@%PBwOr#AJqGW$+wYTU7fjmHJgsEB7ifuMx3WQY^C_3~QhZU2BX&K2d&PJ9jX$dQd>U zVj20w()FkhGW`h~S3Js~S1G4E_@ls!WVgwt5(`_2GWhahc4NWfT20? z>r<=?wyU>-aSaNHMVkjybrTgk(x;AJzPG9^SY!W4KQ*{99%+G*2*~HtN!3eQO=Ci& zE}a00BVCYIoq2js`&wgUw zyK69NElSK2D_a=PvP_GX#r*{eU$onv1 zQ*yOH1eldhO=1YYK2>%+U+-4`MdD=!)4(-wHMvuB9hb>3X*_$0fhH_r5HJUtHF8ex zUS1&o!r3HOx9+^^(+cR@9`tHq;pWxN-eRlPyRcD^&rxm|iIahPu{Jmu_IaDjopiY! 
zsfXakZ#=cFGOp}@GHpSF6IhOx)>%kA1cwF>RzefzOh%9DRSAdd-l%VDz+!tf=6QiG zU-vUtKDCR?Nt0U_W0!n;A~%?CY}C^y{-43Bzc{&r)-R{uK%1ich81nGb*Nf=r|ecB z$9bYb8rr^vkP1{f^!=7Hc&COE%-S?-i`mY4Y_A>I5H{3-=CyF~6 z^r)Gf4Uz!^2g73qA)~I7J~~v4TIjm90c`LZ?aFu+^B8AqgwkTxgYl4-P*BHu6ZbrT z7ll<*R~j_D!Skhead`7N<3?zLu}{&`du=16;p&d?oUZ}UYr*zQ-CI2>Do{vVO8E#} zkF08aX--;gRqXWbfggJO*UyN$k~rI^dk2*Yx#osIdPT3`3?&`CNXr&j|=@VEIr z3^jjzw=ar@LP8%pS$9`{_l;ay$poBUV}#Y+EL^P5|M8_9))S)LD^!B(j4N0pc8NkB z?Tq*SgJMpnuKHZJfYH;z&W#LENlgjnQw^F`+m|y)S{SE(4g26ZD)nppW5ODPmMPrq zsKmO20gwbNv}*@&`w<- zN?nl2UA1p|;EDT+)Skn&r1Wxxy)3b~SMJfz9TInMGr&{6G!_`gEm0H5sOP|X&s){` zMm{h>l01jhtKR*(h;t+wfEGPISgyz5A!Et1f#t z{dEkT)}JdfKWDmKH?n)>X_{qftIBGx*Ff!@=%$O>&i&l|oiOO5d{ZXz!?;VwYwN6OIBk2SxT=yka#PuRQ(XZ}H~jrV>h zm7AN}w;{{uNTxM;=do+^BjpYoZ;wLmTqK$wbX5?h`1eTz_n77x^@#Xt=49FDh=h@J z^MF-6k|(z<7Oh&+JOYH&)Gj<8Oq{~lhF++b^nSjs$c)kNl%C3-Ti0QFWpw|;tpcWn zk_o>6b}ghgkvGpO*uNp)hqQu18h1@^*ap(?+`mse*S*z{?*J~Udtg{$o9v>?6$J)n z84PU;a0kBO#O^(xlM!}Ia9qVh?LZ@Zh2=rSj1QisnG>@I2B3Lik-J(kcaI+_e&>n@ zT_`0Bl^uNzlYHXNv)c6?SuYT~Iy+&v!1~k_Xt`^c{VlzE(N`-cv6QuCDHAp>FS#mp z3wbsV^#ma!J}G7tR&u!3fMwo$nSZO4-b?bWnT9jFk8Yhw%`_0lnpE6T?qKi7I~jGA zb4m93*EO{>AkzKXx?WW*6xJ}Jw*9d7-M$U(>^!@dI}gMHTBDw*XPUU0E4O9tIeZ~> z`&fWAWx*P?;G^^G6*c055PMjJTOegfjqFO7p`VExWOX$QrXu?TRd7RRuez+7sqb`LVk50EkE~ar2M!}=?R%;I zAWR?E{Jy#jD8_I<|8JM$7UM+dTlEvCfW{cuZ)@*FK5^a+Sak<-H{8%)nwMn^`P34S z%8kHSx4o2I`6&_E0^mj%K3nU|pjc-lk&|e(K~*T+@f$@)4tJwmT(ws`2byc4^4f%u(rOyfg9mlkVwhlLriS+IJf{K z6a(#SQr)!trMKQH21n47hf{l(BVVN@5bc(#Xs<4l=DoaDF|K^q%zTs^)x5{>&l$Cv zX+R;fGwTt^)=t?w_uOd+M)!R#X&$(2g-zeLQ(EKo5u6e`r~P4}x(goQqx6uIvxe%; z6E@b?@f#cgVR9be__>(nwMJC5@6PS*y=Jo|853F9C7Pv5IGK~--Mm~zza7xiX71E& zoCKok8UQNRoeIQ0u0)vUGyy3Ij*5%jp&c~d<101pvPRv5LV~U24Du3)sLsJK;8m7i zvS&>RpK$qHXYJ#`b-b=B7TKvXb(%Xs-R`Yv<7R_0x;V`ywFmVTu19nntFoxW0`0)| z-YE_*Ua4wDG%Oy3UANc~TKtOo%GX$%TT`{io8=m*eF+(S8vqU-eUz}dbet<5gD4!# zqJ9iC<)63auf>ehQG!w_aX-PEd%JiIyU_}#jbpiL(DRY(65)_RX7*i9Nyo%gsfVAA zO3$XaFU^KBa?G@y9HQbUYK>G^9$(tA9z6kstMfcFB!8_QTINVkpMZ0}4BZl+o6I$@ zQk$g;Im^zD+{kp#F@fr>Ck$JE9o2qVArvtI-+C9Cm+Z4*kZLuXI?|e(qPD)R-}`NP z@C@rY2_5kg9BXFWX<(vifuxKoiaVC+bdkk*a-qclka40`=9e6Ldohvjh4Z}=6>e=T zD2JHo$!1s_76P-=ANFmou!adl=$ZAcCWZ=0oPhUs-{rmx_mt^`c!@k?gs7ADY);Fr z@FTaGoGRH6eff9jPwqK!T?E7{GbdFM zuEY^2Zp0NHdfk&wueEoodW_On+@~twbE~b7#N`QVjw>QHSdh)06FO*15iaY36X`F| zErpG@v9layhu~~wU)FSL7qdjk_Aof4ScG`7>kRN!Nr=_}qr_@Dj@JQ>1;M#BFM0eq z4$6TkKXOOQdVeKo$cf{0$e#5DS5%sZTVwsA4I{e3Vf5bn($eWR7f8uqnrtX>VipKO zpVN>dy>~uuXrdYNl-;#Q0f8K#qtoE0gUDNl-rs9s$O)L(^B*=h8kZ?wme^~4X^`o( zP^O4nb_!fhpcDZl@mJ%v`BS?(!35B5yXoMLLB~ejARl(q(7)dpXO`riAFCID+RKl4 z_%XH9IAKwe%}cgT-eJSk-|6eOIl1DtkPJ;Sx0l%oi33O26cGH4XEWOdEytr0gi&+X zIZh4Qr)z_@?_o$2+CY=odosXe+1#DebpzMyslSD`d76QXRxTc-DI7pVZ|weq4dVEl z&Ia`MOij=IYHme+JhD8NHp*ehCr(-eO~x^5U-S)?dwCcvoRE9Iwpe>rrgxE)N#7a2 zY$#dUHc)Kl65L|eh6~)~M+wVesdpkQb-PMcyz@!yn{#4b`#YIvV?><^P=uN!>yl9m zSa=3C9=@td{c`ZBkj|oCf??{x-am>QT|gf}`E9-qD~8!lvEy&c z++A9>98^rBfohqh-+My<$ydBd6SKfbjra_2xf62n#0j{t&rWs1^6Er)+!3A%hi+B9 zB2ytCmi-0@!&2A8;->nozpP89@wM#}-Q%wlzw};NbhusCURXF+3(P9ND+5)?gVNE# zsx|{~>=(cvme9{GffqS}ZhUT-VS!mA9l*mC>giJJ^wgG037g$dD>3v|8yGVKL3d)z z8$TGzb*iyH_q=f@wQ_U7)9TBF!}!;LNsG!oK)@s)Gl)x^=#C`&kMi1iOpP%&OwM}7 zQ|tMg4G2LbNG6T8jcP$KvODq&qtZL^K(#dpr?W(US)S&7<-piA#0O z^~k+XG@$-U9Al&9^?nDapJ$e06WfWjbt({0!P>g){W3gFe>pnT0%&7$seJg zgCGYGZm&92!`v`Fm z;QhTLx5w$#ajJVtd$?33R@>C#=As)a-cAd(Y2)Zua$cj#pb^l^#O*;xz_Iwc96$uq z>E(-;HAX{IfRXaxU56zY$Y{jR z{Z{F95QA8oEG0&PK$B|QOA#*1j%KT3KxPf%-h8OXAgjcMQf!_77ipUm+X4f7*5cPp z5bChFx}ETqo?O6H6utBH$a$IFfP=)Fz}$O7)M8dOP;sBZ9Q?&@-Uuj9z@Yr%JEVop 
z_*KSgPQR`?w*XROjvgNXu15v@2KVF$A05`0y+O@|nIzeTRf^kT!UPj3KzQ0u8E&t? zdna;B&~o8AV+NPqR&O_hXMkG$sE1zN1iZ8Nk}LXUpst6{EOCBSk&vLReqHbC)%8$% zy?WB!$nt)C@S+1zNl~dG-2Fu`%)ZxVqQq9|9Wqb61{hgP1&!FQb=;;KZ8pEwf=G`O z*O1WFHu)?og(a*Uxh?apM(>bEqZ`tb!)3Hf@9|iLCWTa6ox`*WB8J#vgZ;u` z#-N(!bW^+!!Mwo+A%|sWj=MNM=hBk?Wh>#P_G1-V+M)|L?I?8P;wdXNFER$)BW3VM z+CF<-rgejrPIpv1n720d!dsDXktt(jRn4}O3X7K*2tRwBd#`5zbG)Q@$ z&hTB>XS6>R2>+R7TbOW_m_$(-xY}&CEKk_cFikGni__q?jM0GXuGH^OyoH_hy}#9g zZH`qp^k|o;=7SCFNCoaQci24>;pvCfYr{w#=xh*q!OWAwi=zlxv8`T@FLt;`dYCBy zi4rnDLpmE6xNa8_O&*+iXIB$AGg!C3H4BlGBf2p2$ZJZ%USA(Z4-sk+uu18qG!1?E zs{t^AiTr7zRI;|f{tj+salkb{1eSxfWVzA~hwS0W`+j}y-+b3g0}8iOQ#9&30|N=O zBoGnp%oxq!KXYZA^++p$l!+Ucyn?xXWSjsUA$ z0?*f=_ZBqw%`KN^H`an-96YO1LLx+!09zDjZ@Z>*!5W8)@ zIDnWx$iEXNG|=#<62{y$yxSwCFp0wTyX!01;;OIVWghcAU&~{c9c2lU!<(JY76H5e z(%ng<`Ga>ClPz;yp6nWq>6?M&gbe$zU@otXm$%>8Ew$gM4e*O0~b4ClXhfMJ!X>HdfpR+7%39F%USH&1l(ryZ_I);?9{RClOLYZv#DC;f0fwY zs7WyMMMxiZ^_E#|7mM;62d{nH$F;+i>aJFykevZ%9x;Kqi)SqNM$NFpjEOTcS2C*( z9pe0^eCvkJped{;<(AYqObXgHKH*U89pdR5DV-f}2EL9#KS;M72i&TR_4r}&G0OX9 zyVwMzz})m~2R=@IdvDkWv+Fyba{ftx6>o)yNu34+@tEnsG|K~h$-yYpojQ+kAJRJQ z6KUS~#kj#Z4b#*p4I}LI`~!b+8zxZQ#(KfHs0~x`^aReQzFC?V)(DgKNT8^|LtO)k zK`g^O!{(zZA>-h{&S4%U0a$>RVDyzOKo$jt&cZae_p6Hp^)K@fa4+-gt(M#f@Lvc z4g&w>(Er5`3LQth$369Hwr8M7m0HpYvKPRzhDIx@XjD0M|JO)L5XBOKPJ=Rmv-d=Y z1qEkgc94PwRZ}pmSys6S9^vkOgq42D@#dCMn(xX}eV=ZTy6H!wEvY0zrT>V7wu6o^Dbp{6|2TUC?|QX$3S1uf~hGq~aUUBKyP3cHZs_GcN*!>z6V&9Q2DpuT)<0 z|Ly({1sR;@B}%u^$+R^8vdeHNtIJxnaOgw_AJO1kuX4KQ;Rf_T%eZU&(QhmuDTzrm z%XT|VN@V)bl=1%couTO7G!#kwxkU#Wd;z?g!Ybeq+3+{_?BGwLNJ>hOjlP@!3DC(w z<|jrNLRq1i(v+YGD;e4{VP4{}Of|>x|8`UVu(+y%!m9fb+fpLnPEN1Vv=uw0=NLY7 zZ1?i(`Ey-0A3$Im%Ms9qo(w^8HapsE_uT*v7y5^<|Mz3CXN&PKD83mCyP=l(D>W=A zP5|WpiI+N9RpHSQHiGbX?k54243DVn;?ELK_no72 zqHYXV>*kbHc%>@vw}R{s>+787CFmL4NS;0lp_X`Y6WUPnP+L5V@jh^>|5*tB;k&=m zCXV(CN>?w&TKXSS{6mW0+2bEl{6mW0De(J;f4Jhuq{v_49Fs^+^Lr%jG%Eetiy4LL z>phuM=4N2F^F={G$gC&Nk9_j(&q?Q?7e~LK)NVQX6$4s3;5V&(pr`T|+bz%}DMol$ z+O;=n+?+y1E*{HNs*MUY2h<}k2ikPUkVa{mh6A?b@e7Ch*`T9pKzgUTOCK-I|-ny zm*}E|EN+b3h4HNq4R16^DeVIFJ7htaX^i|@w{L7xQ~B@2{`(Jed4L|(*lvW% zrQAg-R^(A?{2p;4tU5TMQP51R+(ko1S8SV*z#GSHoTCK+P!IoCGI)bR;)WmlO! zVE*%pp|@A3IPV3tn8cSKTF@d`BC;Z3cyL2%lEx5SH(o5gL#9k9+(puLPaG zBzpDg>#gO{gbEjIT7}c-064tAKpzey(pPNVLVZD%Z@G$X03%>;O#TK`ySJ!M$h#ew+A6?U_^6p;;sLOo3;6i#*dt)0Ex$>+?ipaC%VPBya(`#_ z=km^$PIdR5SzDKn36*BDDZ5h7_@a62)AGQo14yGJYXYk-*2z_1_u+0Y8LX9JGU?il}Y~oxc*bGs3<`p zxEp`iD3AiPey#!iKHBu>)?YYrY`3sf*r^B*g0Ne=HsR^GAu2N8j-H78jv0yw!>`7k3hXqSje(# z&FYw<`|I(aJ8@o7>7ZFypuE#)g%ILj7u?=0Hgak~(%oh1K@?vk&}MN};`#5J`7YMx z7qJ0V$R;+KSvWaWJ7TeaIPC+4+4Y#t>*Z1zy<2HLsXwPX677TD^~&RG7YB@wOK~L z{0xWligHl8&AGS53)$J)zAf*SFEtvit{lE!Fb|<_Kr3APjrmYRhor{0W5tO*5#!~6 zj}NDAlS8(99P7O#SQIGi5tZA0erwoy6(6b++8Y{yngI(3)L%dQGcr7=h3@6cm)T&_ zR?hNhrMDR_ZPo~=Av8pKEnC6437dd8i}Zi&?vW0_4dQP`^M*ae%hX*1<5kWjPIa=K z1NIq0ep_gEUsPy5u%O?+=Jd}z`S6iaMf33*PrLqnU4*=9v6~A*{u=`_33yP+?+

I9%hSbtR`OdRP-1F@Yb8m*e0s0{Hc z2fvV#wgiu!CJ~`~5o}xEuKb*g0pt+N@fd&TvC%S9K=H5srkES4FTCC?#7_!^^{5df zX}j!p$2Pa0&9m^NN=15$O8gudDRWH~R-6{p1Z1-&YM9=leS}pS zqeM--Uv=4N&@avWTMYf(-B48m7QX4&>-TEH<^#tc7shwa9}T1fZ`Br_{7EHp4nVEv zgQV0FfDl>9A2FY<2f)x+08s+e_0U>0sLPJ6Tc4*g#mQg1&Gu7zg15^7fK;aX6GJ3= z2)D!Q-?Z*`*gZcsTDwO6zYL4xHMjfz0PZit4%Jxu3V`jGYdt!s@Y?dry+3eu|9&2& zHvj-zcL^ZUpS(i_HDX~3u>WkL+#M=iSTNxHIc8yD1=#TI*oKJ!@$}Fi*>L{c&mH8U zpjO}{F9|A@asm#%^M5=^nIAY&f4`pTSAp|trR%wTcUA1I@^50*@yVGa^`FyiM~}T# z1?coXSiB9ev+R%mfi-hue@5va{Ldv8z@b>k+u;Ui+T>fmLgj-S`41_+TMhq^;vcT~ z=Ysr8D*i`VF%_rpo!#BtUA9};t8Hcm_d)%%_~PwS33~OaTbwu&^#<=sfdiouR378b z0x_8A52r{DCq_6IIdHIU&Qb5$S4`ZT>Nb~+UY`^;7RBVf{5cExK-MDYIj^~jqUzlB z4NJ@+GWd-g1QyF^3B-;*@c)J^kzP1ue*$ApuewQ!xZq@^-Xc~luM_yIRK{pYdq%9&~Umwv0XEZbqXW! z%g6_)zF#JS*Km!?i}Lklx$@obAVzD&2s!z^0I-WgR!y$DSG=y0vvbtOlIcU)NSPm{ z5h?tsZ^IeB5~sV>ef|9^l|2tHenCgxg3Ucm&^SjN-n~-tOcKfySKBnuv@(u-G&Qq4 z^GP06qoSub_${)8?JorffB$Qc*D+hiF@9*}im3j|AUtf2GWLxvFz%!5Wr$Fjo%Cfp z-Au{WR>7S78a+Dt_3X0j3w9h!|5ImWg>*T}FE!<1e)fash<1k$-&4N z`To(Uy1QvHch2kB*whk6FW#T)!uD%6V~lgq&JOi(4XWL^!W-=jw7iL15q3qLJ3zxIpHXRP86S&t?vB;~lDG}I zpIK$7wM`D>$B(RnPHp>E_n?g4l`~%YVYS+y&O!Eo2?`arU9e23b7d$m%TAPXsxjlAMFb94u+s+JENN8SxxhtUhgZ{u6@7bbSc#yDFxt@qp4 z|f4mZe4{%?=8MSNHp_s@R!PICeEf^M@2G9es9o6|WEajt3QR zVokZ_8EPsl??s&E3Ss>cD3j5g+N*#l@Atido{4E3W}%bE^pEar8j(o9|O@U@Hv_O!x(4Re-t!ei11n02IFu zjC4B~{qkqA{D1taP;j6uzZgH2G%2%~XV(S3pkPJGUK?ZMsfN(e9lVpP;X!%u7+%zo zXH>pgF|b*x8>Kz|7Db^yEVBMjI^fl@U!j1~#xwQrZ&OyUplK2G<^)?=;N7`{!6$$J z#$B`rNRN8>wr%fij`$+YgYjTx1y4HHt4nKpj0RBeOh^r%zXQHZf%H;{m*B;RTj)7P zM!6rgUkhSjIna3b|495U=Oeljo?0u<83n+{yw$P3ai8Ib}-KhZ!kFt8oKf)UO1+yPR^lkf%W!}rPFt^jUt z-yM#!Y|Qx)@Z~N24wVM~M9hc}@lLW(A)ILpOdu!ccKgz>@Y#>Ifu(|MkIt#4KYA3W zWunc|%V)BM0?_>x&)OeV+5+t3dRAez_v_r5viVF#mBF_MXVD{^URtfIr`v3tprFA( zbAUU8)J4om6hWCEkBA9+z?%>M81lGY2G%{Y@TYYzrK{9BBnlXNj&gHvG9$81e={*-6R2n6~Iff%6` zEopN8!Hb*Ay^1wn^T+@Fm=8t3DfP}QHwcrxAMe-6g%BKn;({;1fXzg}{<4{!NF&Rl z{(3VJY2MsWsej2qakehd>7io;uB#tNOc_71^nZ{Yb}T^W-Qy$59CB<2_fDqv5SEV% za~*X4^#7QKuT)#vX}$fLcLiw8wyzB)+e7Rs9Jr<|T)g^LIV;cbE0d zIpCUJJ@psHc^>D2x zQb$KmaA1?4 z9p%d_4!SH7Jwy9 zJ-aIiO_iGZSh^*xG*bAEq42@#&T(LnZ+UfT0}WG5uK+iXdqSLU|9g+&d-HAzY^UWG z?z6m&ls4zV6h0`wGb8lCv2&@v6sZdJTaYj7*@GV&vD?3*WW)RsYsAXRYHD76VYpbQ|baS6;}Bfo__ zxYa^mUhb@zb$g6I4pm8kx!sJr3JAGChH8o*-rk~v+dK5MQ6zr+qHeYA*&KT5Yf3i# z#+8s#yFCBOuWzhl0thJP?2L@=6fyg6q9WqnPeRf@NxsT!K&LD@Zf`uAZNfOx?(XlW!&Qt1^@zKjC0xJ|)1|jI>N0#Y6+Av0r(LM~bBCqZ7h&*VSs&b+S=zhx(@7)LVp?De zfKfUg>zjI+LNCMAR!i+pc~qeNxl>qQD%Z=Sz7Z|&MC>eWSr2oFr@NFyN6s7(q5|Pk zw%;0H4AFgE6E^Y^2;Z}aLO$Z~y%Ig{y>>D5mp2!+4X+neRXymGS-f1mTrrv*xW6aF zjU8q_8t76kNtCwuNSP$%lE#;E-S>SGSv#cCVW998P}_?t?XB1*C%k#{yt5~NI!2M{ zReu50TG|;;e|H0r^jTL#4}LIh9Ql#jyQ&R1&K?;zPO`PFyva8%d)4!dlk?VU@8O5^ zYEKjVT$W$L90pxp+n5r)U8xw)+bg_!ovfhJfM%Piz2qQo=)%q+?@^+9&BD>1Q9wF; z!sa+94N{8{vXjtrXs$g;%)oh!MtPNe4`Z(ebZ+#;_2j5@Ed*21e-Us51Uxh04A{`3m!FasI;>J!Gr1d^l1A@ z`z0p&vV*pGZGx9jFd8KQt`YmdnDfMHw8zENQ%sOFuMxX-pf-9q1%CL``nnsCMHkTy z1DQegN!YfT4rqvEx$`b@t-)XNAr-)@2wI5qXoZ^J<^rp@p1!^SFoPNRQ_tBF2-&au zij|iUP{Td%J7ZWw*MN?HIu!jwYjO#B0Gdp$?LHh%)ryOd)T)UI2{{UpQE<$7L8gTW zgKctZ>xfS7&*V}5U#hjwSGY=eg;8nILPDdx-vP}#o<003`LO{#-wSWcik7#Rt12v1 zRlmM=%b>zBX&7YL#86GZ_#>LN=5NDLUFl|qEW3RlCO-sGbmUzYTcKYcNdvfOR67t? 
zP77HnH~;ZiO;;fR^PFL4s(;`G*O|_Yd?WmABHe=;w8+73MGh5u9h=GzCT*zJSoxZm zpxOf_l-k8 zEHbVVkz*6>e5FBnKX(IC=?^Z-FN*t`d1SsjtB(Wb80cMKwwWBKD{<9U!K|hAy@+!C zXW2WE_aY*)_0i4F@>_yhN{+?bAZ!UkKVqxP;xXWgbDrgv&1OO@5;8?lU zeBc}}tnImOCM`YOQ|YbIvXy3O1h~(&AL}RBuWA>clJ_F6PQHKmIs7SCUQtmzkW&ja zxC!*Hyc9yiTmi8@E^%L5`z#NHm|=TN%f{sTMwcQA{e*m)wld>zzl6g;V$wa*=Ar<5 z+T{xuj`F---vTE2sPdHIM$~<1z3P8i#?tTvnD0pXZOly=^7Hd+X=++9spzk|Pc=na zs{&IUgb!5f*w{?Hi}&&fKM#20Rlx3Ab1s^O%Oi$?UHnUZs$M-HXmv3VlGAB1>z8w$ zPPZfb-j|fzX98NqefZ+FRM%JT&9k*>u!+-!#d>3Wn`^nG-LmIOSQX`T%zIj zG5=^Yic`Es^xd9LXalZ3*#zUg!Q{J$sBE{<0nO$G+#$!BK=jJhz(t$hoh25H?PBA} zfwDzLm7M`mo?a$gA(@$H>Y=ZE`H82KGSbifNWsSheTTFo$@^>0(&IXSF4G(>S{yw? z^xbdNPzL-tY{@MNDrnf5Hod}lse~fYj8@luAiQDKIxnD1aPCk%3tU=Zv$eX*raR81 zTS%XvZoUwjR-g%goOnBNttRsQYR9AVLvV{Y*KSwrH*Jj=2$&%Jh!cZz5edL^`TB3$ zzW|w0JhyD-6h@=UK5!=6ui_Dv*jRpCVN6q#Nv6sJ(&xYmHvn3!N1!G9MLNJH@gsK4R{2cFogT>PJ%%)Os2+d$x;JbMFc;`%0!s z^-yNi#Cywj+kG)_Nn3~PBjbtkMq z3sY23oZw4;vMr3q#dgM{L~08NAf`?E=gJwaiaAQnJpaYI@FUycT7Xmc==zF3&!@8D z=DE_xeZPdY%xQ*&YP`}3hT&?F_e_n3#A6FOOMP0Xv{fHm%gykDu=IDSUu#ctY|@zS z<(-M)Q#{hLYSU8OcMKy`Id-OgcUpve32$g@d>OXgZu1QeBHQ&$z(Ows?%`q3oM}t; zRgCR^nF*ssQv$zAeBvU;8w*}TeoJ;JdWC}!kePgUjkdA-WV2q8@k%hsi$$`(u5P18 zrD0NrCEpY)Qe3+pegK)SG|$z=YRDTv0G;urdNxfEn0X};1_wHq9YcgJY@~ERrAP_C zc=o=A!9*h1MB)!qP=BEpspXx8VNYNN?_;VSPkkG4D2@Y>=QSF9_sQ6OE-~sz(?rtN zs7t~=seV6}fDHs)Q=~! z2PUR?^eC@Kx&$Z+S4|wzhFjDWgO?rFznqPxY@Q2^YA%Ls3gVa#uRmOfMJe0xd_*?_ z`<1O;ZxuwCI4qa4*M)Jo)N0o4Z5BsV?DrQ#By7zT!yrd>1Wh7G33UWHG0DntFX-6g zX0C>IlpiCw$%EPqEa}KM@4AXFg^>$)x6y%;=$>(JtPuT4aWI>Mtl@Wkl>i(`t`se# zW~Xogmpn3JxIM`nXq)zK)qNbU!Q(rZ5oY!ADJ`vEw+{Iei$)^eERejJIOV~)w;BdY zM7y^~c?Gca<6^wdP4@igRz7(mb^yEYfgacl$GIISuDT%z_t41(bkcsn&4mSyc1K_K zg}&4LEUe`+;IC_@J|%(ee0e~N!IMokrYX3NGc{5kCwjjKxQM62RkE9DR3$dv2DOK` zUK+K#;tAK_6bWH|xtztF-ta*HmUh+%HGE$CVjeYU7FF{=uh2NQMA;K>a|67Rv%TIu z5;-$ur>zk;jP8^uO$*hin;7d2A(_>FGcl;O^7Zj9b#mH&!_wT+6~gRTA;Rg%DgE`B z!PGP1f&zzRE;#Ej=4F_eaN4Cd3>Zs4DA{q$M%MM)^Ng!|LuZ|k#|Pr{by_V;P3=#f zQ}YNGBA%`&rMl1K`_Bi?i~8f zX$%_%0!Y{CH`?m7`QlIFFQm-!WKUKcOwmx6+tjJQL)*e*Jq;w zV>-{|iv`+uADY&$TW1Z_k0=JH%_eMop_$WNwWlIp#7sDh%Pa9|hz~Ppx8!lJs9Dw9 z7FK;@i+pij?-u?9ar(T_=s_o-;f9ZnV(w(-)Fk@mb~PHTegJkTA2l{_kJ`T#t5Z+A zE`9#f0&ejCWAD9#nq1fR(PaY_0m~8=U6*2|sDMa~h=@v4P>>o65IRV&iGqTPfTDtw z5b3>!-XbC(HT2MX50F3zgw)>?*WTaW`^-7pHD~6`oIgJQat27=_bGR|?(4p8T&Ps# zO;LYhoy-0175B~KbY=Ql1mdGKd@Zz{S}KjTqlf}w8btx1I!13o&^5J(p_Fg@fxqRq zK2_Qrc+&1MwJUrw6n`-S!Ytod8J0H_AyqHBr99G7k-FP8;dZ4>P_<(gMqW%uU9Owj z8ndT9c|r7`uYfr61O+*ZTg+yfs@E3?Ue_~>9Mvi!4Lye5!>A8US=b)=RV;k)W*Ix^ zD*p%EV*eql^Z-0qDM#j>=z?TPZtH8j{*vN!3bfL5`HaK@sf>e)b?^N6q;H#efA+$L+^pHy7mL^=nJo>X_oe5|I|RM5OWg^H3I&(naN z?judNx=BmFi>RJRhPvC%?qJYvI!hDNTNColF=wsQe%uQ};j8VX%}e!5eH?9G8HNU* z@$*AJr49>z?mNsQos|l~XLO7V=B7MoseU%20K5FM!w^9%tX3v)yCpC?X9svyZN3-_ z29Ww!))VE=rz5bHQ1wtjiv!Uv1sBc6+ueHn#+SpKuk6ic^rsG#EL>S|`=U$m3s}of zMWZbgrj@&x>G~VhG|^ZQnbzzf9tE3c^5>y>0)FQDs%9oa^tTu+li}NXL3;0Ssy)AP zmD7i6jCHQSaN(01zLV{T>QcYWH*i8cPK|Ji2AuligefX8c*y5ORwA;0Hv~Zp68k*3 z_R+QF>qF_mx2Af~e}&SxnE1dEoGuV}Ck0pB>$6~inx`$w8bO^?OlEc|rB zA+?J#3xrPNy>d6Rv~2MQJ}B-==g=(?K^Q;Hjgz&FM7*g;2tMLr>uug*0=u_COX0LV z^K_u#QQcbQAs*_YLlU#?xuQ&}6f{>x$xg}Sn+4TkSE^uekLrSck4%kK!L)iZ*kH05 z^667RzDk5A_e9* zetdF@ie4(q1wxk{O4zp5Oq4THr(b$atA|Eb%r59~#q%SNZ^gTHT-54=u;`@2@C3OB zSs1;cs_SH+;cZ^7`pfFo#5IsyYIOah-coYwzG5IMSyS{}Q@F~XX{_@e6)w&`E|>+l zN{2MamF2D|(kd#Llk8|bMtO0!Olt--^SGvY;0ScgSp9iA@~}{EH__%L|6Aq3xY0}S z8+XiA84X4jq&S($=Gp*X4uZVH&{BKe_HANqCs)E!UK1g90i3EP>(G_ym$2U=qgBM49~Sk zQuB?aIS8l0n6qFWFw4ot<*Lne-%G7oGn7p0Zro;Ha{_?deVUB0-$Yjteg!&m#tLOMaN{4%UVSm5HHo5pW338Lx8dx-~Ldw6(QZ+6BxRo;rbB>rh4 
z9kEO$TdnNlj5DFC*i}J^QP7U~=T9Ytak+E-vT9 zg=04&oU_3_38BpULs9ov&?};{ZP{9_O(7e@k4I$R`LP&Vt*=ifEF6i&nL1T8_;b*2 zGujpk;ja;68x=SKJ%;ghaybAbKM&e4+&0`qYjx?craqVlNb%Pfk|P@ZIppr;X?4>y znj~6jLHjUl<(5+^s~E)OOa8~-rcLtEyYK{c$=BbKtzm))a6wLn?KWeVw*EUxZ1Xc% zGWjP)dFs9+7*sU-EkwliT?s)?nNj30Ry4ZYGpGnN^EPxb)2h=m@9?irOEXsY;A=iF ztt5R#zWSo`^D;|nmR#EHrdf!|#q5-X4XkDYA2-ETf%&=X(o(@AV6T){_U!Vo)|FH| zGT6{_S__n*eZ-iDcrw#YP<$FQPybL0qQ0zpSUSEL7@6<${1#?meMV6At+YMP@gs=J z#O~=K{av*)WP}}hiP-IV?MQuR>n@kG=kq^?i27a5c*A?2sw7Isq1DcKE`P3XFPdl8 zTIRFLSx=??aW*_>rMgr5PdB}QGu-&AYp+M;+U!pP;FbVf)#DazttoshPKPSExsMZq zTHEskAUAlDk(|LvZl zR%&ovv5k!zfRXF`Fp>cQYYqGwyPzhr4x31Vf0F~gs7ui;1q#$okIML=SgKvwQf_%# z!88r^4-DGY);LFg<%rvrl(a|fUCEl9w0bp2u0OfYXtsQMGdPQ}Ynmn?=0u9&_OmPG3 zeozcRb@FHC=V{Rw^95@GFyVD!*J7my-V#9Wbr%(dRzVoXMY7m6BtPP!ULallwB>C2 ztML3N3ekpA?(V1w4zBqY*5en)7Symxe+O0jF76PhM3q67YuAn2{H`u{*z5CpjVKW_ zDV4t#brkWaBRlOjEVFv)4Jl~!FErHV!iRZ~pN4X9Icf}7%-0#$6&}%BxBj#dh1X5B znIIWU|CWh> z0*$&wwl0VlRtmDBq<|rm=RU`*Pt@(Q>TdQ^x0-yI+!5(pSK$X6;wpREG#atnxkPVm z^3Le?h!)pOa*0se(-7YskB~kksYQLJ-x!61(tARXD|-xR&rn+-A?k?A-^)ABWTncX@r!Riz^|^c)6rq4s&-%V z++Je{3Qn~}LsXZK@=zN>MC-XN0?2)?ICHOB>W08VTLE(jYKw&jEO z)Gp*?X$lv5c`i_XGb_iim|K|kOKWhXl+r48K#)hmuAr>CdeN&Kk;BT83131dio-AK zvOW_@X{|5t4w0E3e@5&;A90Ip+~;>-Jg62&EQa{y)^2H4HtFQkdQeQ+y^rzBHH!9W z&~GyEj?WkCUq~_nCUN+DjD=GT+0Y=hH5UpruIg4xDLIW1IxZKTpR|3xXnoltw`XFj z{HY&?%VX0-giBJ;gwWT@3`pNGKnNiC&0S%FxF%Abhx4-bol5wxw$frbXnC})iFmxLu0YUiv*ngLP_yL(a3bT z;3DCT4}&<(z5c#Tc^d$ZZ4xNv;Weh(Wa^Yh=`Ey3w=_&!SejF0z<*A46xA^SONx;z z5%cnEt;YFj{jj^m7uCmbW7ga;tYN)(x{VbbwX7^VSq=SG*JxRXtpX9kf-BDC@E(w( zF17!tbPZEDrTWnBoFv3BkIBY%E)2N!&Gxjv%~sQ}d4dQ;QR7FqI+u@)WVg@F&bBPn z^L~0CB63(cJFzvUy&!@Egwh7cl^*FQ;eV{E1SQkU-Uo!)*_;;st`(e)_P6_t2NR`i zHvJdckb=TJNir@&QNmlpwE$yipUX^7u(STHPhPQV4(yiG()6w;X_xAFx!oi|9=aR= zaDW*QS!%na{Lq81e|X5JjtuMOby?ci90;G}IIaEppu~L^z!|h_$+^xiy_y_CFl9qP z22kRayx*pS0&N+sg9+vpbBdY{U$k7%x$-f)@4Qi-Ec)t_=aqub5|r&!>3~}RLV+xv z-4}%7;+K+bt-)Ue@o^D=3pM3UB^H;}i{>8z2M%T(5sr^erv-3fEd`klOSK>gqQ-Ql z;!D0`1!GsSc2^cuyjDk!^_m$Po&v45G6B78D+vi)p@gy9tSKsq=y=`uD;JsXifbd5 zyl$EtD*xk`;cDj%HlkQCNes2}x_w`2=;q=z9-jT!>wkz5wR-)%OWR!@^g!Tq5Y)R$ zYTC0$MFJ2qg_!dWco&X+B`3MyPGgnkU`&aXTRfApoG!h$)KO?syoy)g(oJ2Yi-XJO zU?n+v_eA?@bk2())Uemb=48FvgTnBJ)cKoEy1GQw>O-FP0cmF+Cf=$D^P%KE4Q&LfCxKi^U>+UH4)8#n$zY3^eX{oqZO6!Judvgj_9@) z>1DMRIXfq8evSdkn)|4Hm_jfb$uT>p`dWgwZ@7mk@86rUdViyJPE<=Ji!4 zmksW`lP-}fxj5N167R765eIWT(ejrAc3Yomw)SxvzYA-uo(@-PGeySSQTJQZ^1|zK zyNv~GjWTV|*4X(ePA5hY5BOIKS94(X&rXu2VB}B;6!wMQo33h=i`1E5fz(QVZr=C@q`ANn`S!TerxU!B zJnLVec$JnfphsiH6hEwhTBC{E8`6_xs>l@mhV8*$S|=M$HV(GoLTM;cNTod8h0L z2}(tC+ramknzzXwLUJ_&A+LOY}e^}C4UH2|{V z;R^XCYqrF>%hc|Ke?kY{XQNB=wDRn8PSJzr3Fcb<6Bmp-rAHouv`0+=$+&oT-_N6A z(4+;M>~*c%!~dh67ZRUQl?MO*>d^X_A0cMNCFA|XJpZmTq6{t;c3t&l~?;LOoZ8AIMqH$Xs8M$bJ_hhUhR;=vo8#WSP_O@rZW#+44|g z!tazg}m0$aP(lweZ>Fz%GQul^nY2PL0=M(KUR7L~>sWe_i z(A$~6HrWAC9yqidSsJ@WV#wLgrdH^*I>2C7&7WjCxS*GL7YPK1Yr`GZwSqeI}ACI7YZi9Oa8U@fIgsw zO;BkMSR$a2N?x&$I~R{0VjTkL_A7Lk$}e~Rm^k}Zr~T)x2DHvljuVV-{XcY~H4tvL zJr1D_8ZTX&x7(w6bmb@lPT8O`k<{8q6E_CR(fgziA-XBaR^JPbGczydSNhXNw;;zA z4KkzlO*gd=9|g59KaTgP7dD?6L?)CriQ zW3I68LSo(Fd9bQT`jQdm%^?OzNcdlZabfUG@Mo#@%;Z3zfi?E)M zV_$5a##s#!F|X&L=U zDvR1vWo;&VfB!luaEsZP);>Jb44b1k&$it<{o~7X23Yo=M`;>nDgr7b&Vw5l)mYzz zq-)nJ{fUz%2B}+Lwhl{W4!y*#mDQu-KIjRb4iR;IH+XattZ}HB`@`)` z7DU!`V;h%*#x6+|-<7z_y#h!W_q33M#H3KE{ST(=^%c1mKs_;#;at5I{8mU_%6q)~ zhIgr&3g8)+TymNC8l@|v)qr0{!n+3PQ?L_#0c)*F7$z0(+IF#tl71_z4vS(e!`4@v z5ySB~P~4KC*;-hTu>Mw7RtIBygNDyjGBMF1+(M@a7*_2f2}mxVfl%sr+dSzJ=kv<1 ztyav78U&2_%r`%YSV2Aq^7#C_@&W=3OP=2o=}~cFvu}cURj-sc4WePK#;_Hn$Djt0 
z1YFQ3cevq^JrMFK%toqoGbj|EcEj|f>qTpd1<00fopEMELM_%C+`o()Zr*2OrY--( zOpW`poh3ebjhP=UhOB11zXZy*b$+9ro%74E`Ia=v;XtZZ8cJaIP~dIn6TDvW#97JivDk!E`^|uJf+CAH1s*8BF-5F^p)e3J=yV zJ|Ng>1cJ`*6YrE@s)EZ(&P$oyRj_5(&NV3jRm>7deQJi&Ji<=^#%I4jQXP%l*hg+h$?-OcC`Uve*l^!?>Gy40=eKS6?L< z!?f9jbXAB6#JTRV3@y#c-qw*wI_f-9N|1=|(NnN^^Jagn;Py;9M=F85Grrb}fqOtgd}EkUJDE<(tJyT!o|Q%j z9C5`RTn15MU@D$Gu(=<;RDxIwh5nGoZ0n55&Th}66pfmbWtA1~n}dJabD3<+05*0hK+_6uYOn!!zuXaR;oDq|yGjDls|27H z>2P{O%y&b27{=Tz)$Vbj(+TBU++nOFuhk~=_zsId6JA_e}8{#OP?5r zjMWm{^q3x=upSu0Y?@MR1|$Mc@u5i6)~ZoCYh+$Dlp3J;|#*gUT>7 z_o%Yv%`ul(Qcv@>M(si0?vmsV$aVKd1LQ6DKAKp=#@z@o6W-38USsD?KIUxeF2`7y z<_1mOJSq}rN{?9;^b}3HHtIRT2jBE1wW4Sm(Z?YUlsf$f4~_v2k!3@RTpEE1x|6>Y zAuwZ1Q<5Oh>rtQ=qG+z(qrDNk8805YW&qpQH+ZeYW>|eK zuyy^PS^zwP3wuInpJB_>9zr!&7en+J%?|l%PXsp$_drT?xWoE;dzaF-%?7Mp<&=ik zGy!-I%1fUd*QOeh5&`Ba@rB>acp6hYHV}L^$ zooSFB`_RUvsO*rKsxV+pnt@vVCEpF3uCKFbtg4WNMlPC@4a&sl9oOJCq0WLKV4pxK zsI@yE3fZ0r;?;iEfm+j#=&~w~0JH{{3klFx40w6OvI%61VrfA$5ESKAK)5L{wfs0B zLV|)bDIatXBWHW1D|<^jyt6P(9@=bL0Hvxe=Pf&O8^hnodfw1+kbW-$*hvGAUi)%U zc}Ig#RT`8&#m}ZDUX-b00(diu4NcmnZyD?wrfP?Tg1I}T(H&U3A~1hP1GY-uHNz%U zuvrW-+r@c#9TGe8FDsf7>DweL0K)q|sET4A-t+tUHGg?{h1rMqs@)HJL}=*c%`)Cx z6+|}wop|!{`MPd=a|g~?8B4al*gLr2QC#(atx{utk9c=FN<;~tf*}9^@bzY^G(d%B z+pkx8-Ow>VWfAQ^zWP!hWNKWt&+Z1LPacNY+jyfexm%SUG+k0ZAAO|UHd~oYul{@| z2a(6hLMs>|4zGvu?xQxH^z{IUKE4gXkTkAMvOPO{9oI*lj!@9QA#%BR@HpGCf}>&Z zD9##5Pf7Z2`naY#sioQ;kR@TE0Z%*Z^$9OKBabq2^fs-F`$`#yH*ft8zRf4Q%{R^V z@@*AD@4lv%HX9t$t(!kGtArD*I-UA4eWi2NFdjr|Gird{M7Ln2+#um*&;PmeLH#!?lUH2z0VV|4^Pza zxJB$b!TTxsh8SO-Qz;-TC(KEHsF_(gs^;Adc@P#X;{PRnrd=>g#(n6G)Q3FATI<9v z&L4|Mwxqn4aMJ#6F~(UIL0{Iap2-KzpB+0)d>z%@|Jw|3(fh3V$XIEqbAXy52(wnM z0mydR5EvLEqDY#~fzA8wsK3o}CuP+Qs8{>NyL@{flbj{RJ2nIkzg|$R$%aLTBI6`l zgqbK~uyo*)iMvB%xPo%^a3$lp{E4}2u4oRlVK8sE_{8bAV&?;D1fY)M%2KZb!+gge ztB8@fJ$YTSPrHg`fG|e6v0J{huW6K&sqGRk+1g%ZP$;|qBezi54?r^5n>US$Lw*vU z4i`NeB6?|HLmlr}hqd%n9A5sDFxY+3#>SzN7ZVWBzO}OkkQ>!}eJ3Ye_R%sX6<`U6 z$3z?;q$z)6e7k5?Y6(`O^QMr~b-7|-1BB6UtHvsW|CAGs$pCzPHI4&+99*si@XRcr zbnP3%H-akQEU0vc$7o#ge-@k}Yjzp@nu|w4u3E1cV?nd7XuXcd`3b<7T!$UYnkPf# zS=x3B8)WYe24TC;-WF1y3h2{P6*Gso z!MT@GG@Sq;osDN1VDbX$*y131(d*^r7Sd~MAM_h21&w?-rE_UmQ7cib(b4l^Mp9*^ z37p!4dLv~oBG%UTB};{cE`m?oUfiO2C5Nhfcl(jYWo!Z%ruGTWe{pULGCVfW9gB#t{LW0D9-wm ztyc7P+%a1%jc73_p}jWaxUzY$Iqb9cy~~bcr<7V+boSm+4+a$8SH?psU4X*XGdt=r zcSptfA#1Gv!rdei!J?*&#aXuv$3Zr^_4@H+QOg!~_Vzl6J|kMzlcYKfc&tH{lC4_czf5}w z0Rm;9;;7y{J3BiUNsaNH4wJtQtLYUV_n;qP)ZMhF4h%i%**12}bfzk)H{A`w0bqLE zC^a=z5*gLcK&~jA%q=%N@T)jqu?9d$ttTe_(U7I6eIKA*XpvK~3PtrJ2Sqf#9NB+x zXz@i*hvbRQ@SOnQT^SIUsuA1iAF|9Tn*M7$D9KjO`K{uwX0{R@0}Ud&{=iWYjbn+R zee8woGAQ9)J_v?iaAd3uoE$&C@(TOw_StE6vqbQ~KT;Eq&*uVBBGwrTXLjz}vv=iW z$|FwDn+%#ev6rtpHl5n$vDNgK3i}n&T|gSx>r3DN(JPl*fA0`|`GUHJ=AoNsKzG`4 zZl!})zi@*d=+Yf_N|&3xZYoW=+5QTp`QZ&fnyBNRhyD@+5lI7m2_XXK6yIFEw&R-8 zKHC8xW5f0nXzn}1g|}+ehYq2%PW-~NTy5k63wogNAKWnfh5ODDa81vsPjO7$KOlyn zLqJJ;eDTcAy|SQXaPk&dUiba7moK*fq(g$6o4Xqg)7x&8uYKRq>L%F)cJT^Fs=}|{ zwoom{72{e z(C)n}DPgf(Y{d$J(_g)AHgcbpfAOmwJ9L=cIDX(iyGFsf+9uCkV?STxKB7qEQ}q_^ zUkPDceetumz5d_Zu~T%Ob?OdfR2&5F^X@5FC(Ds{uIj%cogO^S-tfBrP=B#Ka;*5n zWjiVM`&h<`-2NrPWivp6L{Jj2MhbP8sx&(QB|56zEe>8Z{xONnskotYJSPiH-*l; z*$1(9Bz$~}+@$9ttlhZr*UN?ItO?bt7a8wOVaOC zF1PC5Tg?M(KlZmj>)@ds>rRIR_G-!Rg%91~x$;$ixDtdRxuhwZU#$CoP@0}Efbi&T z$Aj-%FD78sueQei`FW1hrj25IH}TiRzkj0z zenGRJW%cF}AR!m#SgPCl=vKCaBT?<&<_}p2#0couh@U<^ywMSL->qxKeRkArqXb<7uH~gEDb>BDmxri96Pya9AmuDBZBvHK?-VH=X!{*sHDqKY&Vb#eZ7iQI&W1b zKMlL9R{Oh;?W&ZyyDM}*I~^F@2`9=xq?6W z+*ceL{b8X$_oi#j(ZrJS7VYBuW4Y+JcTLAm14c()DCc=~=tJ&Ao=b@$hLFn=APz58sebJ9vXUO=k6&U-47 
zV5T#p*UVvV5Ox+sbV$_s%kY;o!BNx;7zKK=5(KAfs4@K}i>0%+te<|;GV+-iy{*0Z zQk@=z9&>B!N6o*^{eS)66no1oK+4nLfDrR` z?CuZ7^9FLLk|D^VvdXj$34(>bzD>o6pQo{nuK?6H!yPf9GNyr5>`>yDSL1odnF5bt zUG%cwb9vi`x1J_8YcD%Hee)R0l?m4}9hCt~zRXl54yELBcS9-xLRtx|bZ%OaudyAnH2 zT6h7Nfl8BK+*<#}ffKp$)#GYWiM&&*Ye`yIcsHUw^8S6?ZMa4F$Ee?9ns=MJq1Kuz zJE|JWu(SYvZ~+R3$9XF++p@nuF|kg|*7Wc|eYhd=g)w^*8oC{P;x3QE~~8 z>UzHbiaE=$N;>Yk2>u6Syfj&RX-P08>}Ih{5C7^KgJ6&~3w201FE z8iUvn90(Dm*7i$;n#$;#Ku-p(dWWq(?9Ru246Bq1y}$7o8C=&d$}YyG9%e2tFmMMn zfC`uKDsVD~S^Zm!eI8?uD8;&4hTsGF`2Hsn&x7n$l+4$~0+|u7#i0`WZe{_yaqx#F z__Y}2$`PSoN2^yg4|h(aF!?{ls7|lYiRqH#sZE|6OdL-BNB@dd*Vi0Neu(Ua7KQ zGgXdtKH5$1O!2(ZQU`q5T-B6}g-8v{7b@s5TJBTjzP6f=iatM#no=Z`LINg79)Kq( z6rD4ta}K%Q4831^*$i>;vcpIg=D9Wz-N5sdeed4Yfm4Wh??@rv{iJqED;?v8_ z$cZ$Xt2u?2cZeE^7gl)O>!RS>mcfUeeikl%$K2qAv!t<;V!(3wi5lBVv|>N^l_kkK zJRt0(t>8-V;k!RFw*wZJ z^T8|WJ!3s8UA|q>^3$k}TPQ;AtnI)St-DGuSJcFWazG#+%|7i*Tj|#jla2hy!$a00 z2s2H@_`tv!7Gp&$X1ILwyis$addSHdaqh{n4NTdsAg=`I4N=bw|7|WKin9_KZ>kmD zTEnX>6~rq%Fsak;JaDp%PWO~aD5Dd7&z_T5`1nMNh&JF+lK&g78szhkE16nhngLVi z+NUTLCm?>kU4UftyV-0D-IHG06JtQiZ_YMndy59v1yj#dGb^^qs&mfIh1^l>nHg?1 zl;!f?TpTsHt@S2F4`tC^Z0=wocPG8UgGjO;FYhWAVkM`fJfQjpe7-VyML>p1q>Kuz z)t1y@IYkr1-Kqyw$y`fsZ=JqnYg-6}ri_9`FI_pIyt#D>9zYwnoXz6ZmKkSt;M&}( zFpj0T?cI>LnSJn5+>sw1zfyi=J1d*R3k!A4;bSbK+rl}cxw7f9(S8LvmSS}&TUGI~ z@YDaSut515+-0 zl0ihcsQIGtmwuG}^d-+YPRlhVv#emSGAg}4p}>_a=tDzpwb^RE6&#z|Upcu6s`c8n zKTu?Yt<);kB(vCaxS!=B^pBJ2hy>eLZ{NzkZ8!e&whd8t^$i_^*+=N@7Z_T^Qnp`a z!+yNi6y{qVEKJrGjU!iA7uvketXj=ReM;mM`+XVvBCMYSg7NB+t}bwTsjW1BAjW^_ zh4!%t(Ze8@bqG98iJ91uC{ zdilrIe!h5Z7|USEGBU`4!7H}DwZ#ImMun7LBg88~0k4hz1?eTDUy( zC*ig>4xsvaS&PLCxDGSXDWS}B3wM?o$AK|qksf$?Kx-UL$tXD1cPdWAXpwAMpUa?; zZLN(=O;0O&GN_x8@$qTW6l?C3J9qCIi(8GAFUd_gPU+#K5jFXdQ`gR(JsUnQshD8m zOJ44BfEK-R9QL{iv>?5tYvM-J6Zm=H%$X3JRw67bpo>bUD=&7{m=?{ot0JmRlscFy z{J*DoiUfq?@`eF@H~RL|yN^N{cI`WMLrjw!3jao=v$y4fX+YtBiBM(yGx zM?Yx{cUtpiJ)v$amp%wHg;=;?U!}aXqIJ zj%h{+hZ+!$8&TCzijG%ny3>%od9Y();ewC5&lV%X>e~lwD?c<~8iJ2#;FLUv z8Gd}?HMTTs#n3=KKSZQDeZ+U3vCa%ZElFW=s;7#Q(8WtRslE*7T54yc5I6y0 z^LT?%IhEAI@M^5}BUEp_xA|(w&uyL6{6nP76EQF!qMtj*^dn3K#UT)zsnvc8mO&+q zr__LAaZ$Rk7qx5sYrS5JsWV1Q0KdIqfmhL!%B{g_h7W44R&PD>O5lz4NDxEBOCa1- z#o%;827>Tr|DbdJ`y#rlmUu{C78cxvhDNo&a~CZC>@)=Ergx({%5w+5g{LDL#?aLdxRf1rKd)_R;Ey**12n0? z$pl1sU(akG+g&L;_2w&C^60%vxSnonSp*hlq7T0?n3vqXu-CgXR&*J(_AS+Iuwk> z*3yC^RK@kG9wtZ)IVV)mZm8)p{JRT7s4*G=oLkc|^H9&k>erVBYJx4W`HA{mM059! zTTa$3mn!C+BU(}r)lMOlm_y|In``2d6x-)F80tUxR6_PZBu{4GQdpIBfk?pw<%i?u zRt>Y!@3&Mb4;rC}bck6*-{4J;Q}?rdi16eQvCFN-+cu52R)P~jxj}4Y+l|bCL>y7v-4-_b1I?OsxNMR ze)(!fjMbRJj-4jm$(kGHg0e1$`0tEi+pn5JK4kU16WY>=iMrfg(twMXG0%AD*D zFZ9b2+o-iGH;%wHtUgbmId~!0#eZpbdLi=yHCmz@j^GJ^NWE6#4en~ zg&YgQwcQfy;zr-bp!}&Z`Hy5Qsl3m{o+{Nuhg1ki@Cy~c!btmyIi51&<`eau5G`u! 
z)~YmmgE(zu?pMWo#_q?uw-?K?(6@OaJpfva>iCg~u3kDnCYM6#Oh!b%S8_aRY<$yz zW>?^fkoQXs_9fFYwn(u;s~a-uOAM8PcDu@{buD|N?S&64%2V4j2D>5z{fosd40fef zttfvBu`MBlm5rTd`OfbeCx4u7q8MTDAt@{;C1-q4shk{o^xg!PNrarQ^`oviS}R4$-D_@+zj?}TQ{ zyWfb=wYdV8?hi0Gz5A*XFycV^M;+y>8i>m3tl=)PvHAB+ z5hN?mA^2y9(lx4&xTIzJ4op}`&Ux1MvR1`%i%@_V;D6!Eg-oT&Hw48bdT|J`$(>1S z;-Xnf^v)V9V|fd635Sefzvapr-ucB+Nkao@OkUk>?gK+fJ1UcPuM8vF&QU7nmd>}< z`mH*o6Y-j&2$KYVss_YucF%nL=PeUIk^0x^&;{7JzOM_L!%mA4uQ_?;-eo?&YbrbA zJxeiVXflF4a^(yQ&DUp*##=O^p?C<&W9j`RmOtqREmFujy{<#X9EvpWi=wn;!xCJt zk4a?vO8V*u^3`;jM@Sk3 zQD9mcN@knI^j&=Mc)oX8IVTPb))BaHTXpYqDIIW3EE&fh)I5gV(b=N_MUX_c>7n^G z&4*XzI#6_)8%Cw2y*g(0aUJ%K4)?7k{CBURRj;nMfr@EbO!pcByd$RRcK-T$wK}?=8W&@pkX-ML&tSa)%G< za2F#&edM|JC5`O2_(^_dl_t7&yiZH|N?$C4+FmyHDb9(OJ?d_OeATcosS6CFrux`p9NpF|1mm^o@ zOis)sSuB-Kt+!HQbUYBTnANgD==6@>9D>`E`50m^X?@@YY*t>X*JtECStid8VV+mM zWWBwFcV^_IzpPzFPQW@oqW(^JQyg-&5P(!m4(B!o2bhf#>=R&glW1U{@)CmHAF$*T zIUO3PfK_jq zxnM5EB~|e3_w$sBwO~9I=Z}{js$x+yc(wAXHtkmX7M12k`MEt*=>y}Wp=)9eo*LYV z0ZZ4iTcyiB`iYYh67yk2 z*qWw@L=eZEMY0nu^blP;Say2SpvFsx?kTAhPz>K@jcw#3abI#f?q!aXC<7nl5J*KG zT?Nu(g#RUq053!|fWl15-|6gd3+=ZORyNzOMBwEB^V|t1D&`ui{0GwlQJVo>#u^`X zba$3_UyC(7uh3&t9scr)rbA5y-H8-N&nu1{nxw`N*7E&u$5J~SvbDh0;U;o9Tvn7D z%CU(gn>ZDNN@VHsT&=SZN3FAM)5$IHpl^r*f5&o{9yxP^h<=c$cmEd0X`QUjiS+(` zFCiu7G7lik4KymlvG<2#a`TEiR4aOh^1-+y$JNTPsnKXTJ{B3`(E|{IT{oIJk9Vdv zr`-w55!;;|P2SZvBA{G?;#e;gEw$Y++3*@?JrG4v;@gIuofK>^cXbp?Qi9qkJ`=T7 z%~|KA3A}xSyFCom%ym_riY2N$f5YTL61>-adb&p7)wV3No37#L@GJUm8h%(tUVa15j#hi4icIKjf5PwhFL{UTM z@vfZoI-YhLF!u1k$;$s)X!*xgx^wb*P7tc$K)^Y*OIK_qjCVsy43+5>4_Vs*+o)5p zBh=o<&1%Qa13eRTaG~4MAmc_qZ(v{R`g6PH0||(L ziG48_-O`pzXu3Y4AlOg-xFPW#B3h|u;g=|Otq!{sE2uF0Hq^@6%4!VXL^f0OOYgXn zfi|$aS(rJSsELZiKxmP?5q7VN3#~Sn2bm?ym8^oGt;pFZk&!R>J<%2?fI&t&Eberq zzHv0~4n$#?ihYxLAW|pyxShg`*ldJJ${d7uA=V-k2}rMylP1 zp@qvAzPWzu;-t+LPdtL{hr{WMu=V+D=QUi441B2#WvgX!v!~Y2x4Pr)5E_Q9)?XuZ zij6WX(pKT=eZ4#9p_eRbw>N3kg!{1=(GQ@hz_fp>qL8G#PU%^n>5*=6Z9NcG&oii1 zb=2`V#aPy4|F0;UY}B?h7_3DMTZIK^5ic)~_igwpyk zmg06JHNk7Gk9lMGmB#$0DaA)RojPJWHH_ub;i*QMrF3-b?-&xbE z%1Gg+&8O7+TeFE%TKUa2TadY7;FNwpD=keLww1%sn3`-?>yHWA8dr(6l5dA9g#@CE z$LAY6NRyf@zM4}Q*2fc;&$s7E!wnRW`TUX9`*%ine(~99yazJ;=SHT{#{;@U_pL?f z)ub`a>o97s28=lp5PfJjggeedjcu;{6FaTHnA-hJcUHb$+ZAV+->G;xp)oK%tbHPe z@Atlk4-M3`l$_mgVB5&v+}~|4=t6S?HWxLuY)2i!j4a%;!Jq>KeK{;dbR(qt>&zB} ze)7YpG!XeT>EL}d*LGWBwk=dc-H+5JQzksQGin!~*;fK>JDSyOzNPYK@{RC!+w*a_ zkP~vs=~~JI@*YF(6>T+RCWuE{OZTPbYBcvVrQYu1B3KY2eRV<4ao`frDs`8Rklm8mVX04C8-jM)q6b*dr3SybOqpEfbxcjA0v3$)Zdyh;W@ zprO%1ClCEyv>cn)<&~V3b(^`_dENNwcB)m$eM2G+$=BV>V-)T+UZdwau7X@~B}P^l zArZ_5-1=$xBe`&_dzv0{t%ol=b6f;@%d78G^j1|``9Xp>qS6AsZS{tw8m6>jvp8Dy zji5!s@gz>beqKkL&zr@C(bR$hRjnsHV?7WEd}6hUt-1sP8_7^9x{$pcId%e;y?Eu< z5*dIb9k{0maJ%m>7)X~6)L}=m$Hd-?A*jaS+05PNusgUq2qeuKU-G(RF5~^baFQ$N zmP})n{#;Nz6)4d9kJvItG@~&PWh>(yY{3ojZdNNI@tDfl_IC5w)?vNhIS%j%wU(N4 zlV`KDvI0KqK_hZoojEqP7Jay_m)hb5iAsp&wY#rY;|TC~T%-0watj%)e@eYe%b)G9 zt<0&Q-Yy~}RK#dctF`;k+Nagi1y%c=kmV+us*Dn)H{$jH;3+wCV!k)CXXY(hdh7Ys z709?h!+o|M4o_ay1Y7L;MttI3^(K&*{kL6ZKBLVY~ssX;VV}%J&On#H-(=P`E%L_c#WUxyIiYw#i8~br# z7i(VxqADS+v>dXaQUqwfbMhB-?_1+FaeqXhl=z+UFdWk0H@>N>Ja3`j{%u zuX{)T4|{JO4R!nfkKZX#Ld%U1lA^5HvyLb#TP0+#EMr#~yOB1P5JHHt#Ms4P?2_zz zc4I8rjj`_r-)pG*uHSpP`JCVRp7TBD^GBUdIbE;ox}Mkb`FK8_8^{$o?J4^dVMV{z zN}+s^1OHtZF@S;?=b_Qsu8QrmO~{d}@;*K7EbtC@zx*Q&9)gEF*eelrNyb53D8PDa z4Y#H+`rzN}vP5^hfWtM&~SN)>)=2HKGRYGl# zu$h6LP(ON}s!Z=@y7Z&18AFc{P5lkzm=Yp~JEK#k3pQ$H6h)C+9s=HOwEoiEGYIS;xxk|XSoQn8{Y~M8S?}te53*U4dnjIuaQ_Ap)nEDsy&r+K? 
z(B5nPgpZ^9zyPUhJny^+L$7}xprMOk#w#Wx4U0F@$c7C?b&xM;th8)jUEY*Vc*;xr z&60a1(Ou}8%Q}|2W}U_Dc-yx}Y5gWtSVJ^EPIbDsn@2lkglI|`M(3xu+>YUOoTB$- zmc8V`ro`4>vC8)!Nnfvw*n)bn$fc(@PDqQwm^Y;RT(<`25RJueWZPWcXir`I?)X43 z++4>3PynexgO~m}%(L&nVY>4GJ+()x7Ihmv{C40lCH#dfB3QTvUkm2{{`Ioot7?b| zCs*b#5|z5c?-+D=e_ODilgO*Pz}5K~XwDRM8o8x1Y6nV=VXzk+v!yzZ^NO3U0| z_SA=IX1;Oj1mzKPh-p%GWs@VfrzCDGi62)wWJ2gt_a!< zidQca?X+G8yfP?}98Ur6ZHYEDfPm?c4Lq&0;d*AB8lj^zISG}@`}=+yUjojt>J zbj*aLx*sZ9KzPC%(ht4tcIwS`{QAHDCUcntL@%Z1dur$MT?O&LL2B(Mlmox+6~Ijr z&K6G*^@rTM#WKl%d8#5yDR$->A+TCd7j&>LfLb<6F;F?0K?_2z;YXHKjk!#AK>Rt# zEotv#2ZDkgytXSrp)m#R07fudBYj`Q&4*qVvi>Q!kjQZZ=#e4NF+1n%g4yT=p?aeYT++o9!l}^mc2s=xRJ(fWd3!U=8Y!&AZ(y`xIdXfw8 z+_-Ur;L&|_O|K^(_3+PsPU!%LC&b|3M06G*6f+&&*?&r2{GvZu&W!SayNrq#M#8D!Mv{o0hDv;o`w{ zi}En25w`;-w#+Yz_W1*pt3mYoP#~@$P3a*mJuBAra3dRZGpyGO7*sVmn^_a2#l%P_ zCLCzGC0yG0S?b!a7Fmipf!&6T2^a+m3NT^wn!0-SyM-}UV`HgUdWc+!SHZ>oA-l+C zc`Q?KD2si?OwhPHN_Zu=^Yc}srl{w}0QP0V;$W$h7WmgU6^PiHvo~*o zzxQ-~>vohxFU<0ATh-d-PcD)-m3yhgL54jYZBv&ttiADDq}0bOq2(mU4W?6TQa#V< zbA@&ztli2{V8)7%`w9LSnU8PhQ0{6&@BJxNYyWsI{vDRbyXE8WDhQlrjPv^fYmqmbaCB-}o zuAS5Upm=Z>j^~F5)psa%T zY*e~aN6E$Dg`=VvsH(~Or0jCA;x0ycj=7P{_8Xa*B|e)|V;V@Z{~#7x}t1H}F4OIZZ(Vpoguve_0%<)&`sz$lMI=XSiph|o;PQZRQ&rdw!F)0Tu6`W&wVWH$Nj!T<35e)#UgZ<`)lIqh40a3H{2 zTeEbI2UgpJJ#MN6x)vl1Q&^_bKQ(Fq7uT02{V$)mDns^n^m!!IHI2Pej*KV+f)oo6 zQEn5{Z2F>Ilo|?@t ze<-xsl_OVaj5RTQlO8SzXYv}ZIZSD;%~X&hi_>Cie@^rWPEM5F9y`7h6DNn$x}YyH zgCU>sk6%dXMYT{iY#u+cMe)W8*C|(GCov+-zpMQH{G7+65IM%uyy7bq!`+$fSWl#542R<1p3Fw4u7eHx}w*>KBLD!DBGA(1C4 z%knzYB-QyNV!zf1@$=xZC4?wGyeUL1*p9gJ8SXV9j)xST!eOrS`rQBLDsCVY!=&6N z=?yB;7?RI(2ZAM<>?$zdD&C$-hO6YnF(Jx}SMCmCv@z164$QSdGz_g-;tr9&oR-Mq zy#rKcO+P+~RyWE(DDD&0S9rD1$v-K9!j<7T`l$~wn{AEpa5*&V9?>6g^2~0e9eew; ztx}H0vlR#aTiiNPqo1sFM3)OiG<0x}X7MBKX38Zicdan(%55lJt?0|j$%dm`S&?n` z^>P6me0<-Im`B5x5`vE!!6 zpNtF>f<;X&J=E|KR)k<}^aO<{1m;Q0U>$bM;);oxu~(7@V*;7K-|li2+20`EA{^y)!sA} zY~soO%L|L^J?yX>UH5YvSozaLbHsn?Z}@)nXPyBufDG_Vt4`@&iX)>i>5fn{a707_G%Ur z31Y&G29Cg!KGK8dX0F=tm^+TKBxtFt-&V()EDxzqdD@&*-h*ifB^6@P(AVHBs;QOe zG2Yfh4xUW>NE1VxHIbc=VE$FT75Og6j2}UuOw{?ypcCk)dnmOB-3ROsHt84DY#fPV z8g|(>NCj!W#QTDEY~L64Z}+9(6a_jVU4^cuq=iOIeA&}&B_Dbhw6uWo2!8i2Ydvm< zyT8oD3N|%9MckB)%x`zqz3%xLY*Ppv?xVrM}a2X1&BdwN4QtXKE zvG3XM%T(?c293``$4ypKLhF;UY9-`*kv(gUo82hnf&IdI}F8CTTZNQ4cj((VC_{ox7Tv6Ra-L@&+l+qb_xgdWq`Ve8Cw zfbJ+vVLS}VHB1Ap*W@Q3gg}Aj3PtITp@N&B{mF7fJo(ci|I?q29(wm~;cagb=SOH` z4uyyIJycUvd|QV1gJ8SZ8a-X|0o@XkTzsbcT_FH?P(o`!($eGqZ2I>f#_SJDUp2vM7Vf& zF{diAEAK9n=!vfU2AtA)m=ov)x#CQgCR;5`_M&PZ%iN91f~tCX z7fmM zCTPi#-j!CLhU5p+uq>3?KjC~knnaSB`f{Wlj#%vaEq*64j+2UWyn40p0t&zVjR!UU z^EVE9ey5pEM57ZV7!Obdnq5(3c+p<%zxa|RMIx|G?^yB)NPG8pTqoShy0$#+jM&;i z!zx_AbX_8&S)mLeEJaEy!8`XHpLgRwz1yGfn@J(+Zn|3|@urkgPJ$3zeZDbK>*@an z7P%_+3tO!nNz882oe0Vlr0_hSJ|-HYnwAy!C+d=7w6G;N?0yYFJ-VFn6pQ9d2lH4; zHuEf>{$n{PrVf#Qw@>-1_)EMhyK(7>&Pr1x$vJD=%woI6YB%y7Y5nvcKdpAnGfZ9m z8~_4p>3$KP(Bv(rsS^s{#_ieq~vqaI{#}Uq@ z@&+8{7oyBvSs2!fbL%^iCY0Q&pFOH9%rtx&x-PJ{9Y!}e*T30K!5q;w{Cq$N+KyJW zIW%0KqN>=jeKrx0UlW$NnPYxr?hWr(SJ&eSLM4%K3ej*Ly-Hu;_9;WU^rW}&!PPS- zu$W=JCkHYrMheQQ9{lrs)E%ENDv;vN0%yovW@cU54n*@b4pl>irRZ>gc6I%y z?4<$K@BRiRfbu)+>NT56cuGv=>d_jw9ikU7x`v5(CrHEHgp85Bq3BAht3a_0??e(( zY|S;o%~I)hP3S$M%f16>1h3^PbZ!Fpq<&z*D#ivN=@ytK{D2f?gBJ`KJTh;Tk;MJTIulUn_xc z#8g=2Fz|vFsB>1am2dpz6#S|&EU-xuMs;=dxjI4BbuKVy#||WRO=oGUYiM&8ak&@^ zZ0^W>kUEh03~SJ~vhlIr?=tTMWWJ*SXVFVRkrWqTUl4KY<)@(N#<7(LRl;Z|HiMV) zVQTW?3NAyoZ+I8FdIFK}k}*Mgaq*4Au31CR_I;F5JHgQqolVGPugU!9v0p?X%zyUy zg*3X$g)4O78zcso(T@~1J-?7NJ7|otQn;%Ya#!`krOWaxwl9?;CA$n%w>CF6U{s7E zg=+eUirWjY>F6FUMZWXffPWqeP?vTJ#R_pS}QNd>@M16FSw7wy?A~JBVas{H8$bcXC>-9 
znV}Hdm=lfih;-Z}UCK~Phs8J21nY<#cJD4XU6}_JT~k?t9!ukR{pDWNOh*xv2$M#_ zud39WXYeW!tuNni))#CPXortU`QnRV;@=vImb=w@H#m^?hP7~B5TMTp@60ZbGdUKG_|7G5xTzo0~5)iB_2 z2s`Lg{h~+AP$8=<-lsGbhO&NTrLD$>>v>c^-FJ;XY*&n3R4q9OrMTRh?MK2R7ruMj z_U4VQXcpt3I!m2{)Nl=@^&YK`6YP?#1-_@~Pi;yqV>!yaY-oBqni*?rYd*$k4x}e< zTednLZvYA&=)EV3u)+UVgcpRTv#m`^8!}C2=1a+#XUeedeo}-<#QuXv#x9mE&z7yR zBo1aKd>8G5vpM#{z1&s?yygb|AXNcR$v|FbI!|(X#*389nj})?K}q*+p*+y;d}y)5 zfR6HRgT%C3=9CMrdLlWU*CH)1{?5Wwgm}eJLznf7&ZpGkH}yU6FfIG$t3w%;h?h`? z`FzacYL7WqlY2qA0A$_{$1~czTcvw%5b3-m21 zwwt7M@(77Rc}+@qaPIXH@Zz5HZfk5Gj{kTEhuwi1x!HAxjxedDD7lh280pySthZv9 zGKrdor%KC{P{S3Xm!nAy$s>FUk(DK5~jncAV{ z=%8?iK3CDY&=$3iMTG&6rU5KFU9^udUyb5TNK_PfZ`u(Oo^l70$DFsdy167*^0X|c z=XUYdrfV1I>pc85P<5#Q9I~Ol9JTZ3ya;%=Sq{21|0w!`+9n4XlT%dTR_{9Hgt=T_ zv0b&EV;`F$cb?HhkAz2rHqB~-G=g_u+`bMlh_2Lz1Ixh7=wQ+5w>!6OtcE1I38`;m zSgMLera$R(Ngi+>Xq1@BS6uK(iGYnKrMq;j^P?%?FEXJ#d73+>zn}@o_0QN!G_`N= zksTfOT>2&;fQ;phjb&m6Mb14%hA+N3&LXeUfL( z1!r(;!*f4b4VX{J4F+9kx5f|D7Xaz-ze@Y6+mg-MXCkK~(zV+ff*PNn>SdJA^k{7s zA+gT!7T%Q`Ywzw^A&YPsHUsC5IO4H{>rm}|6nK9;AH{ouL*;F=*Qqms9I8tpO(h;4 zTMVxC)Uc5_)vM3n1ut=r9p7=Om7)P|{uv4U4U5pM$k{P( zTR*07YJB-Q+_XMX%_FF9IycajX2b7RNKbMXqTW4CA|q{*VZ&FF8ZEk9lJKJ8bduox z3GOgHrLeQcamIP>t5;(-*A6co!&pX)I{4b*)F12I>9LZWn<)cUk>jfJM9xT<#ad$! zigWnu36-}cpgY%=*3&F7w$bty1q}0OooS9F5niE637?3Gn9$6lri6>wTQogCxGS_Z z7C>SKk_P*oX6Ah*td~*F>~axn-OEz)KCj(WB*%@!5~osAZW(f5nnEE6=Mgi=M6{e} z$lTnViUTR77bdw-$%V#=X-D^D{`yXS)BiyC07o~#obDq|Nl~p-wI~?YuGP`FnFl(~ z=XAwg6)P`{GAwJ+DLFJ{zpd`R-8C-8ak_B7b-^NS-pmsUcg)LUY(qWs zjmyzB`6>6{p9LI-z*TVQSZ7>qof{kUt3K|{cg(%r2WU10d3;W|P0Q(+;>;pwW5rha zXE6AH>ja0=8y%F-RQY^{mvHGA~NI=x^!ngR6A9{hI<=R{CBB zNR=Qfy_tp4t<6aci}U0ykUV;Be(fvdldGU2>N-V=t z^gneA-MP&CFc~#)TfI&*c8tr~%X}q-8&me_b_T|6>hqWE`_7792;ueS$;l00oG&We za5MGr%=~tHT(5a1wTOpaYn$5}S;ZTf+YdEJ)RIkm z^e>k~bLwDeTdSY2Hx%`o5fegdKazLR+TpdX>vw1!PllJ@ zuIPcLYEqEHhHle*3u1cKEomYYmPHqrx)KB#tV7J1x zNG#{{w?|&whS!h&3{yjir%-H%m5=;JK1WlxW{nGeY$vS|Z}}F`kJRtzFMVqK19#Fa z3b>Db+Ekd)g`=k)F@{?fJ`%DRGW3H0`6+hl7oz&WkN4Lis^$B$LPMxH>*S2^@Z3qLK@K=9)&Z6d4aX2 z=3?inx2yu2-4+3Qg_xICwy5Vxm_^Z^)8SS*X1J#a*uLfm(y6LaVz&Tflw4g1RQ(tq zSbw0W|6tQkrsls~S<^XE`?>UUdbCCh@A)&aFYzB85z>0!#Zgpn&YCILNmfn2#zvu( z!f&tV+CmImP{92Qp2ziG!O$No_@3OW*mKsxeUx)jB4WE2g=6f0)0Xl1{i7dvnF(3p zaUSo-w^8z^c%_cK4~FNT-1T6Vxn}Ptt^*;4DS2At(jvHzEN%_|*W#Q^A%d0t5KAeuF z7Q4c(8ae4V)mKuYD-16@0=!Z8ouRtxpyh6p97nbsIA7-{ser>`)j2*BfA!Q==SF z^!2!REgSD+GsVm=2rO4qdLH|7I$y&n*WgLQdPHh+;o3y}6T)>F-ahzxtC2O(OGPzv)g*j1exCPIU(p=4-j`m_Ez93A zYtuV0)zI3nqGwJ#pW4jYzVSobmZYNGCsGjkrbibY%iyCxZE`|2 zYPu~_hr`|fXv2D~#8``BodRw0R{|7`R%Ut(I46sRz*wLCA5_>B8&YRCd``93jYM0E zd;_u=61g7K|zgwr{0EJE?VYana z=&N2=Hzu^L^M+0G#=dwHkPzla&raDlUB1;nn>;N=MMc-O#@UOh9(skYuzVgSv6VcK9v+HsvHo)x{Jf_MYZMAT@$pu2 zT8=jIq)qo}ZyJK+G=YMsMVk|+a|azDYVpEKq2l!-67?=$o^cp;__IA7EE+9d?>V5C zSKax9N!%%Ru3`gi%7vMGWK5ww5C|+DiBfCGIx+F2G_^Ez_^t(m$$y;V{UScAo*39Y4h915Tgys_7IEo zB3?}i>eJIt`?kWuwd_jVmlrV;4(bs)LZ3S2d+;-m&0AE`EB&3lGb3P(}#^03B7!Q_VXrA154{@%ov*K--$NpMug-yGRyikM4)M7EegM=Fj(0 zmEOGK(EUhZ3w9S80UN3ax5-JDf9fo-Wjh>XyV#a@Z)_a;fwy2X)1nn!Dz7KPFFiO~ z0zkIQ61Q2CI-|bxsV*ZqfaN^bEJF5WY!k8#8m54d{lvQ+_`LWVr3d_eHDu$AXLc*a z#61ZxZ(G%Q#*<82G6}ofDc8Utp#lbUHGMY>w|ne&L1bQi8nUqxcm!)HXyDK_92``F zHSmzC&K>N6`UMtD=VO+OCg*C6!I*SQ+e;X0RJPmZDqV3OL^szi5>9*ZF$=<%?QupvBID4p~#()E4j8Z=pVkMjyCia6PdmUlW48C6}RfP z?}LYji|d7eU{X+Ah?aO*b{o6RqQpErS1NC7INM}#Ad!Qk23Y7yTrY|wj_RK|1DRW- z_BpEco|8Ek>u&|Bh?vM^p4W7P>eHY?*w0mMk_^mSEEzj4RPb<=$#JDo?!^)R%Eo$fk2U z`Ky3XP_`Q=(x$rx+*QS=2rw8cIAgFO_!!1Yv$b}i@58%kRYJJBplgmR)4Dla(tW+* z9A6RP^}BbijWzVsu66th>o=nvidTbmjPThV)^p-ZSSU+#M>TE^y*>>(DjD!P)fG*_ 
[base85-encoded GIT binary patch data omitted: contents of the preceding new PNG image file]

literal 0
HcmV?d00001

diff --git a/docs/source/huggingface/pic/hugging-face-sherpa.png b/docs/source/huggingface/pic/hugging-face-sherpa.png
new file mode 100644
index 0000000000000000000000000000000000000000..dea0b1d465e487be9e56da84b03594f4191a7b2d
GIT binary patch
literal 436271

[base85-encoded GIT binary patch data omitted: contents of docs/source/huggingface/pic/hugging-face-sherpa.png]
z2x>Tt+%>xtcZY3x?SpVp0QM0qt|${+?>Uy7qlr*P@(Q*nYikZn*Xyy8DkXAR2U&cs z9q;{1jm_HNiR#Juj0a9V_wJjX?oA$VzNPJXMfd3!gE!JFsjub4no+vn+B{w*hZ2<3 zT-7G^es?Um@mxKRW1F$BSi{-ywkEir%%AzK@;DoBlCIGQ|?!Eo6{_5n+hI@krFzB@Do%Yahpfl{g^T5Sh#h3n`{CZ_J zxht11x0i*>$LWI*i-h5 z_%ch~lkOCS9>n}-(X;je8(0+v8`sjI07xsZ@kLwcTXiXgbWu6HuF>&p6LAk(&?vXui;o z9UmBjm?0<}Yxi-VXO1u99hX9@H(@q%LiSNZb<00RDxdN2@> zU^5VifA*36%>R0#Khs|@|B-{o27y3-z9D}mmu#^A>d4@0R#k{^sfXep-6K1IsTHFvbv+Xj5MdAjTN2#FB=0R zIu|S3zs3RKcH#UiS{XU&6S!DeT03yM@DTl@2j^$`uWEWCf`4>zwBR99mystBvavTJ zV5MWBV<6&%BOoB)w*O_!sVFS^Pw>w-9wJjmM_W#MdS_>6I%j4&8+#LaMh*@RdIlzX zCMMd?9<&aw){govwAK#9|1rpajw5X3U}$e<>u6?UP4L&a`UW-tM;;=gzb5*x=Rfc? zaxweYOx6zn{H)Ipr2ngho{^4${=dfl1m*s#mQ&u$#mG`!*v#saW}kELGO=+masLD0 z|IzfXDgOgh#lgs4$j0gu(vkOHRR1UNe{cMsfd80N<6m$!32&;C!Dy6#tQbmLYr%fBvRA z1Oed(kq{P8b^$%rf&Pgqg5h-(K@uMjfai8$04gkBMZJV3lK%(~1{W5V>jM*l3(+U2 zRt=tOARw=BB;!^|m=8jkzxd(lFjno{EJZg=+T7(X>E!(E!KGz=pLWr_x4XNmz&JiW zUY^V$MV?k#U*zaUSpWy-=R-gM1M`2A{S5Wdl7Pt>$kh%EKcYHgFd%;UzaNwUCLSHk z)zx*EMX=a9pBj-#C+8~`!t(zg_{y)q{K!BAVry^Yk6l;j?GR`Q@FOBZK!E;VWs=yw zio^lOK}qC{wcIBQZ%O5!bX^i9;35yUcW|I2Cr=_!3xCtz2!s7MX)TY0XzT1i{3)nC z>=FnF&h-~ZTM9_2bXKT;LzRLE%!IAHsBfL?=(QU>9^Ex|X4`;*i!#QZi8(ZsUW7&_ zR*eGUvg6`RCR3n$7XwSwtNuj(=MEX_S`BNUW6MfGOKh)4nId^W?%MZt$)yVY2uk#es>5 z)<`^attiyhI~WYqSr6=SyUkr`T-PwaLoN8(czSBdEYe&z99N*z(H@zk)_%W2yye%m z7XYHnS6fp<$xJOp>8mU2nbh&`aYWbNZ;%cJH+R>Vq$F|y8D;6eEqK=*lKAh?LKHnO z(HK6Og0Vgb2Y+yEmfURga>`-_lmf&fEuQc;KWY>h-?BtgS#qu<0V}$mNBWL_2gP?{JR0wle=>H(L;yQ#`IiJYv4AyW z9ywn2ra122KpJCQc8_Z*s6Sv+?50-iYDGfuB|)7t*=Td3giMyb1RK0-i-fv6r%e6y zv-JIAbWkFAmw*CZxqMHQvk8CCZXNfF>qx63f?f;?vVD>Tx zN0LX8IZ3Evui{{8e(Fr&rN-q~J%4-MM6_4V~TEOJ`p|0aFe zh!F8JJU3)z^>xw~89(9^rghd8Bk}WacLwBpGBbcBGC{mD^)G8D$K#3jDg+QjNuE;B z#&T-36>GVy^(j9Rz>Y=Y%@J&^&{GhBTFO{`BQq3Ajf+j6_=f{VItXkjiKBZ-QXu}z8UM3e#| z(Qq0&85EmC5G^uutDu|{$T z{H?L?NQ}tHo<^NxH4oa9GzV6W#u*I4J*B1F(9Zo!E8~oQWNP$veDjzC0`)_t!*s(Q}ZYm zcV<;KMbS4h6QIHWYg3YeNbXBXJE+Kg1OzI|UF60SV;{X9>|662m#CeU>9USu&Io8oMZpS-`rM z7+~Fn%tpe=CDHeDLR6u3ItGc+s@{`lK5UHk3_@-qZprfv$kANIQCpf~gT& z%~@9XP8u}zYa~GcZ9kzTI(byZAWUlH6T+X zsZb*XpL7L@rlQ>QOJSC)_S|oTiee(gimn(h6a|6~5`rHBlJI|Q&i@;=_g^gD3CdU9pB)Lv1i1!FEKGb>;IbCn5$VSO}0rkuHEb&*g*T%r61;IPP0cw zijlgI+{IIcJn!L+{KRJU>q`a03js#gr4Ft;ztcYp2zj} z_J)Ijk^eBC2a4q6{Us1`O3kd!?R2K{P&6)OUf80#CJA`Ie2 zvt_`X-Nn|-0_)Y9TE5{3 zJ2*Ov5gumbldpPky1tPxjG080%_L&TLe4Qx)UHpZu-s zL8V~4Z6~?j7;kS7sSsgJOwqIqFoKfWl^ZoX2*6PBR2xed5{O7k)L2)HA@~`=KhynI z=lb{)ZOyIcMERwGt2{3LaH&GDxLonKU@DEIrWOFRH(evKTKnX!1~}NF%YhC-g$(u> zlNg&?sQZUnv9+N5-_^6IK_Ejq4AD&^y=y6^Xn!u8eSfIn(NZ)`)P0({hf{+T2 z`Dxaf=7{Htj2!laI;5Zu*U9mY68JJr&#o&W4LioUD%+ZRIZvV8t>x(v_-^BiPclSj z%ccJiknqs?4m&NtJ#n&7ZV~gXPyV$v-g5IDyw2*y8z^}9e~WvwlD#JB$VeLh$6FG=5?MsS4tbQK79MdNnOwnCDuB{$l|BCIf(v!wN&&9( zUE*#-DfJ;}E!^?>rb3H?m@YB(i)X6&L-WHmSLcjjk`RIYa--wVd&g_oa@qBycjRiVfBy)`6R=f*QaY0f=#*SUwl1AZd*J}!;-2KQ=fJuM-P zF-)-LpiGYNeZ0msSS|~4IiKe*Evff>_uX1kt<=rXY_yia<#NhDUvHgFP}42hw&l^L zGSBHO*A~(9xQjSWdcTz%^2&jqXy(~KHM(;({=RWEw$hL4 zY)Klp#vLJ83N?GFFJyKfj9)_2l=O0wo51(+m~lLBBQi~{rdX?~7ZYq!*s?JkCo#EU z9iO-*06ScH&8m>l;$$Ti6T^7S;d3Ayg|~h*xVNd~(ZZFnQkP6+l_v8pW0T+FEW5iZ ze8husFXx>W>B087RpbKV#^5oFhmF3S9{zx|HOYag%1KVMiV zgLU#p%E^6yOrxIa*Ga=!{K!bGIwMps90NBEXPu3hIxRA?Vmbep{V^?Zf9QL91{skM zVZJIK>nWeGMU0_m@}gvITxE!|f{1@l!~FRHMj&Lp(j;L7;<}g9fxwaZge;ztm?%o$ z{up~a9KTo;ot356dpQ=v)VQ^v>U4Y8ldqHmKhW%pL18sY{6r!?5tzBVxs z6=5)m5di2>V^Q96w+3&v0N_Qkc+0i(EEI8I>=|(UDSokdxU&xse;c$dz|+lxs=t_u z@uPzLUfXoa8{$RqK}tgMqaY%IT1}OtJ4_Vr(@B$E7H>MjjW5xRc@R?4RrVFWUuq#_ zT)IfQBiL>G`Qb9R_)s{EBTlfxH=)G%EvO7;Ddg3^_eVj}vucG;U$R}!i?#qc#tyb2 
z)<_i!xop0k_gg}4Zmx3j-O}P70nfJiLYV}Q`+Yu!o`(WbEi#xjOeX8)?%7*lT0nyRM3x2jjp6K+Z_lFF= z*JBRR(}gc}a$bk^!#nP8Jnb*~-l}x5P)*9+@CT&dDo{+bMDuXQH4>#*i99dZ|;+FS4#hh`vn;Q32(|P*0b^cx`e>BIUFG!lBhWaMZ+QtE%ya>j=9?D|@MM zVsL1HsoeClQEFPRrk}{!P#@VP6hUY%kDwPPCpgPzVKX*nMFh4S#6Iq574FhC3H@A~ zd*OF`Yt9tJKLedzy%<{3uaY|8!9)Z=CIcM~CKT$;A?amlilt5B7380g$^VC=E}j%j zl&QGl!Jatq{q8`o&T^S){*Bl3c?z&zIV&Ikw2+`IsMKh~7$e}ik;hQVYJ>#|IFS{@ zzmPj}m$42w+q?SNFD?RSwK%PdoCM>{hNJ{Mo zZnmNne2f5-<#w)OgQPE=HdwLumU4;n9aTDmQ%4|&kPrjjwwouKUjSoWTtD*H6`3Ly@WUTU1FN45^LG^uf`j zhO>~|GcKYOL@Z(2)%tAX@>PYsOQ`r0Oi5l)!c)!E8x@e&5YTBqNO;q=COgMi$SNQB zwE<$h9Xzc(LOfwJ%PmQ~D}o@|RF+VvFW<9cu}0bU%VA#r__SRuw6di=_c%-{yDVNM zp%J1QCJi1fTa6yxMxjuoU5tY^dP#r9Vkum2b(#^w_m-|T9?u1b{wANsW;NG-8A_g+ zjli2MvD%1Fs`mgnm;Y|vfq9v10+=epaptu5_`z!BMk|xWn=xZy?d1!ebjwEyhCX`ZeCOp0DNG~6D$UR%OE(POuY_9 z=7VE8p0(WVkF~q$%0>UCi{wz$vl|h~oT1TbH2d)NFN?)2K{!$*38Z_!wkUwGIRAy@ zL_lbbgDr||BtjN|^^&tyY??oH^GZDVY^3_IXTQeFx^1=xMnIH{H?GIUeFJkp5=P`bnx5kD7L`y_0dj6xDwla`tzez(zi+EV3CsV zH&{5`?`Z(%Ybn;vcZtY}WXl<$K6lN_oi==)_sb^0u$S}JsFgY^rQBoN4Fr4+>8k_e zV%_!@R3sz?;O;4HTH~eru%u)Nd-=uy$rn^S?eR|&+w}NENu~Dr&OIQN#p>8xCjK7P zuWQA7aprn&R0Sx8=Nh0{Z>=K!E!0T`BHeS-Vctu3;%sF5p@)6LMX9+F9v(g~CQlUo z33mb0NPFUd|C#P@!u3ZJW{JrE7+qI3e<~D3Z-=AQ0eGTPs#>5n(4MPLaYNgo0mWoi z(MD@@LdMB!z_53@mcVnaJ~d%CZ4v; zqfT415zc`+l@mAqc>Rrr_S0rfDvcFnRiF(;(_gGHm7^&w6)*+gnb~L*Jo#~AxNd@b z-hET2Uxw;7KEEh0FP}|?VOQ~s!1w)jR!)}O|CrYg)osBH9gBgUJk5Saakw`L5|jJQ^x*u*O3fKefl0pnEL1{? zYi}wlqIMt?kA+gfidwXHeULQV9pm1l80`W~^8mWx`?5)v$}ZdKPTQ7S7oXS{Fff9s zZ7$M*A0uu%5$s{y7<=M6MhT)YITIo+ZLSQ%gN;+u{X)ZWHN%|>R+2Ukbn|cshH96Y zw@Zdm)AftGqq~!5EY9cJ^LE9a_nwCWWS;nhm2F0IKv;)vZt3G}O3DsLZhI0|^SQ)@ z%8w+~a!r{>T_(#V2F+&s0wPhH^_B!bf9mP!JUfr5$GDaH$IwW+h{9V>3FZ1h2DpoR zz9L=})w@%{lu%fbj2v@^U zc1uW@Al&$Nf|S`2X3Mfs2an?+!`tg^^I~fxIatsaOmpkO#!XM52yAvX{MMNX>dNrEz0I@`&K8|T9cSEjKEoE? 
zcq>f8=sr5oLTOi>d9wTxCb0ExbIXi09WLh-*qnzocZA9nx{ck_doV`L zDm;K=W#+3UuQwPsgP~}9i{HxHOc3dSO==Bl5@$1_D)&X}y+ZLtMvp#QpiXN8ucPTn zI1>&~9Ql7nL<+)vV!psdNgf>?0S8j+%@yZLDC)gAq6MAL*CcNCCu;fK|782lwE!%B zSJiM|-rVVLCzxocnklG_?0U52{Wj~8oy?S& zKX1=!6Dpk@UL=$6alYqEcdxS8a=Y-(6)!+HO1cp+ixU7mXh)#JsZi1YCr3sKKMxC2 zDNX7<&VqbRKE~&IUWOv#vRPxh3WyZ-RJonrKG*r+f`qdHG__ayY)Kb8XCeuHeupjA zi;Tk)N|dxjoz&l-!HnchnnN2G$<-Y3D`C4$9js;13;OZ%Zx6?XOQ0!#grn(bps_@u z2{DQ21juw9dR(Hm~>2>Gp8?c0tRg zc`kX;irKpRNL)&W8LGq+CR>1HEb-3s(rZ6pH_PPa=7HHk#B$G}fWabgYWrHbY_xiD za68eak~_H9HQc0#Y|8g3N>O%30uBLz0EjMLM6{8uS!N0kdC>X9wW{O1ULfoxPY08W z?{NuAUnlpgyOq#V?g2y{kKFyy*y5|zK;J9H$vxBBZ;gvlKGoI z*GF>fWW^5f zL7VL?f1M{)W5VNc3xB>nH^f7KSog*Uv{qox{}d@6kV#GMH7c@1RG$QHt=!G@Et17H zHaagW)6}De`II~Cd@-1N*z}eoh{pK!_=h1}M>HUab5dkVioPSCOj+Khz2&Z0Xz*US zKoTuUc~iU27U^ucHW(NjG{c;iMGFIDiqIMx8L@@j9*?V~kHFO`1hgs|MgFOZ-_&3$ zJ4mRqbP+q+$P1u?aojoE|abkp+9gMht{usaEg{?(L9WOBxP zK{Vz)7h#_G=N93LFOl&v^l=dXGwMPY)K#KXba>V%O@e{QZ`1K)%uH$jc&T5fzazR* zXBTGIRPxFls?yq7cv7)2B>Zn7t|zfNFo_8CB(4 zdt_HBm2p>5zI|;*<^36L_K}A`@fu1_e>z#%w0yBNV*TyrXRdtvku=m)9|5f@uqS6UEb|qR}p{5Ou5~ z*dYZ1rl&a2*K!av_=lXPIa9sma%w8O#Ry(-vj-PIeV5)Jym$+j+okG@Ig$BvE~Md9 zjr6g7J~A;O3pJ0gBGE_T+T;4JjM=Wvuv*5BG#SW2wRvh(5tpmWiy~fW(Ei@Fh6*sH z`BWrxk`$%SUW0>W#{-LWki2Gw!rLau9Z<=CT{-UyWa$h96U3&TDIh+h8fC%q4}`~< zAwaM7@!i#EcLShz1Esi}P6=ETAoNkU8orEVYl_?+msS=I4T<+SsKsUGG%R&`dnf1x zFH;q@^(4}M#B>~ZPtl{>Sllx|OwOpMUnI{D4#E+WI6@;tUe&spc?$xROrk(etUpy-<7 zBjo%f^EVuOneyhk$o(^-CaY3fct2*IdFn|oM^ zq2b8$`YuL!>MKUulWvFDe5%FC;LfSidG2=ncpr|87J8r<1Cvucj#QH`Yxw(%xiZxJ zk>m`#NxZ&}w^_WIPB3prym-!n_4d|1?)m0#gOAtN$bBZpxy*zE|0&%xIJia;DBPP@ zW~$LS_DvPOY)3CO@<1|Dtr3C~r=)u)=#Qy%E@xkR(k~)YY*v~jRP2^((w$WX6qbFL zVQ}1<(0+ZCF_Qz6$ zg1_MrNi*@?%_$|R9q6<*HcZG!gevfT)cL({C+spvo3DAjoN@au39qk4;cy6!CJPr~ z_;#^I*NO*+mI=Q+Ly~HmBx+$5A;wAVsP}&_oe&^`NSRWxyf0gJeC@NF$5B&OrKL{C z=>|QxB8W5L;pQ$OEKT{Xq75PGwprT5)7hr^P35#VII7r05u}x(u7Qz0TtPrxp22ZR zM8w0erDuep@H1TdcYl8u8WPB~1s;kbkYO7BSWZD&IfcU^(VPk<=PNdAZh=%1@G|)M zo)xJ_yPBBPa+bph;Wzw7uY=H)o1xj>DNPkWQKr@?n zhw1ifnVFZq)^qJa%n^J*@s!y6)1e|NsN`6lh^49m%~-l`mZ}Zs3ZyW|Iz*`T zQKJ83qjTEtii^NNYTS;}cPa~VSgqC^oBhmIL<^j`|MXL$9i4T&oac4CzP9bBTA1FS zF8`%8Qb+v`>N-E()3DeqO;w3$=OfW_Y$^OBy#e+}_CGRj=dTQPbSdXjhcvV^!vGxj z@>JGO_ahE0+^pv8xSE&WOJfcc%2&L0&tX+f@G{^`B}{}eb64g)DY6w2A(@)-w|Se$ z|71`SZhgs!i^V%UKWyXic&!v zEufSl9eF}UNSMM|_m()ES53ymQHlHGY7|p)V@!A&J2SVc!XM`>Y~8UFaK_Iey>Z@l zh<*JPzAbm3$an?K zfT_4mPm$##Gv#I#E#ORpCn#<4cLjwT-t4}J58#06nVnKew19$(n>0(g=PmE`lY9PCU?-3_{jq=m+tl$MDq6l<&ipo!S%sIH-bbX-r>Xay)1tOJ)n zy|4_ z!fs|f`N`KF#ak_FW3IhbB2Y2u<{-<$^r+?n4HGqgs)mg)Adz*OwDjV0!I@Bcc~fo} zyZd9*p9p;Bqbu89!`zNKj|#C%2JaS=bHo$5 z7^P417rMTjInRX&26Qs)yQ*H5nM*5{d6uca{#<~jvN@PooXTi-$0$v&8G`5`g$b_G zf2}T<2z&^vDQ>Oe#32+!R~;xor2nA}XEJ@o8^7Ce;6fWaGmm5&fOvfD8GZjh4+bJ4 zAaLFb0aLk&3P`j5w=Mhhvwr)*&%F69m;KxHbLg3c{E4*mHzu+9J#`Cd_A`SVN&L*WdBamO5lYq3eE$ zY9?9sr0WUC-g%BrfwbFDR<^Nm50d_ zW@^@sk`mla=Py2D#8lY;C(3DV_om_rrO-F7lln6%WUeg-ilUlOXKUC}Ck^SjO?@op z%Jd=9mnrZUp@fw-+%I_c-0=3A!ZdF9(w8eRGTnnxt4%|w|C<5Q{FebTIelrk z8D3Z?R9b8wf)A-`Zz4z@lHoz=<~{?u$wkquC9bS2>BC1LSkI~Y(rEZ zmMlj&KSLRX=DXhh)Uh>iL2b>;ok=}?kGoFElSf#3{LFlmHC)KOZ)dgD5*j7<$D2dB zb3@d8%>`R6x%nLGPC3k_3ZM1+C7(&JheT4N*&zt0_bwvf`6pdqygcisG|YSm zTHsGJ`m;93f4HZ|&2#UEvinPv4j8S^>U)z5DPAV%CSzKtIVtDFw*%51i-J~FD)3{SlNfI=I|%RtRPQNvNP#LbV)JVADFpB}|be%_4l;FV~n_`d2h zPhKgTUglRQN9_?9$XLmIe}*Jz=)yW$Z8%67?)|!0qLil6V<|WHClHpNo1safA?X_* zUH13jB$9Aacc?~)Xw9hK5lZLL#6-t27lF5rTRH3K@uHZIc3HiAnp4oX{if>rk@zf+ zND$8H47YE8eElKV(*P$6Ir2`Ge}>vI_PP>!j%JI7)6_(T=YIb|6X^y@_vO$fdzJKc zSg)$-d5X*ywMP-8vU}sP8ZHtu7`7^R1@c{8T>&%u0)&C%R%QKhJU^f#HNg&khjEGw 
z={4%=0EeG&kBCPGpTfeqMHt)Q`bJ07dU|`6y~IwONOUAtf^RU;&_o!g8ISvM4=SC` zG@sVWRxAd$go2Oa$=~cYTzY#z4{Z+ZOzx42VzJ0KGq&kP?P^QpD!*UtFoN8qW4htK5w6rpAD2g0TP!Nb_X2eoauEZV0U|gzN3Ot964A*l%7(g;m zc5?c*St>mVx8uqX%Y@BREcfBQ4XdS7SRj=+6Z92Zc@5WjO+cr`sXApMhJTIO4mab6 zm*4&*`&(CUAJ3Y4SNK^i9l{RO!wC*u3ng*Khllgr9j%m4FC>9(^LqMNz(_g zuKe2YET7Jho@DA3WB`UdLe|aAXft}7|7APekUT-BPed$B^#BN$g^;f1zBAII4T$*p zy4{hQQ6j#XlQaLctpCC@y!@nq_iWQqxmY&-FwcKtJ~nuN?(VXY&W%|zp#9w&0=7(f zIDHV4>%Jn;)N6m>>dxm1ZV$wQ`p5x%5x;`Ht?^H4`KLE--C60R#ZG(9VxuJ-!fQX1 z!$FnDq4Xx0apx|wV{4^{E5@#E3A^ZU)XocIu)V`g9RO7B;tvK6`}z2_>J6XIFHNiWQ=(a9uUcwx+5t!c@ca~frG%Uu0Vl&5N=VLXpm;9L~B>4(Ez1&4Lq8%YS*JmJ>w zC&}Qx8;7?ZKdCOY+;`k3w#wXnIxN>}EaraX!5*^?T&=(0|9T=;{0hXG2;RJL`*u1s zomc02W1NXErK+ExL6WUjsf+rj%Yz0(#XRU~#%MH=qu#@?6P~I`E{oSZvMHOT0Jlhm zb9#TSi6Y3ONWK$@mk3@l?r5jv!muP#N(}j zO+F>&qpmBLXA1`st4$1{J9Efc&qlU_pe?U>REKZ)JrvNJsN5Q^RZlXg8(VnSUxUEP z%coUVcp7N%cQ9@w3EVb5g?(h4Yk;PQfo;R{K}Te}FFk?GO*1k21LkJQrNc*>)1VIq z5UGH`0EOi8biF4XWL>adARk4Yj3_9Rujfe@HmYQ2BxtHR2Fm&4K>xiWb#&7;rEY8| zW-SHrxc|YEEU*XgDsf>!F;UBfBI5tZdp0a#pVF8jo6r`e*o(z?!%gQc))Fm-B)II3 zRV&eq!}l(#mx2N3nEe=lwco5W>tXUODM12YaZwcX^?m0}Ifx@}ldEQkHtmFLAN&n zZAqD+i2R_al#R;q7?CA~3*6W29;Nx7Td8D`>qrEJIb9h^|RgW{4wK0g#Z`VdF4g)lr@4X19*eHeyXG=+g zGwlzls_vM?3zrBlNWC%@HAS6#5iQOa*5Paya+xeC9q-TBfHwVJPUc}H@~&~5fFe2G zOhc#UvK_pmhpK0nx6jarLtCg%qt(lT$8JPy;&kbv3~&>dv##g~e*ffn{yw#otJI0- zamt^?@ld{pWTgdgLbC!#G)6+kuLHPKL3fRS{&c02x4%rdKdJt5YHy&9a5F~j@9(<~ zYs;Y;?UBP!DZIhlTQ6UkPDW_wp}joPg80v}IdRx5w6lifI5u{c7@`~p!um5Iw? zA3xY5tyyoLAcFi#5x^C4kcBU^C0$cxL_k2J-4ud;#TVTXzR)Crg7tK(6!4zdC%Mug zhv7L=N{F#ALsYB>)aj8vQ5u>@#i##P3AW&4{k#g1)~fvgy?7`86c``$u#n zgQ#`O5gB~XcVy4kX|aF~P?ILxl{#0Hx63z#eUImJg$@~p)W9pj z$GZDPdsx2V7xwdyt3mwR#VqeV3obG&xBJr)@kGe>gU>_AeTYJfwY0Hx)_ccOx{h5x zn&XA%1Mu+E`7ymc$*1;WOYQiz6MLm5LK;e^Ctv>i0LR)0#-gpi8ABWSB#uP7_wukgNP z=;iqZ=3PErosWJDz2aR2?ni=po6U*j`L>VcqO6oboS|X@75}myPxTV}EDcWY4}BlS z0{&3w3agD)MQD5qCR-)FzCe%}%O9UiVc$rP}ax%~Yc)D?N{xuvIl@1Dz=T>A!cxuJaZ z@E}VKF{DYf*R$SYvBGh7oE_8u<}8WjBl5%PvPruep9Gm^WI)|!x(2dxq;pjoY7VyS z!d-pqW4&>651R|dyJ?e{?E;~tEY~t(u?OBzH=FM%vjZ@SeslAsqNq;s>j>?U191Z1 zrN?C0%g5V;UWai|KtS=0g$2$@Uw`{Pz$={fPgp;%Xm9&heE!sHRu-@HBO#h|fNe(M zxLdkkSLbf)#az2`Q8)U|>z3DB@{g2{4u$eV{E1I z>4p&oG2JCfI@y6MA9rHyh%Jq%8|ymNaxF>#?CU-1|GdEtC-nL62+7_vzfS;eXLuXv zgmd2uXLsIEI^{m^bd~#SJN>?AhLowlxXkf4PaA;Oucp9RR29$Hz$<7}~l&`k%kbDKKj#N^^&iU_*q!h}-< zoQxq^z(ktR7C3b2t*GGP5rTw+IE$xkCx_PSFo%!y>uZ?gN1;^vzB#MVM%$WfB{LwI; z(xJpm73xw)j?iIL4s#--vsI<@fGLz$cTXPgK~ROb6wWY`U3|nY!!}01?gf7`bYrH; zMx7D0$h8NCcs^(XSc%!J@m!4xJUl=v17SD79~xmA{eO{l)3g{oCT_rgSzE;pRA*%lw zm7`Tsf0YwTktXFOH<(PB{zf;LlEzam>0+s#EBn?rF>_C{`TJry$*-|nH3D9~Y+y)U zy!q**(>J<>m=@P^g^jMPM9=8Ge^g(4FO!+}`L|VXnbG#$Mb#>o({!h}Qp+{c-_&%(GC3e#RdEvh)i9?ijBhf!A;#AF3$&oxsotXM0USpYc=}o$^<;G}Ss_ z#BXICDGuw$g}r}Qb}YsBgM`KXtbMO1|IB9#6aZseURW#Pdpj5HTwmmy3Ec8+n!P&} zVj}3;{O*G5h#*b-?NiDr=loCQ<9wt8_fCVIrmP|`r1i20S-{wa+qpAA$Oa1F9s-Fs z`{@+KN7gl{HkI*mFSR6ij8W>aRWW0Gq1|Vj?73Gf0hp!J60LXseXU+JqJ$ije)lpr zWJ7VtX)P&$r8)J~&-`Dgu+$9;4bxRqQrW+Vst)BHws_#J!&2Y6ZbP9NAWUEPijyTFei3>?%~>F)qguq^`d zY!0Ihtr|;Je%~*%`g^B7U09c6FyhEmjL7Jr&9DN0{ysTc~?uajk!EwW6kyCd$@Z?-eBj{WIfr@(BqL3uOa%VVpW^L|Mv6;jwxb{ zAnoJ?u@SP%&jqgiclQ_mC=@w2LM5fFVY=XU*j9JiQ{L8fBOH;PT6J4D;n+ufHWvQbt|FbqE4LMtEPfdJ+8n0!WxDV*+{-C{ZO|SP|Y8n6sY1ITA{eWvwr4QV=RnTq?JhHh8 zT`4%x*lwxl=Nr>=ZTp3hK*`H4Ep^2#a{7@f_=BONk2t+S^33PAuU$u&HaN~vCR+#; zp}|X$Ut3A<$oK5@df(xH^_q(ra%El2OkqWs1h+2K;m3To%FX7`mAO1!sYwv&6qDsm zwptnZO?J_Xh0a-h-Z5XX+|c&RqE!X?mtT7h?}3aO12YDW}86l3^5 zF&@Apl@Uc>% z3J)&NC1>4Wm4k3N$>WED+Rz{#_@)&pB3_j36XocxTRSvtK%YGDR=*;P@yZvQR1PWqv!Jnb?o61_hu`Uk=%5x)7`} 
zj!X~t=(e3ymHm!_jg9AALGt8v^pw{22kxKF+%}zWW|Ff&LrnXL&RM8B;oJWt^|DUn z-v^ozs$&{+Dj^q*3=SGZiSlH@#&)gNmPZo2@4xMtRy(r6nsM~yPEt#BD7+rgB6I$_ z`Y`5uxLs6ylMq*~)E-S2e)yMX84bu+80Fv4-|COD=nKb}Wqdmj-QtcChi4>|)}FBH zv6uNNv|1t#h#kx~y3Vdu&$#ZEcYz25KvQ_A@p}ItTv9;WAP9KsJ=p^k>(k#?(>zw? zZ?;6K+%Fmw?2%x-O1%1*j+aK=42@(lepG=i9Li*fD)}x_#8e?5y~Nf2y&ZqN;Zt_o zHL~5YTNHMYUBN(Y0m?0k0Hn8VFk>(;W*esc^ZrxD;I@q-8~Odl7drBhT!NlX>h)M; zGJR+5`B$LqLS2GDn{Wbi6Wg;{1Q#kaJp>~cPHGebU%S2?Mz{W3FuUB}uC-$oE1o_OGAJ9oeL*K6ZSx;183wWlaFRu1ViHCeUql%J`}8`by9Sk*xLw{sGV!DUYUo z{SMaCpo4(E??pAO(P9ZUbgA9_+RJj#WTr5ABlkOJX0F@&nk+M)PC$FE?l;-dKh7==hsYF#JYEFmC#r8~&6l7;1<25%!S8H_ zWL3Yny*mjxO_`1_kaPLM`shnMTSaLg;)+a-$D9a@^qkY#U3FWP(5*Zkww+;_-c=}% zz3Yh?PV$HE&ziWPOyyu&2zV8l@fEmQBC{pwoQA(7a{mu^sm3TIyi&K72WT^Uhj8#w zU*mTD_UCtHynDOf9VpzJl)9~M!2M&5lLWe<8EJs9`&kan(sQrqgvjHSlNu&KDT+NZ z!Ln@_Fl1Tb2eJ%5NVRx}9^^b3>4qu6e$z%_;w>50*`6c#&BVBt^cJ22t&*M*`iiZ0_q zAWu5-#lT^elA6ODBFzd4l&TD4E1!-<&p>74Y@_EuhZf^~J|J(ZkREc-wg9rgMx>+E zw_9p8qqNy`2%tRwObeAnI?GmAi8<9w{YJVHF<(Uq)#(%GzA#f4p2-)&`X(07xs)yR z_HG1Lh-&(Gs?dhiaxu5yfdg}%#_RTk8&^$C$G<{b0as|tedKbKydaZ zph{Y?wNd#D+bHLCDC>vNU(IgodcK7=<5JRy{A4i1jFIW1{-=Q*(X?sjld10t<zG1d{p_oc59$!OC{;hqV;=<3K-XC#N_JSO)wTU-ffKAy2t#JP#_I;D9pajCG>o` zt5+t9>wrSkpiNovoe3UEuW`vH&u7% z!8!i+?gh8^WJl2TU(Q;cqC}>t#L%&+gzmOq8lmY=d%G_>;*^}!YRj|pIu+kkg*^&h zfgS7q=A{FttG8<5YR%p8&M{DgNt{0CeuN3N&L`ty8 zjypw@w$uG5B3?N7sZ6xl{Bua(XWeqDo~dm5!-PykdqW2S7z@6%tD!3MeOy!lr*G+e zY&G`=>JYYSi06?ov8Xsm)T~k76a9dG)Sj}U7;ZT2;EGy}OeN^`FOE46y4_#+&8N<~ zR&v53Nrg@WIdFFW0Ee35OTSfg&pYRPx*j?q7N|1bl8h0+cNCe)Ne<>z3Q>2qb+2_F z=J#9e6rx5Ai_GE!Nq7v0=RaFo&$vpJdnC8}Zr6RhZhsM~<|ulD|2!-GPp4RQ$j>;c zus#o)_Xk;rGV>VYkD^rF&LGGhvhzc2s+!j6OGSZTgO;km()(3P*#?xi9wLU6CeR~aiF z<)dF3Bbit&me-Wd7rhSLuYbP2laG(1gefqC7!$b9$FR1h%)x*tGyEAz3{tM|l^w%Y z&+X)wVuA}1V{7fLsb7Bi8!rC3>+w@2nVR2d=J!TmR_f95BZDa=h6G)Zsku=&?bs~t zqn3Xdb_zjV{xz7|Tf~%pap&bgVdunI%P1VU+H;vYvYYTLVFbeAN<7x>_vxhH_OFM5 zhpQ_ajN4O1EJ+LvP$HWp#E_lro;CipPkF^6t5eh~>zz<7V{bOB&Clh+b|u2n`FG$` zSsLJeXVvc$;5VGwPvQlR4*dD-+aSP50JEY~I&`pIbsg>L`YaW6eLZ_McwG# z(pLH}uZJLDUP_T00~*G!}7cm`(Sw*y6)# zY;T)(5v+a2{onNN*32L-+)KNam}&ZOV;B9q8X?{?#GPH2g!7205{z-;<0o~5c7()&J!|&K2We^avG+;BYWZutcsgmgS^`<((X{>5HZClK(P}{zrs<4%kjpN_TRLd+9Z#h@Fn>D{CmCFS z#2!g(^}?rLpOLRl6GO?{SiBgXv?dMc8#wehBIsMfIQbaZvPb0lXyJVRT?NFb9_eDf zKDly}(%;d`GANI!Lr&DgGDhKDU6ooKVoyswLu`^GFNF#f_F9AEpVx({_5p47VfMGW zx4*&?*IAzx<0!CDNbj#0cSvv_+ij2fY>C$D-A$=qamx>oYGZk5slSEz+`~(Mnoaop zxFj@9;Da{Syq-I&*>$0i9;Jx_~I&%H2lBH!OW zgUmUa+~!{xyDe4r@6M|xR&Q7Ga)Fxr({OhR3bFHK5&9+eO(bOWu(CCJEt_Li(;50m zXR%IAf4t&!+rBG6;P_aMuXuYFz82zMZlT?F$3<_{I2E#ctfdaq_5}(nv0xtzVjRv- ztGH?)wQfZV;%xv23?gLq;P`L}x@7@SAW-wkPwk%#chAB7;4~c<<^(^E7k)O zH#*ax-^sRW-Cp=*j?}L7K+F(vf5@DeIQ42=@RTt~LjFlUO=w6+Sv8)+sN87dDJc93 zz`0s%CP|!!r%a$}lxuz$x&b*i_>$*Ia}q~YZUtxYzNPrHR+1{b=PKRY|Kgr3PbAtU z9~l@=g@+V{d~v32*J0w@5jcTi*u+D-kIE+=)gT!~BA^wQUc{M7HveukS;Zc18{nqL z#=O{4Ek`W3K}eth>V)Wap*4Y3xk-kDS}A_xHHp9kbPf2Zbb*iwey80DYS$p7p5}fR z+vkiw!iu=6cForW-On8#gD>B8)Aqy4aP`OBd=6?t1cKj_gRrr&O9iYzjDw!Anu{Dv zr4^Fn1f|_yVMl!WR6&*qg3P=17*zdIAgkoN#v|ttd*f%Y0vTlTbJhoM576HDF}y zxzgnzY>>likI#g8O-)UBXw+w&Wg;U-Z`SH>KW$$5vx17FxooAPzKj_V z>6Vjj=~_@mG;qY^5hCT<&{5lICCHz*nx6dy5;#STpZJ04=&cf={9ggOSL8^I7IJ3E ztOcz z?3yr>>y6>Ckb9zK=Os=7xd2dEDcT@l#bZ_j-aQa-iZ(19Rqt*7<7EdO7H(yvv4E4| z-`}u~(NoWiO-FPWD&)$NYpHbG`Aw&y+9e87I}@X4Wf;v@Q~5Cafa&q?I1O~LLEqnt z!-Mk1%>^~<|G?%$`uIo|KL&2PU6`s%2zhRQZpBs7_zn~@6qpP$yuh2Rw`>5v=s6bE z{Nr4I4cn9d%%Dwy+?Ar&{%MbQIFEcEm4WHFSAW~pm-61z_@d34pv>1)nV`(CFhi+; z%=Pf-WJ^ZRSET%CptO@J>x>_n`ime-dgpUZZgDG$$9Y?>2H51{`qqT^oC%MRSZ*Nt ztLoK0BAOw{{A(z0SwJo6f#I1Sw7yR5ie4B(m__i05#>7|vn!1> 
zpD*`putfF)aEfVs0`A~k*bDWzG+LjCbbf|j9H_>nhu4wbKYl)lHF`A(0M^s{fD62* z04^qa?+R7i9}rbM?tU>T+?~q7#wZ5ff*q^2W%go2qYiqO+a@b~*4fL67 zU2Vgja^0ez!IC--jWI;@KY;1)>}1UuGSB7JZ%%&`2n@yr1~=?H{jN_WC`3e@w?wt& zzLmpMn&g;50T%4GXShAiH}9HmE(m`OVey|PM11@pUfpLab2t%%UyzYh77iPpz2>hy zb`i&vicj(&+3KnOjoHm@$0cH#}@SXX-pFVzm2sgj1i+Kg6x!rY`zLEb+;rFNVhtua`^tbWYor>s5^X z1NDrK!CS#>c;X4oN17pcL!;QB2br1M_l>Skl7%6`F3FiRntajcaP~^oO)h;f5@c^Lt(@I8Ghw{XW=X9XH%g!_X2muja!3V`P zsrW2j4dG@=Y@J0(!$o8A!GGKq8(0%Z%4O(8KQCVB)9ZG(RCCUgmrt0C_?m8>uc7zC zlRmAIhHuw=U`Ms{SpxLF_Et!0v>YhcqiOg4-+|8m5|p$UQx+{lDyjpv0;#&tPr|; z8A;w^idF)pyNzSHWmJlo%p<)21NX|lyFIqws;nr74?WHhYp{Z?yzb66cn03#Wt(wq zU?VpSrRaGoA8*CPT{+X0VQS;@#O-dyh^WBSjsm?`=7J9y8OK0j(+?yPS=~oc>a)Q# zT%*$OXYOS|fFLxs!MM-$Hlq>j7c@B2nEJXRjZ|5%>`pO-@IS)eWCkaRwArIM3?c=9 zp!x91egT_fZCsEI_E;R|v!~DS;V!17Bxu|J`O%|h)LYFPc)N4j@f^)lQ_BAyqN0Gv zX7S?l^w#pN5h}u4LK6fOE3T5L*W3UoU&J{I*V=9QP$hF$km$o&n7B6EP?QDvBCj4g zn8!CZtiG{4ABN#$2U-M=K|Bi^3^+Ysp6(L|zgl1_DM=Xi`>YYqw_!Iigb2&Hd}o?p z0>55aOpn0^0sv5VUJ_p43>WN!1^46bB$GERVpc2)pkgk2&S(^@Kp<8l&)=(yg_PL= zgcS`v5-a|5vrN6{|2Kk*%?{CYEJG&b1GzdCUj!CfjBbxR+n-qsOFJ&J(J!yT5?)n@ zzU0?%7*bk6zs;S)i-hwzT0(Nv$N;;_l}kUrJ3MxQ4i$>tSs>MuPZ@4!%7uv>H9h9iDa%IftDWp0e0Tw(?-hP5k+=^V?r*FlU9Wa z<%0)cWzaZR8yYS_x9|ilkWI)Jil$O^`=zp>3-h8mI`onc`m6q>H|>Ed&7=Q#MGLog zMB$Qnbx1eD$iXrBB_)2qXYiw79F1Ek^w9t3CyKdy!jqteT8xr3+jQ5D6MCiB!T~Btn4r8e1 zk?DQ{cEt9==+y1VD8iJPYnb36@;M=3(Zso7*jH$B=wv+g@6PyIxzKm=iG&yqEl15k zIf#p zItu9Hib7(8`5f7n>h{B3JMSOS%Acm43k8(?Xh^)AgJ=7{N_4)y7NieYn?03d0Ejny;RrWp)z-C{;F99 z8$?TSB!z*j^JH7Hh3WGpQ1R2f7I|a(1BIH6oyVzQ-HU#KZ8#zZg;xd*h37Xj*308c z-*zSj?!H{T|MF+wm=PE#3jRHBdDykJY+I{!(5Q(}JN;o#Y^Mvaec7}fq8T9v0@ge8 z4oEiS$^eNMxJy;nO?kL^1s@cJ*;DHa{+``*!N+s6uIcAcWAEzG&x7x#4}dntO>YbJ z+AdTpRz%k;qUQD{Wy)iUS@CYs(Ln-cZn}|XX&6~=8tV*kL_bH$=1RiX{IFOyIWmY**qV^$MS5`3)ZU`$VD*H=0O3)11-XG+EQMpMzBRXL82Bq6}$UK|#^q>BiZ`UlT z!jO&AO(i2EP}fCSTqQw;K2sbQ0U{c42n&%dSYkf(gsaCIDP;*k10rR^Bn_j5Q;u55 z4q5HrDia~&cckchEDD6(yAN*<4IlH+rvXos;r@E{|7gz z1Z9%aa-fw49nRZ)ZoNk`*oRMW8RrBSqF?VaEf>{Qu9Z77H zqILn)cxe7r3f^NG7{<#KtF3jp;ys4fLpoTecwbDNTeb$2CXO&gz#(_;;MPV9weo0o zYxJg(tpNq_yqb+Veh3S%jfn&xsD<@p>oe9;E0^z$d_omg3`hA;r{q+Eu*BEA_)mlR zR7qOPk;Ghyq?frkAP4=2Onh{XePl1gL99}E%leirE$*9X+Y5VPbrqK7N8Y(AyApfr zQ9CH3%`Wm&`Z5lXl74WjuN-`txn!_qfTci>vC7uE#UId1&>Q1JZyBl>%n} zVq2Jq_7(B*lFfs%<;Q4{%!Zvh6tY1vNpXP9^S*MZ^KKIzA8y8WQ!PTTyp~N{=VOCP zqIbI}kGii1qTBx7Y%~Gu2cvO~rY!(KfA7D#mZ<&o2M~`RlrBZi2MBLE$z+rEKmAsx zS4!2%f6T;Q-qgF)Y9~N{tC#M4PEqbKp}l1o2iK_o4Dw$P9uBPuw70l$BGll>``GzI z7UK2JBrJ+2EqaCbeS@y=Pio51!)me`hHEZo~SW0T+nry#>|L zm1d_!T;;_3-k#-z$+{H&?*3?QVmv*1WDJsgp_E1GlbIYeB|zpQD$;FR+!NdAGr9Mj zuk5;w{`H_50IFBst@hZ%lsIrQrWBPn$hMZi_;j3-+NK_pyPhRJB%cAX0GaEV5)@GI zl_n~+gUaIE+!+gM((e~E*6ZwvF&$;$C4k+Zc-3v9-0g~1rW?AO*aSR%IFRJ#E2_w` z_k9e@MqE4f52{Nq8+OM`orT(m+K-SWlA~XJn=6U^y?^i@aV+NdztJ0;*%RT4s`;wb z^Ro;>`Gs!r33{{SZHvu5u*blcG7g=@DYUnJWQgIJXq-qQOR!ZLZQ@!+~<%DE`Gw#@DDd3rxB zPbNCr;|D?RFS#Qe%!g4y<`lMTRPLPEGvyEaV~?_QiD45Wp<8tXq!m{e}?q3Z&)tv>yAp&LzC*VHf0WTgBAGV{^P^${fqOe+K}^(?o(!@ zOcB2Yzw_O|bh$K{w2j`K+z$Iewpeboo_nE%3sU>>*6;5wdA^cnkH2{F8Um(vK3gPKi^n#W>{4qw zsA^xff}zbePJ_~#f+sZo*DtMhCtgc#-`Wpsmlg2-syPw&>)GX$1-gFZI29z<;T{hZ z#o8zpUXL$g*Hu)Xtea$UxU^$QGO^AhH?QuS3H$BxC6fd5CL+D6ez=CMsd{VG>qce9 z!n8%dI*q%##3W+(VPdB-rFF!8Fb`&;R$llNQ0v!eK}V&i;y}67pMtKtOZD=5u_59( z#?OR<3W8-xYN7x@@<16*x+%9wcV!-4giKhJ|N8P8a51+h-)3dfBiv+*I~Y!d)Ex-{ znZ1h~+>RT=NTz?3U4lArPgPqI$^Qe0$wj_GxzyWEzG{#HD_$o?ZP;x}- zmWos+@YlCrMz1+%sG%8vK@DcVs%Lnt=Hvt7Q)zccZ*10Thbl?mW0fv79ph!GzO#h6}b$>$xd)T7O%Zn)Jt61goEf+xqL6))yl;Vw>N-cH|~&a4kV% zs)v3km>RZekm^{eGvhs8cKfM##_vBx6}ox{wY8B15hV&Xpc7+hhi@_?8na^Y*W)M7 
zFGRyLLh20Js=7^jERT5$JD}43Eo9a#aMCr3xK+JedKr?fdb1XXCdeykm{{Xok8fMd zfvxh5Yc8zq`GF&@Nlh-j?+-qk&L)9zBi<7kH6&L)ue+=g!6!VlcmQ@=C~tiD$1)ly zz`WkbXCz5jYuJaT)pJYs1J$v!!U=NOVRNWn@b~Q|_cuRmQ!-4(sx|`L*Jm`k%`5)unBHsrV1=q)%2IE%vg zN_-c_A5Zip>t=)Fu|ORD06KZ16FBAS+gj5Cy5;i7#qVwCe$Unpw47^nV=rsc)0EEX zsqVX;`YmJRy0NxaK4=liivv0?!8y31ntbW6>@%=;nSti*^Z$%7rt-0>bF<@$Hu{7( zQCdtlt`Bi2SvFrGIrWmvE#<(t@iS}Mqr&ZhcZu!V?+H4kGSnPyUUUY~DQ%mN)t%~T z=Rt7MKrO%Hhi9>y7DW5MT8~(0gl}s9gO9t5Vk0Gvh;b~oHpg?Qf zPwh0wMEoY?hGYOir;FDdOxbn+{#0pla`M2#`fx3UJ@9{oHRv<~bWR}P4ENEpQR9Md zRx0M%tq+Xd=@ni6LLAyLh>CjKq0Ocgq8l)b!qH+P0q!fh zVScw{p;Gm{gM_T2pK}UKF9WmA$*9Cy@DXJ0ce5>c;)!wAvSUXStQ`JfI_{GHHbMnym^z)u^Jd@Ld*g}aKs zh~ku;CLdnWKwbQym7QX;qR{1AD#dFrdgisaU#j|O5|A3da(HAp-UxGQQCKw2Yf8@* zDH~X0rozO@lgU}{9~0f+5n3{G}U}w$np@<6%v-kcc;B2B%3lGs^uL=8j><&Ah&!+8R=n zhpxl=AclkKqvv&7)?e8*wb8*vvGk0tXU1Z?KGx10PW12ytqU@`qt{GSsP9t+cI65` zx9y?)sK!@CdRG<|29Z|-hc^krP(W!SD7Cy~5Gw!VjG&B%1|{2zW|ht(!<+@1c*FQe zQ)LGl=_fW6Ud>L0{^S+6@cK^SQaR&zCgf5vGu{NWyNP=BMH}AQp^)Cd0(3>a*eMA_ zyiUUzX#&28vqOtg6z;Qc+3Hs0O?g_=6{P>R!xBbES@+iN6&6S-Y}m z0QI*XCRgau?T|=zs7U1FADE+z?BA}~mU?)cy=MUQp}*57Z)vhvHhB#!7nT)1;JE*J;0R!*_8rW}Vi^(yj&l11Fh0u7PVvxMUWy&D@mbh%NXM_#u z)}9k)UU7eJm^Y{#H%13Bv|UE8aNZD1@4Dh6Q26$4xHLAg9MxV{Dl4%?l6xhPIUUtB z60cvoN5SN__fKXBniQWeV@Uc=+i$a;uJSymT|AoVS&+_xUh`tMQ?mUyL<5dF%f+{m z*vsKJaa z#0B(r^T>R@0~sUX zx3mnl;;x;yH+5-mG8JVpXA0Q3x6TkiqQTHrpa@8CA)9jt(_`!UuNi}$ZQrAa+?GIn{FlcrHJa1~kcGC1~ z91Ez7g>i%gW5_&`zu!KDz#^p7q@(c^v07q0e+r8UcX<{>(E%#h8II&W-b&8oQQ_hJ zlKzSKe;yiYF`#)Y4r12nzl=)q2XbYNqm)lJS+Ap+Wi(TN?Qpj-*ByRmcwY>;CL!yF zoK>og5MV5YEZ@~19}t*aN7ER$+4$Z2WyL4OtQeWiYT4lgfHNb;Tej#e+V{@1w#1Qp zvm;=wlBILyrPKhI3vf}-wT5(S&rMt|6PNL#d6c_Tw?jPl!Npp)Wa={rb;;YI%8I2eOqA@ZK0gyuMC>0qj}_eEO61i?y@Fcd;PjA<0iSBc~%f ztuk^2@>KEsr?+9u7dk0~vG~ky?2=MxYy=8r(RJy|#J&Iz>FWQeyiz?uh`t?etyNGG z9qfD;Pa@Qi?mBS%o|jF{^1_tJEjK*NA*F3GYx4VwsH5|0&6mMq5!#-JM^H>uFZS-> zdux6D*I|~jorG5Q6dzY@mSysZKJODTEP=hP9nTnW5fFa;Cr!^W?Qn;Xu^rPip|Y#b zIV(K97>mEO(zPa;_v_%@k??JD7w^h};KzYw)58knkq?+61_#eBBN(-tQZACJA-^1N zXfX#j!*GR=pgUIcg8}pU@jLo{Y;Uj4k5Y2+AvC?u`RK4KuD#UGfplED0tIbYZU3Oo z!;w)u?>@@3+G9JW^=A%70Y}cA3LOx<~IjwRTtbE^LT^`qS6gxLsa+<>Gbs z%;!jFVs^o}t9i1%oP{bS(MOP;*A{DCM5_g$)k!i7d+22DJ4zO>AK0OP;h6e9u$R5HbR^CTn!imH~nfPa>6beO9FpU2_Y-*!d< zAS1<+sL)GX?0wG%QF$z1ym!Rz$ks`VvVScug9!D$xL5-jyZLk+?vvPbqYxUU#rqn4mpprS3*(&#em4;5pL^(dly8b z&fvi2{0_Ifa|b2H!D)Atho)|k*9W^{8jsk+o?V^n$kzP+D8DeF_` zE5Um;0z2S8y97TjT+e@AMVW4MGM#zVBx(V-q8WJ`INf@W9-BJNj!B+%*(J9@{PCAc zivLI;!ep0+m7Q$cvv75_71rzT1n z_fhP>A1qXVX+&tKqT_n|EwPgZe8A^?Sg4mKG_{kWYwP%Uy$oVgQA!=-FvzN|ub+8O zzIOB6<^vKtc!~yQ_TP+Vmj~n#2w=|qDxpS#)(UthBi{I&uF3r6loSyZTu zYqClxI$WB50`LH^>Q;YC*0Zk&&RNI8A4RT(YmNTcSc;O_p4EFdjLV8k+;O(q3M10QKSZ&8AirAfL^WJgjSMbQ1^Bx~LAB646d-Ia^Uw`i2pfk`!F8D z4DW6wY8IdbCzfH7c~=?Ki)f58P*_k{G;ObebQk zCoaqX+^PBS&)N4?u>|g9R>AQ+`XqcKEVp#K*aCH9rFnE;bLo(@@_?uFM%m*k7gGI& zguu?X21ei}XdA>_K0k8dFekP=1g}*canF~$XF2wE<0B9eS~7q-rb9BCj7dr4KEDKa z@(r&FU2tR!MO(!)U|vAoMusnaV_4nywZ8o6gkwK82|Di(-&{z2GRh7ZysTnRykZM1 zaJv1(^j%m9rwnb@z%2D6ow{sG_@9KoZ@;PH2n8Q-gO#;PZV~Vy>K_k1GAs20m*qyH zpYv^CUGeQZpN9^3!CEdyKgRD3edaVO*V~z|E|H=?4?DMWa;PPbr!niV`!!^^Dx2-M zR&==MWXT%v?$zN>mCkoeJ?h^EFO#p=W{>kJ&yD?}?>X`My7Xaju1T*AjmjDFx90jM zt4T;mcIEWZD9TB40VPU%!0GcOZ+s4{D04ehj+OwTY#|wUy;ie1DRvC$896y4@Qi4>Jjt4S?-%M7q^j}3d;2X;3uMGvs0fw)S;Id zRi)A&W<^@7LWL9<*l83)1d#46vS?ftKBeNqfPv0SSSJJJGra*gdlSpQ^(as1l03P&m$%Hi&VJ8ufQe_Q56^z>mOCKu)|WogzD_{ z>YR8R1I7;&3gS4bJK?Qzu2D~0+e)#Oy?j!;mPr6G)5dJy-Kn>c70`$H6W4TDPI|Zg zR%>%WqBosTKr$Ot{}uRliC0@f~>-h760N$`e* zW`aGcK{!0yv}@w4U}Z`mCO@A}f@hKh3&6!Kc<2RA(Np7nAc5~$zRk`j88+^7dl~+$ 
zf{}L%=>v0~|IXkf9l8)5E%2XPf#8Jvg*1% zR^Ix$>N79=Zi7VSQvi7Ran0Y;SL|so#kS?VFcxc)KuLZ?1=wl)J!^o}X7deU83P^UwhQ)!9k!R_6r% zl(tD3&1QFsL3?%Nv*tzunlHKbh)_Tv30Rr2*OBRXSz5(Zu4E>5VZ`>glC0l-^CAFbwwFAt?YAS>(NS#+w-TNu zWF=FmW1+=SdH1pou_T1~Rm7R}$*3U1ik1j2oq(?n$OdD6TEPNZGFoNs7y8;w$!`~< zSbkRWQ>~)l_rKn4#he`dYTS7L)^Ora%e~5%LmwvX^ORuG590%U#$Zyqk0qg;R#q1L zX8`=ctLRzz1=l3}w@jm%)MZ8OwD@H&GhX2EZwv+d2B3d#01!)o3KH~;KJ=K|O3v8E zAFW?SHQ(ovU`nqqC(P4)7#h@+XW*Z*KFdKgKg@y6s&+op6`tB! zsAJ!SBtY0XH-xs>&s7K?s;Bc?cM`scNAXOd<(94boFdeMvyu7*Go8CT(=q}-YuEWd zM}^$bk~L`_htv^QezF_qt^HRql5g|)#icQ~+xEIYHNX3qSap6lrA}}-NJ%kvCQ_b> zi7~Oep{#(>t|D=FPbMrF4f9`J-9_#@VL#jglf%aNT+n{#Jwb<}kaoP1{m zF#_*8m0C$1w(qTo**zi43{|zFw!gKKeIB6JE{oh#JAZrHN_)BO4lp!^4~7yV$J*DP zzr|lkdo5%?Zd%$(!7pT88mwHS8DQ4djhDs?@E0B6F>r1_5Cj4ag{i4%N zAz_Uni&;2Y@Vg2pmpDDu+3UeyY*Rrkpm~x=!o$OvkHRvmWEOk7;+dN?&VfCb*7uvq z*X>i3(_}UjGMeS62n7I!&hI&R$@4+(7Qt-WYsX`U7YSaYXl6b~{HI3oedadfD6Ug- zUAQwiShc*8Amo$aU>y*>_${xDjWG4cA7Oi`tSWR!(TKOUlc@IrN{&f-zIN*PEXFp= ze)PZfB4e>`=l51LujX@`>{5XAw|XR~hsXT^zO&La1;yr?k6VDT)q;i_i18AYPw%okc8Q5!CwdCYl=IjUFWLFl>y`<#{D6LlC=y9~S#Fh7 zqwxvwSaT^4c-q9+u1(W=sc4~<*Tg3_w#hJrOtFckY#e`I$N)_HtA-5iVJz@HM^2BQrs2>u1>>BV-MAtUV-WjKbvUo;KHX6Ssn7%{!8$>1`-dvb!M zUs7vV?Tu9ZZX*bPeQ^s2WLxQ_|@oF_zRE?sxyzE;Do*A?T)Remdm65oLA(;O*2| zzTMeLtR%deMK*tI=j#cNd3eaAS!jaIG9dcYqlg~TA6{v4`4Yxll4E{ZS-$7aClR@; z6?@hh=(_c~S71g>S-I$X5Ob1WUto9L&e6517iqLz!3VHSc8nxml};ziOG}G-v#v|- zKvMZP3#n^w!x)EY2Cn@H06cK}(Knl*8O7=>`nxfBy8f}n{X&%K%P*v@X^_u(vRvBJ ztkW&fH$ncnxx^B56Tz44+_z`kuH> z(j`3L0@}Ef9^?5qk`$Y2N}cy5vL;f#be5m`7jb0EC-1|`-l<(eLRskApc|C-C^wuA zUc8~aAj4lF1h93ZaEPMB`+DFP2vL8(ro_oqL@f1Pl$IA?XHDkI z$5>>z&m0yL!;pwF4~dW&MU0+Bg-E?s_R3lTbeXT}3^0D$%N^TuiMG zFIDsOpdO;Xk@qMPe1qe$&E&qiQnek}D=?TkL>a!xy4!0tvMr%KvIJ$e@;KI>I4d>D zGd+gUc3E>)QO8bAnf<}k-?U?H)q228cV`mxq5YED%6;zl#I3cBg3gW$z3f1)=QykO z{oO~p8OzYSPnm%IyO4Rmd29I9Oxv4`c2`-^jzMi|eTTe8<{FnoaHoPX8qK^kCNFSb zKxK!RCGy+tYb?g)9jS$j$~2A-XWD!gvMHAf7g~bDgp`Di?r!caZ3sr|EUw5byU8cV zh4tuD0wnNakdrT758_!Me(0*S_HRUOo((a&o1OR4anAcFO0C+|lqK-&+L)KAg(~El zwInN)t<;!mc=54d%A&oZeG!d7y}Xx2Gq?Sw!-{I8+30?8HkP|!@K=PwOgU11Sy7S7 z*m8!rs!Lm|_KWBVi>`LcYSkkiscUbpOU{q%Zcm&ZSGK-6uRrQp=UjwZq&dfv(T`V| z4a(h;5Hy~Jv5iEfxhzPhNjc3}s@d5fqo8`M=E6phGW$g2F?{Iuq~G?vzdmv37(-LC z_@J=DfozwH!hI^ru9<6$bLmwvu7AeQL4p7?mM-9drz+s#bnR7+9oc4FYh=Uu=^e>J z2OF>K}<=x`ccC#58rojgX zp&1J{vk9Beg!t~08rx5PFSi&z#4N7NNUcOyNQIWyrg^$WW}6=ijS4YTlX@vN5xdkN z6M2abcf6TPzVGU+I!)sW-?Hmnq9Jyf>#t9kSTrLb;4j3nsvmY?yQU-{63P;N3ftWFe1yITEjOD?$bIg#$yH!M8hlmizP1V{bWm z4}a#^@*S_ZjGMO$9<1G~huudYH|(;`uMd%JXk}6E$~P}OWp8nAYHr7!jD!}@PT4^O z6xxrpAoEx2MxAvQ6%L3%wq;Zfr3`f4dL8UiCg|{Gj(peXlmN%yz9@TaJL=!FFWTgK z{2e`98Vh=`SvMk$omZVGGgn6tq>lkEQUsoR?b4*oVu!nb0$?q&5{C3Eo&XUvNhwSx zP&_BuRF0eYyPBA~h=@GHr1Nb+&!B`DqUh1jEA(^z+bJv<7AkLA-*L3N`0d?L3BG2F z_;Ezh@xFODqR?kA=1PehjaZ1Wq%Fix$#$j4FLBlosp)9hdQ9USppUSE4?}UNT$fxb zRK`WyY&{+q9=Tn08KQxr z;C$(ZA!TomB3|G<-bdQRDPb7C??Fq(!Y!})pR$5Qp$}a`JyJx^;uP>+l42Oo+2lW> zZP9*QF_XA&Dr>hqmCXVLUm6R$T?$^K_^G{Iz!P{fu9sX5=byhl9x)EzMe@#{KY<)# zPMZ)gs?*3Wy#P}$8pC|-h$~b+4u?6y>O({@c|>7fW+m?2wBjMbKT#$0Ei1`cb8{42!8C+0g0kJ zM_Ap}&yOgxvo|f`Jp37+kJFl39R0HBI=FD`HLv6Af|X)h?*O*9$Q2Ra^*D}lbx(yb zAIp{`rH_yPXo6B#1S~KmEL8Bs7=G~mNBDCrmZ6)G_p@KBI#Kx&DeB3vuj8Max1X)$ zhKyPw1@E+W7k0;U3WDH?wh}pa>bwjN>z}=0BH>*+TA|~TBr#iq2`RP^|C;-jkUD`L zI7OxrN-BQIPXC%TM72!sNKl2#h-}LlS_QL|_q9N?zzkPBS&d)@AzuAa7T)!tEF(}B zDI}2;V~dvHRloawC(SjT?kj1ER=K+_8EVccMRPUC0_YqdPU*JftEms)lvcw6n^mW| z!D7-2M1=0c?RTEm0r~M)1yWU~S!7c@vJI3kG0FaZ6^6ceoka-WnxlY?{t^zMW0YJh zsE;PYGkcuc@8U>dxK-jX&yf`l5jHnM$!WCpDmEulaG_{oRW=}0a_*)_F0`UubjJI} 
zdyWy3=2_?T+<=%r2AreYb9tTrm7i0lBz3T2V-hA2xGEM6BbI~AX23zpr>J#0U=xeC^aOkX-6Fu z7mY$<(W+)uWApcW_tK}@{>`N<_3fkmF-+SDvOh*LKFzW@d0SpD<8z4im~cg_@zFYJ z#`bmX*_g&X#kzK7CT9G0bouZIezT}?PtUD>l6iln5Vv$+cUH1cczXsdL2gw8Jp!u+*V({i@yLBwFmi1EPg%^ zTAz1z9P^n=MmhB0uOVH->#-I}9sX>FZuR7Toel;om5u^HtOIO^7izKU{V9fqXB{DR1r552s{ckuW2AG7JHx&>g8VFv zhCcCzl4%S{UibZ{y&0WKgzQ_RspK(CD7+eeeP8GNBV|jLy=+q>#!QD=EIZBYAP; z{lm$UigvmTucy3fI+_f(mc#ZH!K@=GjYVUnn&7l;@ITC|*y#WhGAP3a9%7%85=w@B zZx`$zY%u_UX(g+ws?@j>atx5$F&|cIVsz0F6+LJ&oI-$tHZnC7m(zTLjsQLcgI2O* z;je$vWStnQsDzwq>8j+pX{+v7#$lhcrk)dd)knCGkSA>q8BDhDY*Z$);AH;$vYmf_nf{iAjziQRLN3HbZq9Rbcz8(+&F z1$z<8BE-+5K%N8LHEbu-9~J!DA1_%7_0n!KrFYSGfZT;& zq*d~G(~U0WDWT;rZakhpw7X)XONZZ9{Bs zpKu0g@Dp0Er6i91D$;GPAx+87AevY&jF$C9mA8*~Nu<-Kn4DkMyP{nC0bVVAR6b9C8y`T|bKYI3+u?)M_*l~i+)>rf}dNh0@XhR3mcl7LM-T`v!_UZD2luJXRl?28(82ZT4wzd|k&r;8WxBc_+c;{&z|Qb$hOW zr^-Fgv)G1eWh;}W~8kR=y+(-xKJ7_Z{)(ZnmT7d34^#w^Z+5)NUi-8elm0dF@ zLA*thuxZ$1eM|xw_Ocf-3txaQ-!Zq|%JPxf$DoK`*cmL5MciXNz2vAwEl@J6w0WJt zsf!^otev}#+AkwK7PB|$BtLeG1pSoU9J}4aU@M`Yacr;mr&YzTY@lrd)9^EPs0Myx zQrrPp0iw+JzaG)7r7}74-=~^DiiMjMGuo=80v4Jm-k|Js9=&G zQjfl1e_V7Lk-dfzAGhrsqQw&e#}XhQ9QKF;wfuJR0-+b(*nud=nHL`lWDCsNppWN6 z)g!SC^~eg;S>9e1?sGTCkdsY+G|rZr0RHdv;ljK;f~7sX*#Dd^gxnN$C^C-_1T?|Gx+PvHZ02A5M)AfMf?D2;WDnY?WX; z=X%j`G^+QP_ha2y0tJ(crp?bIpq~_HqMcBu`u=Z+<-+P*w|(q(dN6oezV6|YaOJD( z@1~RX7;)T;tv?3e7?L%yQ@KM45HeVW51=$C^8)RJBH9d!&yfg!CM@KWzd7Q4azV*h zs&FxSw^PcX!ITxF(N}DYf>GWvJvHStpQ8vu5uwd*c5c$lXJP=OnQsp!XMd&rcKU63 z2y12wANzu^@Kt0Cu`cC65}&zr^V|OLmV)Ia@^u`8_%cO5JoJmTMy%1$W>+N(y;IC& zuN&17bG>-<5UN5l?4laWvQqua;L?=aMzx(s2D-RgaC~vP=zFZZ(i$0{E3C9~AfgZ( zI9DtmgdsQPC<^&)W80R_XM+f{??g+Mgg*^VOCqyU$5Cl$l7dM#)KQ`r_f@m19B!Mi zd1lL%Z7-vGZA>FT!|BrDNim{0WOo9zg4yQzP`CEUsQIXlZGd+1YGToFClx3XRlW8HeLDp zjV%NiPsdNyVY@3Fb14oaXQanoY2#0@ik(2FDWr=K&bx6NRRk_6=Cxa`^DXCYYA)aI zzvNcWK(Kk;it2|jxLQjvG+`%DP%R2_huj9GR zsA|k7vNLk#i!FnA=-a-K3L!KEUgh5m@M+iQj;tqn7V;3rds50JQ~?>*Cg+5XR3Fpv z04~#MCZS~3hYQRA-mG2aw(dTp+zrNNY3pWGa%JoJ{Ww;uDXDh^;%QH~Vewa;L9$i6 zZS+dM2OaeWan6(Z*-~!|xiqRI4=rZ6dfH_k6;kiFL&02h4Wmcbg2GjiLaib}D#^`y ztvye&;Hk#Zmn$XcO(X~zBIF{4CN!zvCQ3elTjWKs_iHmytF_`&lJTaDJ_Scel63TT z>RR?ja0pEEK+~x7BHFKdbkFba;J|&oxWT%;%e!h;ZwKAJ^K2O9kbjCRJb3zc5Na)U z`6?wPh73EMT#@%IH!5mm7#mCeG<&-RU>FI=exkrxBdRc(CPve;C%hK=+>Q{9B0>uO z<80q8SW1b|_P|y4yx!4Q5>xu&NfS=QqXL5NQJFXp*i;z6E5i?@%6`BAUq^PhbZPjWRq2Q3wv2IQIWTg4HaBT_I0sXiQ zLXze9!mouU0~v8R)kSN@)$=Cne$0QvE~a*7DQjK%_f;6eZ}g9&1_WmQlWv>=Uc7l! 
z@LunAK*oI%^DX*in%l$=2~QKvfZ!&{DM*fPrp4^+ja%RRknh341BY<{#IYN1|Dw_Ns)30g+7-hQkkRK#BjivE zZQM>1T=qUVB4Fa@zSR=_evtY==G{ z7-J(Xwqy;Y1J7s?M9@WHJ5?!RoxBmu5(|1JxZ!tG7@IGx{I?INB)l02nS7eseucYN zYGG~nN5{8&HIR#V+itz z5f^5hc=~rJ#)>8JRObVe!A~LiclN)vU-J9#`YG+)B1`yMV8P2q?s%eybLL?K`D9Rp z?4^53&THRolZTv0S^W+rL{S^ky>N1PhEZMWXMUIOh!`;(Fjh_YYR<;Br5b2yqb+q( zDmt#QsCRO;`qym}<&KJ0lSQF56OB%8=kl_%qx%7?UPPEm-+#U82073gypMZbLiN+7 z2v&m$Jh63=>*QP>yM-f_sn^+aAke0AavqfytEncP}YBXQ~R%a#qU~3JEJjG>hHT{(VK2sp9cNd_DXI2+v9bwKloSZNUE^K1o0A zyM5}q_6)8);)Eoz2E1xJY4fm|PRSA?A|ZJG%DaTIPFh z@PSchA~>QTssM;@6efakE)d$t*5G)+6LnK_i`J_&&b!5!DNrQkxry@6%{VSOGMWSy z(7h*0MNMBY?!x3Kw|<1?Xy)rMCI)~=18 z+d&h$QEJ_Ow5LWpL`0t;&U1*l+M9%opq9P+aL;!lr=(apX>-KIze4P}OI|gt!O`yU z5oF8!+xG&q|E)(5?<`KhT-8cq8vv>aYZ!PNKM(CJGs)iE*0o2~q{^-mC)j>eM!Aj% z_orTfM9L0c;!;^5Kd_1>gz1kV*W3b6nkHpNjL z#9>;xbm9KT&vRgRiK8ec1DKyfzl~l}FTE0`FxPvd&LQ_*=p*XvqvQhWUWal{*tJ5n z(W8gzNn`_O)I*B}#luauIMT317qRQIQSDnc>FaaZio~zYnX(Ct18aC2ue;n|b~+$C zfBBYqNsMzwF~96~^mM9y0VLcDIg*gSd{%_&fKoxIKLdZbK(uBw%XIRLuer?n!QC9k z?-NT8XKG7`O0Xu=c%4g)7TWBu0V>{RBQYouNbxC&uU?|Hj)36k`#aQ%HHJ!wKqz4Y z)7sOj+bie&)jNXXIy9b}Ri(nu1w#fi^#T!)U{WuwgTPnt%3lomMcZ8)#0R1mubF#* zz89>U;zY(4o+L9g;$Io&t`065U-F?#Z(Lw9smyhJ(W_2_y6yu=XH1P$q5}?xlUMxF zPrO#(w%FZxodg!9<>s!H?jW3{CH3HAJjbHR6N{eM{_@Lq_|R>$fJ`UmB&U6au8bUh;n?S#i0y8#!(K zYtWd?R|l3B9);rf2}E*c6n{Rd$-0a9ql9QI3guL%XW=Z;A+DjPa$&mNyX6CPe}YK& zGftwcR{FFGJ?1-Gx`$z1xu~dg^44^aHf#O-%&c)>2*Qq`TSV-`p4}@HT{|H?kibJ& zh)Bwe6wV2N~%eNiRmr=l*kycUrxPN&xT~~Q=%F6iB2mQGl%!wmH zdnMp=gqZX^ThDrM;7Yj@DLo-rCiBKgwZfO)Nl#miih)fv*HNPhku)YZ5QG#+dvlU1 zn#3=l7{3!Q6^YtkHTg@}sAnZT^0vLSL3wO|2>V zF#c4$^A+Did#fawK{*hEu^lD&T+(MsL`nHI#yL7de*>W_`87eD4pci zpO#pR4XZG0G{H9cMR@>9z-`23W$E`0_-c;D(0!VND{*JUrL9##q)`4{LmLqI-*tb| z)s$XKWy%#RK*r%O=^c@oHZ7p+E7+&H3_a112@+&1E=1-fl9nGo(Cuut$jvJ(_=MwR z(|Wo2(W+>Wv*^c!&a+O@E}wqITG!XscxXjcsZMu>x(Z2Qyh@J5v>B6Ln*DO`0 z*H#6p)bk^6-*V2K)Klq{oYV8a91F%sFFHg%pL=D`4Ee?&7Rq45q9}dkqJ{ynM1vEMQ`Vfg`p=7Yct&s z5i!2J_s6GaF53DB!whmQtwME)lOvJNUM60r_u=)Kg>%<%sfH<9=Q|^hk_O&T4($>n zy&P$D|8&r~G(GAfMY6ekKBG7M=EFrKs!2`hhLy&FLHhay z6lEk?r2f%!ztM)KOJ*$__VonNbS(S6lWZUTZdzD}qZTY!%zj(p0>vk#wpq1vstpN? zLI>fdD?SRoJHg``aqITSn@oCfMi9ftlzH#}vrFm}Gl+}!5zGMZ{E+~U=Hu6r*;3^! zTiLy_EU%BYV{mr{jbHQTX#y(b$4QUJDKGVvlq|XPc44LJB7~Cb`7j2E$vrME&hz9U zoJO~xa3rAyM@P-wMu=#jLE5^vZOh%F+WF^45n;{T4EHoXi`mm2LVMC29t%W7MAKBn zhG+%eJ<53(zGno-W<{dexVt)iS%Hi%!{U6>0Um@MbCi3VDv|v?MQFe0-7(fvom2uDlo^tZdAQi+JA*6}?d_w(-PNn16O-@REV8^>kW6!gqcD@AF8 z-8IXcM&&EQyA%TB^;%FEJa#3|jsad6^&4kyp6ub;{o9M9q7t(#Yl@hX!CZ@G_B6wQ z8#a;cGR8PeoIA>0&juMExeoWxx-~GCj35t{Jg9bhmjmJw<$~W&o@Lok7q0(Cc^7U! z>h+*y%msfgj|Y6&3{MWMo5ysTVIrGmzUOt4-0ilb^uSKC?%7vl|_r={>r*P@^H=OT98Hq1&GMJ&1HCG~Zpw`t+SaW#eb2xW0G`$prE z&`1ks8~3rAEOx6&TbI=ZHS~z0a94EbWi?}qUa-@iODu9t`x1~_BI%n`QA*?D{BRkN ziXPJF2NxTkQr658O2mbTyy}j-D==0JNzzUBs=i;Kl&XKVFldi^ z84ZgQ54VkLBNk1?7CO~z`QwuJ0LNr;d%R<0~g)w?TOK~0EJ!f$16F;VpLCh z&619{>C9`=)uQ)`Q#)pRM|@Hx40p1|#G|{mBD?Vkh9;oGDG%BC0X=A+KEs~&JUDm7 z=$*8sW^vQ2Rsrc`kcouUxfE7c8dc+<9Ms7!rBDHZLIK38R*I`iy?4!@YGF_$nh_)N z4N)<3Z(cw9pmXpO(FK6l955=OG5BG^L~)QWAGcD`q@@C>iSrVA&RVV;zUGD(C6+yg z6FjynAc;3Hi{5N~+J>Cp;u`j21d~!o61fx>5DVQqLmd*DXIp!=%dz%cyzI6L6Y_n& zZc8gd{dkl*J?)DT3^A9zo_%Ut0`HDa{vbUI-=W4ZIM3@%7LF$bdqJp@A3MaZ?vLBv zB~zmq)R*#qf_;IqjKTN)`wRQ^(+30$vaS!a`mSYXsC)KdO1qLIUvm3{;7TE-%tL2C z*N&N%A@Hh#COs=XhqpT!G>`YQZ4UAKXyqG`LgQ{_0aE*e6>X=Fo_T8r!=B{G5ksy3r9|8xIl9!3gl?G>Z#! 
zw~C*yz3wemstPa8qmV-(W?jaYD3SmV={IS;9ZI9>mMxI&1 z7vr%j>6jTHYPKd~1dG7H8flp|@zU>ds7>6VC1`ln#a|HwHRU0`PdN{TjNBrjo&c~D zoAO49?V%)-5!YB^=P6}spu{>7G;&u-fMVSdfRNSn-OUY1ep-I4%28%;vP%r7zH-Qt z_(bS2$i1RWT-1bW$=8C(PI0oAVc2ALh*BK4x2CyBdA@N3Us|5Fpyj6&W;0#k(6`TY z47PS2nj8*@&}13`jWg;S&{rsE(Yq5&2#5*zP#4ef=|4BcQA;sNgCnf^33vsS#ee*u zM^qf+{O$3gD?}ZK9yY*LsTFs%Lhl)7p9=WG0qm)ER-dhqgm>x`<~FM zhhbbBY$w4dr+&e;O9FT3Qlg-J>Ddd&^;EU#YM$owY^s$5f51ZgLtEjzb1GD7A2j`C z!34jYUr$@#?np4Z?7;q@R)2P`Wx=-G9Udj}=V|+an2=kY->d*7{M;%O6=v*DOAcO_ zMReoL#wtH}AonbTpZ>TXL-t0%Vlzozfk-#c&>Cslgg|wY9O&0$viW%J$Jq74ouAtRq{T;jlshaK#pzrIEthBC6i(Ldo;mQVar7TT~ihGV}*6o9V?w3 zV}N9BA(|{T_*?pqf3G0_BcPeyu+;L&loeJ^g?AEgpf=uq+)fsLJO~(rdlwvtn z#9o)mtfqZYg>s51obrJ}Cy1%oi(f1{d7+kh%OY#il|{MMP{!KTs*Bv!0@8KX+#!9J z7y~pxDgq7d0$7J?u3v^9<2MTqrmLI$=b8v|Kw#2c{_V)xm+8OC>lqOOFo;e2W1 zT{Slp{g%fdiOiKA3 zicr+VY?#Wo3^A1It-&2RwpQxzrH2T}%I;1qyE5%~%?ZhAN5Vhi;B!GY;l(hF4gm)3 z*ioTpy{D&phsO1g_nYO-$E>KHL`Ef8tWWtV_N*{U{ImR*V`9^dqt3-Yb}h@}wlld9 zXb!{F(Fw}+Re9k!X+$iYo#QrJcy%gvNU4-Ne_L4ONwMmQ9GEq_x<^9;-OBs?w#Y zpEjCFXFC;6d_HSR&paum+=GUXo%Ni(6U4jJ@P#)*56}?BE~ZfWP(lyAFn)v+8JO9d zI4BmJaCXy0*o>9x<_}jX*?ON4FTD(=knSIf_r(QpP@8=x{?2K9fK}W&=)7jj&<#JL zswuC$jZ;P6m&`xAND#bF+>}T3ezt4B_WgqE&$}#g#=~d4z5|Y4YFy^C60EjG%Zd=M zKqIZ-pK{%4u5aDve&m*|;Dj#|aMv_x=xK3<$1DmUTc&p8l#75X2tf{xR2rVgE ztOCy#HhznM zR@a)!Bn1(S#%2lV$OHuuxY(u)$Ul&N%&LYqE&_CW6a-fKeq%B?^2A;cQ|%;8Mw-KU zb&c(W!33XDFf|=~?G;{cm5LaAf;Haix;j#>Y5xb5?iOC!H)VzF9ZG7wMK8v+zGefo zIdgHb^U~ssUX(c|u2xw)FeAhPKO7X2k@&jqI|7lm07@I63~f#fzyS@pN7%gVFG@&A zun6e#e}e*k`RdISEgRwRDk*>9`j(yR{#=%SMJE$CvTiM?w)uxdlUlHm{32T++b#g+ z2=OW}NA7FkVvIR|EhH0%SW{g+F(u2xxDKnWi5@lDt~!3VX}qUjWV8nD2|pm5E!W<+ zs<3%OY=HIah?EqeN43!WfI&DgXJ5a@r^$(*T$YzKnSpNMr!B}=t&D!wL&C3AOOHN} zfLbuiEdEmH8b#V_euM}6i!QH$D8!CWE@=5eY4MOVmK;%(FvNDUns4iwF|JnCC&3K_ z*KI(J{r2s;hejEiP6e7lMax~CxJRy(9h{BB&B?62Sa`C{_)Nuo)0IZc(e$w|59L@5 z{Hm;XwD(>o;U_tH{}-YiD^45o=Q|lbcc_+Ag-BbA7b6@(1FiJkok0mpZd+SBJA<#D zy@jbQY2UQh0DEbVaDF3Ky+p%XJ+U1FzH|m*1`~?9q$B2y7cKcJd~FNYFkZXbBIyLH zzW6k$$Q~x#*><0#e_PXJ51e<$qhq!VYu<1(d~(ATEMU;)*~ov_mt$TCM1a~4i_rQL zrO1|HnOUBI>fu36L+?Esg6lpcMs7o1*S&S!;MadmngQc+tZxNl95yop6Wy}8x$I4a zpYZn>a&4~d35;9yt1plV1d75ZVGKmpL@J8}{R3#7*5Yy~pPMU8V5UHk7!wyOksN$L zp;I*!=g&4TtX^?r+;uC}*T;@(ca+_%52Qyfn_@mEkZIv9d}8TPtYO)4@C(Aw@YO51 zWbcN`dhfuik8D9nUcOXd8kl>{CR#W zP1H@j&5GzkjRN?iz%*gHy9|6y;+ds$}|g38OF{kxG%iV{3-QY7$2N+Ot!k%n=Kz*-d6- zXv&ifV#J!5QmvUO(#u*Fo{g1_WV{aZE`F)zR3F;)G5;$8>no3vaQ2e;QVpi^qZEuY#D!IBXAyrxne`oUt# z%16TK3PA@R#qq6v(t&du#hLX~|vuY^7o3 zV}NtK{6)iOHDEfNv1oOxm1YzkGfI=riegKLqTgCcgJH)Dtc7BxE?i|o>`U<2rGZLf z^Z@H(6&B05rxKOOU%tY5`_ilOny<6>MVizCGZFTP)!>_gYamyj`wwfymi#Th5eTJy z$u3o8+97sbP_Ju*9>3Z3rIse+_Z8~ZdspIptGrNr{h)8)Cg8b+Clek#=I7^U%|xEV zS5a3iQEoT@xfsGT;1<-~7h%o!GhG0w1*C)#7nv&nB!?xE-pn z8J|>TG`6I~&v>N^=@V*VKW-4I`GLkVU6qa~m5TnHeZuqWJ)^!xx0xOGR6^(IJn-8>~oDw7drvNt?#(V;!IMnSu3a${6_ z(4h6e8dKFB1~$ubkILL>XS((%j0_Aq=2g&buM8NhC>g4=s|Y6a*Jql5x1Ho1U zHuIIQtkQdR?6Npvk#e#cPw_LIB6^08v2HO+Y~(!hVBMPN*qM3oLVihZ&3-2N7MJHJ z%QBKa=PZ{)O?R@Ds@cF+juYt&z9Mi0YY$x8APj%np2z6uQfxOVJwr}j$S*_oT4k{k zGKIYH#{dW|;Fv`Iw#ZC;$D9VvQ5G~6oV3?}d)1@T;OecZA1?FgoH@j=4q}8&u)fjO zd=$^R#9o-&oG1P7L|@pE>cwa#KDIySY-jJ67~zjVS}z

pdVN?9MO}cp)M)*ufHl zwsB#i98J-InOAP)h>sh|gJA_@Lv4?yt2nbV2T!k#==ErP8_@eQ*ksL@d=juXhd9zj z>YML>zrpdRSa1ZeoL3#PW4j|IHju%H=5S5oO`iS|!GVYK1A529ri8V_#si5eVh4}t z%YI(UF>LtNAQc@AMjU$7r72ZE@BSjx){Uy76Nc-^|KI@pW3~QmkV%g4d8Cnik5eDg zzmLH#zz}D&o^njS-X}%I_~*5dBnZe~Vp@KMRb$Db7>9jyZ1Lw63oxzG3Cs|AR4av1 z>DB3E#1MQMWtkml!k?6re8t$J)jeebNTh`1$l5IzmLmOjC4Etba0} zan1!iJb1RE+8BI0wqyScY)4{g!V2C#M{sjw)4B5TY>(A}frReL(sC>Cg7)`|)V}!& zFVaO{;o!)7B|-@^+#U10sppda%QyQ5i~Mqg$H9e0ROV~d8FJJ75w~k1ETJc$MiopZVfZ9Wk|+0wJ9pA#mI$^6{g1Xr@6I>B6;hs&%C5i&n{=}=)cX%uZ&;oXv+!nk%FH@ z`*Jj{((k3}-$i6GPm$uw^1}7qXw=C-PV&Lz*ui+MD;k++gBw#3iD#LFo@GBO-C){?o5>+WSC4HKb zm}hd4`NQOtLg5RXB9kt<-hnR$E(op#X$x$sr)R(d#)f+hgU8_XC3n?DyJJ-WgxGDy z1a%=Vrba^_a(AC+oSJ+lKd8W#+@8>D%hej7P&=YxUBg0iMNW1c^Pe2Ph+WMt`1mYI=2{^+hs%GoRVcV4M~5KR520hHpK zMNknP9Y7bMj8ap`t(4y}28# z((3F>*eOFg>pO%f>Yx!&l-;95g&hk}3cS}B1m);+aXQ)(2fybimSS93I4zk%-{4@03liRuoyQV>E8pmH z-R!%f6V2ZK-ibl;54Uyl*G-0`$DnB2-boG0$4XQLc3c>EDnn&uI<0@f?ElJ_dO^Mx z6BC0+;H}Evut$Nf15ybl$QbE(Z_u@ z?Z*o=33NPp-I*m|Poggx?JQAxoIDb8u(y!vb04tzDXD!9{oDV|V!ktf;5>hBP0Bbr zXrK@6to^dklyL0zIYF|TSW$QUa^7|Q&N$>|9 zhWwE<;ZM^QLbA(FPe@*T*etvx2Gg3^C9@HOIp>F&*8RJ!c>vw1b+;A#9%;4*FwDIZ zJ@zN1y@L@Zk+T27A>=Ra%3seX@RDSk=ufmmWW7)nK ztw77zH>%#SdT%TF;XmM3{x3KD>&^c$o-0L|0t9UAXLr~Y??9(r@cRqvgSeeKUtX#4 z?O+=dWYELKL1&J2n)X?Zx`jpQe-a=--v6cRr#6<9R=vG`ZGzkQ$c3(J-)$V^*1$O$ zSDLuJu5z32s3qQDfJ={@(_^WG^?!u}c8Ai*M9n>E)iL z{f@^pbRSX@t|=ho;k?DSWgy$$*18`w7@Cmw+3RQ6e`Ejt3soEzJXUKn8wAi1i-)b% zy<>2kedXes?g1%@aBSGuovqxczpOgZ?VNT@;OXh8y|%%!YimX;IQcxNwcs`%nn2718G83Wp1PMQ!X29{-W3XRa>!ea zfnis5tr2hHHJrTe`?_*|*AebZRJFGG>nw|>dA8?y40%ISA1n{}jur>q=D$F;IRCp7 z-(Q@Zf3n+iu$kI&iTtXxH!M_+sr_sPHKJ*Mr+R28S>Gr zIPBFOXpZ(r0v_i`Yghtty}o_S|NYkg^rV0O_)VUShesPzqr$)rNE5+Gco3r;+OzB;(eI--0F4A27_qMs)0%CO;cH93ykNl;I9)f5ulGA!re{7Fip5AOv z2`Th9g`jM^?c7c~FWfM+9^Oww2P1<~gdjtIjhy_yiNecZVRiNCH)vUXHR54cpoS`~ zgGM`vs&HPZV?qDM;K2nE%hr@xT9FTq&Nl)7B#Dj8aI%9 z?E!IEDz})eyr?{;Y>sG+_mx%>a$-FaCeX5sp_MZU5&ZO*^XeB+0}IHa1u5mkqHEQVjVHWbg@!jS9^)c@@ran#&xPp|FU z@pJDxFV(E*JWFwl|DT{3f911(FsP>xpgr-4fgiJ&D@)T@C=2H%pAn0y!JNdpcc`@j zs+60KQ?51pjCR;#F&P{FH=>N)%NH1&sYLVqx!*AlRe7_x2H+3?sdsH_0s@tA@v47&gH|*!MaAb#K+_Lg(ZkJ`dpw3Y?)vub ztb0!zG|f_&Yxe*qd$XULfa#E04-iInDWA$e4DY`supk101>H{I-)HHF5%%|&O$Gba zHKRK6ll*+| zMuTuoY-IIj0#mK%j(_^P`qgE`Q1SnULo)afLj!&l_fY0Qs;IHe|27k_ngI}m^HXx5 zw`IobkiTogWr*Yc67j%GurQH;P|x3|%wZ~27WK0TdgNi!8wD@I9>t=Eui!G@3Ox@43z+OCSQP`H;nvsKlR_WPK}}w zHz~$fz5u5T=lA#dM=JUeukSU`t{v*u`a1a+-6Vm%ii+r7yh0a)fPtQqehLZ-Xna5Z zG*tok7D>XRb=l~c7%H~ss&aC2RJ^Z5aPjb-IDmJ<{vGiN{8~3v24oxot@4+~0ML0* z!DVG#jzVzoyX2lg4vfE$3%ghRtI=GMxAkbkF>@lb|Mtj?AisFQGxwjraG&0E@F+67 zZq%K-(n;Ak&PD*Y7fxIZ3PKV%fQt~axB9q3&fIx-zZVR zf?~9pf!2eZCClrVEoBD9wRyEJqvs{|h6@O04$8at_4WDn+rd|C?tgvl&kz0ilYlFjmXVRr4@P6;2j2D#_C@{mcFTLPC*9^&rPD51vt+99TW7l5 zFYhF>OyCO$5B5(S{NyVCv<^?m@E4qdl9GL3zJRQ;Dks6?xPlOG38Ry|pIMKmzgU!~ zH2vlKf851?EJy%BKmZ>~Y939PL&?}{$-#z-`)g!h#s~$hjcRx{#t?+p|6LM(KLrPp(vs2u32%c%*9bUrUipAQz#gkcc;c@`n~Pm>A%@W}$Fpr8Ppa3me+ zEJ$^ng^_O)^IA{<3`QiugYUMHp=Up$CRS+C+EZW5YA3;aYyJ5zOK81%D-gqPMdrLP zo0J2K6jZPM=-w=&x=r|h`@!EY|Bo?=xVUh#AiKKHnm~CuZTr$V6_6i;i0Jj!r}d`| zQfz`&#<&mjnEbfi%3RR{g+Pnx*(753i~i6Aw?Dq*_qzf~;iN8E-wo@mKfbxH%P3HI zb=f=Eo0BKSJWx`>_y=_-i!aIkZEe2m7Gpp`R-PM5jN)jVh(r9k!XS27F& zw>mb00V1s;{2={DB9(<$u74z3BgB{e#Vfhg#io<1GBzQg6F1X+cC?ID%D<4Qm$RoQ zwd=CNDPPUGM<64^i|n>6+}>Wvd;_UU94hL6zm#7LL`L!JP#{eZ{68KFYJ`A@2ofo$ zy_|;O*e=HL6?{8H9wI9$etz}^8?iz_W_rYz z;9v=^WaWavMvdgB`v)kEa0QR9p8e&a|Av{sOz@%+31j!|1h$t(#HlOu z2TmWcfR6Eu|z#5qo!>;Ht#7Z%{Mft1)2!u(YF z^IOrppxG-ouPRPzbSik(W41p#A zfrgL(^zY;NEh3;!mx+6sbW4pU8#}iOg1^u;qE`~g2%c7#6&U3KNE+!YB$4Ge&D6fQ 
zl>c@=&)}15;zon3pI2RIf7Np>@zUVFHttwd*q%yW6Yj)?hx>IMZy5vsaUFC8yRFI! zH5-+WP{+L1U`j}xiucjT}kQ50MRp+EynF2#YRUSabA3zoAo%FNHe^QUZeX zrZ6sjS}^yCR{p34IkL!Ag<4FDogt+Mr{Xo*&(GE`ZX^xF7kjCs6W+$fN{h!4myoGR zt_85m={p6*EhJ2a%3Uu=79yBew|)36WxP&;T9>lFN=OxmR06T&=Y5Ji^TpTXgG2Xe z)#T|zrjhl3<^C>zPin92oyqPWNe}R)D?!q!-{sw|&&|}gU;gX${z-`a-u!FurI9<% zD?>#X`#W**Ec6*=SZxVq?+aehM8iSZ&LA5F_Zl(TZlKuh#S5B{V4Ea)bBQ;5FM4x6 z2e`c)Roqgd(&#tIl&8*V=z1r7Cn^BS!~#;(tOS?b8mD_5c1d+KO)XhZ&aisV5W`yM@0D?+gPw9(Llx7< zp85`(;1;-PrDx|KsB08_{L2x*2|}n5S#j_ar6>#B9#*$_6y1T=qq;?+%)bqg|HeM} zPkj(s0D8)+DShB3CmayY*$_bfBhCo6PTZ@~5b)zU9_<0i$bECbS$%5)|FmclNq&f=-!X<3%$_JUsS^2B**|z zNs5}j4g-Pv5p5i~R@oryi|;buHJqc%uB_q*Y&foHz3I?K3y&3$<49+W*Cd`;KFQUz zjj((94;yWq4c;DFFhscfhHN8yq1;)0YJ>LdUw6h0pM-B>&PKtS0ag=-g6z6wbpLzm z{!bccCG~2&ZOiD{EeXw7@pu2?=0M!US?j6DW2*W63p$;#QKwbWtXuT=bo~-|wQsIN z*7MA$k*FDQ5U*ptdqrJz1XcWpRsz4uEjpo+GDB{)_AK9Gdzu|>)8uHV?Om1R_C%9G zN}QPe_H%e(J$0`%!J&<`*|+1;bk~1UUGr9d9|FOb$98i1s=qoE5-V=4_yd-!7dD0F$F6U z8WYVzdlTv_8$p(t=^?JQWT|XFnYTaQNH^O3-={fqy_Ls-ovQeL!gN zwBv%A_-ch>SrC^BUE1)((;(Jhj3PxLL1e!dSw`CAjU%6*y&aC8E2QhawK;hyi^g_T zFmwZf|HT1VPJ+xuQp5yE-7g*nOh2={+nPvq{b^XT;_wJoy?i+&jd#b{foBZ7y=nVX{Ci?1j`Aan@-J4a{q6KXt)(e zOc=Teh0doeMuC0SNMG}X4Np7CRQ->a!U`FHZU_7u~$+?Wc7o?5$sx+r(CW8 z7lAD>Q`?JfLM&WBDUxsL4VgdFj3-|K7sl<PQ%gDQyj!LA$X)YaiG+6mcjJ``Y(jA?CVMG zm*fkRLL=}S=I~bJgEVFs7{+em;UzDD-I~ z<%tm#JXK&=y848gA-)F3Oe@cuh!N~o7rNmlO-Pik-$s)q@M!HJ=8K~F>J^Mh$Y}>X zQaDP~*q6@V=s77lXCU7XGRa3xj+S&%iIHTsrPi<(FyM7DT(qNS$-dN&HFr)8R=SVi zDjsicr+wj8+ebVpWqIZGSi#;kzv$&jQGUv&082k>g^s+9SLTPS0eoK z0x8NPIrln+)!+Woi2Y3gbjJ~L5($~BreCKtKctc?<6jqioH2G+jg>ViON@PUt+jLe zG^cHv!g3{yeRQX+MKe3HGlX#LdBS_SXYb(=y{%7->CN}N*w|H$39A1hw4ew9 zZ3xavpckBQQzx3fEJ*71Mk|@VgI(21T!4V$6`P`4@J=Z743?vsDm;NdDp4bfj^$9EFuNo97!zNbuh%!d;Gujy zg~(eRL=pmc8YfqV*yl7i6YLH0No{SEy*=P+5}J=xB}){Kuq|n1Qghp~DBCX8q(clw z8BH8(We4lLC}1#{Kmazml3{9GoXFmEwW+o;B>%UkBCsL=i2`K~HATs|U@J8QTg*fo zy_ZX)^tn##M#iXi7pDv{IM~kiE0O{i$qz9`JQ3&F@8*`0)sX} zn3j?jTZop-*b}pNsYk`j5iC?bgz8w_+x`iuC+8_o*_0TUb+kiKiK!_698-f%|J}BY z`A79%p23mP_Y^DCMlM4}Z&$0^|Lq(B;#=ICa#744K>^%`_NnRhp*Ihj7!bwO;1Bp@bK(5DnMTXz+YobmB?&mp|jzyv?_(sGYYY(5aZCpQ+cwex+ISmS8H& zNxt6TdQFpnbjmvZvx;bP4Kn^{12ggMvDLE`wRC)je|l0>%z#-Wwc79`-4tO94&fSd z@(Ed2#$E~e>Mn~%dl}dKfzjg%ea_?Ij_xPWR$C&1_gtm!rEI5`>rrPFFsnVzaHT73 zyiBX5;FvwHqf^ovd4w2!L*iW@d<-+iqt;JkkMq55eU1WI?`fz(tjLI1Pq`%2Yxc87 zD=NMuB)5gAqnsZ|naxz?WOrK|4>bdoi^bH@-W*dkQuN!Cxk5iGsJtLBavTo(Whzq6 zq<5R-iIchVE+&a%{TR`1f)E45nuA$U(@BfG#zy#V=88%d(=Jbx7~dh>7GnU6={{ix7K_u?LD@$i5rr}S%i z()fi^)AP0A;M3_Q7xp>Jc1+uENQ92EC9xEs4kRyO3SaYSB?*f`Owfc;M(Gnd;pRIy zMVJTf1=3gkO%zmxC}?>n|4VxP3ZpnWT5Q(ENT;+-?qfbs=4Hg#3vXH!iU2vxwI;NJ zfa4zsu|kTCx?WkHMU{_m6lDd!GQ5VO*yj*pRh0!}d@N0m7rb+JIk`urQ^7I`d(^cC zV49Dzp9Bq36q>?V^<&d#^Tq{(;y7ZsHO21VbZhj9bl8v|qAyi4_tKZ-d~3Qp?Q@ZT z!HpoJZ6tKf-y#boXT${T$+SCRDPYRJFbqrAdhJ9u*lc*5Kqm5ZshO$CcZ9Cj1%70l&M8>ryx(T<=rD}C z!pYtefeKB1po5w1-B$}tZJrQ!)9I?gr4w^0(&_5Fs;buqr}cbGTqQD~%yKU?T&P8} zJBhwyK}izGcUYQqW+VqSTMt3`kIF{GakMwXnI0{3lp_V))5mC;8aTOiL>iw;@L<6d zn)`9CUQzj7rNKb3*d195k`PHfXneXr+ zZCeadBiIvMB!0YPgOuM|%MaY8W=H^gDA3w5R633I17Drc%-$IUfjH3d*KJ{yOqFTPzm6mfP=DOABW!&j1Gr~Fh_R~6->hg;K*40;PRj?Lp z7)n4^wMx6lBeT6TPI6IOIQI9vU!^yM!^wChij~Wh4wOEQFcLHAB5K%o5tnp2(6fif?Bb!@Pui`Me^;UHom?t!GE$w?a|cA~~i{e0M7nqVUr zJbvKu;NpDv4Mp)Q&|xZz*X6vYO_I?(DHewi5!VlFqKra1TO7^6p-}?L;Pa-V?Mc_~ zDEUkWrNz7Zbh;%kmmWOu-B{qdL=JiF6yn&%9YkqeVNxImQZ$YX|EiyYc@&I^)>cq2 zl#+Ip0cQ;R2=3J>K(S-7vE3P_sIrzhYmiic5?G!cM}VOk=I>ZPNGDK^XC$hp@HT#! 
zvr)J`lc(cYYkPX^M>K_;crrCah(Z7-w@L0E`xt-S_Aq@YZn#nZ8@*{ux9{*yA`wY1 z-3gc6_w%#Z*c3t4spi4$+ySBIWp*h~z5#?bB6RZ&g?xB`ec_`WNVcuE<<1PgD@gYj zneX61ji_iR+>+`czPcHDgR?_%Tqxsjx-^+X>+Q|Sk99Bh`6aa&RCC99UPtom^_JUh zr=if>5x!CvG|KZ(;MOo4i(f2^jH~UtC(>%Vj^~w3!*O8AOxzB@58&ILr}D)YZy6Ew#nJ zRBfXx${o*>da^xJYf-m~N#_nsKC->Nw8~vX91_v}1`?P%DUM_7#PpW$wD&De zjOP?CmEpgOEp0lV&Ny;#u@2Sp*il~yzNyqJQx1PZ1`mtov))BqODj>TpHTL?KLNl{ zql0@PI-}8!cLnkRHC$W_>MkBVg}9L?d6J1x{<|$1QW5o(V===iLg*`WK$`heg(@qW{Vrt|T48dLe4xQj`3 zi`)b)_n%zzEgt+!iFkO3+t1*G1VFr&1;3p;aAW07g51w204yYDVa-I|t_{qDc(=Ln z#IOqDBdNx$5Lm^u(~UEMQUZ~$VjMU$qxc0uiVu#0*a5`~J%bMreh~D9N0sFx#^DuP zlevf@gxn=pI@(-ybm#q#4u~rD7nrKEj06#Uk0ED9qQ4?dZ#D3s==}BRz2TpqdlZ|; z&R&(Y6Dy+d9@pxsHP)=7{k)R#*N#%sPd}VXvVQP!x!T0W&Qn%i{G$9l^k{UTQ$bZt zjF^yL?7Lg3mDrj_6 z{y?XPlzQ8!T^r)Yxcd#Akef?CCNSIxgY1Z}wXS~VO6_xA!KC*PiIAp;uKh0TUK#;TZoywiSqXoljhsTB= zaG|N7^vvZweJ3lw=erbp%{Tkm`RVRlS<4xo4}Dtqqbw658pay9(6c+x!~v)EDhRMh z)!huWzhUqYvs#d89Z`q*eS9VaNxFD0(YO|`zB%M^Unz$jR^p#jC=jUG=vW@CQfEmK zM#^1dkVORLyFC<2dSjN<8+4EUSTuM-1`&(2^zK49;QUDAJ+8$nt{3=vjKw-#8nJj8k2ND{wsJYKel$dN`;iHX12`cRH=3g?5 zJjF98?Qv``d#t9LE*_D7V28fGz3)%qN)uo;m8A9^{;^*}O-)%_7mBS-WC5iO#Aa+f zPb+$fgPO_M!kMu6SRRz=bx!q<__5J02oaf(DJ{^ed)p%mJYA=byw5+|PKb~11TSo0 zP%o!>F{)oU<~v^~pAVI8gg)N~jAM*?Y6@TEZzHtW0y!LQ4;h}xQHg?yTZOSH(Y*`% zw|9dlbdWdnNZ4;t+>F3E^qg2RZ2Gy+6<`WbzuOW|qN#AjugjVJ`nXUnk2|~Vg%*CG zOnt1zYz7n@PZTo|GjL7qe$+|GYCg;1w)wNhvkCKqK2le%5Y986;xFOoqj(n(8xSB0 zF72VTVh^gjs_o`iw@by;+{8iuvfpIB3e|3V6EkxY+v5`9Zc~)MLK>4U^U(Q6?mI$n35xm8K-UkRrjtQ# zI!t_%1O|=XyFvx}2tsZ<5l~p(0z$=ZsYXL=v18e6xQka@ERI0yt-CKgt-_g(vyE!rpj0_b$MSfz3vYA$-VLSbknKAAh#)3ZCLLLS|zm(PkoK&H#fxH;4EB5i$P$bmDCxzF=Z( zkQ)H|SJ1jP_OQbheiJTi}SF}07SCD>x>cR^*gyziTW+03-j)fyVS-7kfi^bM&-4#*|5Z`&le zti6qnHxF?eLsV3&P$SZ-298E*Jv#U6I1U*1rLy0l*vyZj(apo8-bxw37q!^2g)~$Y zu*7}TEFNUJ@xNH!els7oxIsW+3sj;DnqbxyR8_H(u$jcx^WCAOn!1YpFl+UM8?Vqm zLmwoIz#0Ygn$bWK+qMr8Ii=h6scdm=3fBiCdP|(uaW7F(9E37j^o`Vng#6yGhi&K@ z%6Q2e0gAdRoCYL=aH*1G@r+ufT)ABR9S8`M>2Bx51L^!)b}Gql%p@%F*ql#|NIG^O z#QNV!?w)myYVPX)h!|n&&R{=8xX+hKSE^g#1BTp4x>^1_@#X3|^8ifjDz_W^=>~_g zYe7;D%ZxT`pZ!+W*rj{z3{ujYO2eU|hz0Yx`jH7`Evdbk0DBv?V$acmiP4%QQ{VL&K2s+GHv{yY9klDiII=D&nl@4#VK8uZm^!_ zA%!T97A*M`P9bURj-YzH1jLPbIT&iX{1^~5$d?Pw7kG+Rl(^7zmo*KxpRas}lg!>h ze@8!zLO}r6GR>bIPPqmWR%ezi=Efb@YHke9yOAg3IbZXn1EXBmnAOQ=YCoM;789dU zjEBpua8LTe7)u)gvjOt;XE){%L%??ORuMj(;*&QYVWmS&b#FAfGKB&P^twI0^?=`H z1X_%Wtb?Hi%`4w#roqs=@9reyWe%2Ju2p7JX^>nP?V^*;!p;1}FT|yo4sBZ%LUVP; zwqewKy-k0%dkgrs>>5^`1eyx3^r?v8RW?1gPrpK-;PnrOJOZ?>1KUYpw5)jqY#!K( z?c-N!Iq%8IIV@X)+j;yW17C6s-h@9{`q77HI?0;sY+Kx<`}sb#Lju!nvVEw}4+Sxb(1?$KSujYKrwPyU)-R6072U{Zm{MgRfO{pfFe5XklduW9U z*=GqZhItI;`h~=!=v&E6VoS85z^13-2X8;^lqE_$6U7)2#5A87M$P<{fAG+(&@}w& z`ST#^TJ6#2pj63qNjdZBt4BF7Fk3M`H1XT_j(!0iP=<+bSUZQfC%Cr(;)eXb%jYo2 za?`blKZ(DnElNF#2SHU4y5_H`-{yq4eeM<6d&5D)|KL(Q0ik;({J z$L22y>`=;RA6#k=K&3Z4vf4I)Y_`2>HLIOdYPTbr-okc2^oiWC0R|9{S$N#2OURT_ zTQ*Egz}i;CK@-B#l39|)YmW|RWttV}$n`|AL+Ig7;%fOmwAl-A8vcm9XLS@9__k+s z@Cx7Qb%A4&a)M#yDAZI*#*(DUo}O^qL-bS5_R2!eo6pvDcZv_1b!+7TD~a6pw^Sak z_*QqEh->mRLbqFEhE8z8IMuonV~+m^En5J<(PUu@ zyA*Q@RPmvb`sa9&{z*aENZ~9@r-0>R)mVw(MMiw+3roNM@CJnTfEGXD)KWyB&*$zJ zv$v@dd^S+0e!^eSAbx|}7I5@v&7$yqs~+LhT-POkixGQLNpAKebUiIwdM=Ohxo=v8 zi-j&;AK!eYQL#}a42XKy>vxoz91U`O+e0ccGH3`n=aF7LrZcPhY9OV+UE$#}zMdxP z!VsYn0{)y-KlE}!>J|S6Dc><#KiNi-TQMMfp5&_#A6uAg#aAt z2vNWWirw*9uR9#)y}sVJl9r{L&`=N%3C?dFMU)+S$&6%c1AYd z&(s8MJf1=kd{niwyE|M&tAdYp)+2_R=~z=<_f(i3tO+1ITeNf;W%-#Fra~aIE;Kvo z4>^x;iqtfY(C)ir#HCBw6!54eVd$5#VEh*I#-lE^ghKSzPh9hbu$W=gVx`ZqJ#Zc5SxkXX787}`mVJ8{zS?XAR*2s4Y(j@pIJ4bRY;0@_mCH06Tc1^2 
zm`NtucvuZ=AKlN)>wnkp{}i(@Sy;YsjmMiGtLUXwv zJOafAh6mX|pDIm8GV|%%Q(i{6vZ19tz!=o$48UDwHPFFQ2R0+Nvm<)0?M?MAS4{hZ zV4IS}I#htlRp^1T=};~%Ka^R_*X_yPnm&7><`;Z;hzo9SskYW|*!ZoKy=!=M8~o_2 zecLgLg+A5=w8W-i<k3&GxA%Mbx)LQo81O4#`WIz| z0$-hJry^iACw%Uqsh)&Dram23!$$ZPN*3tVhmsQF$gk+hRi7Dre<^Fx03iUhv=l%^ zW|@x2nmN1}A8ex(qFdDVc&77sxwu8K#q9jaISFU?u94Wn zw^w8cx!r!5eL9Uv97*}Dl0Jm`ik)!Jg$X`M`2*Oc{Npt;^P>H>$s^maX8TA}ba0^>#_ zto`+nuu?*?S~>Ev(QI35H*@bQ9C<_KYh8y}*i{qgx834x_rV;O!m@=;OUL_-aUZ&F zMbWWGPYmf|BOq?`j2hc({W0~wn=|=bf5|Jjvm?5H8IcXcFsmv0Q6g4Xj>K@eIew)0 zR144_I*7ba6_)xgLv+aTJKV)91x>t-kDmp60c-{7%HLgPljge(%2t`^R0a{N1pa7n z$7VHKV&$t})i{`Q^_t9xw8Fm_U;{K*Q ziJc%m6C8@u^<=0U*m28TJX*2!8k?9?H^_0l8uo(uWf%u+PV*wjO+G+6>9#!vJowDP zsd})N>Dok8iAKOv;SK&x6AJ7hT9!k?$Y&~4B*h7O5LmoKisYeu2d$rOFHYXWA*G0l zq7&{}LEoPiT^&X8EsZ5ewCiVoO(yP9K-QoBW}+A?gU3nfR^_wt-@chu@iQS-h0uYu zd_Y@gjbrCG8~ePYTJGY$d4Sc4DP+p0S1v|Fp5&kf502E^7UzL_1~=)-h2|#N^%~My zJL7G#6@Cp*I-9il7kGzaLZ*bO^DfJhD^2-KxTX1%3CT*yR@Ym6KPsi~!|6e*iAd5G zN4>hN>?<-*WA*`MvO_z3qh&EnVeWmSxOddSB_CqPR$Jp9(5dryzT1krrTF2#C4U8e zfb1?h1UW_PB=i*Q>MAxLE$nqG=N+-YijG@4o4W2Of4lEorNH_RWJeNUArZC%@B4ca z?x}3d_v&qzi*IQIi)F*vP@4tk>kmU}Ah)dw&7}Z0BE=-YE?v*N%F|tK0ysu(=Ch&Ya5zdEB1a(N*ldmf{P2M|rzbU0F`H+XtWAwvT%&-@1?qr61oWi;u z&NdEjTKVjP;)UD*@AfH`Yu!)itUMaX;_e(T98eSUVa}C`{bPfXN4IhmG}A|u{XyoW zvf{{9cT0(PnnR<}bUo`Rf;$~Hh;t`Fuz_;7UXG~Ss(pd6$?v|sZk!=S*uSwQhXS8` zDVmsvi${QKIOb#Jbgzzw=Gtp^9aBlk`4cw{3_}i^m}ltUt{3gC9pygf%#Gda@fjjn zYk^naGCxwMTt7ciwEK~4lckFR`Ff88cjf*@sya01W%kQB&T`W9W44WREZS6)a{rUH zRn-|3=lwngXgy?InM!bL)Z;ReeR(+p=xu?ZvXr=_wB2psuKPn2nX`Ud*?n1Tif-NH z_~}ln;CRl=?C<@e_Tu+1Prg;5geHK=J?YXTRs^CL25fcqS|Jnnu$7M%?lI_m3!lK`E*7D}p z9QIacNN^BaNYR)h>#_N9gzCb5k&EEve3}c( zygn6926hcQjgQquB2^?EFs5%?V_v`}`6tL_uAB#`hfG+p<%!|KcAbh;Zw=XsQEu&A zR_=ABgBdHm93f7h4vFO(dHngU$U__zAhhTQDAXGB(-}hFGFWv6M~ZH5(>$N!kRIlH zziv_|AFzv(1AABK#Was+=Ie>jVwH>8;}ndt7+y+jUR&SYmolsa-rJZ`2Himx8D%3j zWMUIccz3%*XGsh2A{2;@C(RoiqJ-Xka(O98`Y5`Gcqvwko|6)rU9-q*<&ugk zERqfjCOq_Rvpc!#i;sfGc1{w~al$dN1ADHQiT(IXz~D(%=ar9ZfT9-|JsC(k(dc(6 zZOIPu*IwVqV3(*u@Gv4ej(@b}Y`!Vq?V}5=_@o)Wll(0foYkf#RGt>zv|)Ssjn$3D z_r`4~)MQ z#%zw_chB!8r%4Yx@1$`uhK$!YMlVSheUf_H5{yHd&YKC-_=|Rf_clORlo|=FZWjA; zpA8U8?iMwrsmpvX$ap6clBtG}&i$3>`7Pho2Uea+D1$Jyx%u-I}E6OqLM}%uykJXATNs&x~-AX z2gT;u>nCEgkVVF6Kj|{M!V#Rh!oL2R=f%Zk?KLI5=#LK8qRLlHz3#8LB21T$J}b%W zDF98VH?T#4sPbP&BLQMWA!~%|24kXQ4zi~)iI0EN5p3L&x9l=*$y)Ck+}!ybj?mGX zURwG5^u(uWh~hb;2ra#l?Hskl!V%}h)+4u_cBCw>7|C z-eF2;P-_;e8+~&tBf~9T9hnGSgXX5dlIZ%aLQ>e}rCC9~%{~6Taa;DwrsmXo9rG2w zcH^F}Y%aSn7wvt7Z?SCHjgt)5jFXgZ<>6)-aN~RYyu6X5lGU6nNNFTcmvvTJ8XKO& zf2QO4_EWTboYlIeQvR2ZM1+o2J-a&W0BZh|HFauxzKQ88!=qd4O3MO78z(l2fgfd) zd>Mv@2jKmuBIFtVlMG&^B69EC%1^qRgbe!}lNTB=EXJntx&%X~p$&@8B>2fr3S`az zvDZMDbH3D$5)(jE$_V=&x8hHngK`qCdG?4JxPOYtc)H#^4{?*7*T3QTA#q1&+&OD( zu?7wmtje=Q*U#9L#xP4iMP?PYu2`O_4w^UX7d90|$iqI2v}3zp8*$Q&ghHj@*rQ6! 
zPDEbtoBSc|Yl7xFbYRmK*+w~8QOHA^50yh2Y~ z++gkVgR^2g8HVdUTvV3qLy$JZ;p}15qOJf#94OdF75>65AkigICn7>2Z|-4YzvaMC zLp-+9r>jf7E~U4Y=+kl)$!)p`5GV>FYor`mBa$-FOa$!BAmOk<2@Y9BP)zn zbBpF{6u5&=rfbM**#!@vP{EMe zVy>8F?o?-Ik!h#9e#EbkG}va`Ew+e_h9sp7%1m1q z+tZhzQt@oP@+8OHada?6x+Wup^^v@S`lH$UgvEL!kv*HJe2JjKJPE%91y+3&sjlNQ zrivo0tnC5sIy1b$^!Fmg(?eR%dgRHxen>@ArbJIlt2gY99=f&ZN34B!n0wd^3jAr} zh1dXJ(GI+gvgmO9Aqk7j@o>kbqACH=h>o&-i>fC9;QqK*2a5$KcPun{uTJ4&_uC%z2jiY2S>R9YIg;#_VEv3)}6 z`8(J6u)sTvp_Pm0N=-nFDLT01>f<_GVbi0T{ujV3G`Sg18?+gyG&|l5-uCP#0%Nu( z8bwe|)l{YR%Qr5i1M4*X&vL)1bX5m@!@m!A3hbIue23(>0m+{fx%aAJtFDC8YNWB`8 z1&mjq8618hs!3nYQ)HqZGNG=jsVOhpQCctBcfIZv|C9cm`K6C^pR9|78S8WiROa(- zTM49jfH#jLuw?W4T#(R%%{uO^igs4l38BT9gU5T_3B0?lv$b?y?i*j4C4hx88U@K6 z!Cs}L@$bYf_v-gSdv;7Vx5UUG6S66VHMmxob+e$#r%B>;@1CXkxe3w1I{d573X7IP zBX+}@&VCifvZle?l_YRK2YjGNht??4o)s?dBUTre0{SV1guT4aAO>^kD=C9=ZD9SG+x>rkPmIzTj-sF zvpp!nwuX*C?+(4XBxCFzf_Y3DuK+Aruyzz6ow91JTYjv2XdThYhzsEiO3JLg_OjTn zO&O6|dnW{bZJp!lH259R?1NHhopg5{W6a9-^Z5V){<(tfq) zJ^tGAW^ED&we>$+QwDEZFpssMeF z%L0=v{$Q@Yv?tfD)VxO@@oj+by<$QZleFofd@Q0*B9>U%Dc$t6g%>B#nQ6ik`ySTw zYrgx_XY{+3ku0!fkRz6W@7;!&|NcSy-E~f4%{HNZ-TL6*^LE!vXBRsPR@I>V13_~l zpIsFkav)WYF;B&`08&OaZFl@!sH$zy#&Fbn4~x1}Vah7>&gAg{Bz}qbt5D4jS#n zEC#S~ZsK=54;J=0R_J%0SG{PFx!xKz&UOw(IORv}us1Dl>z#Xc*EdLJ&J)E?&Cd*|*q zb)b2JBFwSJ36NUC3;WTD1*i4@Qo0872|IO-T44z7lGEmcWmVo>kTMa}9N(d?_xm;3 z*LXg}%3H@eN%+d|CXU_w;lS|;<9SYWxzq5n>V!Q8R9tzoNivbKJqp`;9v~!4-d%ZE zn?Q8^M$}B#k%O<$9(!mE?=LR@;-BF*GcBEMpzNbQo5v+ko9Bz`23pdG*6k$&ewWazOkvAE=+;;+vs9gu$ z<)!SyGo};Fs{}Tf5Y;Sw6-t=zgloyD9pMvbRn*HOIgr;9*q;{A*6|4>GZm9P-FI0y zI7a_Jw%$A}sRaBNpT-$=GSjBT+%wxvtxPQ!7_)L3C#}pS*UHRYb3+A{DNQrC%F=Qp zvobeuM**dBOHt8WP!UW~Q4tUU5fJ#D?*0D$xX-=+!1=?+=fL5-=Y2ohd-{2f7*M!O zEzsgRI7+&?C+>7!@*e@gm>LZwzlD=c=t5gr&Q9w%a6^eyJn6diEU9l)jaF!Htnm+Q zEuxhFc&2i!YFTH)y}}oio~7b&2faAuYjI7NJ($RS17KPt`}WCB)#d)U+Am5E~k9nED3Cu z*+@oPGq28x4Fz6(GVWj+FS_%|mFx@cFWI+VgPs(Ma~<$Q{Wd#r$kw^Lc!`2;d>%+N zujF)phan38y~nZVD9x)k)tPR$qw3=3ds_Cp-O~6JB77r`DeQgDj{P_OTO+DxI;d3( zo4D}0@amhb_y>I#qgJc)0G&6dtsZ1*MeMQgZ|*vvbNdmMvU4AsOKA2+p)Y>wF$hhciMIb4J8ap`TLQYhd))BbbNa zR5X?Wfn8we`1M8KRDr?{?&l1MwB?dBEpxgC630 zj!9an>)TJ3SfFm_6mvwn@dI%ik?j+iGDVP?NDI{c7a7fGoZUDPuLXM*O+ugk`c&(Z zQK~1=%AyMuj#A_FAJs8FJbeywD70x|Q7SULos(6LptoH-XtCC3q5|Xq7chgg@^2)aeOHI4F7>&BP#uLKmU^S z8~CRadP2n)ocS{~Fe@JX)8oT!3-HoW57&s6?pFv5t+!GLj8c1k&Lu<5DtU+>dv6Pf z4!L$(k@CgDi)&4AYxJ3+^427|6~?gQQ5fz?n^)!aAK>hBDXiPRe}@}?9t2&;&Q#N% z$}q+j;Acwi|A^a^TKbgnC#SR^qiZ`&pB;ZL+HhYf9u5+ zndiV|=eJ(#fjZe+^!Tawa38!^g@CHyw7gywcIr&8j=J%yB-xCAx9yT8+wt%l#=VlN zY(|RAGCxU@DDP#x{^wK@xn}9*6Pqg7y{)}v;qTEilA+AmZU*vQuHz7UvCVouC+~Fi zmb`R>K@`TdcbA;&(l7t*IP8@+6%ZPJH&zSJqJ_8S0#7SbeP3O5A16=0q#)H3R1j02 z1}+dCM|?IL=N;~DO{s}sRNgdaG# z_sOa+aa)35lI!eAOVghqA4r zmo`fdqo{Bd;h_(RPr+Jn0Uoi`SbE*@Ru4y|G`HL2)~%C`!1G-`^Bh?6!*pf1tZ+uw z_v(v24G=AuJ4x1M{_K;#)O>0NwYTQq+_T&A)aepVNezO(>Tq($zk9mkPRKvm@Ahfp z)3JB|QHmWCA!-wMZ`}B&r%m7yOuQAf(f=kY?7_7;^bB`!vW;#Illh-Iz z=uIseP%Gh9a2O^;8>L4*<2{WsK%}G%F}rM+Ze)m~i}PvSjt@+_z8%9Eb#0y;de`a! 
zSw8IlhGEHuuOj>76~`3LQ!veLLkh?EoBOkkr_X=@_N`xbL)e3u?#jwLUuk1~$LOQZ z(3wZ=Z@4Y{^@Ao7)h2IjD#n}Gjekk`Be#>hFv&3^6ySN^Yb5(>JTh!9<#uOj_S@li zY~EcXv1r7!7URxy50)IkJ=FC^U1cn*rPz`c5FbD&4qwt6&3s>vL}+M2@Mq7M-Bep@ zLoMm#aGSCB3V*11^O+0?ZzX1E%2781s;Io_&zHT6Q^Xyn>z$&Sfyxa^(<~{hW*>xi zgv@+nneDwx-%}Z*&6S1LrOS%=c3CZFPok&Ku;e)|_i2jO`@3~Eo@Sc!PJRrn;1q;~ zS6x0;<8HVo$6)@&vff#ev@jh0BO2?etduUGOVNF%--=^JpDwQ}H0MxPzOPeh1w|Qo z20bz35RJ&B<%mIhPC;OA&0Jw9?#=$>B-AEDN5j~_+r}EM@R=aw}zEu;|evW0w9lO0csU~G`+tP+0 z;v?%dZBE~HE-WzHMcD$b-n)VP-=-4$1f6lgG|d(NRlUY1?OyQ%h7N4JM)uw^Z&Owi zhibZJBj_A47Fi|!aW}uX`D$iI-+-~5=~o$tJ~P*}_+?(YX-G3+DLAjo2etSmxcRsG zNO*60MqC!c)<_2Rv2<+G)KpAC=S(>*dm} zr|GB)1d?m`^|MNjcPV}AEv~Q;i93T8AhxyOwt=!Tt7XR)1zurC-wezS_bfaO=ZJ2D%-%zXRpnD%J)ZCM+n=mTfMwV5txnYP|qa%%4|21^kMKFc*v z|HM{5QR zWwCr!42{}Oi5dkh)Lwpj4ygm(PuaS&eMtd?O?#r#uf zft0lgn=L_T?jl_mx5C^PwoIP!ils5zy<<5y<7>AT{b;qca)dHOK{w)&@km1&RFP71 zNUa9A%0!s{wHn?e40SGMWr*{#AyJc^(KD2{j@>lIZ!JQmoRVCht*t2KVD8|RW78n# zdVAPdu?t)8W{-8m*NfRB(_Thw zI!$7?i@z}AKksTswQWpHdPUl(2_=6O|1}_TR4nt0bylv)_lzFbG&!e5kcuC}|2W3h zJNswhwdjMyg6sG@7>7SI$k$PhZXj(`aO(~J*IxBzSHSb)c}G|s?>w`9i@8_R#|3_i z?{C)%?ph1mh*t#a^4@$y)uz;<5fy20N=4ao%6C!C97iY4<|xeXjjJQ z@wrQV-KU|v01|U9xU*Qibn&Wb_hd{t6K7s=WPP%a1Q|u&C`pp;J^fdl8P(lt_<<@% zzEpc>IuYGM|8}UyxP7a;!tO8SAXRA1{*zmB+5}>hYcGVV0w!fRCR= zgH*Rw)8>ymd5L{3#2_B=FHX|r1)aGjiQNJ&6uX(&iT$x_&TmN6R!eW1)8-ReyuCmt zPox?qhGS0Xxayw1XsS~BSZ7CW(MN|uBdRDYmJQzwv~l%#qE=hhIZ_Z-6-pbSk=gq+ z>fG8j_L9v8f)ZW``U_S`eQ5B;O>I_M_NL%dZTHXtmKeowFNpP2GC0l2(#x3PHK`|an zRM(9sJwEw-(|^u3`1+s5y?Dk`E$~#JL-5my5~fP~bi9AbNm-n?G3zLqcE7$o8}G^n zv6uqTdgvagVpbpgaAMKX#6UO5@`q_dtQ`3e+ z_W^ed$~H7%tgN#Ax6&`b9wGek>);}vWV~oR2D@~^2Le>&2$G=Br{{q;C5GG-b|>D_ zT+41^5LW`aKaH4EkB%Gux&7GG)vMwM;YsP;XTtJ?!0VZgRgXPBT8WO*o$8?ctGbq9 zI8EMX8i%k)`iOuCHe%}tbdhiZiurou)2$`F=r^4f^$}_U)dDLx{&2#NP;6R=y6zx$ zYP|;kP(W9<5Kt43#&+IO=wvQ&U9kENnaYXAyK-iOo4l+C5$8?|zZ2gk%~fD;HJtP5 zDT6|6()~Xtca;U0#LG8w9{lU=ANN_`lhm)UD93%sW)5$Q{zdyQg`xlR?csSlg`|A# z-m@q58t1g* z2w1ck1vvj87czmdiT=`m&l z9l74x-9))(Y?YRcKam-ioFy|$TS~1# zUK~}8{V6NL$FwFZ!}6U&*^D77^F+4!=r#|~9=hhM50D?NlMck<% zM6rdAAvrGNGu!5AOsp#_d}&?qPkc$Gh#w*L@F=Qz_ki zc4%`Xqq;4XF2Ti-66WjtC8so3^rF|ET`1EpJZdeMe2u>cGVw*cM?2OQkiDkaBXQJ3 zUay+e>OxLRdC?d}a-`c_BkZMNt!+++nrA|j`0n4-Zw_rCn5`$vDQ6q#7IDog7i(@Dnx@fwIzgr!rzEqTpc##E>64|_HAaERu< z^rjlAbHUz=UONN0kK9nrLM!wFF-`_G=N2kR4u?I9y;gIpHMKh5rwb2XtG`Hd_Cs%0 zge?l5m-lsQP0d0%hcI_~!nq0a zW#Uxqax*p&5tvf|G^ZGT*B=jU0#2)x()%FnI-3AF>lE+3`&0IDRYiPr@Htb^*fWrB zF&q9NZRm?nx|z>Q^KrJdrgh2+U)kE0dMvdTD#iDaD~gGjA+DPp^&_r@{6@J1!F}YN zAnPFBP0>|fgdJ0dkJ>X|tVe!+Wa!aS;q(z(N9(~Jclzk}^4}vhO^YX}4+N+myf0~p z^KE#K0Qg$_x(h1d{6X#kPgzXTz8B~$70|Vk$M4&-!8o7CIyHtf+3+TSr0OV#z6PIJ zQ>~a81=L1M~I{=M6%`vARW(%T^ZYD4jcy5PF_&#EUl^o~< z@xQXmns?zLZSSq*Ag}!YIxpQzITss}c?#B>;bGOULPjT8AL)L> z00Sib2-5?j9l8}^6-$k<3IH?uJ~D#a?$WA)CAFR|V&z^LjhJ4~pFh?1IsI+E%W`># zKY!|s)Fozx9gDhh;4<_7TnlcuRDC$L(#W8d#_4q{BxH(;w5JO znV#5h&yR}G2dj7U&7eS0xN`PjgBZl@TJ!tlhdrwE!jLu=a5z7lMDO-sMVn7Y+IfZ@ zo!rMwtuh{)eD-?Ag_zB?mp-}f+m*OcSVGJuhtKA@*k01(#um%4?8Fyqs!-u2VAEHQ z5O#qc)%7f<8jSF^#$wD2P_IAvoB;D4eY|`vxSD!-1xOLmVf4`rP8&ft>&kt7?X)NQ z42+DaEk>VWq{`4Arv*d4#DKlJ4$E;bM{4K>CmIC-(w!I?Cr*5Wg4uGL93&xUE>5=d zp3ocwF=Mr}4`H3>EMq|~KD1=~mb;a^7n5*F=YsJQGCP)-%&PE@$+B36Ii2nUT?Y9P zbGXL1gJ#~&>=ci%_UA9JMmEGv;F|?f1q6oRXX&r3S&eSS{wNwLf}XK#9+?DPok_wh zPh~7bjh2k#+a1Az6-KytD|zR{NE6wy=`ccXCDaxI^JuZF4=h;iJKBQ5=nr61;?EmmV56R`sSyCMslOsOFcZ_QV^tof%F#CtX=7 z=*;6-zz>Fo9}0Kc>+Np|)yb?w5cmzpugRTq zCGz~{cmcC*H_;lR9Te-D(C-?>mma}0%OX{`hI8Yi)T z*SX-=uTDpKTlywn1I8)Y9qk&ymAh|aJ0r@4+zpTJnUNk8n1H4^w}r>Bfx#elnk9Sh 
zkmMEI3aCzjJIs5Q{1kZIBPbIlvNaBQbmLdJg6B0_JHcmVo_8fnn~vwXzY55;yV|Ee zs6iq^lj*s=XQ&;?9MA!pa?c$)c#{yWgfkC5Pe5g%+DCUm728~`Qd0S$I7otL^J6^h zhiy{7nAO@44F4u&*~IH_v@0Oi0)wjY%<;l5iANi6xI5ngy*XVZ6Thwt+gzKGRCBJK zkxqCS>l2RpEuh^4Y~q!o<9mrdKD1Y$ybK={BaG`^9dk`KecNmD*R}>W*i&W=i+;{@ za^WwVWn*}{P(^3d7lS8hqH2+=54;e@C{{T{Y~7iLj)JX<*!@C&bI8#$+E^`XDj1`4 z_huk-xvn|E!zY;qABB06AP0ducXVnvvOKo+o8+X&*d8TOW+W1~HQZ9ye)i>P4v#Nh zDP+&9uf2KX|GCPT?J|TacJ$hmQUrD)V;sv9FS3SfqIl1RGqrRuO%c3VN?dAKwh=u6 zy;WI~6FnRMJ)`XB!GECFI`4D-*TGwrK(Eb2UYzXJagT0#?NfooF-ZWiN~ z{z!iFR1N3Rwy!UJZv@Qx1eCiXMtg3$uzmhRea`EAg;O;lIMUIjT z55*YtVp2tMiWI5LEQVl=zBwBG)E^Ln3oA0_$ztUvX>m{7%EJvk{5;x;-?VN*LhFoA8*${;N_-J#cfoJWUgH_dWT3d17M227SW=2_b*el-u_ds@U}FckiY+ z*t8$pmXZUOgjB4Xecr|Dcj{^}T_XAqmUDJ}$>Ugt;y>(9spKG2@aa94>K@%Bf2-eh z{_-Pi)+SYK4R!d5lOw71@1`31^?3=ES2yqycl6LBC3nUfCc~_+nZH$0oc&yg53~_J z5Id32P}N>A@|VwlcUMHBpqPKd?L$bipKu-e@A04@tq(LsnV6 z$h<7!ldkjF7^`+?dCRiN{cmKuy*48@d^dU$5d zRLhF&>r=8taw)4w$*5r$HVMs(1VB`+E++|GKWvNm9CUJe1daB1@bLG9n-oS`x1UO7 z#I4HqZWHSi_hj{jET0N)!^$nmtwf)X7sXLQ(o8fA^9F3iNHedJFExP&FIQexj(L@F zlBt0@((UXV&#oUEag8#Ow@V$k+IgUn1Jyo*^6`q3>V)h<%7Kv`dJZwA?WCjAC00?Jte$22or9QLzW#tv)?Wz&U|4qV4JGS-0x@BkLVz1CzGzJFbo%zRCJ{WOL7QSEQHA~&1%k# zGVY^%%dLhX&~y4wz=L)4PMVT+%0~Oj1@v4bp?j=D6r?J;4=jUzOO7IGn~>o^Dco)B z+9|jZ>fG(*(Y|c6+5aBUa`feKX5+l!T-$B82V@Q-{D#!Hb!tEd zyu~_p=6dC^6wgm`Ik8m9xunS}!F>GHl5HM~CEEaoyqgVspYXArj=Vev{c5jx%ICGQ zI?=W|Uu)aA0wbBfi?B_gWb-BAHfaAg7Lb?9`|KhoEBQ-hNRIc5^{HQ%9+Kp^s+fs) zN3}v|+T}`F_Gb`egUT4jee|q|aV7XVjB2|yskVuO9S)c9W!HzO*Lz~J zf#H#}W*yP4krLK00%}J}ulo*&*{uV7rumTd#yuW(K_Zx58TW8iK2j!S;l!ZHb*nQ0 z33FehVNGK$>0|@5Bx{5Tkv&^77=J*r$;|g^SiE}JG~OC4PI$0-j`dxKj*)CkU4tm zA)AUERrlGh%cvX;K>85vC=7V9xdWR30{>ZljM7)Q?Ne9An94{~VupvpJ`$u_*U(?D zI1E@bb}q>tz4289@tz%CufdI>%N9JW5_fPrB0O48fma2ne%T^cx9#+GFA=ILZRnD_ z;URE!n18t%QP6DfL{`{I}~aqt=JRsz}c?)3ZsB#xx(WlUZ{FWYay) zpDlG*?l>bsR^~?#8a7j`2S-%cZ5&*1)7ph}4gha$kCoXzFU`*z)}&aXK(qog!b}Qe zhF%L0W7sh4V5pINfZ0y#6|#bV3E8&rNk_7r7{kA3?{DC*zRyvY&QCB^9DpfOz8eC{ z8>kn*Cgu2fIdk7@E1#faxJ%sERS}IAz~7As*&hKVp-SmHy&JVgj51$(RFTG)fTgj9 z!Vz7bshi}6kO*W4V42wu+i}uOxOl*|OJ2hToXC5eykWn9h=#hFA4F#lo~Io`b1rMT z@#7L)W_mGXS?$v7WhKgt>^`aG3hhy3Q2(9oEB94{iZ0Mf0F)2$NHo=~q3w0RV!~O(X zrEet(hviFGvToYW>+$9`6VA#4If@B(GI4^m;O>EOC)^cHN6REWKbG~#9@=^${`e(Z zM~*6HQKPDR;b|6Wg?zeZ0N^@HTC$rsV`|wJMQ=;?lnu$ogFOk4-MeYE(G15NS5H_5 z%yltYp1(0w=FhVd7@tnDlvO+ZDG^^-KU$3{G6bO(tDgS|@8A+UA??e)UPSyW&)iq8 zPpM|b&Pll+|Bdi3j;$bv(3HEXr0sLpJ*j72K1^Y{Tk!^4D_*iAMd|IHEmCJ&QD4I| zP@0FeWu66|=CC~H^XteNe+bB?geZG;OZM?cyNn%*jy$RjzvyWc0+vKxnh&)+QA(>Y zoEZd%szOr$@L#yZiHlFYY0M2e0Beey8$Wfm`EXI-8{CfP-R)4n=JgYbvyBW%$K?UX_`MOEb9YpG|*Ptx~rYc{&~=` zRi=U`WZ%=+mp>Dfk+gh!?%LcY0i{oA0e1!rv^N??Rul3#q4O}lq1ohvyl!9G60|f# zJzTyNsBIsBQVonf7lnCMCanbejjRklO$>msP;% zs<1bjo}{~n&y?|_d+wSjI{W?&oWlDY8P^dlG{XutpwygclH=;cXtu+PM-kD>kUH30 z1wFB~HVM1Y(>1tCP2w*HSON>4o1@ZJckb6rS;^x7dchIRKIb04Zj{Fp$#-o4;noSa zKz6_LN5G%MPWZnSi|lh+53BjS8;9PpZ1zi-*K3R$(~{N(J^5bW>*!GTI&IrOSHD=$ zNl!-J04;mq@jb;84fjuY!D=HRqXueujy()_%mnh z;jD1{LxQciBE0R5N#*q4bsHqzH^E9Vz#Z^$e@JL8wE+3XjZ9pLPdYtsfS&nc*VEyi zP%WFRAZD=KdQ^5sqN9+u6kmvqE*^ zWTKWHm-RSbKlu=Fhacytp1^PbJUIj1{pOFPrc2{53?mB7c9)fpUTp+%hH^h@=y*qP zTKPe!&vKvQDBHyJaBX5wOTy|M z`{;kkBWaK9{0bMh)k`XV4}TKZX!T=Z(;uohFs35z04$}y`7E(?oq|wV4#+Lr=cwyg{>;$ZoPGs*&UXlramO_-*pThCM zG(-}Mg`w+_gztBT#hm`u7;RYsg~SR7sE$NSOwBrk9Ks^UGptW5z6z~0T{1(f#fTl> zNgk?LEuwSzzt0m}{fk%4sMmH-ehs7ASM4uyKPQ>@rN}~$tIYf129>Sx4~oo6;uqXZ zB3cSXah5r4?}QhSy_oeiAJcB5j%w|~>iNo4M~Der{ZF*hFjS5dVt4v4SYVUNRFF`EzDWyhT^DRsF`P6ZgqK4Spn=})M6 z(iVcHb-GB)^n|;I{Wojx7(*=pf632A12H4Fac^`kL32}-*15nqPX%Ca!D7aGx2UbF 
z1>K$?usk%SW7`waynm~r^mZKR4y-`B;5yUFg?UzVQGQiwe4;#VOYHfuQcf;(rP;!_ z1}eVPr0%QljxLr6>7DM@X+|fz5q*w?prQOg;K}t=%fQ|{AM>CZS+TEU*8A}KMA1aHe)SW2+6n3#;&*e-lLp(XJIbDd` zr&1b>_HwrRySey7M9M2HaA_1W(ym3dsO4ROj~mh)iSgThW(9R_BEWbi%%YM z#OWUZlwfyWPK1RNa#g1|E&2=SWnV)AMew7Hbui-K``OGvZAzW?PAbkIimf(m1(7w)1`x^j5rYsH+9 zdx`T)Whu!|4MTwl(SAaj`P`}>0%9j38l1#e!yIr4Q>M7ZR1Yto(-+6y>Rh($WpCcu zZ!F(e>|jY0pqAZTwF3eJj=9zp0%b5z<4A^Yj`4y34-aigda1hACduB$n$pzAJ}TGM zUYe@L-F@aE?x~|md5_(Vd)X>c>)hn2D^JsgW=;35)83wwdv)NC*0Lm9B&?5YzHkTX zX7sNLA-~e6(z60OYd(Gl-}IqetVAFSANPpzh(V}{&96!ID-vg6ZdX9a{HF*OF@0F2 zE2-`z|^AA=9-J8F;L5@-ge_KT__jorxbD;4mE49Shc;zdTA^@eM$8IJM z0gW7mY+1@}rDAhSt{I;`m>Kp`D6&Rjh7&hAbZJ5?CBg%p6PLmR_t6)*h2e=mkgXgD z0tFvEu=rLY*u(ZRcv4<-d2U-&3d~#Q+^7k)9mZOn(yhn8tc=iwTXcFwcEz-pKm(`! z(wNg2M)j<2Ge~!#x+47Hyo7A#gHIRBxW$LMmF?y;H4hS>qyP4v#b|FWkmmGwK{Nl(>)mX-yFh3I zaw6)T8KEE_*^(P1U!i6p-|L?pj!Ds|c22gKmb0|2V>>Vsn7;^U*Vc5cpaCIC&OBlCGBwSq#nDs3BukiKi}0iDMW`<+stQ^EV*kUPQK&W{Qv9RC zF;P=Q9|YC&VGby2lAx^94E@}-Awg{7@6zhvADu<79-oQ8)_9UlU4d1M-MqUA_o=J^!H8nKjz8am*az^?imihN%O=#;e&>beEK~@k+tj=CpXsA^I$nI}sO3#0tMYQT z!mNy?QGAt9~8YU5qy0M#XOAb@&3#E>ot`;+jh}-_~Q^i zGf%iSsfkb#A(Q3CQ^X%FPkKa0o<`YZ5WqvJS^F1s;46cDEjq$ zzVnh^Bf9bWC(V^6Cv+6)0TKWtmk=Tq#;57`SDd!v=I=+~{>f~ZiD&E2T&4nXngY^H zlCjPOXT*T*(xvs|;F5~tzPC(&vwAQ?@!HLKzxl-uabyQAQGcC&1E?V`-mhyRD;Cr* zcRdR>s9GhiUYjNR_E$05=qY6?yF)`}L9#h}bVl6aun*mmFTy}^55fKy-$!aMdD2pH zmwDJp)Z7SGj>5q#JIi_X)?c1pJB|_hK)CGTZ-}V2P9kh|6n9S1zq?)ut$J8KXsB(k4%UA zvnBG^1&Sq)^YUR%9|ZCx@O@L4N*-^#su#w52i7sN(*3>H`7u&0V{X{*@nsug z-K0Ifgjb!uVU0Q{{zno+%KSfvE~wYmSjjc#nddZ=Y%|UgH%ZSg(sMYAuW{fn zaym=DSv>r|MSuUBzlG=T+yX&yGO5Yb8EW&5@Xd%7Uf^B(Gvg8t{kNGgk>hDVCg$qa zEwOE9hmHLcYa1LV8y=?!MP9IZ>l7-K=6o3b$7E2$Rj*5a`mPCt&C9G-HJ|jXOJlZ6 zA_mIi6m<@WJ$prg2{!&7Jm1h@xn2u7qeIbXe_nVTe^o1Clfz;E5`=5{zMUZ6BFO~J zj0xfDbc}na*LR60x@%E0U9R>1+{@5<7@v=(c*(J%%smq;Jc23?C&E?&=7#hgqgQ^v z*|BpSwsosKCeq%x?frhkBt}?!6T+$Ny3(x6>W57*r#}k7w^85nI9KPmXf6n83ya09 zp1RN6Lq6lp1?=pu;K4dtx3Dyt4gE+?EZ5#*RkGV;&&9-6weIfP*bKX|ENPR?G%HS6 z49{Q+8lBeUn}Phua*wz1N&=}JqNT)jV*@YncnBEPeE{an3^}8UgtqNG|E`Q)X?6r7 zuD6gM=5et;i$L(XIy&v#%6T9{ce>I5H6QId{AKPv`@IvkrS1&%gS+vvccS#V4|Q{N zA;6>CPUpff0dH^2_#O__#u}T>N7r@3`ybshE(|NyN%Ksy@UBj%0MZaXK5ljeC?%B09P!Nr#K6n*}Cx7qBXo^kWI*=%Mt1xFl4r_ZH zx07)gS*?W5%u(`7xW+D(gyX$4-;NtCg`-FFIO=1&ujG5jWn13Hw{4FbJb{QWZErJt zmwEvL5_PAAsfR~AzO@PsF45??Xdabysq|*N;qzAr>KWHiT!g&M?;c-KF8~%n z1aoN?GYiQq*AcUsdBpndk>S`o%qqb#ic4ej+tk03S-3B1PPQMsNizkf-t|_ZQh&7` zl;!L{i7bhi&I(gJ68xJ!1ePAXO0Jyl+Xwu^b!I}0$6OfjgTNTp$rDIKbKWvtYM}D; z!y}Dc>6ZQby}%gxvz?uCa7p;wmeT^Hq!|ql{AY=bQhKt@$t6BimP<@+u}5J*clHzn z9C$K4RF&r8V1*4D+Nf{#+<4QP5DK4e0Z)(3o$*m#3R1Gur8;CTLe{z(CDnj3R@oA9 z9dHLQF1dDjyBUHLZzj7GTR!JpFF_z@I=TwWJrKrGE6> z>~w^ z5H5517wey-frqk{@jHd3ZP5R8_HP3iguhfmOxF@lS@J$xL5(oqF050$sL#Id*sBTM z^8NZ-1j}Ub>OOz1J3pz(u6tFY7?QhJ5-qg&7-=YJgpct8NDxQ*(d+4sx^4Uy!Q;3J z*z5;a-!kGuNf<=)Y1*}5L2JHug#VL{oyy6Fj0p!CJ2=I~(meFk2?K18F2)jM90O?t zRxWH}yzb>w^i>8lQ-Z`RYqTHw4r@Y;9J@FX3fp!w$r!g)Y00b(0}-X4jyjm0X?)OG zs$daHiVKGI6vaV9t_3cve$p`?vw4wfZ)8Pc0+1@xDnlCq1m@DC+d4_Y`QrTD@}=;n zyJ|IlHLjbui|qP!fEMw|X6cR{P|XzhZ2e|O#vzeVnueIxvn#n4T{LwIp^1$42ycyl zS5iVxRIxr%;;c`1OS)D8o;?o|Y$pDZJd_&QHoIu)iD;K3ItFohgH=RK#mGm@&Uz1~ znr(Zg-P?GLes4Q4=~Yw#-s0f!kbCSg!sE$Oh3W9junv}a5+M$-gN^p<(iK!ng~xXn zqr>!3VGSLs8YwPMhS#7Bq8V)ccgkyz4 zvE^I8NV7wRwdDCvqw_*yT7@4q{hOUutm;J{5`^?y--Kn3@>qLRChnvuAho8O81qW1 zqIY`gYRVSqIZ$gxf=jHqbEcPRW1eAWcq{9M@TC>XI5mO6Qs+I3E0tW&IQ&TZ7SC`X zV}Wq{F+<$Yg%@f~K3-mjsV+Xdd>_%3aGpVO1Vd~iBeFYP`kIAu+sZ;y1trpt#MjUF zCxf&gR>(Gw$n?YaGDb!cyr*nGfo^Jzmo^ZCPsn2h8~E#>qeLfKQ@mmm7X~DKdHPIZ 
ztgQD@j5bCcdY+zMb&I`)DolVY14%upYsL4l!Ad{nFbqgG)bOMhI?Fys^kCZ2>cwN~ z*&1c*FdmU#6!r&xE0XZfNw6+!RrViDB+O4723AE(Fq^p>>07lkDqYTrvTZ6oT!L$# zOAA(QwhP;|^LJejd(rEViIfqS&@cVo~F+91R_I6WM|F zSpwmfOn&7AOCr7KsYQq^xHrf}ww76FRC1wXsD*h1S?}k2dZS3^h?qDwQqWouXyQ5G z#(7{K6kyYS8w*R?8ugDD_ zAY1u@!tYlos6+3`Ob$B?U%j5z(s=Z~=1=8I3x`IjJ9;VnWTo9AOwPLqGdu+n z&l$`J9?yN)(q&Yu=NDRpVgibhhV+9qPTD8t<0XcY>0ns~_1?)vJzft29w9q_^vti$ z#B^65r0ju%w=Vxs9GxO)^)Hd3AnFSI5*%&Aqt%woWO)DCb~E{^^2(c*fj-g6-Azs; z&iJBUqfx@E4%-}S8Y}Tf1UPRk2l@?UwJ>>|P*rY^MFj}M=tW1UW26_3S9W$P#V2qn zIa0rZXAOzou{C3n9yNQ$2?eNzudY64myV_qbf20J@vJZPlrio(#*2B+AnWgL)^aaG zwS0C6-Af+p7;#QE7F#JXjffXkE}6u8>n5!~#?Gx2ZWlSC4EQ|CRou%o@4%Q$QFRO; zBWgS9V6DPmL+WAjOHLxzC1ZKvbmu-GQ9!^q2XG`K+;@RfKG>qW%9vq9bXAL}(oR*=jf17cnhv;6%7fdBsQPdF&i(aO6oJIENnk!rcX zi%RTOP?%A%3Y%Y)eyDch4Xx8zVRO!IhUc0|JfBcG;L2&4Y9KIm}iQ+^yJ|APP{#$6|ePQfWbahi3mfP=0C_R?XwIw%kPK)f{p4j5t485V>$;;F-DZpRZ3v+z7+Y|3UrhT2{f+-xGh4 z@X@n#{J>0)=;xO@8RW>Y^e#067b^$XU>PB1$+CcuEvf@bR#4<2@ zMc|cnXT4HTbyFwio;TeIHGM07bVA=Q>qX5R`~@0O&`Wj;{suu0DD`c5{UO@ucU&K` zh#UlIC;?2Jt=rnr`^xCAmEy| z>70&URM5oQ^}DO)!@-m51jED7gR-|3zC$rGE=s?1LZX6h!8M`Zh2+0Aj_D`bpl5VM zBPf6l^hV@+SF)^RL17l-h>K_dsa*ECHw4#0dq;Bm?`&~;5Xg@7VS8)p=MAJLy^l`s z&n+BC*Po}FKpPVRoxO^eDn8);ZmP6G%f{|3_PdC*jZfFxTgGqx6}J z%8C05PWyfo&-NfLStfA>y$fBo=pZ``TcfUC%17_w^8VbXeAyD}<5mbe8hr ziowXbDIbw`BJ|{kxemKb9v-&h z!U!6zz{ih4!p2Lc!nSza@VV&VI_TJlh`-=&h~Ha5%L4S?PZcI(Iw3YYe3MTBrXB%P zU|S5$4AIAv8{OS@ApP|+HFcRc)uD>Tj~72#6e=!xhQ8#f2Bl^`KODT&dwAN~dmyDO zPWtKoh_KfOlQa=_(@{BK6lb~pPCI}1Vw%Km`F?z%7Cw&b1MKrk=m^+_a&IIrt(x5XRgBUq(GM*!qz# zOL{82!_Z4?-22i2eNMP*8|^vhs{-hspJeXf*6pNIU^m_W2XiYYx($8y|FQR$VO6bb z+b}5;kP-w1q$Q+V8Yd}I(%lGxbT`N(B&EBhC8b-sJEXfMC!OCw*WSyUgEh&=3z>x)hNtWT^`Hy72=1D*6nRx=_wqq z1y(H@XO=VT54AiTIfAn3WI1<3k>@xpKeVZ@B2X6a;HK0bW$*K|#Dr=WMm>UrXH5H=lwJ_`8=o`_zXl~ zS|Imau~Vx>c^vr|>71~2g)rT*1Q0UQCkgka+;bhjxhvgr@S8H`yVkfB=&u5mB3ko@ zYE-ky2SdDQo32YkEdCpjDKbEV4BDneEz+Hiq_@?)N@F1MRNB z#@G1_q>&Y3e?Qyjsuk=-{%A9{N(>EW>AUgCvom1S@}r2zZRn{6u{`K(!2Jej#qe|4 z33lMK*2Jj&UYx`kpoCEkKs`NbzNz}dsf69eqDHeApsRV6a<@#n<8Tj&wE3Dh8TYPR zZkx1`Pz9*e8wZXko1k<9uXq{Td%P~mPsMga&sUp%3&@#alkwKC)J1{yls7n%p>#^?Wra>CSH%Z*ZY9f%0}^BCP_b%b1#z1 zM9d9ru0V=hO^cVc9TCyC6U&eiGEokslBO(54k9Hm23oqKxB=mM&S1*PJOA00w&Qi- zTT9KJHvQg5K!r~&pQOjk`z58Tj7sgGbs4vdNjGhP`t7JHpdzZ7E~vi9`(ikA{CMK~ zg6QoRBmU-xg`bt<;7=$z!!A4CRqe4huDwgTUKt_XU7{_Fc(EpwuY_8g?_zHk7(hxm zAyZY%uomdVWH-g;aO0y~L+o+G-uNiMRnB_|JLK)&O?_WYYG403oQ8Lh#{V% z5W7F<1&85gBHk9XlPV`*J(0N$-fY&h#GPbje!8|y^Htf`W>DEzQE_ZnhWXog@mV_R z<#E?(9GpISy@ks)AtfMXHbmg#=3Iwf$GXGGY>~$|HUL9ehnGoqJO<5KQ>T6iG_f4I zD^ypo_qA^eymEdtQD%F&d1YJIa3^sA_~CDvDFQ+k?UHf-=e?L-gUD zXa2B!yeMG15o#W6rHRIDg>SOW-PwF-x@TGA*l=UxoNH#<(BW z8P72=jM}%lBgJ_rn22?}p!6_cSNm3DpgbzZ^oW=o_on|~^U(%z{Z;;vweMB!5)c~O z$?#oTd+Fvl_h`QLS)Iu^9b;UtnJV%(>a)Q&B1pGMH@$C3TOdxYi zT0?amwXJ(N`qup7=<(9yvbcA6jhNx1X7NQ;aH_f*TK}v^;7lDjy`mi>|b;i9| zEq*q6KF9^;V^vWfHd10nrMX&SdO7W2s9x0gxI}Bq$(M`MmXV;ut-I25i*>vTqvPwz zOrV}h7l{u5izd7&-$DI&>%hn8|H61qD5p_N4>M0JN&y-fCylXe z5}u;ipm1G0Yg=v0`)>XM`Eug$XG%AFJNt5=yQsiD_kg{OZ|i9hEAPVV7`skh-DD=2 z^h++n1y7=MzdxRb@JB-uY--RY)`78WCgWi7#$k#;ROzv~vgd#7k6t=SiI)bN?s#>;P`i=E%d_4QP zX6}a;?{lW<@s`Nkkvis_sYZITJ0mR->jGLJWorQgTV=9%u~zL*Ap)+H1qn{3ZQm0K z3acsnuRYYiUy^@Z!_+!%ri{Gq>w^wqi~XGGj!NV4UPZl+lpm2XW9t#}T1i~G%ig*t zII`a}E4O?#@#(`OZ#^-NiEpTi%F12Q2Q6Zw3IGpl!9r&v%J}Bp9z_cKS!4CUCg5h> ziEwhYNjKvy(i8wG?Kb)?MKj;e_-qi|rP?0|HlTrpO%in2cV!-D73(fq)4#7I>AgsONla)yWK-1xL$M*RK z?jf)-wN(u&3oINaHKW(omIxPrAa-b)g)&|FUTn_Sm7E! 
zqaR&_@x$N4EX6@$5B;1MPjfp~6}J@Tu-4m&sem^HM;~8{h-XYx#KqsMi9qH(xCjhg zBHJ`0Y2z{8&DrZ^Gx?-+e6cIe!WV=`tDOggLp2)QK>JPQ7(Jm(iB;D07wgQB%>$q7 z=sdh=T8`bC@oWvYy#*LQOHF3tSd3mI7b73?AGZ3hTLY*cI$+XvvZRx?07%B~p!$Yc z4b&@wpvGpg7SCz%Z0-d?Pfm4XZ^3Cyyh+LV5fQk}ntzH~OIn2fvCV1Vx1LOy;Z_aM zQd{?S9ko(pn9BO4lYU>6zu%7fDK|esk<3WiCU4|NMgG~8By9yD+Afdki+H8^!_5nP zMh<>6fqd6*0_V-tRYZ4!{=$bGPtLjR0+V6Grw@IX5paC)|D1IAyEOc0WhfqNg8p<) z8(#k^R=w=bvYHvwY6C}e_l-KE4*_Q{T~G1=uyPkzvX|U@Zr>gT?n(qhK?5m`W8K8iWA=jLU<3DtWH++;!bYr@M2qk# zHARgpGeeJ{e&;P$G#Yr(%@;au#r*Hsafnp!JiXXrO)<`oE)L!NwEfN>Z=y{2WETGb z0_tc!Vn5z|UBy7>bNm%}`{iIEARMbb-sS?H(^W z1l*c^+mG)?DcOIRA?d+gogux4a!x1%CE=qpIv)PJ-KOVAmfKu(QmK{$N{KK!40n9j z6QV3upNq1*Bkf8_lB`HO5cT@}L5q1(nf zf(k%DT19D{n{QP94dIFnCbs=+pt)Y|xI9Ekz^oB4b^`ex1S#b)fFL)Fm)wbOk%d!1 zg!#akak8&1|AFzji0wiP&~XE~Xb1jhBK{7L_pm?GXGcw-A7)J_-)sG@0q4&C{T*H< z|7^+;#0Y}`+d2V1h+hw9{hv-T;FtSw&nIZ5n}_wc$vyWXPo>YHQ<|3UVfmH zZgG9fY|aQgXKh=_kVZ?w*O$>+j&$DzX5Ag#+qO)dt!J=OEU&ukyDGPpl^ga0#|^`U zX4f1R>kMz0Z*BRRJqC&6;Hxhk7Q^m}=iLF_+fA7fsP;I#YPt^l8OojTe&kR8*^v6b z*Vg}JX#Vlk7S{p{>qAG7qFb5WN1i28oIq!biNtukO($@oQ!_8ksLVLx_ItapHUEeN z{$TvdpFoF&q6se`O56vc#D{C73cTuDCJNXup_uq_z0Mh3)T}iMuS=FzUyWIeQtvq_ zWCMZr^W@Jr%bEJglHLdE=eC#j?$JOHS?LPo)q`c6U0ZEl@j28lz=nQ4#&#kcQ3RlI zC$Au*0e&$NV7u=yb~Uj-x~0Iv`N#F;vrCk5_yIp&;zQR?DCyr0U1ID2ZF@IH9A&54hE4%ypOPe-vDQj{-sly-1dR!w@oOA(-}v4 zYf^51<-7lpyn&eB`JEP*Ub(7o9^xy%Qs}Fc1?qGxRbNuQQ*p5U*V`SK%nX@lXQMl?rydjJi5THSf~Jm z>Mb_~vwF)Ft|499_b?m{3dq+Gq3@kY9U`1(8W#{5*HCLd{8_!(zj%^=FiO2Ccg6=O zEc9{jbebvCH`t@mu_l$ofHoiP+o)uXuG?yHC!5Pd+3&=1GAFaS@t9uw8~EVBFzP89 z+`m%%e?>6aB0beyfGJ}ENKyyqo7mRAX?j#UmC~odF13S+%F-#RZQh6BMd-b@`F5z8 zTlrrz*VH#fA3lhIJSSRlS?<-ijNZHKJSKMQ=aL=au+gU;YZym{mM`l8eEaEv+Lo9d zoL24$d!xebN{iol*o61dg#mym0AeEa%*pf8bm<(?O_+E{3=lMhr;$z9UYFh=y*eTT ziPTU4{oggDdhwqn7#<-3XDweoJw@8Zm?5qbu=Mapw)9xJ>AYsHBy{9oIQLwKPsUiP zC7&-ka0P(f01_{;Bb-eaG;D-^*#r0oWaMVLg*&Z9Ge5LCi*^r*^3{HN{%=9}-xL~3 z3d^no^rSj_M2G8%*`=i=^YlfD#ugTVRc1mq4>uh(d!q8zwZC@8&H3$&MH40aCCTIh zQK8?pdTm>~(K(^rNtegTGej{*CSoA6`G;r!XTxRw^b|I6q|seqcbv!3O{@F6sv#UK zzM>C)<(@Fbv2s4#2h$q%I8IW{^1Lg{z*kcikAJM;w`t;OYQ>Y)lf7(5ubii4d`56L zQO}Xco<9?y;E7Kxc5n$&R&828@|n&tG2T(Q_v#lsTXd`G0epaT@s;}*9Lk!}lg`IL zMG7!5rwq^lXzl|nA0|zCI`a-D`n)j4hy!pXq z4*X2{)Ne7VX}y$FJc>W>3_bd{m!x+R0bg0EMzqBjttdAK7}2++-Iw68#_N-pS?t_> zMA|LxsKbg8JGcM((DV~{>dTvw3tcWV?bE`Ei>ct_e6e64f@vTs@VL%FENIA7cS}h! 
z5g3xrbFcB}L!Yj+-%+8PZ^v5G$J~0mZ^VJ^D0Lrfnvk+`Y4}9*gP=fA;1f64Lwz@P z275>-=3N@b;00vh;b)8^#dYtW?msn@?4G@LG5G}v!r-0e+$qp5#?TJQ+7GZWfOJ03 z`Mr852O$&eAb!&=2?*=z%*463y_(yDGsz_uWT3a?M__u ztm1uAcgek2d=70Ce|SP1lP`9yw>)hhV{h$_0P59(W8~#BKCu_v{`YKDldD+j+Yw6Z z{x<=M+~Xq+g{e=rTrT_I7}{&mU7rEvwH5#)D2K*H{z`&NI2nAsTFRDGQ$28_kIAUi zpx_1t_K`8Z<6A=|@~bl;qmE?)XsW>2Ah9T-)v&hXP-bGS#4 ze)Hxsj6ckn{)svDIu8{zz{H>2^Et6sJ+WC?U+>WF5KFWITA22LZ`Vv9w}m;HIt(ru z(5jcoc6m60IFi>BI;g3y~xLl6Rv3DJUueU83*B8RUL04m-#2 zuPshI4AFmMjbVXDh5AwF*B3?>ZftDq-QyHHv+l*&F96XP9L)ABD@^i{($jB{4eLWp zOYGfc?g#(UgCda?gz+APqX3>33F)sqBrmQ=K`ALT^>!o8gr84aRiFIVu*@f9fC-fn zURq^Z`_F_1xQrJ$@ozd1>M!$aI`ABp7xJd~zoSXtSoqFT_Qhq zytG~P-^-rO?cA^(Zd&an=DC0>CA;ZOmA(|wNr3ex*v^B-P@0yH{0`usi^ zTl~Ku#lX}4qvi3c4r?0Cm7fLX4Q*0NCdP7It@XuM+ub#0zb&_X!~fxL@lkIrLVF0o z-aU+)ZmASzawx^|xsIljv@zuXD#A~g-}D(Xmg`Hm+jXCVIWV0d>4y3vdep;*=Eq~o zRzQ1}B=q^JKFgn0jK_2Z!do};~PCvtlxP-K-~eXECj&9il6~O@ZWh)G?G+b zB0MeEOajNfmbmiD89i)=*|C@(ac-iNl?q{8N6dPWE*I4?GoCLnK8St&Y7Uj+ zwBEtsYuc$BIWZp+QR~A1Jr@J@+8{82lGYh$s1Upwqnc7Yq%DgYZlY^}w(A*G#`-NK zt+$8{qgY6_z^*B61y#+~=F;b!oii-`jmLV!=kKUeeHn?}k8)i$7O6CEVNWk_q`A|) zfwpA6S#HxxyI=z4{uAcWaoBrl%M8};KwCYr*!RlxKcYFCi#xa)QF4kVo zi(2*@{Tt=v<@8a|&aV6vWpwo_khLCur(rv!aN5bJYD~$Dwd0XJS+e=|tg?FCyx?+L~ zvL&hh0pEE1j~;-LDIkJiXDU);deS2;5rh{s?@3Mq5E0tm>+K=#jk(;OUhv&Io!TsO zxxBG|GcbvbzF0Y6lg!7%V>X{u!Ma!17+%^{_U=6IBQ@jmTMc~WM5&$DH{sV();zL@ zkKxF8Mrjb-XDdV3dNsj5Pq9A@C~~F6?$%#&n0zZvJ90PUVgV+@d|b1A1S{%^rGtP@ z38oGetFB{(3xXkU0x|vTOVigN7+T1)xdvhbGJ&PyFMfE(P{zgw{xWBbJorTJkDhp3 zSt%6F-6(T~t%lA57ZnVKiF^S?=`lXnH;qibhw)vArb`IJ#3sz+=A6n6=s%2h-l8os z&+vIgl<9G`oI)Dw_2@!2#$~hgmD=ls9<`CjMv0w_YXOE~gSeOIqA^4u2C)y*X_o7?m|m~UF+`f{@8^N z5pFK$1vYb~)N_;@5e%KP6I~xj*zigqvea3v|6yg4n>)OAif6Aiq+M7}`Su`T=&6G5$QEYwB zfOICd%sYJR&?mrOlGtN;a&G{G%fa;*RN`I1_WkM_wy=s)&(f6Wpu zN5KZG@v$pwUO=Pa_+&0Rsb@o_!3;$_pT&o5dBd#W)U&ENga-pR+`wlY1u{0Kj~>BI z6Rc}5YORN1 zwY2;p%>TULKYn_?fTiOj+eUlj?YJODXn(?gWsLEZ(r7ThsPQe2PvQVK)HYsCRWzou zo+68iScs(`J*eqL2#mtJao%dLrF&GhWUymBh+pz^uFH4O@xkPsrN+0<@{LA@#guB6 z^qpBbO>y`f=QVAIUsA>1)Aa}v6b3PovXPw`i@Rz!XT_}PSw%&;ledHm5m$j0GIk0o z9R^b&uL}(jcu9NPPd9EB+)4SKP@nT!zJgny!SMo2qwg>a%G;d_slpc*$Gf7N?{S=G zN;;}80@wPyqcBkw(RpXmYvA47X89WxmxdePs!n;R(Anv z8w>hAeM%6V!X|}@W8~9nnad=j#aA8KFR+*R(Zw8H5)_r;?GupADe!92Df4J^ZeTUJ z+17{bwURT7rb}OxBoivO5c-=C7;JD~i8YkVQHOYoF|xiN2er_T(AqyixuF9z%7u_# zQhc3qdljSfSwjf(UiF;PKp&v}S!q}jXvAc9Cq|7j=T zbbco$Y;XeGTS?ZeNOk=~=i@Cd-DBH6h^&m|W2cZe)tuchE_w)TKi2c4A;*A>UcI54 zor*P&b|Yk(XD6obdEw)T83V8}6ZNGHxuT`?S-_vE{3F;~EFEwTg8XkQ3Up)g=f%oO zjg72JSmS?3un|Y(dW#f%_#_g(#Q(azqSD(7Q&|#(<^_4;DhShB*t~SW<}pN0TH^Bx zXmBZ;MD}WZE)pB$hg@3Y-Xwe#UrzA0SQZ?(23tO;#aDBdE`ihMYCVI*0VfGM-n&jq zqVJT-=$Y22neWxTJY^%qCSO2@`K;T`FMRbz3~$Q ziRd&!RU=4mT0oA1O=j52I&6U+&5<(<+Qk*@njtnI5+ZA4xe|0{;c_cwLtTX6rq9q1mZNe;VHQr>>SWQj>g*x)4p{c>WE>+Lm0wBG7r z$2de7kAV~i>w3Q>R6&FLr370!Ha;KASC*uioT1kh@=3fx%yv-LfuSZ+@=fB~Go#TI zW@gg<=7%UEu1s)4f}Y4}H|fs(lSV^lR_C^gwH0d335_&1?}D0cL|_u0;?k)x^hV>- zuv(ucCD8~_MM?7<<75(FwiA0?y%;fKNPSY0yEqkJTjH!8*_& ze!UzKx!i@?FA(#L3TMPHMoY5B!lAX&YD8&k$ibft36KpR5wTH}`^+|5jy1sM3pwuB zLhwm^*GAwRs;VGfu`e?OhMs)PaQFzeJCeXB1cgQOmIjE1cG>j3(6~*G3}=f)|5Od} z_QC94em#-i!}<6p4EUYpTp)P?+-EpmfjVY50w>3o7<@t|yZ87{6BElq0}bYLhC zY38T3M_~)s&L^K157=BcV3bgfQDsA@K(LgMdtVoO+M;5#MGc=;N9&amFVkzYw1qLj zm3zW)e3VS$N}-faCJI-(G@X8}HEwJ&R$4vOUOex9i6r$oo-`OW%Djz4LNC@}IqM=O_!gRSJDk<0-nIrE>PAEC)LoZ; zk}IpiU_Aa^5_-SGa{yRblpH5se0ao=FMj~1eoFM^L*Qm57EZl20H4J4T0!Zx$Vh8* z(I%`R37xoZSUv+WllIBdtD+&#uU+F5DNxlo%q?L^(Y2t?Kyv+f`XMmJ0PBG!1w@vo zRQq)23$RCMKXG7LytX5erk$aBP2JAZ(PCYpY4s#NAuQXiASYaArPGf_Nlx6yyXTmi 
zbpDLl$<8e7#)F?2apC!_TCJ>UpFIn!FHn>SY~Qy`G>}*V z7PFSuk~7JDD!1_Y?&S`rTDfyl8KVO#=z+shu_O0MY{qQe0iS4UKdGnB73chvc`%-R zedV+qyqS%vDO}pt*>VWGyOyg_&=bxNZ~9az(6EsK4ZMhwdAs&442ww^2F352ZTWF^ zjB!Wp-s_eYjBKTXczM6=!ZcQC0mc0LCREVd8z`|_ovqKevPe@N#H2V(d;~wcG;$Lz zA($3^zFObwz7~iqReG22O!> zpb!2jd0v&t^tBEd1WuhOble_!`RI3gaEFnBUy>+cHfi8tgP&ToV6bEpTh%pQ3P}*g zy|nO0emMd!Jor}8Fq&pe2%W2y?HlYmMxaXqhrp{@7mVn&B;+(Xm2b>o1pE3^STrfg zj2#|(-_{i*QZ|ZV?@rjQR37^VF=V*;q*{C06heglpfW7mFS52KqZ`zj0}A8lEiIEWCCvkPZP7E_5Xkbq(GUUDHC0x`t1O?fd1P?TtQNKg}O0F2~7 ze{8+~Axh6DnI_1ftctBLy%8vgfKn01N2*v4oP#~oSp?X1@_{EEO`x+^pth^8NW|`v zo0Dy2$CG$b`$Tr5VQg*^T(hGG&fG_X&JrFa&dWOFkoGN?1y|-jG43$rvIGERQg~lV zY|t;h?CDgTm<;2j3?Lq&|NafV5unlSrxbMSIl%mAE&gKHlI151Zl z_r;A0?Q!z6r+6%+Y;cw$vebjTrB_xG5?u{>3>hNaUFG2(g$ZqM-5vG4;!x%K4ExpPrm|S2)wE&iyzm`v9)BZN;+F zexZs2!@~lHMY0bJw-}D?C3Du>ksnrx1@>$3d4eH_kP6mjx8n%L9_KLtjEB(c0|}7G zeIcadjFnGcb#pyfN!gB`2yZ>;vnGB!_nem;1~AY;cs_#w;A<~cD0SS_FIumi>&bUUT!lE zqLq@>&_Z04yvO03;;j%_8oKX~VBtt;zACt0eZ*`+#T*%4L&^R@?>HVcPtm9xDIsq$##6M=Y|7x!Q$jo>9fUfPCWx#BM3!LM?b1CX z&IVV`YLjA`W@TnG3td4i(Jby7?G6UQkS_!@WxEJuhvjlfJfx$9)HHW+VzB%OPrEym z<&Tf5hpCLf#?*A&=}N|+bV@0FR8y+ujT+OUbm!wbnmuMx1z%$W^C7y^u+<x>~3kJ_nhmNqkMq@r3NW7RKx2vg#vXwPaJ-Z$8Y z*x#Vium%gM0tzaRsAtpd&ED zF+-mYL~Q4(jb7~~JYRYOUOg^Rl#v7cvF+O9>^KBq$M&CL1233b&~LJR<@F@)W9^d% z3_yGz_!d2Ecc;8P?xQrig^;=#dIwoTXPTEk7S_0;T!-(aPB?dadR zM*KPKO~Jf_y`4XVGyJ&@y4zU19hOH_cRvs3= zsteTim=&9MI~D2!@6b@C805Z8b}?r)8M=bo)?_oWjZ}{;Je6gfFmsH*Xh(~=;Q0tj zll?YJ8~K(0Vgq~X`m5Mc1U|XVa8KaGrNjgL7kAD;*S5s zIfwHhMWs>XtdQIJVZMd&K(b8Rx#<*i(6|~_O!N0a0qvRTE#nQdd8ZM($CXgj&gDef05n*r9I)%c(t18M2cW| z#M%VfIW;Y66zPdBD%C3R8M0Fzp?ObjHJOR0PO+I0!Yv2v53qGIZE>6W8`UsutLE=+ z*u5f`9~$P&WC@YjID0t~ssBW{#CFlP=D_zPIjR+tV|)wo5ON4d_T7(oH+NGzZN(p6 zKcGL!JQX^?6zmSytU_58QBPuMd4 z&h@RV`e8RhL%t1{yDpGmc-f7LX}O?E2E2gtM=hy!Uc9!Qq<$nS1;mf=Ca*UWEX<0& z(kpG{V`Coq@XXDMcs~$TKewOjIQ9aAL@t95?q6^OnaPSK&mAVR?;_qj;VGjELS*E< zCCsm%NtqWV(RDrS(wbuEhVtyyy`pWs_i&WwsL^45QuW9|Dwm^uu=KRgd>7;j?Q@6> zla~xFQe`}TBn93mCWWWwvci1gW}A^(=5e)YH1)J$uOhJKY6i-vzDh(i$Wie?Kj)26 zl5TK*KQGjl`|}I$)Z%h00O?e(k`m^_luLm`Jk0u#{P$#nj^}ahoLk!Iac}6=HN-oJ zT!MTAF8cLDxu$lk^iQ{bAL^jPClq;HA1jwkLJhao`;s}Eo}=!eFMlQv z@xS3!qYDv&pYQP+G}@ecJWJT@aRb$!qHDi@fJ_4^N!)5Wz5MoG(vVONwH`$>Uh3`kc8zXkZNKe{kQrLg%`jU} zg%%`Pd?+yS2&C=glqCN)yd&t4-fk)=T#~4a#`0@QTs|Algwo~S|O{)@0)BA zRUV<7TDA;)ctRP~2*4?4>sir!x@sx&>GQ`_y}NS`dru0TS`Bss8S6zA$G&84mDy{Y zR8tEi?c%Kf?YRlxq_L7-l){s)Jk+gu61btB;x_c0pYG@r1xDI5&%6N^;VhhY%8=cT?aHP1JKax3q`7 zFMHk6R+S1rM#)^p0ZJorruVRAe;U&Jk*{XemwCu6UGo&27gFx*wT_Ux{yI&X&1}te z^$WK}*+YZQR><=g=?(EAyoaE5vkX(6gRX2B5tm~&saW1VJ7!)cFm=_j>)8vF>8$di zPYR#BnD2v*)l*!;-VA8I zlLk6dn14j!GGSW8UL2Wa$B(7}Ri&Rh(7jZbyKKj1{!Y4+5)`LBjppRG z6I^L5cm)dFH?2iV#v4eH;GB1K8(f2_JRoT9?q-~7Ro0$=CA^(*xAkeIt-JPqx)e?q>#{za;GDhSw*XIaIewbgT~CKGWGQqR7>rM1GoV?_ zhW}E<%6U@w_4M*!&7e`PR$^h26*)w&?!7ZJ3wuX--Lg9Ba=yKC8S<9Cf)k5XQGpY^ zs@6<5{FO@*Ra7P{=Oqt80+Ql#P*X7;s+@7R|JQMczEeFLH9d)}F+8M;lG~mOi_2uU z!h(aGB%+i*V=(}}UIamfRN+vGjMQdoNaTTELbao0xKx~i-j3vagMN%P7Dj~QVtlZJ z`uz^*oneb8I{?#2VNylo_t`<>TT9-4!q|5(Cfegq?i1v|jTeLLZ5JQR9QC ziEn;tXb@~H)9u}fDtG+NDk2z#E;;=HY zrFANOoO9L;jXn(y1>>tihPuXB-(LInEmG*ml@N315GT3EJCHT9z8IYZHjfx2i=lTj z$b?o5u}{%qqFG?tQOb$1N4{8)<8YVwW_G^d<(A_yzl%^RMd3zA)WER~QJ)U3^TgDU zi+m}Sz?Q~2KWBc-ngt<+RE9EubrJWrds(HwB&9nSqJgP#drL~vJY2&MRkM^ru(w7l z323Z3)ar|&@NLjlS|!?)*ADOro`RBoCOI^3r=$rGMlGj7E2ygu5R&#v6Q?jLxx%eD z!9$mKtJrMZeA=sRoDM~h!KM=2_NUWx&K)eIXz3{vU8~OT z2TklW5?>=PaZZ9VyZxrC&|0qmIv=!*OXvb2+gPDteS0w6p)f=Rn?zp-GEx_UOzEP+qJ^(7 zY>U{-C1y`?_M1e7#=;`F0`B=+rR2)#1Hjk47bz0gP<3AJSr4JHx>A5tD8O(l6syc% 
zMo{6X+jW*{3f9x%>07=tQGDVMh^CVN5Tz=QiE=m3+TsDn1A#~D-!V;kW#j$WBaDeD zA+iGPT8pkJV^RFCPSEXkf(S@)pct{N@CWldv0Qe%ccg+g=(wD(=&1`ssIHXH3dKRQ^;Ptx_ONkF1=A=Q zx@q(FKjj$f)9`qYvKh!~o4!!6*Ir#a(JF>P85>{rnLK^`1-FXfBxk8|cCYRl_f-_= z6D^@^v@`f4vRLLSKCR!R)ua*KXvjm8cUyImB`uS8}oR68?T~gNlp2mDa3LQ zQ`J?9Q)m-y%pQQ5kO0p@>=%K*c;|Du=%cdas83v)D99SRE11htT&zS4TZdkby4Gc` z%{cSzY_e`r5B2ej+yX)*VORCuWjRVy>lv*_`HIYaIRH7_H}iAg5rVZ@OB9Jzm(4oj zP*;(!TCR|8hghiCi{r;_)_8rcW1Emm=496oIeGAjf%yrmP~_K;M4WzWratorKd~d{Y{Mlm=^$tB>!^KV8YpV-+lmT#^M9;!E7iqi~>~IzfW_B*WkL6wca>bENOF}{m)@?NdU~6DL^(k@lc|ohwHqM zUTi;DmJt|HDS=64F=8*N`&h&4$f*mvGk<$inHify_R%v3g6LdiV#9sd{S^#CY+y>` z)>DYKKA_mhbg7c1v8oYCb754WYbA@4UTmUAyClNR)tQnm!(ua<(|h(T&UhZ>^8S|y zRsC?}wGB!1*kx}rY8=E2D-D2QZhn6Rq>b?&)SK&)Qg|ZyO_4$Lh87>DsYNGiu9(2} z35}c9z2EQ=naQ8hG?&E8LH>N^eW*!><7hOXp1%JA!Dq#)O*Q)QxMinjw?8|lEd)-= za-dRIx}3=(h>A7No{Z#1Pzv6J%AIHt6I2X>?)INjvsSh$!gibTw><6mUc{YdT6=J% z;^?i)CxA}v*@m(4qypJ$gm**yjXNgzn&mO^t7EvcB~2OZ1WrNsqf2poj*s^~^a%p9 zQ^G{AfdU4#>zdqL-M17d9k08D=-)?0L}u<2t<;@`fl)cibTCWBvmY{88GA%KL;}$xEH&(bvYALG7qx;b(O`4Lia&-^ z`;VEwiB41V4|e7@w8Wf0^vU~Px|(l*sct@!*S#qdwmZ0(5#`=o|9ToCTQ*P}gvI%s zYlhD6VSg}YO=CxzUFJY|{Uc2tasbXne6?jpM{T7eoZHk zA7(%s;yYpAt+VI?#?J8LF{yD69&G=1$I-ZZ+J9DW0z;0ueMbi$`ZhncxhMKinlM&) znzWM@`NLZ|I6NGJY^CU-RWrsC1s0~S$A`2QvI1!*B;?^b2Ow1biOR;Qi17G3FM!l{^F8!>$aZQ5zdvS}#^@W|h81+7FQj_DoqitP!PtLcl zuBl(PZ-F@5L9J?K$X{_=;X~}WHbl&$!e+u;qKDd25m`Pn)5edc`h6nX@`;Vd_=l-? zwO-R)Mkyj%QXjVVqh}|=dap0YPVa>wdj%EOmB@9XHv=q5D1VhHZM+4sT6ju4)cN0smH&E8FOg@hj~Xm=32 zK&ED>djNhoB)sxH^Y0G8^M?b-JM=me48f0!C%@*G^MNjfl!*A`a5CwegHgu9XzD(h zZB4u)euqzWwb>bcruA4oR##XQks%OQKqA9usdm~+N)?+HWplw+Rtqg;t?_%&0e^%C*m zpq*gKbUO>k?yHys0iF|naw2JxDf2z>A{N1OU$o#1fCb@hMH+R%3#j=tbruLi4K>t* z?m4X|KY1``-^BWPdY|o>Al4Wt0!iaQ8J>nNO&y#(dj~nJ?hSALoSq)XYN|uK%Q4XqvShQyTP_X81;)^K zZuZjo>2qj_Y2|K1Wa%jCjt;kRnRiC&Q+TusiY7?hf+YzhsS-()4^bo~PqpB#+qXRA z$JFTB3q7{pZn5e(Di9-PU1TXnA!qhfya?>9@$nI7d1wZvrfis>Y#5O%Jx~|vaW|({ zG&Bl28lz+9rdV<5+*eR$C*;A%;o&28ACzWMc%hbu`A zGvIa9!=BUkDwpjQ4jUiJWe}YYjs$Q)X`RJy-OwC#)s>a`yw>*Vei-tQ98-o8F66uk zcj|=+;b-UY)?)zc#a#GYc`6y<-cT3~-1WsVF#)Lr3hQQ>yvzZu1T9g{y_jg_;oeEq z%!Bl#f@BSe0!H(+HblrNT-s4S3#>U6OgORR(7fd}yIMGLu|0X5{dda(*{gvJfU@oj zX;}MA(Ad?z_f-&tC^(W8Ia2R1yOuEQfwlqbVLLD1*`k>)<*}ZW^u8%Z6exjWBRQJ) zc(L(buD-jqW@khOOg&X%j6D=)p_mfv61(2jPiwccZu@NHQK-=h&d3E?0+wN?mwUO1 z-Us#y?dug^_+-og7SBFo%4@{#u~jpMlJ@RefwONw5nDC!V^TJy_B6ZP0c%`6O&)A< zfB{7h=WI_bMLV{iiQS`P>|)d3zO|=Z zIdvR;EbzaWc1x1G#8HIGAE2+2m zB&){A-$!w*S&8JtP_OiSvGDX>{Y2B{iN~x<$u-x2%7XjC9FM7Fxlrc90sD3^u0*2M zyxy+j^@h>~ainhVrzpjoyin#kS1QcJ8SsJ9)LEvWr$CmZo`MytW=*h%dG$ zf*T;IPtzWb>uzi2_m>6ysDzGfjfhUm7th2}{SJnFM%xzT0QxBG4!K&|DFmzIu_V6k0^|h~|YwM`Z?P(B^f`P`fZnCL&^Cctau`edHMkn6GIadva^^)B|sj9m) zr-V&uFv|&ft6F_iRtg#cCxL~n;QVa;G`wwvES3db;=*WebM6cRkR7Qx*K zuEE_kNI`H3?(PJ45AN<7+}$nQm#npKJGY&-_We?Spyr(K7=1jwk0)OiVl}Ze(m=jh z1WM!71+sGd83hLN>-xD`1Q?(7GjJ^lu@#?@cs4&S!<0dB{%Gtnik%SW-sAyk z$ebZ<;K9C>9Q68|Z9ToT#Tpdt96XI1MM8Chbhi z^4_NF7O@eNO#yR%eojVb$i zpKBUUTRA!2+NZr9>&kO?5_B*V)ye&DUSA52N@Xg5aDAR=;$F=M=CBRnTi&wIe5i^^ zau4iG-BufWS0xW=m2E+Pu6o-{?5^ncoWG}j-Gb;h>R5=`S23@BAjRDHx#VlR5GHNn z3uvkVi%pc&OSLR=j4eP1RNLAzyjz!@o>s`+WXHe zu@<)hK<%+vO3l$F|A^aB|1dtA)v`C2*-|@dzb)+8!oO2v=sFP#tIH^Q`7k0qSy?Z2 zkV zR__BwiLqNkQJm&ocDK#*d0Qk;{$1U$FaX(4xe)+4ai=~eaBOsCP3}g)J5;Hi+R&4{ z{P)1HAsvf~*FH8xB@lH@Pv*LhJi}$k#;tWIv9-Scm zN1>m(;4O#Tee}nKcllm-j_J7>tOhf0pb!FPsoQ>SG44_T8W>lfqbtIs{21d&*YiW@xW>!hs2?V*)oPiC zZ^F$~-p8q89bX#iK_Y`_KnMw0DpnBg35Z4}sFRcV!(krntpTwL_H^P{t{=X}aT)0+ zr0mhE(;!GlF@U;MHlO8}hWZ|gg!Jy5pVpHCXsW3#FTgwmT)zn}r)O7l>IM5HETnpl 
zcc0IQ##(N4vttAi`iG@5^Q4!5QtV1Cs0M|RqAoeB2GTGyXA-m}qDi2k4zP+{J$2pG znpr3&um{`+onWwckgfk9YV_dh)a@J#)H|E1l|G1MB3k*Ngu*1EMmiO*4BKiX=++TX zWJ1K1g^g+L9)uil8`twFsvwakJojS@cGY}i@cd`1fD^G*{gvB{vUSz~dLTZp*BJqoG^zpd#hWcYmyBUrV`d%yVA!7yW z-(DcB%yXuO1vdhFGiRLt+w-!*Qn#9b^HB1qL@OZce07^!u2kPt8>p3};4^0kuK^9? zi$~cW-DF0KI6j-4#eylNR{ed2!pcK+es z#=XTFNT~6*c_BE_klgMsB6(=Qy98j3Oz`lNHjF?X>LGk3jFf~nh0>zK37!N7tqR|s zgvp!Z1VU-dZBOMKs@w53SpMtR{@Hp$_f1KcFu%}?B&~|nojf=;o^rwgxm0K%knGg& zZ8b2-m`o@k)0993VQ%l_nBjP%-_CkIn4HIaVYNMXprwx#*nqvP#fFj|h8Wm_$eZTK z-iWbK5K0FsR>;8BeAvW@W~nK{!2+*gnEf9`!mL=W+Dqe1 zdTY!~oSilAi`F4U>i`xHjeZP6Ij@YRvSrmM7xDIfDykFCk|IWb3FW{uNcvTF&n9A3 zuQhFpZ-)O{i`Yadj1DC1r@@}DFRU#c@uOGm!ly42u|_LEKAl$Q247nyZ&@mK=+7Wnj;fi&EdBP;nSUwUx;YR>hth-U_c{ ze*}Zlj^r~{;x**9lX%&G=#)$&Dqb0cYKc&OP_v9b=nP3Vp&e%v6n9^>Zljjg^7C%J z54yr+!0cHydK-_d%e#543a)aVkyy2v4Jx7YQS;@``kYpV=`m z-)Fs#13yDgKXP7d5!5xpDNwUnrrP+X5QYt_o74$_6p0jz2jC3ElBXcohH-N|5T=HG zHOBm4tA>W`AD7ric4Y&q9LFR%)?VAL`!g)sc!XC(2xBACKSy3UY1;Zl=N_TZ#drsX zpYTp=%9Vr(9jK>c-HtdMA;D!PAZk@w=DfvNu-a6({q$wCVx#K~>y(eKy0mk<(powC zo`eEfjO2$fhRUJggY}NAB_};D{2~w|6vE`hcVe%4dbKf6DVKwxEv(JFFL{Z}>)}9; z6==`$l_Bdf>pWw}SzGrP8if!Ba{&=HZtL@(?+yWsp7e@z)lYYhFg|J+9<^E>>J^~{-&}^t*{{&{Jph0}zp^kb zBY!(WNJ#H{3<^jVk)(tBW{o zr2GWWZaFphn1!JFmMgL^_`Da?i(&$ldcl|x%%K(E#oMyYzX{N!s;P(>r+elhr~dnC zK2rMtNxF=1@zL&O-Cdn|A;OTQVp_i~+UAuH*vt~8#y;hROX>AOb*RHGJ4ypBu#Ki8U#}}IkbY;g4_DQda&E$TF1;`U_tza;p2<7DcCRKFU7m|J5N#el3 zBJza+6RxZ{43=doJ`3o=BsG2N?19$KClZKn0+;hI=7Ax+V&AZ4t4N_`uZL7;YFopA zDNN4$r@~mHxOvxk!gmWlfu^m=i=n!$Hg)bX)}ZE-*vW8Rt;SPv(MQ4g*(maCt>f5N z-)*7dBu-b#g9MxkN{;#-JEYme>2Q*1E5sG9JPQ#cPMQ|(`8qGAYWNUG4T&3_04Zro zQPzo4Y0>s=T5r(s?OeeY8$af*&JFPuv&wdj!`81Dw8n!LRJz!2?2cnLt=8VLR@trM z&qq7;Uv9ka`2N??$xHn%!iLUpw!Obei6Z6F@bT6DP^-Z>W(ai418VfPIL->5SRJl5 z;5B62N#bBu6qkA>!)Q5M_28HGAwW8Y*CFTgvb*3Pss4=95Q7tA9XQH%Le)t#?Y@ja zj5|8?oQ3&|xZlj1?Qi;qOgH>Fmj+`(27G+V;R3ojV+hwF7zJN;l3rX!b|cj+F$!?;!B83wc*NZ?dtaA~I|=n)V;ToL|;Tc0rKI5Y)MB)pn! 
zd(P7QfY?p<0iI7~A0ob(n#4RrX5&_vtOg-SO30&FJqTp}Mvl9}PejVvtTqhJd2u5`J|WG77}BZ;kyu3r zuhj!%5}#jM&*aB|;u;HsuqCey1Zl?6r^v9cjPX%eMc-_GMNp69WS^YK1FCgR9iK#< zJO+bQv?B7!-%mG3(_s@3JKmL_@f*Nex4gkCefRzg&p7c9B6AMM857hoekF-Xi=qjnis@0}+bVV(lek!%wGP^&0v(@8QFi5af z{^q(4yc@cY(716eLuw$1S8CJAy}-)UKSYK%moKBlXW8#$s=NupCW(7 zD3uEQJ?H(FEL)0l78P7zb$HwE>8!rGNG0LyQ!YS+hRvXR2X@-M=MJsVn}^!oQHDK* zyjyL1Ui}6MuH*U`+AB!kn&UVa$)u#aG){lxV=8=4yt-%5{n$yDsX)hFE{MO;!_vJi z5)Eb9On|ri^N0~|b!z(tFRiZh>Wj~b-0TSS{na_~{z^0rLV>cR_3BS6)2@N$xKf^k z&_r}m);4P=IK@jL|1BzuqG%#3$Fb{^o%;Fk=+Xb43Tr`KM*@M%H9xOSUg8L z=%MetH}-3{v{G2cA4W3DLFSsnA;eN?X`_xZE2d`pci@UwSN}95Z%88FQ=JIn%W>?( zNyFunpX{zkauf-wY0}UqX0YlRGvgh4d@X2^PH$*uhB|2eZjQ!{GgL|jO6;4`GSnk; zoJ7~j)Ra?*DP$cCyEI5dGppQ1d?yH~7=sq|@z>M}hve|6zOOO!0q2!f6gycEqFyF; z5=oyQfQMSaarS?-YJ+=Q$?AX@4wp?vcQ90?j`(E*QERwj;2fGgAqi56Ipu29%c_@k zwbEpTs31Fuj>+zuf_?GNx z8u>jlf@qI4UF&tK-Qr!Ac83gL&^=|f5de*%(tLAD0~GCDIPE@mF=?Uo2Zd}A(fNqA z!TB!ePuNP*${eYQ6X=@1b%f_SMB(6etPGXQwnH-mlK!DaPlk#dk3>wWpt|fluk7`> zT+M@qZ?^_hmfdm9Uogaj_gGXIn)SMg_DF6vuli(16PHmIkDsOB|U)%+*?w#?%r zS#SaT86_I~qbR5Iq74H7uCv0A?_*4&-)Ss2ymG#W7ra#Q~f=^aD6>Ssh)S#M65YAn4)ZTRo7;h!~ zg!Y)&&8rW#N87y{Z?@R1G0ZpQcvQ6xs}65+&5bR-t)=L-klLzQSms?BXb(o5>sHzt zL*~jkHHTU_*e#Qq5vd?a8C_ogU8RkY141>th*xB8b5~}vl%!N(HhL$lIL`0GL%$Lg zDya_XWX@G51TH@hc0EnLPv+ZJG#EY(A#09Nz(P;&xAbb&_?WZ3EQ#=Xw!2a^dqdrO zz>$Z@5$(|ufFzQ4crW2|)sJq3kk^+He9^U#PZo>?+3Z+idXX%{a6UYeVfHbHzg%l33Hr!kwv*6kV-u-KLlY5RP8WHwb?l9%B6xVIE3h`u~+^kUgkTvcE zML?8EOp_#+3JIDvA$;&d3$|m;q)WGIJaaRXYiqr%&@a?ZK(iaWQB@5`lwXSFOA!?% zGQ}MqPS>Iqn`RF{Gne|oOgifDE+^HtBlKu5|4DjIWC^b64|PqWCx>M~I2svwC3iYY z+`$3BBM5)XBF6#eGVpW9%J5VaUSQ#C`8mX?DrHijlKv>n(p~{}0xAJEds5qb!92Ji zB@Pm8cI8GBt0M3dH9ohXcvojVk>i)Va@dQ_*y*YQ53pmOfuJSUXO+Y~yV!LOGC|jG zz!t3n)n{n%;|Nr;b~$H;Q3zscomnYxyoHl-cU1Yz-#?b|19O#_0C-2oP!=QDDT>MN zuc;g1ydqBA`s{TT1Z2+Plxr|0I#JYFc~v7|!&@NtsSYsmag{=Gx8SO;D&tUln`5@5aM|L;O*?eC#>((X(7@jEWs zlu(WE)wro#2dE<}>@b1bo_TrZj%v2sjmQ=DtZ2xIYjO%`L4<)182Yb>w`ynzv|ZrB zC$O#!~$q4Oaipi6cgY(z08o6Vu~^|m|s$bQdPf#N$+x9TaI{_Tv? 
z^YJVn8{YX(mT1l>_U>HmFeS92=T54-Q-=N6vl~fp;3Inh>jRnFQ~f+_Fn@GMK6}Km$xMJ6b`k~|3@O2_o$Sc ztLd6Z5dN*EF6q=TVT+BtcwBN~l(;^^ARvNMh9AX~4k$)KNB9`ipBJmc;z04tX>46xBubm|8>IEoRj9aphP zX@AYQd5-XW%kX|Yh@F-Cx!~=ppp`{c%{#uO?C4Pda(g2|3h@D9`7c%ypz)EZ#X|>P{Wt(&}}3` zQ^*1KuNDQV$ZXbdf0W>VMWO?$Ab&(+7g1rw7Ws@I+Wsp1QikiDWOd+7su zV}_{V)i(_BVX^-#4Rt5K<2q$#GTr{FJw9d8{7>b=|4&E+2umrWn$6TBidL_DdyDqZ zE;bu_34r+DU%P=!YzM<-ZnQf{AtGFO7CRqLOEd(0blL#V56ALFzjlby)70eX7%+r| zs?vWXhBN`~6QpWC&m`sgd>|F%6ODZR|C8ESwiXM;E0!>rK1qr|XYwCq^R#6iN> z$>)PQ;+0eDi$7$5fhHd?{>;e;8q@velHSnw*?4KW`Je(MW2?|!_tBY@Nk3$_e;&_& zv9r+tc&S6_9V(efni`?dicN+m4c&WLnm23<`Ccu=PRfm?L7L~VMbc`IE4P;_{mXJ4 z_H@%hCe_QY1V#Il(1UVc1*{cF@`r+A1qC{Gr#A$-u41CK2JHdqs=)Q5T6M|@&lh^ znR8CvZN_rT$RJr#f}ZrQ4s0zIN0z3+w`^r-GqOoKw>Nq5V(^)b734$V2GLfY{@@1I z?APHW_A*6Dq|4A2_df$F;ks3a?)qXxpnf3U9ZWygq;I|7AoWlcSmv96vOE`FPkt(g zK{m^>TjYR|wSpc=5_|J!ciU2c0y0r#&w|Prl6%7P5S7KW<5z!U{6CL4=jg6`0BRU{ zQ~E7nGEC7nD`kqXE0a4zrnCP9?D0}ezEF>fgN+`hHCZLEC#LQde@wL1i0Qf7`rJW%V zf9?!GLOXGfoQkPAZXu+D$Vus*tUP6s1n`&7k0GF#1%A(f1&*KV8bvS8$0vP4k}}{^wlf z;@My)^J*gLpJ}N^`+&1DXX?^}QjcXJ5s@OXS3NaQWD%LiajPdsD`c}}cOrtq3i5Uj zanrx)q&XHaiUrUHjmpphmNLd9M%N?~@1g1{TssuHCQ^2C8^&K95V_pJH!(eeFhGZ` z>=ql<_xsLsJpKcmeNR#Cv;#TD`a~4t$V(;+UvA2R=FCE@5-@GS8g&7HGRLPNYFmV@ zj-#>Iws4}--17;Z!k|mBe#|0|BWpY47(PXaKIcH54mI$vK@qBC8O-ntI-UrPn@~{9RO|&tP3liyEyq} zzr1q(fm!<7Ilz*PwBUZ+IK6Oc3pi89TRq#xH}}7=>P@gFstzJQVJZcU@}CgqrrTH@ zmi>Kx}>1NJ7Y7oaIuXANnZ;%~PbP7Aj85fiMXZ z05~dC_$+P6_vcKAsIU|rTrroaSQs30>aQAjgDe~Y%$`UQxUOlP6c2)7#7QUmb-7o4 zc1)-t`s;VGv$f;Zco(@;)KC|{uiE;_G)vxrr3cC;o9k3PP@tblV1zQw?xn1^pmJCu zAulsayI*9$^U79A6+&ZgEK{o#gu+{$hJ`dh7!qF!@X(svN$R}Zu}(`vx-%j@Z(^Yh ze#(5#K7z^P0_TZ0VfQzU=MsxA!5vVzC3NAKUxD-0-d;8DO>aT`zed|;!^6}PW6(V^ zM(>KiR9z@2B#5`TIU&odOrX;XJSMZYMyl7Cly-~OWmscOmfxN@0ezkDqSN04CeA^S zVR)h`>ILnCV7PrT!plwJ!rC(`X7~*QEFaN>s34U~(%ToF%%v^5(X$O5WjH<(tE@E6 z%Jj$XeGt4YtNtyCnT;A$E~i^)C-zWj*7a;{_Dd!~@EfCL>0ipiWR7_!v$x21dj4`B z8t=`CfdK^Ie?u%W(hcAPG{@h|9!-!NW2>us30@#XE((B*jv);?Vp!^ z7)4CeWQyi~J-wNHPTyPA%v5g#SQMX;PtONrq)K(|R=yg@0>S(RHYXOVrs&?z-%g{Z z;wO>pGxAL$G09SqP=?Zbe8A#eL%RoXhZQN8RW54!3LH zVebUzULqFifAtF<`{2w)vM`%6etIEN?Z>`FDx@z|`@7u`V{g89VRAEkgG@9fyNJ5K z@}=JJvz8}QVHuTWGIc*rPOQUdyGi9mwyAO1=gDjzZP=u2Npq4T_?I z+kIJ+GQ8cCIQJSl-!49e_0uVYV=cvqIbIcuNB6M)Yr~YiJ+cOhB0j$^KJ$C%#;l{< zPZXByl%w-+{J5U>vRegG}#^a zu@}G}V@d@XSb;PphZp3ML$Y|RJnr%K=ba0fGhZO3A~^(M;!H_dQMl9m-1z4Aaf+4R)MIujq{`xfn|cT3G*ZW@8OlP$xXsnGeF*MH7`8#hD%=CCh+&Wp=_ zvS|sEh&;R{+o_q=+$-Wjom>;9#Q4dl5P-F!e6^`y=4l%e>708&Uu-73#Jj+%y|xU~PA z`Q`*-f)ya2A|9Xbe?g}4UaU61jltV7(1>=-K^u$KZTW|f3VL4|xG3r}ke(d%^x3ol zeR~!zlL}g7**6x+JTciyU(skA(e_1|pzs&_;Ab>y;+tO>q`7H*5w`=O-hY!@R78iW zy(|@o+Zc3WbMW>Yzu%ql*J9cE0oLr(9`OJHL+fK>O-3EyP@OID1|d##AkXSxRC5@@ zo`=Zk_U`Z`qk*BY7|~ZgD^Decr_Yj%1CwBd$vo*26GzMBbko6g1f{m{8ZPv(S_0f# zLB?o3(|v5yd#){p&1}^h`k1sNHN_DJ*AT!iYODSASPj7P)lFxl@86pWK#5||Nj>P- zZIb=8rrLUyY$O7AQDV%_%>vQ2n;(gmM?D%$6vzvIR44 z-;1mxtZv*DV1;wM;vN!xqrFmi2gZRipU6*krNi~{l*33wL9S39`?HRCSN5yJK6eB;OcdQ{B_UA1+y3+{vbxNHIyMa~-(P11S-Q<7 z)>`=jyDpRIwHbo>(}BwhIEBllTDUJF4vzK5$pMK^g%ie$N&dE)gvjXtFG6x}?R!U7 zKUuNaJL1HNHC5m^lhD4HykU^=Nj*PUg(sA>J)RHl5(>WOT!Z8(J&y^QPQ)aFKiT=P zCcsmFP=U{^QTEDh@2)4`C+ri!fZR<2{A$xS8<67HeTAr8E|g*NacU9o{^^FlM0gnp zvZ__=n-^bVCXEXCyq*oUuhz)e;sV6+sa3ST1OSQQZH|%5{fKj6(+oAF z5xts>QnBE?whcbBZx(151UU6O*MaO0u_{iBzE=`5gr7x;m-`mRaum*+fqXF&3q6=r zkBJ}^9$ZmXxN~(NvW4`FYg%o&FT3_U!IFxHqVg4NTaR^tdkGr;tCBSxpr(MJAsnvG zU5DcuJ@|%)AnBzKy|mrV=DBiZUG)Oa3D?O^JPA#BI&rc; z)md*okDOO6z1S^ALu^0)i-mc4>@@(SH^l917*f#s;K9xQIp`S&Xl6_WxbdP|{_3R`bbx;S4VXc*Ug0q=p^)*8{tM|S4CfBK 
z>0W47$=Q1Sbf;EfYA}-`T-lpzgJldwZU8nR!GBEu&y*9N)0b#TcqcCAd;=pIpI1&($M56tVkS;jq1yEb#Inm&aAi^*MjcU;)=?U+9FYii!&hDOGVw`i37dir%?9ZT}Xo(+t z{+>47W`P)Pk;9{d^E;CYv55O%w$fVE4^hxZqW|as?P{u4QVTU=E3w3TevDse0>|vP zi*b&v_g#o$F^%L|#%mA4)rxWWpU-Kr;-%#H(xojL_Xldz#PGQYZC0T}#Ib~@wth9e z4h_;>K&Wkz0zWC=ENFFoIQ&XY`m6oS^a2t5Ma%7!? zU7jNhdeO%gx8o<|=`etvOB^m*Avb* zr?tqno>HviYc}|9UZ~mpEvJn8pWZBbYrf2bYhwmI7X{j1jtApDaIb1xqv(0WxVo%j z?JBr7exg2@#wfj*9MTxe&7EqJno84Pwde7Y`ID+DAzR`(Fsw1gJ7lD50BMP$-OXF? z7`4;UfXEHi9lAK<0QRAv0je6`CD#4Pi%c{}c>-{BMf?#vzVXgYVycMNah1anAtNp& zrWS1P&G^VxrYtRe>SkjUX7XIf`D9oxNr7kuTN#$A>)7zm*B4lWUnj4Q%24F5dwHY% z;4`T}6~zBVp6Eg^p|^Wb0~#1^A|0-z?_18j^*Q2~R6l)NkBM8dA0KTxA^-sl=)R-_ zleDo-EmBgAqy0VD*9(tZrYOO??5iH4uKq?6j${NH1*pjQbIJ6U%M;T`X;u>p5RTjs z@+CJP;~+zYm7K>$>d^(YnJtzW2R8l-o^E6rl_X3ja@^16nJYa@VREneNy3Ix*wV(F z7Cl44)m~3aff7Dfq8aRrZkCYG)JH8#uivbHnA!i#x?uHt4VH8gu~eqbE62J~4B?$< zdYV>j5ueivFlKJgIdI98)BM6aIHKP-PbD~Atje&dO@YEihgKKN@tac1!X~LnH__;; zBGw4RCD`^1GO$@rWwsxwR;EWZh6j&?T2n~GD%!*O3BV>PsCo)nczL>!2;i?$RkQ`TEqTSj! zes%m$761=+%rLieV@2Na-u>5P0fbo5I%EbPgj~`fE<)h2WgY`!D3qq3B=Bf{CY_Rj zIbn8HVrzNaC#Y2%!yEWD*W9pP@qupoDI@utRy{h{aSKB}2J@>z*?TR`Hcq3x`#qQr zHa4t$BW$xZEQzD0Yxl4Au2FF538bC8UhIJXc%`G-Vz$TYQqUBi`+tDpSkAoMJja$d&Dh9#1vJ@(aaF z?rW^=JiETFHtoduX~efjU@3f}FcwDfQ7;lF_s?XA%NPgX+k-+|{qL%v9qlFe(7O_z z{f9=6jFqkKyT=;#l`PUu5!w%hi#m(pfaH?2Pqdy&a=MfLoU5wiuGpaK``w0xpzL36 z(fM0qDC~Q{wh|cd!5D<&pSkksAN3ut`Maa;ndjLA7B~!~ipK}!Zrk8K5VS~k%SLY9x^IlVJ%t3{Nv%y+ZJzW zX7>Hzzr3Rn^2|)5hC}gsPo<{Y9L2^ZUnF6bgmYPD^_3ibbi#jf4{*KF_7j{!eVU%P z$I-A|MR*jwYyNf=xPRK(9oLHJlQDl^3^AqqD#;czE$Dcj1=mip=m$wj@Qn9 zt)C4ig)|s`ynI|2)0xc5$G7Q}V;CF>_iPvV77VJ)B8b=ZusvC>8=u+H&N^;^{mv5} zT#aZX6j{v{Y}c;?*AiSNTm}KR8!5RY3tA@mz#3pvsoSQDR4s$uMi`K^3u}V~xS^Kq z)hT|lqs5ganZ0@8)FcV495~W3rxvtedBJemxP;K()R0_3uL&s!D0-u?hq- zm=_`!lZX(r7iM{@*EIKCHBqSSycDq{d827{T?#ZRdYIHcX}}lQa6a`vKj^Qm{{|7p z6U+;&Ywd4h!%~fHGiX{#D-6D2H=RfT;@}uC!%E@IGbdF#4*;wXyJZW8av+L4K&;=P zvD*`kB~K;p%Qkx~yqMK4_WTXtt>C_4u?yYl zo2KJk&Fnmk1Uw=yM}M?n@!NmhoIVl6mu(#Uer`_Rx{j$_@EDPd8dlmj_taOiLhHk$;jg{%_Wz(Iwoj~cW zuxK<;L+zmuO^q@;@2k8(4Z5T*!_fqEj_U%4Z=PRJoSqyig^i5}M2GO(Hh653DQPr> zq~WAgJSR6fdqQ5Sh?g`iRunWA`J3^cqMja-w=d?egN{m8JAZQQ=jecL#S6sI3!bkb zTlZ#Q)GvV1Ss&5oML{g4UURB0w}&IEZE{)im@I0UJ*W-^xYdW$Q7~-Zs?ZE=w*Ri^J!6TIyoVq9w^((y`UFvFf0Myx*8i z&wgi9tzruO5 z_W@LcO3_>M7xYwvP|c!mfR=PxUnD=73r;!Pg1kzk29xjUriQ4-8;=DmDHrqx(58{1;PWj)}f`6eNEh57bSB- zQvN=?Q_{R}3BH7|!Znk!+}-dM=Z&R6S?7q|o|A_}E;Te5h%E8j><$K7cg6i#C=>Gr z5lB>FmMyYsEZLVDYn)C8`Z4m+Z;xlN!|#e*8r~WWh?Wl*ym#G17xV(=v7iu{cLO~D z=c6rGZ=#uiwpxycO8GhQP35X*lbggGfWTOt&XuezL%qcI@$%B z#gddK3=(Q6SIP5BHXao0WmfT`mdwcY1ZxK%GQc8|PC-n@fGKNCG}DhHgmXoXUepc; zTigE6A%P_b{55tda=)6jxUhp_q%tKYH|NPebrwl*k70@>)7&=OEg{KGV)zS_Ni zRrZmCTdz0RBE$UF=QgVshRB2_YmnRg&u|D;3pN6*x77=z#?#Cd{yA)qQb zm2#*b2ejjTiZq!YGbv3}7H=hh9LfV=3XC7y{um&s$tjWc?VFQ@GizDQG{|1QHV)>* zXE2)rybv{l1%LZ3`219sJNnmOT>hmLVhncwIinetV}&IKRz~_?NK-wXT#|vzNwZn) z|9?GeM`Sz3C7!Ery?t|@w#+j7mXun;t{qpe72|7Rht`ltfRt1AXewu-)q#g%pT2j$ zz8FUyKA>oD8X4k*_&3>n6*AB5e1y#bWQrS!0`C4_ zLqBm8zF4Az_1@4Eo@?ctyxvmH2dgG;3e3B+*!pG3I%!k403rbmAlnjrPDNCGqGja8 zsEGr{E3v`n6MH$)GH^&{PZ2XwOzACo{$Jk8J!x%K(jgvjg+ajJ*E!8 z<_xDPy;Pv|SSJ7*2hYg*kL4ofBTkGtrUf#-;+^ow;-7Cdz(FQp5}8jIfVXjTA5?7LiuY_+IyiWd#gd5cvQo6Dy7ycE|`2U0!XFFf^kX3IPl z&gOSa&~gv!rAHbsTi$`As9N_1dSxr_*`j%%@5g~?nCEiAUq=MB&Jm?CX>o=;y=Vzu z6whatF+Y+tCZ9PsD$gn5LVF4Ak$5Yvsa1{aET-C>c9-GzFnUrj^XmH8M3)}PWE{;NrXz#VGhUuxjZ4|vQ)dVyI=>IL} z*6zh(@Jmh@xIW6j^#Ni8zMDby@w01wlUBnpb_yuZ$Ry_|8tNgBc!B(z!8Q6sfz~f| ztkS365ux9BR3ieoub2cKT}UDOPK!-ZO-g$PnQsx{tXo=yw)_Vq?Dha>wHuYH&t)tX 
zHoMU_>QNVh-DH8ziPb=TwxGhP0rT8Qn`bzX2k)={8}(wI#{Qf3;QR9}SW+d?N*LEy zH0XfUf7A60G;#B4$zc=zx3|ATp7pj;WXuwMCC%1t|Uho*R-=!M;S^BT)$Vk2uz*KKCA_@ z@${`bHg^9lg2HyLk|X!J8R0aL=R`&fqBm7L4jr|Hkv}V@V7nW4=190()ZEze|Sxm z&gINJ9&IZvK+B9&g#5NN(ur)Md-?8l57gYA6q#Oq;bOg(l^kmUZiGPfw7GR)-I~jk zD2&~JG@I88EQlG45vQE6MO?oHw&#HA$~BAV5Pq;TeiK4#H-cr0aXfY~=!3ya)TzJw z!Q4AxTwnG?I2@G|s&Kf+077}Ex{?YN%Tz8=7c#FvaXhOX`57?y?(?}Y39 z=({5VC68q`649n5Nn)xuqII>Dgoxy8U%US)fn35Hmw;r_L>u_b-3WHRC#0)G?x`A3 zMq87rg_^xre8!)y{pL90t=Ah{KWw8-uA3<(JQ(8!q$Kk&WKQ$8Q4VWc?!s$aKY#iF z7xJpy+y0PAly>sQfTt)l#rpqRc=WI9_Z7L#D-!QJbf90=R^kXb8!=I`-Hpl02w?ro zdt#@VrnYd$T zu5XjBChAKYD}M@So$F`y#Z)tCHxn0#CM2Qwti%Q1eDD&l&1xh^hqdfz#t|ZZHnm#tW)7b6o{7CI{pm+Vu&=LMovU z=c*uulI~10+oD_Bz3#7mWrFE2Rb0&z)6-b07|?5FAhUkl|ZL{AO`LfiPgY|+<2Y*t|+_#WCFIJ%l%?{)os#+E$wT%sQQLs zPfDt;@xV2|0{QH0GVuZI!?*P6E-9s1EVsxsfI^qra2hQ97&2S(5*Kkk26!g?yU}&og zM%doY@PgAXI26NwFDniY<(XIl(_i#YN-#)rz%H2@&cbeaYv`V$ zAWFbnY_95yM%77MYz$x3a}n!WZvFC2!z32uN@6!>e!((lALmUsLiwG`uotb_R{L1xP0g|#Oa)b^(a zAYvDQ_-KR5#84@cC)O?Y0gtBO)yhc7MdY{gHC2Sv41} z`b~L&)lhL0)$&77>m?}7H3(NJ7T_QkSmDQ{3UEY3u%bx`RIHOjC&H-L{JS|XCG#olH!HmkevCc?O(O{r1-v7iFu+& zU6BTcG0v7Zd;lGCpjE2XCVr>6%dcDX?x;#tpLyZW6+v2_j=%alY=z~Rdi}qb6s8`) z--Ef#v$)`WS*F6rX9xdI3jSPHC1=T#1g{WRQBHLc`w*(HPAjG(1E@&3dHqhvjL_pb}tsvm^s^D z3s4ISFJTHV#6JaxsRuf+U}#?Omhn}xB(;Swq0^5G*mN$nKFidTgoSTv;@TlW;_35KdMN*QE4c{`|x~(FS`ALfbEN1S_2))M2#Sj;1e_A3%Q}p5Ceex*mk0Gg%lslObisZW;!Y7`K zeq)^hWva-dS%TKI!A5O6uZH)|o%by&Xl1ZB4@g8lTZ<=OUDyzw-WzY8Y8a-s+?*n% zH_6V73g~z~{k$W602gjgR3E|`Ao_`s5mfO z8M`)%@k2wPNNBBLv}$=DXIz%=Da9Ihyf|MLS>BO7ljrHplsVUZ!8h(-j4h{F-$-*Z zG6Y)xI0b~JGk(!_J(4aX)8Ed22RD6wl73%c9XMnS5`TK5$PhDCq)bZ{nX?9cRP(hQ z?|RO9Vnz93+2yChUgh_lvgH~aATPtcr_NYzIUoSNH|#x4({W|C^%xBisXs|8Qn>R3 zVhh$0snXyCq7M5dkzv5D8+Yq&-$s+Pn2bB?MdMt_!&L>m4k}BhnkzCl9`(?YI2Bsb z5YE6t=SiQ2^;t7+NlWa0gP@M{xT0LIpwMqFq#_-L%tkF_`Mt5_5AVN)p7=8%Gx0vJ z7uy{Ko-wW2w9J(Wd3T?PUh+}e>)br}WHeN}6bsYEJ#L{B0@r4lmKkU6{RFm7* zHmq1c0YOm^kYWR+7Xbl7FA54My^Hh`dWeK3O3@99NbkK@=`BP+r6=@AS6YA&LP-dn z?`FIAK4+i3ea`Xu&O65YGD55I zCoYXUJN1vcR2MVAPOj1!=R&zln$F{T*PVq!Gcf|?bHJDldCF}DC>=G#^Yy7%53z5h z3h)QEE52WQ9wI+kR`XREf?7@N$xQ2)8qB*qQB}nzZ{tLEpq@*w4ix1~(H;n+V!^aW zh_7{0{BdRoNW#b{{vPM0Xj^s(7{YS*~QTC%I64FQu-X+9Wd0@FH+ohTMAE?J~!MOPy&~`GJFC%nrpooBY~=}UG%lNJ%g;rN08@$=5&T(1q@$_ zKA%gV7zL1%J#fbjs{J^wC@S|umq0bzLMnJn%C3|yQ8k-0TS4}O4_iG3`Ox*lzHuSL zDlu3n@I>J3^I_RR;b9V85hisXuIQCXJpBr5|3F%I5#xo>2yV2oF%;oOqHts zgiwH_vzS$X#F@1WGRj{EKb%Bq3SCdMWPGRB=HG2zPlLkG~H!Ch4F!B|bv9 zQb2S*zN#D=Q;w$6bTmRKmD!-(Ov!?=qM(Qaug~p6Y(Wc?%G=Ovrca~C^-=RGS;99i zz*$OmUR9aK$>b*c5y;Q##;VY*%QprtyjTdC+g%c zB<3~x!Z+>enWe2_Pwlcj0jFFTE5!G|+gplyg+SS@dFiCD-?t=7gFPd93oJ~KCz-6m z&(!Q2OYCo$m64L`KP65cI*4>O3C^7io;@jT<(}1@ga_ROyeLSQ#pO%Wqk0c8_w>{! 
z)OiFV<9VZoRv&*y7DMbyb1i2qY|83aMo=$ar_(4>6^`&gmvRyDscRu1?XyG0?Z0wThc2u-l;fcSxgnHeso(nWHYDDDVj5$= za7YK<`TR#*El@&|GEA3F2Oy>aeoE;$ZBbWUb^WySs|bYN!`!`BJXxss`(Jeera@kq zox1QYdFy)jXQSnJ?AOLBT5Mtl_mRa(tx=w1yWHG)e?Cj9>HV$OMX)`(=`oWGnIDa` z(gLpQ^6cFUuiIUCsvo}wV2uBNA?=;d)mkOoPi;PVbsoxYt%=8t3n*=pEmwq=u_8kQ^+aJumDd9|zfH&YP@Y>lEHS<_ z{;)W`4EdHpbDDju2RtGVz4Ck5k4|{WG|Oafi@9&c^twjO;3uUnMGoExrB=XUDT_RJ zzv|VVz$+g`&_So);95uKzw_zfzKmDvik7;XsDJwr&1nWHV!>BF8rB{i#;WLPWm#qp zYXJ5wHf(@OB9yL)$ZGyM&{pD=mbP|GZ*E;AmOFJNcva#r;Ro<$--y2Ko3=d`?;0Y` z29!xZ{Z~e7`N=7+SesQ#pbX#EfiAPwT^p|`0`Dp5P^1z%s2soMO^d8A2i5q`owlwG z&JdKDm58+{z3T#J$$v)7RWH3)9D2~V;P}=dlk)Z_VY8|RKT%Ks$RD)WO|NzzdO(MK zbF3h;LmhX10FeNKtfOcaK#;9UQFHtBa!b5Y?`m(xgON9hx`$^Ec(q2Mckp>!KWq4X zet_tWdociWkaoEEX8+QbzKH64(98>VoUG$TWxHl0uC=kW=H|66trBU@mNxH*?Kgy( zk!o%GQiql%!WY|k#7RHFT-I6E)FqAT=!lr!9K@#ySFV zp~8E}Ivf!-yN}*4FcqAH$R$`F1MsKLPRO~Hvf9>@LMG>)%H47~Rya7Y5szD5G2N21 zIP1QLP%pJDc2z&&Nvs^CNiqUC4_^QIfGwYdrF0Ip62F=M%=sh@P1dcZ~nqn z#|$5Ps7AJEnyn5|1*Sv(oHHDk&ti0h)f{-9=&L=N6}Q}hqXJYX0sG^*xMZMF!Sd}e z3X>XK`wV2vi#*39DplRb%FNHiCCK8{6K_*;;4X!n{Bfy z%0*FRo|^+dy*2{4GBHY5zcZ~qHJLhKxxF@nuPgrWA`}AUk-)FL5AGe&x%WLy`2?@$ zmC+k|>gPi5&_SHBO{Oc`ytVJ-&U&_WqD$=!%Pgz-)|7jAwC+puT2&Hb^b*rQIz7Ul zUAI1GhGjJ`GqbK(dzagT3foYaL5j&%HAU&SD}1y$KS*+Fm4p3hby$h4;F>1XOzAuF z=i_vU)!TL=J5%Gc*pHsn%poS>8*tKcrq}*FY5$efN{Ez;mg~{6*34e|a^Em*Rc>3- z9_2OGn_{En)pW6NhQ-L)<7oR+xr1iIOZ#;XH8iJ1QnxA9Zji3wBC0G_qfhyEbdHUZ z-PLIzCsw(gmrqWEvOV0+SVb1Q?#`qbw0Zw2oNG#8ATx8DW0KcwDr8MyaTYKOqcWQ!Ev`h~@gfPv z>gmEQRY@a%ul&C-3A{jL-K^6%5+ngoPyoYqenJWlm;_$R{r#U_fN!woEBlSi(%W?5lmAYRs$S&roN6h-V6D@V!fu!cB?t%_Yz&(D-JXP|I7_Sr*cj1RR z=H0sQehKnh8p4Laxq9%2zMHC8zZTSz|DVnXoTQr;?7tYpw`CcbDsdnCuOlJ81a%G~dNXT9;365=CgY$pgk+ zezvp4IILLdtO}%22o3xMey4G`=`p|m%o z!1LD+&p^}F?7Y59lYR5iY=s|~jnq5$jc~IPg_D_U3NeF^qIaQE+~xi5Ks;j? 
zF8BJ0LEsJi`J0EZ6?I)Zydl8S(^sN*$q8%Rorfdb#`R)W!nAh7>Tc#b z@`JCt{pb>Ka4rIaOr{mmZm=Cce*B!g=J9!`Y%)_@ee_19#<|$)176&j7`UzIuBJ_j zTQAbhXf4Pa_|A7XMbWJy36ULsKZ2$&4Otq~qIxc8*{yQ|j@RqO>G-AAbj=I_p$BHy^n0=VGZOrz+>>CSE zapqGynf}6_PuMR#>igwKtN&;J9Fxr}N=~awzmo4PX<7O3+y0SPUowDnPrZGUOTKcr z-0-uSlRt{%{`stnqK|xQS(@TCm=OW2i}yc}+xO2m4LffATTgZY7;l#5%>S!V7=Qu& z{}bP&7Y5MJ+h1;7%=(|TLcOpOo=lc2XP0l+cyzZPZu{N98kT=++u7NGZU3%kFmxH% z_WS>d&H7JnJ8;9;ZvO)zZ{B~u_<-#Prr}?1nAXYTFQa-sC%!H)G~?j<^bcK#&re>Z zJqH}r|7|+xU;U_ZMT;QaDRVP{V0uyUf7(N30>i+#zW-;I2jm#;{>#PLmPn0K9s$l0IF)R;<(Z7AA0T+{5$bV&6nkE0#38vT8-c_^< z^}J^ryH13hy!k`5f6G89iE9yjjMm&B-f_=OoPcEOhY7`{c~e34bK;FwF+W0p|GdtZ zPgoXb12+raubc*~wcJ0J2%OZ*&X@nKlbUo4IH@(9uRo{(KK=TC;+qQ80HW+^AXv)& zM;acm%dcxr|62y4^Uy9&uo(;qvE+*%8i=!5AM7q1dW_5blK*VM|9X}X@M)mwqiNfd z4sXud7wEYE1X!)VHL`IDKd*mS{-bGL|MJY=egg)Ns)TCa4URwlzYp~PufOmZ*BY)kjQqeX3r9>j$}Dh{ss~bKmh@6QFW0d zlk%xMRnjHb;fd;6UV6Ia2jz($*17)XU9E%wX9p{^44~Qc10mZdwSTdfMK%&AIoE7Jj9Ye=9vUxnwev!Rd$eXcG($0>jPtDWZ1o+P zFa#DxCUiB`!bIIiBF7o!;cxlcVEBdzFn`2D z7&u2+LO+vvAG$Eu+BjTMzS}0DNJ0`mJn+dIo^w~ZkJ3nx9)PnApmyV$DgFADw*0{w z6;@Ye&{X6HS_&tb1XRf1Bx};;W4-`GA82)H{$G~l6dbTHpu72{*XnLkXOBbF#sxp6 znV=dV@!aUUQX%g}%HAiDdH_Am%Q+_NqL%g=y=);_%9Ey4$B=+8%9ELZHulezt21FU z+Wi~B^PVDC_z62>n#Tl6cw&Stx%gm7EkGTxY)TU`qwA+&rhGR@a9Ars;dSS zodWVRgU6q$ux1t6p9OM|BF|rish_KoLC<{*4g3~EJ3?#rpZjGs2+x5ayg!o!kXEpg zAMItN?b5&e4v4}wHPZ>5Nl=7+LwXxCosI_bRYEdC~_DZYA zD0myUh_YjHYq;lyG=0I;y7Vm4M4&-@NJ6u{-20XfM7&GS_ELANuDyu#k;p0W&IC5@ zbSe8Qw%)VD(l~urmQ|Mg)m%WkQUOM=tVYg z&n=2KT}M>Wwzl5(HIVCm9cz{RL*V|<@m)cX-z2CIzQb|ieBOK$Am=5nY}9xiNR5Ne z4QZ8_v$rssazFp%q{tJweN4YAhDOZTZVq&5&pg>@{{9J9I&FO)J9AP0#OfuWg{9Z+_HZKUNT+40@fUF!VfgpTbO9j5}L_#ZoVmkM@gq) zCz?Kp0hfh(yd2pn#?u5SL-t?zE8u#FV-t=r4t7jdryJ|6*luRUaf`^%2uQ!7qt=WH z;5}Mv=D?ePDVs4pg8!)h1Fx|w#&q2ul}?mg3T?@C_=xcq6I^)_WjjRG7m;w)rWA~U@aeQB5T&e6`ZaEDftJ36Y!jJnrJctE-EmQFw ze{ip{APODZTFe=1oGWiM8%e%|pbzO^kl?%Z+&J{i3-C5u*T6u!Py^;xC^n>Xf6XAJ zxEn(RO9}c8-kcKIldZ`w0Ua!&4rP`=->l-AB zcm?B?ch|@;T_y!kZ8vM(-?q(A3u}daK}V0gSo+RNPLoEsn|mV;H72Bfir7b;dbIMWO157F43SiW$@b%C(A#O(unf~)SAgYq7v*3dR^E1U#zc9x#Nf*^ zmcE&Iu)h;dnMYxpyewO6lqQ{M6BO?j7ca{#C8zlJ83zYT+l>UT&TK`BY*-S|0-SCE zkj1t-g=GMLZ7@~n9=e8Qe!~!>rBJJ21?XoTYm^hK2i!VHW&cr2HhyC=XbDdSy*jS| z=oX=#hp8!J>oc0liI6ILWLkFQ{BMLXsK`X`vrduK{>grPPZcU`-Q;>h4h*v9`Wn)_ zyM~E8QIS;E3CgF>ZNWpalZ`FpIh>6~$Okj)ss!&cv~Ri)f>&@^eWc5Aq#>R_tSp`3 z2GQlU8Ps(@4Og%R+Nfy8iTy0+9U2Kw*34#drs9c zfCjHF5VMyD*(tcLfGJ!0@PPbQBeC>KRGeevl268a+%9rfL3xr^aHsGnft(};2Q+g% zo3MC&4}uE@vi17%_TmWh6f*SCrvZp3*jx^;S~=<>w%X#<_A36vA{ajz!7LnD0(y@E z2J$qj4n4sE-5CSjoR-P?fW9{vo{WvlO++!T*=rtHVPAph z5<$zoK{R}QoARFo6H`=f@O1(Qq8E#(uw4b8xn5kWDZ;sH4aHY*f6X;fnDBqHH$x+@ zTsj%>*^kEEyDco=`U`e$`2iLLVybF0vK7Z>1mU~(@KOkRLZ!idRtjQ7mu0BBEJXE+ z6xQeUq)Dd$j5+iSj3iTOT^Bb&3o90C2*q;dGa1!18lZ`q?g+_QnOzO_!BZM`d`C;1 zYGmHDceir)jnDcZ@wHDNVoXsHC^a=oCUZ@szz!H6z8|oG($^ew+dVVm_5nh!vV-4M zo_Xjz=?WX)2ILsV!kppz4VpZj17B3!a_0e|%w&c#gPVL39)ut|(i~iWR8a@8)Qy-Z zLZf~^2(UMItCJ>VQI%=k6zGqO88qlkk!4IL1lroDJP`#A;&cctcftx z=bzXVdnv;sP~kq&V#`0N^TcbYi$!N9>SS;t;IPQy9D{t#g)!P{AJ`zq>I{yE-DkK0 zWD^!#D;o;v#eM$2Zk9_hIP4#f-s+w29WCuK2sEIg#1XA1*4x7&tM`i@5-70|_1+?AKK-25=30 z$dqYWk@7fm43%`ARvumHG&cC~Ge$uzgrg#`M`~h!9y%MWJv?{=5Iry;O3H8?JtFNM zM0{cr8yeJ;q(PPA&u#+mUJ^&tko+!8p)1Jbm_9r3So?#y=9S;dH8>w$F-cU~4I3M4 zo1Sk`y6#PYMEdvMlkr}}73SG`y<2InV@mVAllIs~2S;&<*;;r zFPBelfBl%gh~7ep`3dKN7C(U%KcV~kK;&V;?X){-eQh>vPq_M}m0tcg4ji$@i^sr+p;wE#5S% ziGY{w3jk#-f({O5;A!659n# zKS-%}^B!lJ)~#A!buA!Q%w%=lv(Dv4GIV#TQa1J18&zuDKBZ^N617T%%72-iLorB5 zb<*O_);ph?lu@?)s6oeK@95;GRMX*FHctMock6zJ;h_aI??HDkfAjM=gmks~LE0u= 
zC<;Zl#ro-CYh2*lL>Z)h`RPWsUiw3)J};`>nkzn;cZ1vFF4zwf)$yf}#C}GZLYtYz zlct$lxU&d#Y8w;rd(&W4a1`W6atl8nkx2X`0zJ4mGw3(!(J%ya=<0-r`u2KQSHN00M&wO_dz9%`Zc?9Ri_5l{5w}UTdms%s_Kug zycfSWBf&YffB&udJf^H~c{|4e36xTFsUOqRnaPsTKH;gCbB-)R`p&tfpZnr7d2VgXg*ipB{6Js8U6YS)QBD$T7}PdvR!AQr_#9TnF|~hzh|YViD#KVsp9fS2j2tR)*d9(QziU50P4wyf|Fy4J}nWRb>>Hvl5m8ii3Sbgz4W zqwJiG==6E0P_az`uu-EYs=;2jg}9+Y8p&?l0VKc>AFZQ=r4HQ9?eUsWBS}`9m*qGY z92-n@$~e>nwYi-o1GJx&vQb()TEp}d-fx( zohp=BK%?$SXZii2R@zgKD)zcONSuC9S60T{h^V<$#`hN9-8Mx2T&->S%)EMUgjxT3 zZ8M7)N}G`96tQg|%BWlZrWQFs3rjHyJVsDTUj4}1Ph9UaH7}VyYbw0J^DZUEZCW$VKQ{$*>8_0lfk0dWs=|W&=W6qv_ z1UQ*Oy0vq7${Ho68V34pPEDe&wT`VnglgT#9uA|6Y&PJm;ueAwBT;zG?HC4>ks&_N zvfC6zz^kpsX}8Q15w$jyuynZFz&j%n7<;1v4|XA{+iI?n*cY(OhUwh%oG)!c?!b0^ zh@%eac{oCpR~wc{LK=luZ8YqVDr-x@P^xEB7xX$0Ci8FP%fAYhwU&6R4-7C3K1+Cn zGh2kQ`qu9e)Kx3SA6o89EtQjpMUFEqWwr+@QVs@5eXBAmvq+cr%6njctF@!Vpdp0r`TGXjWV z%*PoZD5rF(AMbGxPMgw6RsJ=|;NEWL z6_5xlNLDOfRr;kj{dr{Zn(cP0MZo@dR$w%Ra7()P+#USM@#84=PBdsQyNTbzMgzIi zRoi1+BL{DBquQzc;=rrWsPo?ZJp9%dz7KG5ff6{F=Ambf>W`b%EbzHw?nFzL=X9Im z*;IBp%@HEFmb?NE?5SoPBeG&Tu!ydhK>%%%ZcnxMA_D{@1v1+YzgQMlhx&dD514ccJq`WmkVJKTi}q&1D_|tjcB&AnS3t3bhww8&q*L_J9XD zv^3;z`tI3&fqQs$Js@4a=$UVlRq(S*R`N-HXvxu!#@CgVcbmAPyxUILUPgV6WKJn1 z7!OHg3~f3DXRQz!nKoA^z9e_0u$4-gx}?lNLq*aQt$C`W24Z{_IGZvO^9r<4uUYa# zecRZ{#vEbRY8IykrfRxadid-bRsp%SSAcx+7dqte$wXgs_a1q@DvvoW032d3td)nr(eeWH)2wj<|3!)$XACksDD)I+2KVZ;^!%ZP~E+V!TI(q={cAoo>(;O9V(v{Y88d%(b3VZKAD(s6oqN8!Y zHsr_nOxeeGXx80hxAf^POPUPcJ|-|fc=02BvF(*I)*ja3lb`=;OU*_Jw zZG8LwAD2`FwJfxcXtQGeKkDGO*X6R!P>} zchOJ4H3}>P`og4*z}4LjwiDltz{Xju`Fq4~nUM5#>=sUlk{!H%ky{q3`N(h$J{`T| zXLiA{%5KU|NRcFKRV~+2RkD2EHdg!6Spi#(Bj8(iK8GXhh3PNE)y_h{k_kdg(pF&U zdZc-WV-6Fu+p`0)4fQ(NVcr=k&!aklD-`baJt=2NK2@a)j7QniNDiu8$8Mghn{pk9 zxH=QKp0i3)BN~VCq9~^nlkwG0@-hl+{NY?nSm!a7@kKZz=Y{D?jlM|cJX!8BywA49 z?|}8?u`^Jb@1&(@}v*}#w6;AcZVx{eU4>jXp;4GW3 zhBayW(CNm^@0}o%t*{&3dGtkELib>WZY}sITIWGVfR*Hq{8Q(?{i;{*bY5( z`sbvC_?l>nTEg*w8YRo>3*B1oQW)W$&W<$B*l|f=Wm!iDHj}BR&miETqScK&N7E!n zHj#D1u5>jjcJ4`rG6eOc6OWp*7H@!S2@o;@vhnbFDR z*HkWGv5m_t`FAolbBIy<*JLD$Z?KI`tf8Ln5gm8n(HkR>25K!AG4tq3e4#7>6oFR^!&YDxr(aQ?CX(u)IA1T5)LM?3RG3JOca* zVY3IpvsJRj>Grc~90y-JiLV{#-TZrkX=MW*L8A54q|ql$T#xi;>!~tf&Z3GE6*t<) zCNy`p$R7ZYqY=p8;>cJJax@8JEacYK*u19NVm0bc9MN}fdF@aCW_(nKofd5uuoSYY z2D%Pw-Uu}hiND0tBm-@`^nJf{08&&Qdj6UqN+e1nB)J|?^Lt;~PITt2C6Kh-_vi`O z-0=Q&r(KSaR>GL9kxAqi85abVj6`93UR*Kpj3u%I&AFoW4iuHLCpS_DW4r;Q!5Mq< z_d6p0X9EEogK_>CTfY`1eDhBK(voqmy2^}y8<$6xPlGtnF;Xd8cfA=c&Xc({@Ki-% zW=eUfVW>Ge@q$E!tSHRkW?8d+>xn9UTaJ<(I9`mm+eqZe(M z1X4A6E7uyp;IPvl>t4$(6Ve6RLsdZ2fF8VW#X=#;f0Z1H9J_MeCa<=98@m zjC34bdB%SM3;R%TYdI?MJNcs-69V0CBi<79Q7j!Qp2_-bvqvUxJNkYU2k$Xk9doHG zIN|J*YWo>@nZlYX6xJhV@M!9_|D%66v;yTg7Jx5!EB->@P3O}65fhL`PWb$@7s%5b zKBnO#H?%|*pQLaGxi-Olx?c<#0bPziKYM3;#OarMWv}rKd98^H94#c1Ywwvf()pzp z#*So??3V;2xrvzCei!E#^{{k@HMb^Ab?zfhiNw8h7fEL`DY)e%B`ZwkpMwuTw!t33 zr&eH=Ll|1AiE&jVg;NlX!DcaZ#4D1jEIp!dwv5X_mC6F#)KN>o(uM}QNrg$*uL=dM z)Cq#icxJU&^DXUnaDn0*=iWsaY`J^!a?sbVwOSr^EgY~lVc_aVk}&O<23rmF35oj{ zEctsuA}}sMg*Lsde#%CbP6J7&>9!WxJyv@hx>r$Vs#0XdM{#Cukvg{VdzxO`ttgco z1vg(R;Cn4kHujAt8xkRy^=`8H_n`!VtNAE;JjipV(R>lM z@~s2+^>oeb=!2ci;_|zKz9P);YJoEXK7XE|%D%8hLDu|q2S^C@^LgAGvP0Dk%%`if zqfmvlv8eS_C0l9eOxN-JfW9XYnrlDLb`l%zsZv6!O0bXxa4~EC`t~(-oo%8dL-P;3 zHI0~35a|!L%>%3>*v^0MviRe*nw;SIfOb&m#HBG5wkmv;b67KqXdU85X;QYJ(?c~( z55jI7fcY??81K|s$~e>`LTFFuPlGWIZx;J2mvJA=SHiC8xcQj>ecr*MSc`9QE3w8?LKFC^lW~y2_2o@^C$%HJ(}124${wzoDpIAKO<5j zXq@KoxUcEzAPu?)P_3KzX{-ihV`Yy^2)O#!@C=O|9h zFWa?YLQKwqS`U(v1g|x0jI1TYYs!`t`?nnP!W~IxPZ{TF;PL~pf)2Fpj%|`TZeR0; zR#^Aqi?R=CSaR7DE%(cFsecKG_pHAI`8EUOMy=rjUuENRW_>%Kt)gaU7Dp^({!HFg 
zjIqYvXz-U~VsD7ou+cl3laB*CRSyb*GaVAXn%tNVprLyXPiN$jN2mXGqMIkCc~W0gu`=N99ha>lSyfeytz*dd zx#k+H+|cGLIT7P+ykeKUcW1*|#Pu$BdM47ivaAT21o1TCz7hF( zC_a@41C0aJmc9_EL2(XaVS95Xbn-Vn7qnaxxfSrd8!D?wNJ3>CftQ0T`Ou?}_#m+6 z1OUIEU(HLr6$Hf59tnf4X1}YiXYce3y2XNkn=zh9i*P=*`AlnGHwdSsQ-Oc%wV8sD z^QD{A#TgMtq48~a=Ew>Unf7`i&+=&OLTkS!?(uFXjAyx(ZzlcT<>)$J*5tgb2m1{A2*DJRiI4GVOlHb zgrc`qfFArT01Mv4AUy6Bzrk*yu+dQozd2)k&_X@y(xCeNLVM9*pVpq4a~xccu?M8u z?I#Jf3nbl2gi0+dHk9ztlCGgXSgzE`C({FZVUCo7ZN!yacpU_yy^xVi z-;?u*_9fe$P(BPI7s%-j8Iau+$FDA_1H|gjd93QM0mT4|SYC7>bvV6MvBdTxz-SmDAc>kK&g_w<#X)|oEi!cKagJD$ zl7|_ncSFE`gnF7ICxThNyTv?su+R#4gqnpz*lg$8JXA_@rTHL?cle}k0>Ozam!)WZ zs0w(twaAJZjq4r8K~YPjU6dokqKa`x+Xv_Xe$hZD&q%hs=g73Mh33C0b0jD|{*Wiz zZ~0U@2uzBi-W%z0h`ya?e`xx7i0qn?o(T!B8l}t5-}wGpL*&!J{pM&&g8Z)+#+J*Z zAJ>|H`fW;iHr$i?jfS7NzxjEj<8yLzsjRbz(W!?T zOI%gn1Pe&g%PFx$Q?n!KWfc*G%&M9-^np#EM^F?!=s7)x%|;Fh|TUX`sVRit+Wfvr+Rs+7>AN$)iv zf}%h`MS2sFBE5$m1PMwPq(dUTgx&%qgm1-j^qe!kd$-(k@A!Rp{KofR9Fq0sU2B$S zK69=)l{kdG8gdw>zi6=b=M|oQB$r2cHbOI5^{g1XE ze$_GDv)vTrX(j8Cc373^JJZUl zql<{hHNi+y8vvNh=OlS`xvp<#Mg+;Ov-U@93EB$7xDcwOQ$pL)i|MBes*6KR4=Fch zJ}g2YMOy3i+6Q%U7xAu*0e;z>lu+Hoy3_3gx~*_u7#Q_N)pc9X1Q!P}p7bY`u|uD~ z^JcC=j3{6qGVaX|s^e~Cys|1+5?^kdlvbg;a~ZKc&JN3x4eGH8&vii)P#Rji=f>+~ zFpr?UB`U11kdaXL;fXce7`-}A-f}-q<;vBjTid^R6p*BwZFXS;-qJv%-{+{-3~KG< zPz_=Zl0#=>2lc7=bXebR%x;Er=jlAD%ay?zoPwDz?Bp=$^Q{_hr^IrdnpZc*HUkGSUVlhFaq?&9Qe{BT#oIhv zEF{7XnAfR?}(0w`0@lR{~bX>i{Qb}O+0(15`lYKno@I}UciX$zV%v>De zDo;IfJv(6k0Cu};eOURg>AG#DZ?j2G>Vw8!#-r^AGXjAL4SStVu~5|7Q?Yk*(sL3s zI3h`-28R7Em)O;o0a#K|k80gmCrM8j?tZdy{dru`#h%LB7+#k;<8CC+Q9d=TUKK=k z^GQ!$*xmU=`wSJ>Z5n(3e5W|$01tyIn{_6^rONh5+gb;>L579<bL(=Q~ySv7C$8cwasKZ49kTIHdd2v zW?icAO0{ailM17+hHt%@B9RlHN-lq}P)cMWvaFX88m--{A%6*;eh~RJ#_}r#qt^3Z zFz3jiU*E3t)6d#3v4Fm)KopKoF8s9}`9bai5j4KQfgPaj>seLOzus_4lfPs5tzG*p zag|sj&mRx_&(a4!){&?G?NhzS#JlpUKWG(%uKo1CY!O7c_>WT$Kfe1@4dmm_fJKHw z%BxtwzBf+)*!PJKHlHm?|6hOp^LzjL!95*ZCZ$)u9e(HD1>qAvi7Bwi9V@OQFOmQj ziBtPCshx(b^Bedj362g)t`^?cM!>Nh;oQGVnDJ$>vzF(2CtZDgeP7AjoaXvLbTkKl ziS1DTg}VLRwfXDEtKs}fFGS-wsS`hZ{z0>@nU-HFi~iS_|Ig2>Miq!l`{j3?Gh`uF z&aOWMQJTTw^U@!Gtq?yqEr|cz3qMTh8Cw^L*a)ZNU~enuM@l8co|0x{y%Zj zC;yiSZg{)qh4NEibN_#BK1t^OcWwS(v?A_gcv8^6jmc3+qD4@A$GK=NB)hKNUF zqo!Xa164INqApFjErEv>n$%VjeU+#AI9x#CjWwj=`oX?HUay*v9=yJByprW5plq=f5wJW9Qp{JfrVw zXymPEB;}$rDa}u#{g)r!*qxk4)mU-w*HZ&A06zEckOr8*`+7@t^GKbA4C|slHCf#q zD)kizCt4$Kk;<5T9U-66Kcdqh&jWi^yg{`)ou^k+`M_VD2l$?CyTMpy;Cs~Vcd^j9 zQk{i!bLVGn3{LDcWVB|TOS^EshVmAL(vjSoJn5-vl;pGe*lePgjsKDUA=+Qje5ufj zb5tBkc zzx$UTj0J!T-q!kBB~Posd_JlHI8XL*sba5cL1~5)Wtay=N=lSl^ICCP37fi93%2lN z$%`qQJL|T+LlYkNQpQSnh2s$G>wwk`E%3>0abCbs&aK=t^Z6h{eU~f z>hkp;5XVBCEh1X(a8P(NRjS=CMaVo!Sk56e_T80{AZrq#3#7H5XwT*a+`?2G`A9+c8l46@B9|PwNxuwGJcE>VBPJt(W1o zzX%cjeoCf2BxmXQRmD)=76vu$wu=ukd(`Jprzxfz-znJ4-g)suE53aGnYL{S!2PWi>(Ed4wpq&!*4*| zHnY)ClXd#j{zP|@d#2j`H!Ko%aVkq+cMRC0rBu$x+|Z-axUyN+TBM3welWb-I*sj8 zJNK97>hBgo|4tA%@_HlL1}z;0sRTd6JBh7Z6EZR~+#$0K$A22*6#r}L`1u*z&rPAP z7T+ve*?RBPZ+?qec;W|Y4Wb&#juZ~)BTnUVr}t%RXy8^6hcD2oTy9Q^0Yc2XXTUXA zRp%cTQ<;lBVv4XlR!TAp<4ld|efH9!XuG7!%|nyJ{o~qsGPR<>+1#yM>wJIS(%QeK z6n|>;z&#G`8xY@xOxx*CFBft$A`RNV?90`GEhZExM|pyPjxps&;2WVX%RrLukj-@_ zq!L>acQo}`IZsWt+~vc#l2`h`Kb-N?6A43ihv8)AjOd`dcaZSpL^HJ6=UuZj68fVl zLp^Qr1H7-4hho44P{C(EgG@I{A4L}kG*T7y1Fh3tG%bnid7;*w zyWMPv>&x)I+}1y8la46!%O@QX2RrWAaj$Hdogq#K@DDq&p<|IN4^|wG#bw-KKK{?h zhIl}Ej`tH~_qM~^?CLB<o=j`|MCO*wu9=`5hiEAeQW%YzgnnzhBm7Icbs4>N#{_xdFFgC$z-GD#cO_7paj?VJ6`Y z5xv3&1tFSjjhE@Fs|ERBu%^y$tV1z!p1uXDQ@x#>Sj;>H!l?+I{eCLeV=GzxXpBMQ z(+kC|Y<};S;pm18F(D&BMxmxpJ6{K$Th;s@{$|hA$fLd2y~`3540?5`-l6MMv-RMO 
zbM)g_PPU2~tKp<3?W*Eot!_q3x$<*eIGUgz&@)K5K1dXGFc^85SI`SutoBy9Oi2c4 zH7EFCb~_8_xU??_3kCfP25l~N`?B#mQr5#ZQNqdQM5&=V>&q@5+^(?gQt?<;^pT7a zhrH=s5N-yWV|H(Fhg5QaN~gbREWlE)?DV~i+^2HgH{jt9=BXV`{AY&Td(J6SOfZtG z9SKSu3-SOcrE*0r^x;iFo|OLU&#&2(iYvICCT?w`kiIJAYcILZ*z~4Z@`Jkikrr=! z1CZwFmVxzPZ8C=&ss9|Y>**~qgx$@#W}~NC5PlT<54Pb2aT`LIpEd(4{ATW7ka0_w zzAS-=T7~8cAbFT!*TrXBK^Cq0mjepUv#K8L$2y<=R8N5=C&%M~kVy~{4Nk{n!sb; z{_A5#8Fdu8>mFL}Z=2|5CRtS$G1QF4a`s-Rjk?{sEF-DpahLgMhU%o~NkH3dThpCV z5OVuzkca?zpkIoUWgm{y&90r>dbf9nz2WQCDXkIT{ZCuFjGQ6zwZ1z>EJu4tFzpLJ z=6s{MsB>hU@&9rj|0Yev%a~RMK(gDxNW}~iFihvlqK@R$z|XA#7`|ZC>fQD>=sj8T zzk{awTkz9$`JaQI4%wsGdot>IQE-BeNiPQmGZbu)#6`ZBDgArw9bFt)&U-pCx))} z3I;}zAjJ4zLDK$TN9$x~69MIao2C7qIOY+f$p1cZO&4eJd6$lu6$mfnOL7A=x`FHP zjiVWCkf68%V)!S~y_2ti(;*7ne}{|z4_K;yBR`ScpQp$5&oCDW1&zFe8fIpb-o4~h zDVISVncU~NmP{felh@ZlYL)$|xpT-odZO<9+=>dwYMr|K6Hk>{b*q8a?y5=ZX^F^4 z*1ipWsh{!6uRb(7pXD`axF{oT4Pg?pav!mewrz$=bfKdj{g%%7=7$xC@Y$r#0`2{? zu10fZ!Y-@X_|+YmjF&eRKSjrH@M=~-|Mg`xmb5JOVHp*R0(ivV7&K)K z4UNRT#^jBvPO))u7@JJ)GQ*M27d(SnzuHyD_uGfiHZ7;>y)9wa=}!|?FUI0fzv5y+Bo&8ipv3Zhwatqk<9s|G^fki8r zEI{dw?Z8s?jLVNxb|pXhDa&V{iWM+<$(AfsB)x2vq6YzWdB$%9{QE+{Pg|uaI%|Uny&;@eR=oXwr%?Pxa?ir$Lbs84u7+sbvb<)bR2MWL- zrkyRcLVX4#+Du7*2TvQwvwdT6Gqa~!>54GKB1zcAe;skgc57a<9=RFxtL^#em-Fug z#qj850v$bnXlOvm{DdN#)~v_7q971Z5(yH_Ht|AkHW{{Rt?>v`7m)RKnc8)kLT0%r zRrHj!1dI4?%>!|MhLm+W`s*eBwzZY74%)b@wuh5EZ4b+R0~*>mk(c~k2w>f)d=p^3 zWMU(_)rJ$FN(}ZhXkKy$6OCZy9*#y1Kl#dEeK0jX^D%ee99M)Mt-Em$<&-#3jI~f6 zY6Iije|W_48*1`$BHpT+tK3Hc!>d(Mpqmfa}TKNze zS6HiyycZ}jw3@Z7I+31mr}v2A!*^8v>9?C-ZUO5L19GZ#MB0M513eYQgL)4#%5Q9r z#&dpmku~>#iKFYg2((!Fh_v_jr^Nscz$yzA@CCnpeq}hz{UR``;oVS4ag>7yh0nP1Vu60iMLv^~>jg&D+$|4=!CJ*xBGTATZ3|M+u2;AJu)39Q%w|? zRZG6sK=v#WB6fo5$&qYZkQqHlji`QbKB+MGHvp7e%|z4^2Bsi@c2%`5Yp^7KfE}f# zOa763>q#*7BL-~W7`Vi%tkYAyNl(|_T-0IooM=|a3*@!Wq=;H__A7+O4$mJ+6`z7K zHetoF_ww^^FSYmLFOt{zZZFE}2|oOqBW#8r0-HD_{19v^*ht~G8UhqxQ5!yL1TS#D zw*757whN^o;nF*Em2gRZlMdmd1?u45otaAX+B=#?H~NC2w#0-S^^*!27w3cBX1C7- zD^m-{I;UQPT;mZE0a608KbP)=bL>BEfj&-OuZ1scy$55oGmDy!#2k=^%Abz}B-JOc zi`0^KJ7co#A2pJ%$*QH)yuAG;K?2y-#+q}_+R`^?`BXueuqHtb<$*XH_&AK3TW~m^|KwF+P}fL(`oL(kLDzc1IT~@8+o-}{hp~!ejeOt1(Ttp z`1_>60hjMtF&kc%6{8se;^-O#Xw;zlRGB(?x*}a7wv)TU9$_zhPkdxPlJa1Jh*tR) z6KKl%_fQC8J?1?!4TPu);vfsaPTWr=AKQA zgk~*sNYein&B-v;ge|@?Jl}uAfv8Q6LAa#c5cMQU3AJR7eA%(hN&Jx(KFtj*lJY~a zY-f9mld!Eq;&q>Uv%e8l{!JMP=qG*qEIprX^4HRJirzHF1q=MkMWVMSoItB@Bv#+JlTf5H{KQ?>s%4L0JJ=H~;WzMzH?( z;2+{(Xb(1%NuqRro_NEo`Eqe?HrM!JV={7Y%F7u^mn1Cfr}z&r@)u8$_c}|&DQW4y zg`lK1KR~RLN@lWbZx+!3Vdl6dO+urCC|VF(bZK#V4f~5_{_$TyEA&Lm&EmmL&28Ur zP6B+zHC~d=bPt4P^L2_NRk9LOAOKGe{oQ8_eFKy{(BLP$~8-SwX z;8cj1$`M4MIOw|onEoB8ih2VGLlF=*&hekV=06r~Jk3iqghv0~ce#s!-y!z;_;nNo_p zStmb9i{G%4cN`RX%$PDe)wv|fLid@dSkA2Q6jog%b|qLq`K_vf&II>I{8sDxAb!g+ z11(;jFaaAG3DcFaYTKQq;hFxfctK9JWO0{IMgHrPEejxb%F`|6voT%1H=9JYCo$M^ z=A^>llgWK4Qm3@} z_|)0n*H^XL>S=P5Fgz2vXQk8HdZOBKCT|ILTA$VBYh!(X9@Mh*aVI~#vircfw8Z7nNu~4ncOC=(9942B6z;3K8o4ui3I9#?bBl0~+keVWd|bXNvFQ z`}&c|=DyUO@~u`mLWxH0QIIhh5mwEeFcQsG{$EN-fjbIERPmC@>wS5v?Z zjQSm_*osh|%+%LSP&oZ*LfD8iMCnyn0G+Vq z4PyJybLQLtbnQ0Bmf3O+rKH11d00cf=iUIkL4?n!>RN$81-DY;*8zh!2zeXrbl9qa zz0^vTMa}krp&^xMqYO#W}?u<4Z<&sKxHaY0346h+1sNCB) zN#5pQNRe_(%%OHWg3kJj^m!6`h`&VxJawcmc;{WpY(!z@0s)pCW6zc2dpZy<)0@up0YG4kB<7|~A z`stPdl}F*{SL@+)0=MMwpED45G*hnUYL7_rTnqfrG#R2vt0w{seOCZB`By-qhDfw` z82wDY{L{7zb%}G<@3si=%l(u&oW89M-?bO=Hkb@jxC)26v*QVwTsAV;+9+tS9xEdp zdrfXCf3q(vhNu02StIEcWUVf^3@VZ(JOpk7(ggbNoVt`g(*}*MxdHMZ%vK*nezKtd z@rOodGQe1U{aN}1h8r8ydAroC)L?qw-HPr#m%0`LfZtFoFpVKy$)Hhi^L z09Pj#GBN=x-|jZ;*^wkBK=dS*Pim`>SGBUHYb-~J!QKk!ezOrsbS=L4DRz@oIpipC 
zL+uu+z`SJT6*Wlu`4_qX@rRGO-tplZb$}a>fJgL!GEH3a#TBs*tIouj$}!jWQuN2? z=WtuhHj2rcL8NR~;;Pxj9F5YN-wL~IRbvWkzZZ0RqI~8=QZrI& zspZ#`sZ*f)n;s(Z;-Ac;eyGjVt@j$xTz^DXU_P(H6YQ9q#=?DQprOQ>kiz#XiQDWp z9Ky;4SXLQdZQq$Yk+wdiv^=_5q;nl=08Fcc42+1H00Dm4kmayvZ0{<`tLX4Ii&jbY z$aZO;14tfmSo@zg9@BWzbh-*i)T1zeF4cDga*y*OR|dCPW2sK?*b z2)!h+8HTQUY1_^x>X3{nUrmjf+>3Bf;|?iQ<8c4*SZN_>*sb^D`V@fK>H-00_jT!D z+(m9@2di74Elxw_lc>qS;BoVS)j(6v#F81StK1#4l~viK*4=!BNT%q@A^T`RCnO%o zykagxiH;3k8(MwnLDe31yUvUO9m^-0l!}a6#Tm+JZ;E=fDf1yl`?o93hVVm5g~!F* zboU}04Wv1%5%hU3i6(Wt4h+ib;EN(+aklW;I1Bv6aw!Q!zY@SimcdMeJ{eO z_pAGG`hlk|)`F7X1jNrITI)TIt!JE(f5sVm-uE3SON|L2Y^G(qbxCkR_xsY+;~>7a z0&wCr&4ok4>S5a&q=>a8ROCzuQsUzLpfmwo?My}CXT`GN^~( zhkQ|p<;qcCH2_XCzf`gLl8WZ-{;U`xL3XDkDsy(qWIasp9cb1iu?+Bm zRkSwE6TMGx^d^96rn z5`gb73W(VmwoY@+cizXL4w@wk5?D}Peq$0m@ zjBf?SR&&#nvwhaGn+aQJ@T$;c)WM2ZTzey8r=o~2wEdI<##2*Gcs8$r&!j;bx@>21 zXVnjl^M$cP%_&ibpzFv~lw5<}nj=S0PkhvO=lA}yYWxer{vaYtO?3EOz+xm~bpV}X z_00EEnemezaefj#xr03?CdIn_kMzPQJ*SJwUJEmWS$->iZ@}&6DKF`!M_{~w$*8>P z$r^(5Oj_F`qoU(sR4!0b8Mn0SjRSQ83bt`3+jARCNmvI+$kI;WzLvq$j{$z4V~Fkk zu2O|x8Ugn|6pPCwyQ8vOG?I%rR6b83AU*q{$uz^g-N7ppeK;{OCo5eUFh0PE{^EI!H|#CD^sPQD@*A?Pb#H9YxD{uzH@7vI+#4KsUKki_?#vR9s73MxbKA)7TT)5m9w%UGt*cy1X@p3XNcId~l zKH3(q!vQJ6%ZnWb=t;)s)|um#g{l2|bm}z7gYitg%mk%3m4te>`(u4&RNe+)@<73e z@$H)RtkGVPzp;ZQkznHX(ks%RfX$Qwe#ADM>ma@H|?_#m#5$G30f{NdEXf<~Zk5&wxZ zANcOi;+itGMZ!*A9Z}C#?q` z)M8P7eP2}A$GJ>L9!3~(lsm?q&1%5U2xy>jD69d+{YrMfr4q|;m5bU~pY1%8l!kSf zlXh*}pl{2N%`U)){obejJeJl%9S>j+t@NYi=f|Ser>#tvjMcZTAp!0w7RI)`q{Ge{i=d}Xbs%Z7II7cS^ivLmInyT zftPO;tU?qm64vyu%aBC%)3qm{-TTz)V=3AH0eTSR7QzOp z{J)*BwCu?>BcdvrixHP5Id|!GxTYYv4sU{b?276u$$LRRDzZT&-9%e6m0X=a;)^nK zCNV6X!TIJ0^5$`8(yYN|`ZC&Vj8yD3MYi4qQ*xtQ*=tv7?0h@zAkm(~mT~R}+k`k=H!Zpln$ou2;`(Kh&_5DZm#ymI~3$aiBCWwB;z@lSS_jz_T(UC3EX` zay*$XjKz<3$Q%Ezw7@}Q@l*T(is&vhl4=)R74VQODez9rZV-Ku_GN%laQ zx~QS0w0qQHv+8=Xk0Fh3L|6*VP3ZbrN+({80L=2?6`qs2tt~!YL!Nop#~5_PR(T8I zY_}td&9x=+owkpC%hAkWiaDL}YNq_+F|i{34e7_PO8V<3-S7wJ+dn&%^~WmZ_?x+~ zW4F&lQ~p8C)UQ`mwZC6FX{DW-kBt0gTA>uvM+AG@k01xW8IKp~lAP{ItAzIj;Fj*6 z)}KNJZbiFk@h%%X&@7HwVffl{5ql9~IsJ4OS~4gDcOiL|zAs81h7X=;`F(wq4qIPN z76Do|gd*s_u(@eJd7N=?Dz-)D=CQ3x^TNfTk8^B09p#{43VGqc@wB$RTIYq-3SVwA(yOeY0ApRjB$fKXR*NgUl@?tVI?MMbb|Cj$ ztJ9w$)k*L>VZYS|BDTAf-L${Ligy8G?`8Eujbd;tY!l{x-5d2-P;0FG19qa$fzs-~ zcR&O>2hu1A!gf*i!{L?R_?u#gofEeO4|ZGS_?we;6hT8U{SE(a8TNwt?Lx2mJR?!r z$1?qN6Rj4J5?M5#@ZU(~;z7JhrBmBJNEV~~Xrq~(;GFi`TN+X6Z;wte9dLeQoIpMd zKW2H7C6BKu5dlIk-zT@Nx{?!6TUlb6X@sq8OE^G&d4k>IQxWHHv=15RI2VvAWSE5T z2sG?YmXA8twzMKE-4}YVFBzPY<(y_(VRpjXIRBxL07j7A>j!kbcd1Nqng7&F={U(uopQC1qAlUemy0Ff1sfDm_h zs?TgfeS^R@W_1hqU88bHSq%9Y**{cZ!nl*SA3p88QC$}3qnRjU?I?kthF#8_W8Y4O z#9Zbi_eBoRiN!KRJ+cg;w+NTtSdhc}aNcAKTS zELBc>DS0QzTk2ZSbDG9pI0tq;IUUBF$YQIC`n7q*wnvuR)q6NPG%h&YF7mEzh0?BM zYS}}-uCi1VM!K`RtHxKi6~=6+%M$sg-w<@Pk1`Bg>;ux%WGCwGtjD17AQ_YB>8q%l&jcPrOIoha zmsxl7^GL^1oYCN8a7iF%F+((_XN3vm36w+#FhgtT`!hv*Lpc$?MA+8}`a3}Z z)Jp?>s)}M-0S&X`k><-G+;y0CxR*BLy`BPFELu-liY+h<%t(X+_EEp<;Cb!x*ZES= z59ItE#cu8<$hKudYXn;ggfN-Ggnp?%BAFYEq(ck}a4Wn5NE5Y4OX9^(5?T`CXCGMq zp3KB}Zw)IT%f8hC?WG*u4mka`-UrtXgTIrb(-FyA-kq$YdFX&L_u1SO@Gak;e;Fz( z)HyWy1s&^q>{_JEbmKv9rP8U+*@8XEBt@rf!XY*#R1xz<^lMO$u@{QJWn;K{d5b-{ zkId9jNBf2}{@a1ucrTzZO}admA}l`uxb!8^w1?$^y@(u|@S&Rt9i5ZtyG7IO=&zC+ z4GEY@3)^cb!0Vf^)sLVTE20PQ9?A-nDkLFJ6!5xYzxY$pBnN!iq-yt2QxtR(kXZQy zFw&rE3Tpb6=RrDK(_B(>n-oAMmlAXlj{htxwx{w>8Zr!^FTA zW-XGsZ4rKHWy&kBp~b0xK~6ebvt+Bvf|8Htt1h$)*O|GHs*?K*xvJV>=7>W@1rC!H zr)`y5;QZsBqsTj|=zTIU{)nuqf- z-AN7HYd2ZZDJfk%R|KjHkgiK|`(L6Kvz_YXCPL0J;lA?&K!Of!m+S)0=_usgj@6VA z;r0WYv&PUbh&Vi0)vNWbA4mf&j2de79WU`!OS~e#z2u 
zl`IIfGKG(OCS-&by(dbq5|G|mXm$4#GV*HdYuOyVwT(7A!mGkVOs6GQ-*@jBU3&AC z%!AI=6yu$|(Bpyoo9K5r(~P5u+EcjC5(ss-@Jnwkl1f^v7CtGm7swll4iI+DO6>RF zn%~C_B`TV3A2W4hC>6cAe_!*|_6L>RH^|@nHE)&l54;C7wVnT7c-kP>dHO*Sz^L^J zbRR;eeoqQ^fw8tE=}Qut!i#K#Q@oot@9|o)8OnTOKqwyKH&lZ0uoGcoa{SzlT_(jj z_74j=47T$E(dM7miKwk2Z`qEkD4hVcp#)9ZYnjCK5TU#N(-%mEbH|Z$4>4PHEj|Jb zON+#iJSMK#mSz@qyS87KDx}g+wi^UR?$~;&L$6lHTKhC2H;{Xw_pWqar(2)+{q@VB zPy9>KN+QO0wZ&~HuD1d*JdcKs2=g?U)fSl{Q4NUN>0EaVE0vx)%yQ1{!q7w2ziy}q z%5Emke0*3565c)qp+nbeNxy>LqlV48&5UHAJF)u6gJ?CTEKs|7$AbNSE*vzlMAiA& zj+7sEqo9rLngh5K0k1vbwUzcwR6c7a|zvC3FBUc3^~)T z^-S1gat_&!6m@#zqd;}xC@Yt)nvqp5eN9Vf|0U}qsN62Z~Sz$ z1|?Urvw}Y(up`ep(|O*S+q}2pWElKF$)d&Nuuq@A0lp4}jh1=(DL*@2 zl3T#y9bTq~%HleiGv`+7QL{LvVQ+lsq8d*$b*LBIS-47*>K~h^h z0xSlmc3HBQ6QmsCApCH|94b)c#5bHQyd)n#o5VY)a^e>`Yv~GNbtP7ec)Kdkp{*Y< zFgiZ7O#|Od`juj>=a!b2jUCVIR2l8*=o#xIL&Owq$ju^|L~{28y~aKChCmz}js@=p zTF^?^!^fl3pD~cPC1f|vWG=|zhP$7N^H`2dR$Hg~Sc%WP)@p)@q2d$EaIPF%T~e#$ zhV~^&WhN}EAVrY4+AdIYiQ1sS$>sho19ibjm&9DM^H)t3+E6O-Z;#e$(FcXY6)fKQ zXG0t-`gK8vYNN`pt~wmT{4GQ|uFV-ogRb`mgo+}lgNr)(1oE0={{R>=JW#jVfD5~? zP*k_kCd*D^O%A&;uxpLqU57@-K+T=EYcPeFbKK(=Q8HnIt(g(Ewiu!~Wq|2E<`zg~ zFmu^L=5nbVKTd!y1eerJhOcl9;VIX9QDRx4NP6v1{AUapJYMcv!fh(OVh*en^Ff zBFC0MVEqOB@>9dG3j=%_sfFov_Pd1c(G0=p#ZptJz~Bga$X4juw`QR*S0Fr9+FnMb zQjWniWXm5J78|H$R>$NU^45KE81b)IzR2YjKXx)i{e$6ES9~{(%k!=BZ82)!y_ms} zUBBPq*$&mVLvzC#gd}}W<}mAuI{$;?xFtqWw0pC;5tU5uIsV=L%oa=#zCw?a905Ah zQcRzR{vyR@N=tuD(WZqW|16zua;cG9p)&kwK>)yCp}@^nRXruLWD}3i^6RhowowQg zy?$)(vVa*bi{nc&TIoZBiY%YarjO4x_@Tas2iQ;w(V;h2bU=&B51rtCb^6}R^>!n8 zOQRV%8?t~!(x}`<^H~s5+|Y{H`*muKm_A(sS@{<0kBv*Da=RZUok6SNR#vYNKKR)6 z6y1+|4^GUq)@^K z=KL|7>Y=AkehD614YO?QtDNeCJ9d1mw(uPti;Mdb33Cx`qjeB`!w_c{IV#^|K2tNZ zN$JR|h0wI9-p^fL6&qtJ+{bLUObl?C8y_n+DqXM$rBR8w7y!9xhcEmmnJ-=q0KD@2WELPgW9hEGYZzDM>APzWbpL$JU2XtB|vS2ceUaeHEhUuXR8| zYQ+>YMaEfvQ7j*n9sh_?K}f8e`)k~S!SL(6FCYXbrsS4DX-7-N$C;9;r6do-Seln zO7N2kULcuwF`22A?nI=3ujaOEhaf)wadA<=c3v|`m5kTD17|J{#?OE}O%wRRh7s1d zWjK`!-AHZ$=*a83%<%4DWUzs*3&iqPZsgi3(d9AFd!sK^*cgcD#{CZY5xEY*o*HOrKE4j$ajM z2HFw_bHgIV;dzj^4a_9zGnS3*LZz`qSL-3YyX+*%E;|~1qs~Jga8Er36LjlVonE;7 zaZ;fjT^;1F$N3e;2k%r^V3HZI1?G|5` z+WEKPO*csSk~E!H>#>eqyZW>*@_!3K8VE8lpD1;y-JJ|?2BXDH>KqU93`x=j{D#|` zeCAU0$X7$osgtE0;^z!QNBm7za3xVt-efR7qtjxhaSBu;8(Le#%Dy6pAG#TWsSOK4 zZ9*7ir3yP#ST*5#rnu40a|*$ezMn6}oiCd}UYbRK5SQ*CH#KeY+ckVY@vP7D{1(D* z-!bnCaD+X7Ov8tLOo$?-zhl~UJ;*kkTG8Ic(5`KIt+bhH$<1HM2^3>Lard+k?X7CL zFFQs4_+U5yUxpU$O44$Nm3biVOXXF{LdAwTSgKYborOSC2>7_TkWlLm-mX#eGFTI# z+fTPl850f$K%vlN3xvhzGs9E65ZdOtb6n+=72fC(Ff7|-h-@K>Cvsctllbo>={z`N z*oy-X`+E9dBB1DU@`tngOxCgnHwscGR|2XGCmpD`maltCVM-*%+DB$f2<};< z?f0va*PipB1~dBYYB;EYaf+fvwD-@?7Wd2Imd^2m{{53pk)V3TT(@IP2bo#s1#Hbx|}j)KAr)*D8koG83l-{*1R!4wR+s}MNBV<9u{>A}uukqrv48H&j<6$nG0g`L1dY#^yI`ZoM?x=Lzy3*fSRjuTbw3>j8<#V_^%BQvT^V z!}d0HyMV1V;N?rTB8A;~LrjaOMoe{e2LvoaAU0hiO(;322gr}Kees9Wopu3$l3?qS-s~`+Z4;hl6tX%$=ne`K z=+vs$V^2g8C$O9dpXhQIm?DQGaO0~)y^H717z&)>F*zgnEx8hq(IA+LgyA-LYf2!= zF~Uz}`k;JrXD!pjK%Q|z3caV!q);MOhl8QOnL-u+1GR7oRO~Yg_-Kb}~P{+{nCx5YR6i6*7@SIMn8 z%2|5DPlR2lXBgmd0)yn#CW-=aDrL5-GMWd;NpNz*hLX0v&Ua7=jM zICK>eM(zD@$P?4q?6%`?1)yCkItG+3T>Cg2O02>Et2?sSHh?f1xO9U;sj$MB&hNq) zCd7}tFFK|A)HeJ^BM_)Pv&~L1Dk2LNmT;|aW4;7fhkBOEJ98~i2{0VBH9^9H!6`e% zlRI`GGN?+p%t|`->|)Cfs1YoZh&E4#ZZ@8vS}GIvCXY!4y{k*?N?M%F$t5BkPUe_5 z3iErUR#yFS0IVPVWwpfDiA?}3F-^K|x{O*SIW&Q?4nR+?GCNjXNCrhSP$UDc{ItT9 z;(5jQmT<3njB!ivdOeI0JDLBoz^u`z@%ljFh@^y~4L*YCVJDOXd|gi?s*-)k>yeDS zQ}>qE8$eykp*?~C{A|73HC5r zW0K2}AJ@X-*ph2#aSVS(eE3)nV-n%E-z@KJ^2Ml9xv`}&uhh&1&Au9_T`q+Vf$8Jn zg)Z;+s~$i{z&%#x%2BKEV&y3?K6zWeYHvU)N$3Gm)}aE*Hmo~-lI1+t0zYb!_CO#NC 
z@(Jv4y(Msnc@{8|iU+O1euv6R*s31CG)vsurb5U)hIL6wDzoBm0 zw>r>{quf7#+r#Lg%;}q4s_{L(Ir@==tv(H>%GMXNJSKZ>v$(1boFQr>)|puEH0ml~ z&LG@1TMeAxw#1lZyG>fKM&0v4UnHaT`Beijctr5D29?D0i@TKVbSZZNTh`Dm^d?5- zZTvP00g91$zbfi$LR+Gwl(FT&UH#7aKF#ov(MGg-Vpd7{4pM1$({6xA-gdl_8ha!H8de~u! z!Z@9xi;LAjzhNw#i2UDyTV)e%{(3k}{E8;ms$uSZxRdd$%=WT2;76Eqtv!?g8&y@m zHu(Rr_a0D9W^3E<*bt>!=m=O*L3;1v2r3FHO0S}Tv`DWZG9ailD@95`1*8c|uThX7 zQi33a9w0#I9Rh@qZ$BuWnQ_jT^UZnJdjIu*%e7py&dk|)_TG29uDk3=$x}lk2F@hc zWG#ju>3F)Ijamv(fU0*C*IypAiE)zDxs!d>Xl(_``n(bJG1yt+ljxZIQAU6ODqoBp zOeS3cEU#kG4S+yh{ly!j-2N_(G#HuM;V86IebEV_0-nkzH%TB*%L9thFfR-0;q-R# zE=_0KMaDn{8&|)lc8aF`qA;qk_cyxxy*FbW-hWKC|ccBHMU1fK@=3{A&;6= zM+=yeIbwNr zY^@Ifqf9@)bq#*s&psO32@xIrq?V49Nq7ctmNZ;*l&jizwuYZA=aXiv%p2xq%8lMXk#w@uyz^y%Q+H*{IVZ=j`16f8=z#~QRSiyEOC5rR9|q$|?hr7cgKfX(32 z*qtD<;=aDdR4J*ve_Ca8qNuf3gO=~rSn2#3L!QVfhi(THKK|AS3*y+b7Z&u6udRbM z+Nw#rtqknr-seVz=849PnS_#dN7BQ(0cS{_CY*Lw@U8nSd~@P*X}I`Dbo6z6ey5PU zgr#G5vWM~7SYnWO#WwIhlSJm{LS&XtJKd)s0aKyqYQB z=khM6@^I)>{i@olqSC6V_Sd2M?#|{TZQu>Yf>|-*)xIV@*wOeF7T{rI`rW!0q=Qi1 zw=~2{%Q9TsaUeg*h-vd3yC&7Y6!NpF9cI^eeDJxeVm%bUJ`oIxIn&Rz9XDxvj(+sQ zk){$%)KspE7U$2%X^=AqaTY`7`-0;EfD%oPao7oJ1MCmZOGq1ePvo7hub1i+9M7&~ z+`nxhZS=z;`=O8=3A89EM0MC7AJciwY?zm_q+SThmhWSr9K(GT3(tE?U7m03A>3R9 zVQR-X=p{9FP~&`+_-WB|Ziq!Z5H0$F6XaPR*J|s=OVn-epB;5{YlEW7S~M)DUJ4#@ zjd!STfrPheh}HK&^%Gq&ibQ(Rkj|PDz6iWs_GbZ3Me>N2a0JWLfEB%a=G75t8=TLVUKQhNDlRSu^;rB9fUefiFghc7BNL{gWd#&^XwPWc3gSz9ifxI}p%U(zcEN=D zVNVJ0biDHUc!@W=6ykKo(^m?bc_@A1;>qX-7DhH+mFUa!-8|gE8fMB%jUgiGqPD{} zFLda%Q-19nF;<>*n`|~imb}Ha_=VWsm^1|O5yklnS3K=(gQt(ft}f#)Jx;g*K01V` zOho3;h?PGbk$-|2y^kiI!OO7KqtcZ8p6Rbda;KpG93V_TEDu@{SfXhwXbWjZ6AT!-tAG1fa~p$BB1CM*i!^ zEspi^frj+;A(TfuD1$wBpZc3+?x*`fG2y& zoJkParq%?zqTBO}{{fK-z$d9MvArAo+1D9v29HTGfL7s(^(yfEW4acH?qq5zljm;% z#(_l*LgA2S$czN7lJuzc`|-6u2>M#Dn$WfWiOOzgs1_tyidEjhuCR4&J@t10;Cnr6_M^>~8~s#}g{aWUyUw_q zPo!?M?~A}?nkA2)$l=!?5jbWR*toM0&@azzZ#cgW%)WX!atj!^?{^#bN~my0;P;>V zyMHh{ybWBiU+u;P2V~mRa34}Ji`urT#xTb)J@eBOf$mx7@;mxOmRle-e;WH~$cVu_ z{hZKt(EPo56A-vtd+4bewDa!V&L}X?JQ6DAYLObWp*3uIBzp!L*|ZeOImFeA|4A0& ztLSCC1QDu0FBli~6=YTDJT+nj^Ix`svn}pD!oK+@DY113=l66kNNCW2XI;ML#hCNc z4m&^i#m}5|noF`y`d#%;1oNxxTc3iS=l#d~c?TxWg10kig#Q3ovD-|ni$5FhCN7VMR_ ziGtk@|J#>xH!M|SAkgT4dboOL{gN~y4aAhJz;zF;wB)Ue?>$+y+h7755Z_7p-Z|>T zw-f?cZtF!Z_S%qApNNKRR}N5W^YTqfKLcp%{$^d1ayJ9<%@{UlkA)7VW)R4vF<-BA z8lA&$`~Q}w*f1A*XE%ulSVEd~z5p0&pwtFp+jlUtTl#@4sX0#=RmVGgz>fiIS`w^g zaa0!Ig^+U_m-U%5fA;p5-Eu&|ndx%Z)EH)}NitO2x}Om11NX81m}d58hw%cFS{s)5 zg{0l!rqlsv;8D$)t;wOBf%})vP_-TlSg(mon=^t#RfB!x2Kd0R1$Sin`UlSIYAbKv z8mOAEAZ>G0so9E}gUjIDJnPU6M?z!|;=3KnG#6kxa~KH>G&O60Q3D}hmCC!QN4)ld zjr2wv$JVzg6|zLs7%|8#{i@QFsp0V9m%#l%Z9i=oc?G_8f3tDXfA6zYB56OCxYmRY z6GS2e^>>3zkKT9b-2O76?j#k5y#BevyEl0j2o>$x!VaDfqk-PUSg$#lzWoI<^Hgrh z_yFHj>tX3@zf^+B0%q%tsE`Ncd9W^N!9ubmt8yt&3v5pkM z#zW(l5MYC7!0Z8?e$ys;D)zd+0gOIEtl8r7U}s)2&9UJc4_s<%^&nUNfGa`VGOhTGB`Xf3xM(W`Muq$ikNJ zET63Bx2OETpZ9O4R8~7t;%jj;iqli7E2lTkWD9D0?bcNsT`f1X<82h7C3mpo7LxnP zM(1d(*|yat;zM!5(MH_V`_<*L(3(wb0U&E8ye6OR;UJXG+S%D{Fvpx(blYuA9yJFI zHfq*X;49+vC*OYJpD;6rM;6Tu1?lJTGTCN`?$79nvbU~_`fUE?)urh4)?aU{NYmDN z9(Y%V{?HQKH{h#LY#xiIMjJ(zHjD1jX$YUMi|q#QIkWPXqCe~i{I{ z&VB3ydyB_qGcbtbB;Wkm@S$`~1$OXV%eP;OSR3&u(I_<&*yySYR*y;uf1v~3M3Zlu zD!(Q>&L^ySuFKV5l8*}0@3)^tFW1`9!^gp*Ww>ZA7!$f@t7dd5I-!nLF3PiD|7ot1 zyr7S_s6CF>bkK&rb<3lQ1jlG_9E5p|RXzIa@K*9@kxCr;y z0mX>_Ny+fK+hJGm>xp9V9-qm+)%W~>;oFA!W9IY2pR2aJ9k&(J^DKC;eMz5BVs78? zhb<4&McJ9o?DR)(FC~1gR`pSK&8#u67{!b0Cg1zT&w=fXUt>#kxP8N73<(TK855yq zn7QA3wy5@xA)&B=^G7uG6hE{iX*V32=;U~BHqhO{5$-&;?LLVD>Lr9v2#cDm9k@~! 
zcBA=Cu5dw8c&40=1Mf%ARENn}myA3&W1NfTn3C>`Jk0$bf{n|{rnBaag@~6{I(M5l zW#tXML|1mx`b$}|+3@TC-sd)6-3-b+17)3u0?*o> zr?sdSGK|ZQ+vFxU)!LvJTXN0snwy>mT9^s=JRw|q`C;7`%}i(L1EegM80fsbgH?t{ z@zpI%pDqC%Fabjm#$XROQL(?`%Vw?PvVAi}bCm%t)si~T;;&r))WKn$GUqT6QdLCW z1DqTB*oKE3p5O8oR78k?S+O0CG9WBYJIu#iY|}0L;wO)^LQ?J${Ot5Ou4g)q3_p?el7@76%fYwFb#F!he ze(ur0JCsqP&Te|y3DOX8x76G)Ef?o*YbDq)frOxo=0SW=ON9Yr7xIfzj3&3?L4Suf)*vU-+yz`$t66N9FaQ$t;H9#!s1H0W^^)+p z-TaHqiwaN`$NIjpV-rLh90yRf_*~Z!e=1(s1mi+sQx(%^Fc~72b;H>}%sM$Oz%|)3 zplbx6Iv40|)~}2Ykc^L8m>OtjjyPoZ%s(KNetHjrBNZ$dXzMnSDJt@Flc<8Fljy&m z=-yX>{?$MI&KryBT*;nqH$^%OZi-%;zvAh-u3alzk`-mPIjGW`3U;&dr7-;UPfH#^ zJ$f1)II8?KZx_;1Jpds=-$4ja-^*nX9&_S8<=UGVWEBA14jq%?>KTvW#Q5wQG*>{k zI~;uxzMA0EP#2w30R{%~9MOzrcJoCyZyyxb$saa-z3i>mc4l^}kSt|BJ8)J&(0K~Z z6p;RU&Cp}%VZ6wVPXpP+95;p-hqrw@F@`O2fZ`Q4A{HR_R`z&fjOC?Pow^l?ZHGfZ z8L>s4Asu+U>AuI$XhTRWMYuN~Lq;f-a5>rB6kPO#u&L2TBo(Tu%$M48QOP>nuj^`BOIfh^mL<&$*5~@ADO=hn5xwtkHKpQ?YRWFm z@dwP{tCHH{~d2Yj$na?bTT;S zk_iG|U%O2iYi#8e1y#L+zTKc3S^R7y-dNIHMe<|6^@1rTpMjUpCuge5PS}-xrqQQl zA>DKeM}|6svn}Wq^K%rhZcaqXjwlC5pTz1}T$33{fLH#ut!#gTlkAQ9nu%$b!6Hy6 zh%@r>l%Z7MSS^%hZv+Lx`yptxjCb|?=cOxgEV({a;A>@04O8SN5cgkC`I5#ezvR&@hR3N` z>$ltfTv>tJXFVgE=e8v_RkOI&Z7fM{(Vri1U~kr+8eGqNwrsziunFV0n03de-^?@c zqF>{hFbAbAwz^VX$@5&|QlbTpRjOS^ImZSu`97^N&r>QspEzO)vr^VrYCDh=7t zXL9pk`&TWW1T9S!LV zS%|HA>EprA9%s!Zz%))J!p?xjzUiNiYy+?>A$Z$m&CKp!tkNC}<$XODT0ODyiabeL z-Ori%vbq*m;VLwo)7;h+gOB`-pro7J)2_S&Yvc|0XdW}&b**r!0EIdrsMpxJI!sXV zCf*v*U}6~L>%G4@5sUNCSZc`g}d&Dbb8~K z1E+IONDG)S4aiwEE({36#qA2;d-WN^yzOP?NXj<@BwzxgA5d;xzA@7g(xqwBcaiY= zJn6mr>pbj&>!o*4){=FD?(O0KP>|P&563VluhiV>o5JF{O3P~=x6)!dEFz~%&}E?@ zh4u{1(y2^IWeSjtr$ws8@FIz|(+FLYeN2)CDUz!B>XMVSplQN5w`H3uMsbR*Zg5EDD$2J*zb`ok zCcN2AbjQ8|P^$^t8jmf_Zg=3dDr3SuZ#c_<;?mis0nuzWtH}`aPgh9xFoomeg_ZgB z2K;d=p$G1Aa`g*7pUL6%@=%)tiT&AU*-z5~=-jkJI*fGa~&RTdI^GVz> zYmT~{eG6;sX1!I_l&5LIzNjiWR9}j6b?bF)q_&c~D#*Cnu@Ac@_g>Es2l!z>jWm8t zB2Z?iEPkQx7D;*Pc&S17MERKJX+6os^p0zH=bmK6>eRjEB>jrn5z0I=-X6Xno5W3f z0e^p==1N`Avz&+V3*@;9WLEiW8Q~Spm|~qme1rG>8MR0Y+yc5^Z+TT)8KEj#VynP> zDo@mdtAJgq=SIpzFm__~Fmp#(yfZb_^M*G4+L;+S+Qt+QSaGz}?}F1uqYR7wQtQW# zRaJ7#)&@<5Xc6e?>#^JB4RMMovImDY{LyKYOf4ZPlta~Y=IyD?2gYPHJA^*$f+2? 
z$roVk($q`mA5528P3;?b6a7mz`+WOWSqVT znCxA&S^6MK8G9d8S%6tG_n1ypzi$D`!qJ1HIAY?S;Bv>-7JsI$6pwvYVP(Sw2wKua zj_wE|@UuN`&`)p)Bb*HY9GJVq=uGM)kXvAV5qcFib@X zIU9-&wbp&3s_u#DQddwgc~7OFY9jC22e#mLDW)!pY+qH7qVm2^lfp&gpQcgDWiRWm9N%;FFc8~Lh^zl62t5(4UuHpe`#z9-s$L+XXQhJE)E6E%l zp$zJ%*{Z6#xO)FkHUa0j!QO$AHL~kzGpUx+Wo0P3s?)+`ZZ^A99Qly(PRUe} za_nqN^L-+6t&Ma)S}tb0hVZBT4YOtEj6aD#17*3KR-B5HJE~5+s{VRAXn%o!;V-dEjvB2WfDJJbm) zaP=S=h6yq1Zc|0g^WqrYd*HB-+jisc5#kV+cI9{3Jr1qd-b0LN4L`0#Sd^a>ia3G- zb%qs+#2(Z6v*H%$?cxna99F{cmR&)22;*hmNX|G=jS#Jl-#wFwYhM@fx1|!^Af2Qr zUYWuv{v4gDu*cSicLJ|?w{v?BDI&W)nTsXMka8UJX}Ko6ls`ieZh(YlDWUhER`aIz z68D!Y4dde37GZTeY;8i7Zr{_Vc4UODk zD4OjtBYbHhv}JwGcV)gWJRivd=<|(mHk~eDZ7BssPbtO6qs7K-*^W@WrXOan1UQ<< zjWnO)2RB+))<@>|DPIaH))%$7C@iGN;|~}1o}qh292rZAV{s^6UcAOy2RI2KNuJ11 zQ&@LiWpx-r*U?0X*?`d&J?6b=QX;7grYGW&ua8BzhUa+sE(G(!Xij9o1f6~RPDkz) zMbRksz>PkkPpF4`8U3C-Y zMHWz{pBMylSjlR;UMZ|LA_Odb&ck)~(vm0jl^LOUs?DahzJk=V9N3?`S4nXDk=m0r zU1DQ1AC!$zVy4U*n$xiTOeEQB(gaXRX9xSi$s-6oY#XdOjlvxG7lw)TY!z< zgBP2hDOnxE8ME>L@14`iW$J$2W2ixX?-bB{u4{af4vwr?V1xO&-9B!rB5AJP2yE0m znnuMNXzWFs#l0+9*p%TNPx6^BE1h5&anU@{k~b9#F&z=VXF9fp>a*ab)-ekGK(C7B z>gtRqir$$}PnYGrP`l#uL6^%Med&ez9bzdtN@-%Ee7R9wi*arU!?b;p5`X3wZ`1>nQI4Lu+TI!~bFyyCE}#+>QQEWmNB%~9emm0kd13IIQth!lB0 z-yKC>Fpo*Ge^|^$N~8@yGjR^PqMbjzuX#@!u(m9Wy_4g6%VhP{wZO)`b%LCnh|KeR z5^-Ea_byK@8FHO-<%=B4Aj#G!_Ws#vM*&k^9|{zE>J6=tl;xDmPgp(;J&Dt_+ngA8 z@rwV?89EWC@8SHRuK`FHRL5cTuVVSN+w?AI44C*XT@H6+mv%D(VSISTynghSoBBi< zdC=uE8-E~M_qB67i|!p>@<7G&5piB+sq@BTH31-OoA*N~?o~p3q6jp2cgC1>$4=xP zX9ja2DRQfmk0=6`sS4mfAI~xJpilB$!kY^I1}1}*xO^pJN*+I=&3nza522z zQ^9B8amwC_?;vVi+ZCQ#sgflgugj>(sKz({x@3wjWv#m4{{HfztHL|HC*NprI=n#* zb#PUG|vnNqk2uP(n3-^sq&bqVx|Wh%F0#?*-9wxR}3XeIowq12$eEI z(Nc!uNzd`WtnCJqb|YR>!!*=>2b*w}l7-9rNblE4%KSZvuZ=<-5GrY#}T)SBbm zx?amDq>FM9BG=|R&4=pr_w&dD^m%8;9v$H@W3OeTs>CtOUJj-wr|vn9tUylxz+TSE z1kokW1=B3y1_Z^shg%9~ho3GOgYCqvOv;}Aogagd2*!Q6Q>zKI;pjln4Jste8yOuA z8oXv9|EEhCE2*N7m}xVkQQhtp=cwTNzIMZ2zcE}I4(j1(HHF3XyRN1eh;~kpw3du! 
z)(YKG$L!uL9|7HhY@$^>R)(RBqU=6h-aTJ3obeY5?oiemh{02*6s$l6b$oPqV3ser zBvjvoX=lmgTf3V}9zBB;V&rrsrHrqf-jv*59qesb{u&C@@g-vAGA0hn8_db`vN+UM z)y~h(LR9bpDxXsGxO&gKKC=FLKf3^le8=`XJK4$m06ag@9fB-6JXFwi?eVP-8jPvA zHwo|DF)NORLmfB#YW4U7B?T%_MkXGeyX^DHW(e)9S_RUmpp%+)8jtHt{1Oy#&pzzwkFQL8ZF<&bo0LFr>kVOQ-oqBBxrPN zJHfr+3G@KpQk+zrL@a4Tk&fVq17SX6<4bSbTJ^mRqLdLQ@8%>%6o4XK%vv9&;wfUy z*74k`(uV|xRV9U`#)GE~m*XR|(z$6v_u(^g9o;%Sgtb;bIGWe1ohgdDz?ZvEkw67$ zPrJdk+{-rGe2v%t8IshGh-s+>^dDfGb=u4`8a~{PROU0FG8yOSu2sEAzYEUT88xR z$`@Zn2GSuGNVhO$#2^SjW)HH)Bv;l>bn1&Kv2cBKMT-ntp{7l%QcYLKgJN?my*i+vf6^AsBlV(e2c^uP?V5(MaEm_H}G+lP+Jp zdkfUon=%Frp;=HGx~nszw?LBuW4JUrB7e#FzQ$)>*MYwMiq(Ul`7=>;>*{AMq(R8x z26j?17IomML28-G@VPg#)I6WoyTk8^wypHL=+Q%0|A$n%{*1;!o zfGaic$juuqW*mL9WJ>eA$(mlK5%dif8cO!Y4hzahXh6+lM zoA<2tWE)s0;LrIrc1@j)q;D6Y=J<-wn}I5dYri(9T)1g_9R%n1zwS6@zl1YPZdW-N zxdPID3GU{iD>Hbs&q+$9qm8pu;i3Kx8;Rg13B5vcuL-dIt;eB`Jxo`ef4l^#r?zidY+X2F7~lhxU_r!9)wSELL=ge*Sfz)e|-3cnPgp zD(()XyUy|re(7Mil`NO)^34tD6}zM!#yjVJ`xU1S@SxIN@Y<2!4)E&iQ4w@r9pf{S zVj_6K>E}+4kN<&_%><2%cHBJ2ADF-ZzyvDraoB-+fW4dPI5nz-?F2b^BZGOxEw3O4 z`<+qzKkLu<`tt+dPe|#-H|{uUe{MzIp1yP=2q!^Nzpe;~`tJ*XXZhZx@i9l<3$LxJ zwzoS*F9*c$`3a);W$tOVpKk>eim`4Orn?ox)r@8LU5%>h!+7;f zR?Zy%Fe&-r<1SP5&~rvViQF3PH#;VE6avaMKQj@qK*tZHL>wx17Y2!R>EC0ND;R$r zZvpN@jV<6&r};pT<_c0{l?dS}Gr(iss#vcwoJAC`Y^D$U`7f&U9yBM; zX!-|AM3Kp0(EiqK6OSrD--S^ZyN~18i71#*ls91wX z!#!mHaS@aEY|2=_pw#&7C!F_4?i#-4R~5__w5=@o{*8XFJ$`}N5<)CEX?jk?DJRrH zV;}uMY6Nb2?`~k_T+TY#oP91&w4}`}6po`c|K>LotXZTli-WYc-rMVE_1g4!d_mu_ zR*2nAWb{e=lv?b5iAiVFeq%S;K&r^3JL{10#WTQT8@|v0w)I!d6K7qfdgv3Qy_@gx z>8zXWZDn-9pTl{jrJPxSvVw=bqKd=!H&$sx1Xcxek5v6A z>C?urjmWV{uj4NmUiOUb*BkJ2Hr4%nn>KeJ1hx*ov_t~7_N~53b4bEXUiD`r@^HL> z$nKwsBe1)Fku*EV%>Np^LD%MXAmnMfs2&s4BXz{)0I19m-P%FxC57IL;Oa;2{pAEI z>{J8U&7~!Yo&Kki4=j-`-A2G>Z1)YZfXYztGY{5(<_ru5dn4#-6fd{-W*1+h;^vV8 zzR9t*54%#zyT!dgU(L_$kzUJpYw;TOy9}KCu21XcTkM6Ej_8Uczn0){3CHN4EkO`aXPFx81k43UZkCJ=<)+5=3f=O$XHu zhgu9_ajEdliDb?MO;8$sPi47)uUvD=ka$Xromche8{(UO!Uplig=pY&@?A3_w*;Dt zfFoXaOP4PSIKRSuO}$<&TfoNYgw)n0kjoY2cC(B5&B9JDyK<|Hk@j;vVfx8%ps)9r zy&2+9ySJ)7{*^cho*@HTyg&1+7WdGMUBDRK0o6mrm-vXCjiGo6=n==CRaN>uJOQlF z*^_({XI%(Nnn#a8mM)Ig?wiIM@Jfi^v2;i~via1}2i|Kz(|Gt^Dn#iQzkDbrVX>)C zO>!emGBX1^z!XR8{0BQg%{t(}QHxqnaI>r@xCONSsOFD1I$QwRsSoafy%Lma*{&C63j{}zu~vRHuD>T?r%pmT`|@rMl5 z!~Gk2_OkVxo`HJ(J}|xgYtb_Z%N~t-HXeuhQ(NWBehI2EW5vn?5-|U@{9yCtU9x$H zK9_j|{P-8q?M~22@%Nek4Rc|38)9`eA8Tvu?k-`Yt?!EYF#-qOW7O^Z-7k=g#~pzp z1ZNbeHv_c{e;p-Ix85&)n|6Zw4Y>>JUd)6KgtdPt%8B2exu^rG*)}%wxBuX`>!1iQ zrjD6VxIFd@0yYo-a@QY0d2$sy7{p*G&-xeV_^<(;yfF*luXVue`gfe4`cW*0A|w{#;fmgJO3Kg=kCu_xA{!3Hz z*TVUu`GJOMgxLSUfl%!wM6PXx%H&?Vg6p(5b_UXP;D4cLZq-qRv?lfIr-8ci|C%rR z_h^?oKlbkKuBgxA>4mK$&B?j0_1~GAE@9(h2~cnM2M>Pzb$=t-%8_|2}E{ki?+UKp+Kd3mKWGmE{*pLZ|NnuAt;iJA zmjWFA2mSs@)b@X8D*v;F*p9s$62z(bJGNk`A~xFLKkp;G@Ex4=<*tL1|LGtShz{t4 zxf)`=l6&l*Iqf|VN%DQ$P+tI`>?SgAQ5y#wR5Jrcu-~9MoU5}{@_UfsjQSc_IzV*( zm@YQrD`5fX{O%pqe|^C}A_B7n2=Fv~1w3Hp_)yRH`Hlbihi;`g3Ne)u5EhFASWKrY zaswtqb_n7`J-Mjw`hCg_F@F+rysI#8<-*P$sCZ$GoIH~eKM>@c2 zmhO`t{Ue?QO3z^B*R|$vc$WWCl=jC$=>Ke5f&I=6a+)FKTb@bKc!knlGN&w`CWlz% z-yiqOS=vM6r1DpHpVJNAJyhi$tYtF~?1`vku0xr{W-8v+wUyOTIg9;gY)Q*Ft&eqZeD z`eIAa2-NHocVy`*SKOZ3gsg097c0y3W{8n;81_ElMm39-xSv@zFMm_Rt35W~KC+Iv zqt{@kvlPAkxKIoG0^RO~QtQz0_l#6RaPG^S#dDmyNya%|X)SPdr4t}gd*{PIz9sDm zg_z4xIU*S_-Bk1|(+95zr^iS~sXxyB+YTUEgKEHH*TG`;9zsbXR+WwD$;+XaP<)p# z!L3W|WCk#;4kJ_iF4xXP<`@Sj=1x_xk%{#7k{we{`sS{lA>r2`%;* zTFkDVxntVs+{PWh@ICoFo-K1mw2g`)$#wSY)H$=-`TR+;Lq}oa-yUCdd(ot1hQ4A8 z(SZ6hJavlC$;N(5>?Qs$9j(D~9W$TLDP>Oxos~7u_0M@cqDf(z#pBzSbe!yd(%*O)VdlF82>Z+vN+L6csz5s18iF2wW;ZM*(M$u 
zr;_u0mv~S_TpnB2E}5QU(CsqN{2YAynez^|fsGY8$9|{c_qQ~_RD;; zHyuIR7c3_5-;{FV6mIJAOp?0xPUi^p!kEB@ogl{tz^T1%kqniA=6)I3ya!QTt@Md^ ztiWQdfeRDIHx`5Q-l0}@WUg<`KMx!08A}f3GU#^RI6+Hrf`Np~zcXfH;G7jH&swxY zvF6*Ul_hSu4dc5Xn{V>va~RZ4^}kcf5B5HuVR*e^85xAn9E{vY^@VKyuFx;n71{@o z0>ATqYUJPSqI+7)0xxh$&zG^vQ=4|_ZDjtYi#zhZ846&(Zvo%M6)5Ev3HdHau+i0K z)ZPf=F+a%skzWJzH(GuW$qIr$)oV_M;sLDj7mn5Gv$qzcU$OWieCEsC^9A@`s!iIv z2b&*i1y_Du%DWsZ&*grQMN;)o}X8gc2avMw?5LYUnLe? zWo|LyW6EieVgq`p! zBlg|4ZMlx8{@D?NO2hvbmbL!>BrKx^MRf1;_72&xceELU$miz3TPj*Vz7q6md>avc zvo) z9|P_qDXhC6w@-E!VEFMG;gBT{9mDFTZCG-}9VDuep`PEzlqyFjoBg6inH>MjEvT5C zffh6Wx|oV}U;C2{=nI_xuZqG3ICXPsLvTh8zZ1=!#l+&9_&^;Z_j2yh17A0dL*f3s z>Fdx@?*K==P_mcmo%b^D+K4Oi|0-VjXP&_W0PNpc-09yWoZ9~a#P#0Zxw@QK-K>c4 z$#!qh5H31uJeyDF)!Fh?*EHs=y!(V^wY^+&hke|S&hy=`JAc*8w7)Gr`r$%;CYx(6 z9%>Q#IpFcza^x0eOL(&Pl=mz5a(5#i^G}J5E3?syA{$Y#(!Ml*k1rBBR3W-OA=5We-;v$xNT)5OXo91>1y0 zEmV3Ixb!VsB@xRB`{q`9>OxIdN8@uny=;lQTQl_IK&yI(pD-Z{vs$+2KHMVWOM#Jx zX7TW9&UU39l^<}BU(I*eS7P@Up~r?D_zS8aBJ~{+F&{kNRNK4-*ZoQJ%zT$~!@IwcMNd&vPqB;1&;5h9gFFn@$w0+4W?``ZUdq zoWi1DCb*cQ848);-rQ5>T@v{KL7=P-YBBoAB7BEuT!_3p!=b3*-*TFtMjAJD=Kol1 zcemn20)eB|@G&=%@NXs44unSpbQq3iKT*}iuE~7R4^ZB@O7QaSm(heEr zKcgwLWr!A;yr_BN(2<-I4b0DkB{x6(tV8P8IY&x^FIc3J(#94X7baHPnwDQ=`P$7( zx7;N!xV6nJ;OE>nQ1oAa_p9xyS|fEGg$@%BZz2e&iB9(<-TbLD;^H~SD4Q~JOY!}5 z%}J3)S2~ii!c`dkhb~&f^%W(buFcFzV@vqGyrH(?aXYW2&nZo|;^HIY;#wD~c}pCh zPipYFJm(1t{-KyvPyn)v7a;O!AV1TJw!VP1ui6^#mfm?`tOd^|FSHmLb`Rb%H$N5( zPs+dY;%7ua#0wQg=8pNA*~~0EE#;Cq;>c5bc~%jh(V?+sQFY$D1XFc=z93uYB@rby zly7%Ot_9(BPj5JUso13}8N7q}xLnMd&!uVV6!;7IVC)B;fx8p}GV%}B`7IKS>f8YcFjd6qYg;D|EIW>NA=jLip8>Q0?qf|-+sNM8OtU#+aD07Eyvkj~Ho z(*qbpe=WKDP8{K_)IAei;`Ktw3Ewi(+Br3DkMv#~8F@x5PcKj$5!jq~j0{=IZiGhy z@s&_<@3_n0jmY`@%pa?cxMMdPCKmwNZ$>j@Isgvi5~%G4QdCcgGX0MbN_N-QKn5VX z6hneGL3z?=wzRmEGFscDZxw4WVcu zh)XVT(CEycI-kxcFSxHe`l$qEzZAj}zPGkErXYDlH$SO8y2xexO|{EwD+_}nr@^D= zFzGWdkgIZv#8r*%@~i2L8m8om>ShF-pnlV;dcUxCx=s?*h1R`rb{V(0ucQgsjmr+< zvR-hxkpR3GCXFZ1r@D$Q8MqPamyz+x(#qY-cv7onMX2M*Sp0#w(sArmX%Jlp|>%L3;9zR-sW13gMTh)o^QLpmYwy^vTwn#40^GqVs zTVv(?ktkpC1FZuDMl^M^HE0uJYA2q4}nI>g7F+1kh=o3L%Uemg~7UCl)`*Hax zMwni-#~XKQ1jD4%y3g$j|8RuW?r!Yz^_4hOxwIl^`>X2fcB{NJ)@-Fzek=L2I&#VO zQ_e*-GpC-DaiM$UPZrb)!42hJ@97^eC|#P3n6LbFKGt+?wZ40L5GjX^9K+O4=-={S z95i+Y&nSP<)-=k79ox6uLs_sbaABC8Nr`pwDIQrNCgHm0Qs#RPYAv4golmwc-qdcQ zZa!!#=PIY2=SIvh_P)N-#Saq<@p>^9Q?OcSv9yvhG6#qEiSzneSXCPMsu5TF-sjp9 z)9KU(5JvFRR;+c3-o#{6+(LEFTC$4uTBiMEfWzD)gfQp=RA*)ADxdBja>2lvk|Ryx zCtI>!54`J_$N658BV;tEb!!+P#5X4{7m%w-yqYDZaQpX?ysbMFO+9*|M`qno@b~4z zuPDi=m8x*}3%ou9_ZmWynA|FINY|{1QyCOYq;h*_*i&v!GFcI!86)q9TY2kLbzf7& zMZ37KSipNz?+W`5@qpi;;09vQ`F7*QevPZ@#d>Kc(a8%6Qj4bzC(^N_(#bf-B=|zd zn|<~RM&y-irl}`dj;@ETfSTpE?3v7a0m+0g!rr>B+PWJOWnIO>1h^xu65qPiuI%38b;aoxK8kd7Gk+%9&5E` z(xp35m-9-;KM1t>!{EfQhVj?(UO_{@IHEYr5!fBahRpDMwKZ{O6Wg;&4qT-u$$xZg z5#27vTwJi{dD%pmX$tqSk)3R^_@2oiSYeusg*924%!DjHrby}v9+5_&U?|ZZ{~X)Adw*o(mWa4@T&JEutXUf6+9qJ+3s#`4kquUm<+Jdo1}sife=Z<+vRZ)lWX`b3+CMuISn$0%P6D7`9*R+ zmiUq%`Ak^@)>GtVlSc9#RB>~ynKT`(8-14b`G@FB=E~LsDXmiPI9w|-zsAS!?$^gi zPhlvWO2W(G7H=dnr9;!hV4T`hyo*Mnz7OPrLo+9{vanN0<%kCp>sh3A4|W51Fn^;* zqJNIT{cap=Ji=^%JSm1Kmd7;d8>jg-+8|7n=jIJ_c8hG#3lnnpL{`$Yqfu#VyOiYb<|$+5&Ml zTybXpw=o+o+ImTZE0%Ytr5;&`$w{>gUzKiCI5jafxu#u^$*4f-(&SsZ@5EOq_d3@% z0gNG8tuGt7hf-)R30NhS%s&cl>cn~ZPECFC)amNssM7=3QNVjt6NqpV6SU(o>6jD5Nlz z!7g@%G+;`J@SREb_NxA(e2QXZHPp!K+Q6n!BfdE3#_iY0ol7prLS(TS)Dvnp)nVYX z=-m9dnwgbH{?P@LdFh*$miYzgpq1Zcjr&^Z@+oXG_s2|OWlBr;h%f@~WLS?(ddzo9A&&M7Q2QaBAV##42_X} ztdg5nmp$qJ>_mDmGT%ilLwU|ow+*6StaDYm@AQ_$1v3lRl6tQPS8NB$N8MZ%(b#Ii zR@{{rUPf^Ev1rz!dWys-x867uOhYK2tw+bDM3C-O| 
zqot9_WPCb#0h3L*Wy-jGH4?ELJ~dwvwX8i$nXd}hb;lU;s`Jf%=3jf}3&-*j;s|J> zX|$%|;0I55ODOlwhQ40Cj;*S^Uh_3Bx>iFNy;&R^YPBdlxdpz~yOsy*dFI3?9+A{= zO`j=1tt=_`f?u7yZ%_2{V!O0uIh-hu>xkpX9pPSRT(DA}dGGRNMf$h2Bo*uUv&4ha zCf!%w%BS19(zNag!#z>*!b^tY$M&~Y_2AH|Za;+IjOGCBZ1aP%hL-Ze%mxf6r7{Vf z)*aGzhzD1~$8?p~&)GD~bRRCa{E5YA-Z{x{?2s`|>QDEV-L(yD%GAeGW95CgZ~MXH z^=5iZ0YiwhltDyck^6c0VT?hPzh{fSEUEqoXEjz?6sb)1ox6ZVPdXSfVFtBY5xvi9 z38AU50n7~o$8L+7oEvL%kxIK_wiGOoY<5Nrah#=??5Ok|27%mTExB@!c*m`(u#vj_ z;-opx89f?N&KrDSG+x<3`_A%%n#cxu!o17E)2UGq6O1CGOa;#vradH~_t8!?)U_|$ zPmR?HJ&0VrxtQg%2BP$2%3y*mV=4L(B5OJN!)3|^!Wy?;11c*yNe*x2?qh|Q6sX_>nXFbc34@&VQ-?in&!wy2BRdAUIPLR8br<*-!jv5j1)EBw!4+dC&V>pisa(Gja=%w%bay32gv!bmf3 zNn$~q?x+M|!e+tq)!JV6-l+Xo?QG zd`k|nKo&$5dqrlit~b^6E=D{OWvashYYR1w%}Q&40?r`WGit5CkAX}u;+ziuV zevFP2yYTjvYc&Y%6-4=J4R8Vr(_IOEjo^vp{9JEhdYJ*TczNXsz%_bl=Ww5SY!YM3G*ZBa;H$+gu+{0ImK8o7Yc1MD23SP{Ty>Mkh?R(u%H;1Y^2W=+J{+L$(7sblN6ZyuR%~x#SthJfijA1Wv=IDJbnSc_y zHwLsK^nW0}gC40UzFN8$UaD;0;W4kWb)7FsO+k)@mJQgmy>u_bpD3Fs6|)1Ase+Ja zP$(j%jMKm2c@)#D2A`;cxw_|`fuHpP88B6jp6Dip5(`0pp9&@PkW|oB%EB0KtbSIn zTl%9{{An@yFOfDL&h^h)Mrx5HYxu{~iPe#r)g7~Xqvo|MgqrOPr^Ajj7h4b}=W)2@ zOGZ5^$SdLRail&>b8VddHH#t;D{B&BT@CCQzl6y%8&K?-TrTCETwtEdMvW9ZX0EEl z1o4(1fsf?ya#&}a{NV)sjWPl@5=0|HyoL$y#<>HkzCg=X7nQm&C>yBFDU9!!NjkjBB(OlszAF zDlhoNi=6=uo#o>2u@dFUI3WstLYPunUpC<@uz;Is(KUJ2AfG}>nH*b4Lm^LAW?!;+ zz@a*ZnvijelES=>v1$*!(S~#BmKmjQ9@4*{Oc`NfaK8KI$FtPgI7_~7g-#iOzA|8t z0j{hTGPmKKsf$;!W3Ja$kCM_Q(;>Z|Gj$JaWRIjIOQdO`DTY>Vm?A~* z=PoPb{7O+_2B>N0sj^LZPglmD=5+@<3?Ty6Fs3UXesg>Q#{BGA#WnDE&Y^rU8a3G^ zFBVakwyAFS49wFeN}*;ZI-6d1|DWdGJFLlUYah0tpa?SKC`xxMs0gSiy$N=jf&$Va zBE7dDT|`Ak0UOzhBk z#>`yLv!AusUh7`>Dm!cC1^9cwWv&JkjsY1Bbk!T|TQ{f*iYd2Xi~ zQJyz*#oif;5%;kNb!PNTA}1J$A5c_Ual5ESmL9z^CpmcFQPP7JbB2(rQ$2} z74%;JK5je~+bBi70tr6V>L0Em9L}YMWLBv(OJSjt{A{F|h-bA=uqhO_n;%xuIH%$_ zItm+P!vmR$N!KcJbFCa2@o;o{#yzm&4zoK@L~&FTICt3G=*%Z{(DkqHTrD7(mbLEX zZnI&|G6wd^Azqo4oZr4vc8T$LzQ{}Q1BbW&T7u&pGNq>TVs_8rwBR)5#Ujf|uF2)I zQ0gvU>CklGf6gr3f~fmeY&yQ_vH#whX5cE0Vx|EeO|SyMG}mH(cPF;<@)tQXR!u1L zIW2Rdl9DAQqen;yJmqUDB(EB$!8=KrqcV>w&`^Z$vLFJR1fkgBTsLGS1$wg}No94H zsaxr2ZKH0AB+AaFBDf}JvccSqLPqbb3C(KgpJqoyN9E+O!V_f%_^0i;Qs{qX*r79$ z$z)dL`wz^`_Q3~T36TIa(1^KcRm`yX?p2KJ9_ag01H>Pw;6+Y2!(^8+f{IgVE+)I* z(nF5VM5oPem?S;c&QsbXTvr<-)ncM7_i-xeQIUEQi3o>1bt|3vXWwBD6(D`YL-B6> zj4N^y<<5AuC0a)u@hvMXH(%WYSW|V@qUje#`ESF^29NIAZ*I=vhPeiL>+2D$W3XAj zxAHom#O}^~girxe_=Q)&UE!hBXuC8Co1$DtwVtaj2&d5CZV9Zvt`*7GiXw{8h>|MO zBkYSLgDkh)V!aY)mOE@<-yvOzLb1_J(ZW5Uw#w9}UZ3+r>f0#6Cd!nNmcFK|jQRq! 
zpIuO;GagZgpDFekGe;+ZAbQ;Xy*qOZ25yvx2Qs{_k7vWs2?nZ3-HM-w4rD0+gWdBSo z{fRdXqcvu9%O^@`GDEWKw7jp#aVuoku{jz`*696g%PJ~_HHe@52uUWo5@O`Tnp|SF zOO+l7*L{%`?SY9NW3=AYZJ-v4jZ@))r0>~k@ms_$=0 z;CeM8IKF;YT(O!$XG+M@EC5%0ugWUOwje@M@MDZfCKiXkbZBh8T_!6#<2|DEqx4$_COo-p6b5^4 z<7vV758$l&%I6qnUTg# zuas^vx+i3P?Gx&ascC=?z+|zPJzh}ut-LM}?Lf=U^>v{@1Uecp{vUilp!pNgo)UnX zS1rMZ*av(|Yw#~B>F+@S5cQgd1@mToweq8V?4Zm(=4LUqig=JJj#hphjvl2B#LT{)peRP%X52;+M6Yp{`dmo{wX!da z*h#Pw%Y5JAcIF>e59DP^dZwN;NPS-9fS#+^G@I{248YUB5i>FqhX!Synv8LZTqvzP zk6GRi5b+O4Kq}V25>bxATh<@ATGf` zB_%SN5lSvfm`v%Rf2-^~YU5JXKr4ol|Hu?dcHMYuw4!8)SZ-Z+TlI{eYYNK->5JNape_+uTM zy#l5dAk}!-a52@mFXWR{Ze=r**Jt)yX+UNh0;EC^j*ub^QafWC)=k+hlj&)Z`39on zSpPgY&1}I7xi-PCiY(_qEVvT2teK;fyZZY^YU^aqDg_mHylvHV$YD51KA2)+Lmoj# zx{Ep-%NgiJdcsKrkRQkRUz0!YEHwz4TuZz=2TLd{t|(1d`-Tl^Xj217&tNLAxUlkT zcE~uY&wl_e3tO9_ft(m^-1HoMvJ*iDY3LX>Q{+e`Wg`0MWL0n-OAw!u z!%TRiZmNHK!|^yd1C6u^rDoVN_IAj#yWLD$g+_Q!PLW45TP%au5|A>)YP!^ zya!*3;Dp;(*(|P`LqL8XCA2qx)Ln)9L{co9 z8pJb_{a(#(8m^1g|4ajbvwc`>uYeG@+63STs%RQdixF?t%C{8`w~Hp^3{`T&WyhnW z70+{&ZTCw2WZ0t?8Fr~+S8Q>w@TE3QHqCwtNe*FbQ;~Pew|ePg@qk>CdlAlNZP~>i zpr)dzQsUB{VUD0qTf6smoV=e#HLJ_HV1MXadvt^Nj zqcXICw2&xFU=qV7G}ATfl2C;y#7J89^Hr~@<%BvHPwP6c=)a3OVF|dsUUCV?6Py-` zvJQb5Dd28?1C-ejG($zup^m!UQ-^FAZTAAGAU0CBizlZC0E;?Kh#!Bf5qfPo1;-;` znhfUn)J>F?OD&Tq`-}VjTukuX8(ydalWYQ$1B9-=&LJ3KN``}}-~%zyvaSR~Wvg=) zUL`10#KbA7@q&=b8HiEULOYrKXQRU$*C&CZES_6p&(IB*I0YbZy?~2dE)Tb2cfRcb zwNHzkZ#YfB_L^Fv<1Bk*sF!LkV)}}~63B)P={9iykhNi(i&|?o72tC53$1ay$_^TQCtuT(U_sSoDTD8cXZ!N0$dtQHn zRoqg$AeLyxFg6Ww9QCwuYj9d(_-J;N`D^{)k>}Vn#(kU93uUA{&YW*E==~H|{7Sr= zRfms+`P07;*J3R2D^oU~7|@3CUSGgzcZJ z$bO^rj^o9G*$N9m$9jKd4z$a~Zxu#>bU$lmVKgQS*hOVbdJ#9Jq10;y-Qq$W$g>SF zFJZA;jbM^XwMhV$+0;bgCHwl0=v1TFc&x155cToc(i=OAQAGYyg6CpZjwZ6zY^iuA6DKaRC_UWDw&Q{p|BJHTbQ za!HJ)`1y`oVGFk1iToGy&I`J?Zi}!*Sy~kk8mtm6Mg26U$B6R+^?$ErjU`ch%`$22 zi#DhK-i{TH`3mj>Y-TvSuoJ~TeEr32*YVIMSf%GwW^OA)$b^*Le3a}?l7*F+2)I9dhX5rgbN(3RGg-Jt_CTD+9+WS;H(o$G+vPB(Qw+iK8I3a;Hv8-G?bmRMfcd@EKf zIt>w|lg2vE$l9G|lSta^W#Oy)S^>6eO18aNO&udn3d`FKvM695Lm9d4K*Vk9-fE5$>t(4fA z`Wx5&&J!SPud-N>e{8WJ-}D0CEtzdW2g$P3fjU=`dx3kt%>=8tnPPUXZ#dXBI+~4A z*sZ6rc+lbXOpC*BFBjO7$&JWcGPCCwUiC-3g7(nW)S70NAq3Hy&iXVZU5td_f?QhO zw)S~d$=9^!+Anl8wxw~@GyF;cn*NFhjs_3vJ&oS10`2n)eUjL%AVEmFX@D}3E!i)i z7)BBw}%zN_&%LJ)u3D)qP7suzL9ZFT6s^FzT;nS?wWVW3(fw)Yg6@(*IF|s z$O@ucj=IuL!vZQyar9|A6x)~0wyNx2l1rV+zp!W=LMz+~&_?T{`3`#;n6CRzSX0Q( z&*{L`&9pPf!k<_nmG@zR7B#UX;9J?vq>qa^=;U902ixJbe7`=QnTJ|Q6hf}Qb%Yp& zQezpB#1dJIsY@tTHH|7?Q=(MtBfTh=S8t+!^|)ngk4S#UZU9VTt4DTHbdcF!^Izc_ z2)^K34f_k)uw+nKiO4woIexpjJ60sgxkwE$zX9`T(iB+rkORxJ|G#{@2hUPd377T1 zx$mvHRa0H8^l|2zN6e{{RUtNXvT*>7?1Qfz1we~PGz zpDzg+Yn)^8s6E#_DM~R&Z=`uS*9c66;x+d0j#UfX^go6YKJkb8ui6M=QykatXo=1v zkEM0>WhgCt^Erd_9HqoJ-c`%wvC}H-%635ajTsvQ>Q5Ae);=Ssm-iXju+6HoClquO z8Tx87w|wfqo=yW1csj_hd@2ON5UI%rl_{C_GRjOL7rma-=!9wO0hjFAv#`EO&7ng( zKOaX$Iq7O3jyHa#Os5C;E$v8R@(WIU8y0@4*B!LYSalZLJg-B`1tbR#I^uSyC-F~D zXh+kaqCFcPwy(#WZ zo_d(}sp<6cm!-w&T|kZI?Ufm@SH!3E6)=j(#Wk|-_%5by5cP%knTe-Qq6|O}LY(ny zi$_rC`)6q653j@o*xddZkxA9Q!YA1gQ|;MdO;+~ywzhC9*2wII;r8^-GBq5!M!d>8 z*!S*A!sCuvI@jGni|2ZDQ!2<{a`Qo=0xXEd+-j$Z9mnaY%RRhDK5se15#ek0p5NL5 za`)hlL`edU1$sez@JjZtK5R_!2qh5-&7d-d5UBx5^_w4^7E#ZYb$_}hwQOn~A-R(- zo-3=Ez|l_uERg$)pNl%T7=R$;Zt)96iTGF`BCS#F?iCIBR5LwnpK65QtFxAz)nlu| zp0ZPw0mK2K|6hWZ9#Avi4(kbemavj$V)(24*XDuOl!qS}T`j=0uT7ny+;AthHWoyi zEwe*x#Xl&y)SI8TI2YyoCa{!$*h`tUM#Z|7fWw$bMR=xwRt~`uJTU7=9_ONd(5W$1QCqJR?@LLy(Nnnv{>uX zAF8M!E1>Kk;nn2n+4&(Vy6=T+U{EU&BX-w&X}XaPd%BUIGbi5*EX+PGvFp?K3Pa~u z_@d;VnTxZD$Fc*ldQ#Lp!a4#h!Pr2rLD)+4$=o*6jggHi?^dz`;r}wzi$x!q@%HopNu2-Kw=E)IZ&i_@VxLLij7C^Fnp^Bt?>< 
zus*sjP^^NafbujGU12J`g1Gi-vd#PcdGqetvqLNH&v=X4!+Vl1wC!(e4DQQ;C{q^J zAmzx}ODC`${tle_7Es!ZfSclGcDHryiXKwg3oT|`n}sN+JP7TZpn_sZLuDss`97TM zX;B_g>Tj*&ut53Kyne$d$E!PR-!vVRtyY8B{COgC^FIq)PPw$P{ z`3u<55#q6fO`k!=JVJj4Q<|pJ>4X|k|OIMIh1QaPK@J*Wg$^^K| z@cm;QrSyB(o)(=WZ1-a}E4>7&^0|J*qGU3$QI~h6Bg|d#?n5+l54m#!rJGVC5`0ma z+P{U4Z*Mm#hTG4vzE>Uwx!6j7tUqW{Tz!wzE{542SPJWO=gT{;a}BNoTk{lb4b#oG zl}aKA`7!A;BSEA^8eZ#6in^%Mw zvraW?7eC}lqtbBUMLGlT9U8hH6lc&0x!<3I6xp6P;1*SvDYLL9vD;fYylt4R%#si+ z7KKm_Tm#@bnBR)X1CcN7n2G!!l}Low?t#ZiE7I%rf*BxhP1Ap47SpK6?0@E|U^4SXSta zncbn8G-~bSbcx-{+yXLx&O(pD#68Cqrox%p%~NJ7Y_QS>JNc8U|FlDE^NG7&WzTHl zPcK67JN_F{oQPKBAm~tqoRxJ=)(1)>ZRM4`bu#{C^f}3t>lU7&MMe^ZEAkufzO!pI z9fP}@fY>T^SO{xBfgW_4ct)Md`JD}GeUq(m3n6${p27R;96HH~kLP~@ zS)7@|?ek0-Rt)sKhi!5Y`|QofFkRsE|C?tWG?*U^p->FsC}V}~OTF!#(7oJxHhj0l zhCBwt)O+To!nQ%|NdU=0Q-&*uW?8hoX#2x#_br{O6N{B&s6TnsnyTy91|!nc`DH&F z4v73V?H>M<6;fJ^-X5q51U&zcz)cTw6}v6?Dl*w-1-QF{5th)ra(D@TDMJ1go=1*8 z8FN2%Xre*UKIt@=_VSWo`n$Vgs=EZF8W(4iJ*RF;bvNEO(V*;}bb4u$!vK4dLxU}6 z{o=1e*w}kZm^H6(G1uTFG1;)`8X|$L|1Hvjo{PAj#FcUswi?Lsng;$_ z-w@rzdjui=wsr-OwxU#joMIEQMtE{B{g~e5rqnv=O^KmkPG$J;I?tlte28KP$g%Gg z2*^__o5H9(lDj2SbI2j&;@FE%HqMOqu3)5{bP+H_`!au#i`6a;QGQ_5dLZ9ss0h>(qFJqCMs7N=1}VP|DEoV zdypEUKfrT@z-IEvey*n8Ed5=wQp5!;X$y~9N3uo5uv#zJ0i{ie;+HBN#yYwX9v4pk zCPfGi?_0J_A8dJ{pRt(UP`n~{ON?aQG|L0b8+#%k(-t@i>mZ~+45#JzXN}+NY|dl} z!D@)is0l4q0wD;=)x=~95b{=CmB(Np;4cKz21q%Nay7=Mjff>kB+`UNcg{V-ZYXib z)>cdXU^g<_+1XVnc=Z1f0Y`_@wLzkNlm7sivvH<}g*ON|&2Cwmd?cJZ>T1qe_Hq5^ z0NVp4^KR*n?bq*|w{HBZdSmf~Bobb3TxO>{95(jn4TAD@A9oh0@)FX9`GQNq(zANzcrzOdW7UgRc1fF zI40brcaBx#SB`uc#J$whqjcTf?z2=6Xg;3Q=n35=5HON%(_EtwgnM{t69=Xf5n9OE z&QD3^``47@NV!`Z!=hNJi&@~W8d6)AZA93B^pEK$25?ZXe~{c=q{1FZgm1{PC_4fR zsNFz>2}0uKT~!0{2oef(m(5M*QsC2TUDUycJGaU1i5-2T`?HFYF zlheUq?u=0gKKvjUI3dmaQR6ptC)vB*ktho5`5b1!rL|6Lb3dh_Y}qkf4X`Zl2DW*r zhgNS&)q36+e16d^RHF5kvvXHwcvPo8wh(gyM-2dE79C2*&rI>QuqR)5j)0Yh66;B& zxpFEF2bZR#5!vJ)#ttxfuahl$@DnrwxCTNEf&DDN|uEqL`B?zne-q5gYuGf9C2%SlpZ<|NA*|3sh2byTO0LJ5{o z`|+a>XX#%LTFu<6^Nz1g<>eRgu3<}aX&6XXEdbn=4GRJyswtn9EiQSU<)SmcYfIZR7peHD}))hHWMu42#tUh~!-51vk zdMyMuZRH6L?}s2+xI!><$1Os0NLc2j)fS$ufc|kmr?$-DT=8GZ%iS_u1pXi-+Y1)m zu`N5`_&*1gm!0#~Yizbm+uGYTNh-_T0-fk>EKs+sD4Ks}`%x6ldGiU0qjRg>1BRzS z1FR^q4PynkQ8AdlqMMOf5h)L)6DS8R^jW_(4itYT!1P*Lu+8i-LaXsiM^H4`jIxn` zRdibH>-F3ZqtmA(MJmDgnyL>7`t4|1(FotjYZuVp7d#P*4YXtrb1VPWB^7`T&#A2R zcTNnkHBW>k=+uY>>Jt3j4z7>Vp=LEIHO;1>LtY(y-BIF=8EtfTH18@7wr7*n#G#kb zq}gQoSE}ZthOq$b!{g));G&{&xLiamKcrx0OmSJ*JQvL&m+E z`XVWY31rQv&RLFW6i>(m&F-Q=#>+%FZa_3`T=Lk~i_`@uTMH`YJrH<&+ruFaZ2I6* zZx4c1b+p9ZfFcrCNxnIm<5!thg0r&)Q^DBd8(6cwHn}NTTh1_tA~CP}l#jm)(c%Bd z{KkrCkwdGec>TyhyuMtZT*Z7tfZy-&vI;g4+ABc+4o;4%->fn@ddEc-3=PaKIrPRn zu>WRxTa;=aNx^-_6C;n_lo+1f2C^CYzDikJMrS8$+%lllcF1*4?{WfyyG2LVfJV=f z0!fi1U>ol@E7`-0$yUwuFmP>Fi#X^ypwJPsGf6G2S(k_gu%36!&8mrPvR4iDTx7Iz zuvVuf6aH+w{3_TIE3rho@*k{w4eL+Ff;?oT1KU2<-LdK z^(_Ya7%Sh)S#$g^PF3M5ZJlX;=5#)h_6@mK*IAcwJ&e$hiFSPInpCxoQp*u{*t#Xz zd){2EC0&!+S73SGAFzk9jSn{Xf0=vS!!ce(;t+&g$qP4^s;(5#XOQC54||+(4Jo>9 z4s6lz>W1tdMvf1@6E1sj8mTrTM@7zkSo=xBf%)7-FjUMitvP$8m zVZP~}7}e~)&``pZzjdRx3dUEDK;LE#B31|jT6gtB0K+?jl7g5X!$V)sjwWlsxyAke zmS+@8ruYQWdwoq_SvY6b0Wq_E4Kc!RHp}lG1xbJF0~+J}?6%`Nhn>y7RM#7RRzS_y zxesBLF^}wbi(d05U}Z}FUR(UY(0``OzE-h`&N)gkaoQ1u^LY3^pIWV=!@ z!?y-k*e}8*^w;qVS}*_Ht@BsSE#=1R6yEsD6NZE9>Mf&o{j=lC*%JaAUfjL8@9L^A zKD(OqT;2yyeYJd+WhCj_wtw@F;P%AK>(EQVS;OL(Xy&As^;zNDTv~5F!1Z^zX~XSP zGy>nc4qAZKSe95jXRUSWxLsIx&D6d%s}`EcpB_-D)E5n|9|%mbImI7mP2=Clyqp?# zp4>qnWMl`gJ|`95U>vS-86tv?dx)jD}QQYo0!g_pjbcP6Q3v z$-Sek(Beesk+AbmuH0L-hDYMYfBf-O|J}GFbe)&e-U|qwHK`jANu?SB*)X0F7wHtiA{@ 
z4fPuRE7J`U<5?%Pw|j!T*>Jf0?E2FP&7m9|!*I0<19oR7IZ;6`>j1VZ=S3_$L~Ymo zxLjID%cphxg!0Vy1$<(8QJLx3({qL|CzZY?l!eZ0tJN@My8zCo#?nGa;Cpc&qpfVhi6@mT$Fc55-p-?i}j(#Vpg)^2F(=?TH&_)*CeYG3Y8%uQG}M_&I$1 z5!j!~JxVQh9bnpu*XF{}U3N|h(hssQ?E>Bbit(Bm(NTQ=E};d1J>R`1)}Y{ff2-~b z)i~whKYvHC+Ym+d80HV6WV#L`3!0A$eHPY~7nNXS2}dDz!h|N&G;=gFpq>`w_Yp*! z!B)@amw|DUmM7l`3iB`rE5xs556?PkS7;%o>s=h%vfe{&4<5Ugk*X=&W$XRD^g{9F zALHPFJ9$v=bv`i?wJN|l9GWyEzAr;JrabZt*co;{7)eidb*M2Wvl3mOAw4c^tEy=B zGllzOi1;Kq1djG3sGO-ofioFXPrqz&s&cj5P>~K55mljw^!O8ng~ZwfGWp0p%Ce__ zbTrzmaPB3Vf>zhTFaH+@zjB<;jfhGL^>>RZ%=#>*d~mAz`=(on5+B$z^_E`T`x~sP zfqc)GRphJ2Z`WxRWe=%{zosTQtTG6{e&JMpSkrU!hIfb~tZ$E$+;?t|Z`(gKHx*+FkdSZpdL19j3S`0T<|a^oLQ1}B-TYL&f6!NGT1jrN zodxotC>5?>$DK2r5g8?#WsBhbm>!Kr&YpsjS&dj<0_=^EQC*;lmhsI~z@0qoxcwg% zZ{7gT$_SX`)RVR!WFIy39#O3=A+nyK5^*vLY01xhqeeGu(D*w}`#+BxTbXK4X~Oro zmwVZeNcJhucE*qs6{G_?wAY14%Bm@t95`TR_e^M$LsaOsSFM|dhE-&@TfwG>_U*Xt zC*GrU+l?z1j@!YQB<*#Vw%R3Vp>Rex9M{^@Cl>NqKB?*;m^d3`Oum&VdUztzWSh*C zo*c3_GoiU%?~_&iK8$9D`iJcMv7f=fi>jKh@3PlJTEHoG_+fdm&}F}@bbON4-#%42 zER)M`y~6!;!&`)&%Um0p{Gm){=5L{T(OtrM4&*o9MCWKWxkV$@#%+sxR$yqlIKj&7i*KuWR1Ww6oBsct^~P=8-cUVVq)psw=Ub z)e;raHFGHE4-<*WF6!_L>zc{@UUW1V7wX#Vx3yAi8xL+c-BBZ8_S7na`=18>*t8Ss zY2=S?I1=O6QNW@=k`E4b;yqdt;oN5S=y(ZSxzB;25~$%1_wTv9gJWn4MY=grY3UMz^TwZ6pe0Ny-Me$2`*rt!CiZc1d3$MSXnTPv69RRUVwCddxks z1m-$GYRk@Sd;y&`2&F%nMcs<5W!PP>bP zYrjZXuVD7i9|_yz)-&e%*E9l1lcrBeCWDv67aOI>HUa7xd!c$6Za=cilDB z{b8K@hIYk9a}Gb=QTnx*D4itCV_6wgMNzx0M@^O$C=aBBbh2hJ_^Qd#PQ7%4kdN5L zqCZEvvtTESJ~+@7y^AlXcNOCNQiwGl+CQvGI=h0JnnxAF4qe#VOQ3P+yFMYm`-X?!+ z=53pJ-MuI9N^?<4dPS~h3tmv}W@@v)%-s$p1A(t3aaAAPcGr%pNnSl_;jf^r9`0D< zL}4C2Xv4F^0~d&GFSBFxq$k#CA-f6;K=WxIGu@psdW4$)o;Ap}Mql+)<$Y?eGtCP< zf5g4a9DUg}a4fj{+nP^mxL~L+&a9%0ztU3K@2QEP^qBqz-dl*N!6Rket^KfxktcGm z-ddWwtGL|Obco-`Snkd>SA}`3uFE{h=V2yVajrGMIKr05H6&oBjD1iGY!ek8GCb0h zY2%A3TIuL@kFMz<&s~;x#}N-ZlxUL!01^Nf8;$tjW=1tkBra+{O;Oy!jQg%!s4`?U zkq!n;*9O@Zh{*!>pWJoqtNv5@aXmdZ0`Z6qnL zMC&A_xIh|npLsE5yzz^CgTMFM)H%z-i-|cFVlKN_5<4Mse)vuBy|?0h*s5*2kaO&LCN>o7aw_l0^3m7y^U28jn4q@y{Pvr)`2Q#JL%*Zm^PFcJ_d)>Jb* zHn;pC?6!E!Re+7X{E$}p8C=?~%G5IbFfozp8-vrQyIZMiXN@p$ZssKIwvX;6tD53) zOaZeGrM^3yoBG661re*(Gg^W(#32rB-AIN0Rc5FwaZv~P)+1lmbozZu!3lUSTH&Tq zqJ%@4->OxD3TICozo<}iX6<{Szltx&<>fvIz5WV7{+tYw3erTE`qR zWr%(5X!Mh9(q^wHk0+5^ju}-BhsLr!Jq*_I6K@Tv*HwX9@B@t$o zo&<}Ys0w%61CcoDFylZDKk_O)G~nI?YOk7;y$Sh8Er%F@b>9aA=yGHu8wUl&`S9rd zzS|F^I#7;?bya63Z+QwkQ*-{HWUf>54-37(Cbu#PBM@9e17*L$3~JH6yV(q0KQK; zGLk?h7=A9KRuj8?6UXBq&nRvCdN(1lD<|Q{kzNC+rzGCw#Qm<= z?j4Fcuav-8FyxGtI^$}IiO!rpBnTQ+;wm+>CN&^*F1SVfX0n>i&7|6@Cow#hA#bU? 
zoh5TO{9fBcI3<9Ht3-6xJd}P9@T`t1l6}={Jy{3B55~y6*wXIS z-Pst|SQ6<#LT`D%`qPJY3#Mrg5g;MEDxjeROD(uzD1IG3FYy_XIYf)OB2j1(opZ#d z_9dd_{hC-I50^&j(a%|7#=q)T1tc z3G=k^7w1+L=_V$+=?bPu3p@i5j8+gtJ*5=#7GF_xrL;xm2jd?WOg6~54E?eEOIMjw zx~QM;5F=pbw&ciplp(rh!u6Kk4rGt7Fm9+kDm^+#6tY=N`2eu$)R4!r*GFKVH*$*i zhGxOKgDH0FK4&JJ@Ry+?!NrpQkE*GEg^6+G)+<`vB2j$?AXvN%zqD0Z1MS%nz-a$LQ>rOu|tbIt|Bp*=T zM4J2(eU~M#c+>i_nmS?ZUUQLmO>gEp*Id(rvr({&iN>}YeXogq1??(6{iUycTRy>i zC~H5guRumHDuswr)tU|`Pu5+$UUZr!LxlSeTZg8ECaYW;^*Y|)yKlFE&?bGS$gi9E zpM0aWan;>paev?Htnh{H*=*?iEK+*5iJ&kqj#2JY_C?l( zP_bJevA5zLV{Lvwki!PV2^mfxtpL+(iII9c1dDl5Hkh4C$=jo1_ zzH3r<>>gRc_^XDMdf6J+YNs>4bG4Yu#~Ke^58kTVx(q6Fb9KH@JkIenWdy~DbQ5#%jCrPo6sWrl#vd9n=X0~0WO|0P@dtWset{w zji+dWYDqe?Sp<8SZPOxMmI5aTY1{fe;20YOy1taJw@5Ir6zKmwDqkQaopCwkGxfBZIGFfGqx$O=Z;=V z8UOPLSChsx=*wqn!8Z&4A&Q@-bTKUt2qaCsaO9@q^u4NkgG4U^@Y`~ZZj3E1GfIwE zyWCaUa{`AsM_@{UHc2{u4wF#^NR2GtP!pLfQNbcZ7G>;QAaw4)%zPr@%@GD|<)f!YPJoyFmxGpKL9RIhbf)Eev6D5R|W`l~o$^QjcX4^l}Aj0${v~S#i(cWV7$G9!p99@SxkKO+Wwb%*tD@{p#jirh+IZWYg8qqu+Kin6Ypyt z+@t5`F2_wQQ@&jVh42Z}bybjb`L?P{Yrk5*48D~10BZDwz*}xr6>zE8rDqQ#L{Y%_ zR2J4GJ^c|NhFVlmgBx#|rRtd<9irubQy8X{%dIbjrD@Vp=!I}=x4q=QFSj`h!>5E zWAsHw|AgJCYV!QibRjcaL%IuBV2M>3u)^#-Y;w1-sXQPVD%aNZ9hh4;p)nPUp_~J4 zZ;q;YnCVs-res@cg!-ln<*CvF^W)mmh310d5&!yO#5w-9RHU$ATh{)m;H=5_!Okezz-4#}c< z=h1JpKNr^aXUHQ9{uM0ds<4A=xU>r1vpm#^OEUE9DZg~`ra?7lAThr|5=rmIwou;&HJqY&%pufl0Yr6A+PdPNpOapq zooK>E$!*q`g;p!yibWlBzNG%}^-<2im@UQL*gq5lD)sy6)YgfM#m-WfE|J|5?E8+d zv|+W7cV12x);=??Nmg`H%l_~d+^q0F+lQ64m+d@1?ps=G$+r2mg67)Pw1S3=dhW-^ z-}A5deJiaA{1&!--9h-aWA7Y&N7`Ss7bqXp+2uhcUD_#NCWLLunkP;gU*)s_;$(>0 zu@V!`>C6|`?GdD9TCs1*hcx6UJF^7y61K)ka@p++zm}GHs!wIH*;;je$G2r z32+btFR~w$T<46*IcyvBICt;p=8vSY)IBrX%C>Pj)n7i%W0==ey&?JOe`w~fGl0K* z&W(Rx)o-K{fBhiM9lRf7qzaU$G8Qs6m|Tc`?euO4$c24QtJBNGvLWl2nqISCtJey? z%J#sL#zpT~)HrZDz-As1KeK*=$}f~_1xo{t<<-BItgHF`Me8c6rPG&KbU^%!u@L_X zZ~ygDKT0gEtK_f$>+4z~o}(CRGc|no`bO%DX3OP#`tQyc)?n$}bG=+RU+i+dTx%Ou+vj15G(D5bkU2nGZ2vX*=AcEUE7FpJjjjH>wMOqI!c^3|n;xY@>>d zTTEX6xZm&fYmsxT?VYp?`#|X~oJ{3gd#ByLvov3eiJRS&eWbGj5BbYymxQsYq6=RD zl`$i;;eYWU+k^87s*abB*#Vn{RRvTJIOh3v$ymCb!vzl;Z{482j+3@qSz{N}d!NY? z@(67FJ%snm5B>ktg7*oQf)c%WZ6FONXTk3f4NN!|C-dX7Q5W-i|8uJo9h=3SRphQ z(0qQK4TWUcTH`M>$E3{d5=iXNUL;=5B2MOT5--2rm~J{d+bubNyh>X6Q|G2@G=S>N zI4!F5WA{hS|CLH3*eZQ?esR*rHkF4k17;XKbNUiTznE-U_}<4s_bqt;iw`cPvI#$f sbNUUtYn{O~^ibAB%6M)lQ->wKj((BqzULry75L}uN&OQ!T32rVA6kZ`TmS$7 literal 0 HcmV?d00001 diff --git a/docs/source/huggingface/pretrained-models.rst b/docs/source/huggingface/pretrained-models.rst new file mode 100644 index 000000000..8ae22f76f --- /dev/null +++ b/docs/source/huggingface/pretrained-models.rst @@ -0,0 +1,17 @@ +Pre-trained models +================== + +We have uploaded pre-trained models for all recipes in ``icefall`` +to ``_. + +You can find them by visiting the following link: + +``_. + +You can also find links of pre-trained models for a specific recipe +by looking at the corresponding ``RESULTS.md``. For instance: + + - ``_ + - ``_ + - ``_ + - ``_ diff --git a/docs/source/huggingface/spaces.rst b/docs/source/huggingface/spaces.rst new file mode 100644 index 000000000..e718c3731 --- /dev/null +++ b/docs/source/huggingface/spaces.rst @@ -0,0 +1,65 @@ +Huggingface spaces +================== + +We have integrated the server framework +`sherpa `_ +with `Huggingface spaces `_ +so that you can try pre-trained models from within your browser +without the need to download or install anything. + +All you need is a browser, which can be run on Windows, macOS, Linux, or even on your +iPad and your phone. 
+ +Start your browser and visit the following address: + +``_ + +and you will see a page like the following screenshot: + +.. image:: ./pic/hugging-face-sherpa.png + :alt: screenshot of ``_ + :target: https://huggingface.co/spaces/k2-fsa/automatic-speech-recognition + +You can: + + 1. Select a language for recognition. Currently, we provide pre-trained models + from ``icefall`` for the following languages: ``Chinese``, ``English``, and + ``Chinese+English``. + 2. After selecting the target language, you can select a pre-trained model + corresponding to the language. + 3. Select the decoding method. Currently, it provides ``greedy search`` + and ``modified_beam_search``. + 4. If you selected ``modified_beam_search``, you can choose the number of + active paths during the search. + 5. Either upload a file or record your speech for recognition. + 6. Click the button ``Submit for recognition``. + 7. Wait for a moment and you will get the recognition results. + +The following screenshot shows an example when selecting ``Chinese+English``: + +.. image:: ./pic/hugging-face-sherpa-3.png + :alt: screenshot of ``_ + :target: https://huggingface.co/spaces/k2-fsa/automatic-speech-recognition + + +In the bottom part of the page, you can find a table of examples. You can click +one of them and then click ``Submit for recognition``. + +.. image:: ./pic/hugging-face-sherpa-2.png + :alt: screenshot of ``_ + :target: https://huggingface.co/spaces/k2-fsa/automatic-speech-recognition + +YouTube Video +------------- + +We provide the following YouTube video demonstrating how to use +``_. + +.. note:: + + To get the latest news of `next-gen Kaldi `_, please subscribe + the following YouTube channel by `Nadira Povey `_: + + ``_ + +.. youtube:: ElN3r9dkKE4 diff --git a/docs/source/index.rst b/docs/source/index.rst index b06047a89..29491e3dc 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -23,3 +23,4 @@ speech recognition recipes using `k2 `_. installation/index recipes/index contributing/index + huggingface/index diff --git a/docs/source/installation/index.rst b/docs/source/installation/index.rst index 5d364dbc0..c4474c3d9 100644 --- a/docs/source/installation/index.rst +++ b/docs/source/installation/index.rst @@ -474,3 +474,19 @@ The decoding log is: **Congratulations!** You have successfully setup the environment and have run the first recipe in ``icefall``. Have fun with ``icefall``! + +YouTube Video +------------- + +We provide the following YouTube video showing how to install ``icefall``. +It also shows how to debug various problems that you may encounter while +using ``icefall``. + +.. note:: + + To get the latest news of `next-gen Kaldi `_, please subscribe + the following YouTube channel by `Nadira Povey `_: + + ``_ + +.. youtube:: LVmrBD0tLfE diff --git a/docs/source/recipes/librispeech/conformer_ctc.rst b/docs/source/recipes/librispeech/conformer_ctc.rst index 5acc4092b..4656acfd6 100644 --- a/docs/source/recipes/librispeech/conformer_ctc.rst +++ b/docs/source/recipes/librispeech/conformer_ctc.rst @@ -70,6 +70,17 @@ To run stage 2 to stage 5, use: All generated files by ``./prepare.sh``, e.g., features, lexicon, etc, are saved in ``./data`` directory. +We provide the following YouTube video showing how to run ``./prepare.sh``. + +.. note:: + + To get the latest news of `next-gen Kaldi `_, please subscribe + the following YouTube channel by `Nadira Povey `_: + + ``_ + +.. 
youtube:: ofEIoJL-mGM + Training -------- diff --git a/docs/source/recipes/librispeech/tdnn_lstm_ctc.rst b/docs/source/recipes/librispeech/tdnn_lstm_ctc.rst index 848026802..ca477fbaa 100644 --- a/docs/source/recipes/librispeech/tdnn_lstm_ctc.rst +++ b/docs/source/recipes/librispeech/tdnn_lstm_ctc.rst @@ -45,6 +45,16 @@ To run stage 2 to stage 5, use: $ ./prepare.sh --stage 2 --stop-stage 5 +We provide the following YouTube video showing how to run ``./prepare.sh``. + +.. note:: + + To get the latest news of `next-gen Kaldi `_, please subscribe + the following YouTube channel by `Nadira Povey `_: + + ``_ + +.. youtube:: ofEIoJL-mGM Training -------- From 8203d10be7f47077e62a133d6f23fd039d05a131 Mon Sep 17 00:00:00 2001 From: Zengwei Yao Date: Mon, 25 Jul 2022 16:40:43 +0800 Subject: [PATCH 11/38] Add stats about duration and padding proportion (#485) * add stats about duration and padding proportion * add for utt_duration * add stats for other recipes * add stats for other 2 recipes * modify doc * minor change --- .../train.py | 9 +++++ .../train.py | 9 +++++ .../pruned_stateless_emformer_rnnt2/train.py | 9 +++++ .../ASR/pruned_transducer_stateless/train.py | 9 +++++ .../ASR/pruned_transducer_stateless2/train.py | 9 +++++ .../ASR/pruned_transducer_stateless3/train.py | 9 +++++ .../ASR/pruned_transducer_stateless4/train.py | 9 +++++ .../ASR/pruned_transducer_stateless5/train.py | 9 +++++ .../ASR/pruned_transducer_stateless6/train.py | 9 +++++ icefall/utils.py | 33 +++++++++++++++---- 10 files changed, 107 insertions(+), 7 deletions(-) diff --git a/egs/librispeech/ASR/conv_emformer_transducer_stateless/train.py b/egs/librispeech/ASR/conv_emformer_transducer_stateless/train.py index 7dc9314f8..c07d8f76b 100755 --- a/egs/librispeech/ASR/conv_emformer_transducer_stateless/train.py +++ b/egs/librispeech/ASR/conv_emformer_transducer_stateless/train.py @@ -686,6 +686,15 @@ def compute_loss( (feature_lens // params.subsampling_factor).sum().item() ) + # `utt_duration` and `utt_pad_proportion` would be normalized by `utterances` # noqa + info["utterances"] = feature.size(0) + # averaged input duration in frames over utterances + info["utt_duration"] = feature_lens.sum().item() + # averaged padding proportion over utterances + info["utt_pad_proportion"] = ( + ((feature.size(1) - feature_lens) / feature.size(1)).sum().item() + ) + # Note: We use reduction=sum while computing the loss. info["loss"] = loss.detach().cpu().item() info["simple_loss"] = simple_loss.detach().cpu().item() diff --git a/egs/librispeech/ASR/conv_emformer_transducer_stateless2/train.py b/egs/librispeech/ASR/conv_emformer_transducer_stateless2/train.py index dfe1b6136..2bbc45d78 100755 --- a/egs/librispeech/ASR/conv_emformer_transducer_stateless2/train.py +++ b/egs/librispeech/ASR/conv_emformer_transducer_stateless2/train.py @@ -686,6 +686,15 @@ def compute_loss( (feature_lens // params.subsampling_factor).sum().item() ) + # `utt_duration` and `utt_pad_proportion` would be normalized by `utterances` # noqa + info["utterances"] = feature.size(0) + # averaged input duration in frames over utterances + info["utt_duration"] = feature_lens.sum().item() + # averaged padding proportion over utterances + info["utt_pad_proportion"] = ( + ((feature.size(1) - feature_lens) / feature.size(1)).sum().item() + ) + # Note: We use reduction=sum while computing the loss. 
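
(A brief aside, not part of this patch: the block added above only accumulates sums; the averaging happens at logging time, where the icefall/utils.py change later in this patch makes MetricsTracker divide every "utt_*" entry by the number of utterances rather than by the number of frames. A minimal sketch of that arithmetic, with invented batch sizes:)

import torch

# Toy batch: 3 utterances padded to 1000 frames each.
feature = torch.zeros(3, 1000, 80)             # (N, T, C)
feature_lens = torch.tensor([1000, 800, 600])  # valid frames per utterance

info = {}
info["utterances"] = feature.size(0)                       # 3
info["utt_duration"] = feature_lens.sum().item()           # 2400 frames in total
info["utt_pad_proportion"] = (
    ((feature.size(1) - feature_lens) / feature.size(1)).sum().item()
)                                                          # 0.0 + 0.2 + 0.4

# MetricsTracker.norm_items() divides "utt_*" keys by "utterances":
print(info["utt_duration"] / info["utterances"])           # 800.0 frames on average
print(info["utt_pad_proportion"] / info["utterances"])     # 0.2 average padding
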
info["loss"] = loss.detach().cpu().item() info["simple_loss"] = simple_loss.detach().cpu().item() diff --git a/egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/train.py b/egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/train.py index cd62787fa..dd23309b3 100755 --- a/egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/train.py +++ b/egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/train.py @@ -603,6 +603,15 @@ def compute_loss( (feature_lens // params.subsampling_factor).sum().item() ) + # `utt_duration` and `utt_pad_proportion` would be normalized by `utterances` # noqa + info["utterances"] = feature.size(0) + # averaged input duration in frames over utterances + info["utt_duration"] = feature_lens.sum().item() + # averaged padding proportion over utterances + info["utt_pad_proportion"] = ( + ((feature.size(1) - feature_lens) / feature.size(1)).sum().item() + ) + # Note: We use reduction=sum while computing the loss. info["loss"] = loss.detach().cpu().item() info["simple_loss"] = simple_loss.detach().cpu().item() diff --git a/egs/librispeech/ASR/pruned_transducer_stateless/train.py b/egs/librispeech/ASR/pruned_transducer_stateless/train.py index 3708c17ef..b625ed3ff 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless/train.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless/train.py @@ -559,6 +559,15 @@ def compute_loss( (feature_lens // params.subsampling_factor).sum().item() ) + # `utt_duration` and `utt_pad_proportion` would be normalized by `utterances` # noqa + info["utterances"] = feature.size(0) + # averaged input duration in frames over utterances + info["utt_duration"] = feature_lens.sum().item() + # averaged padding proportion over utterances + info["utt_pad_proportion"] = ( + ((feature.size(1) - feature_lens) / feature.size(1)).sum().item() + ) + # Note: We use reduction=sum while computing the loss. info["loss"] = loss.detach().cpu().item() info["simple_loss"] = simple_loss.detach().cpu().item() diff --git a/egs/librispeech/ASR/pruned_transducer_stateless2/train.py b/egs/librispeech/ASR/pruned_transducer_stateless2/train.py index 13175c4c2..46d2cb86d 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless2/train.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless2/train.py @@ -627,6 +627,15 @@ def compute_loss( (feature_lens // params.subsampling_factor).sum().item() ) + # `utt_duration` and `utt_pad_proportion` would be normalized by `utterances` # noqa + info["utterances"] = feature.size(0) + # averaged input duration in frames over utterances + info["utt_duration"] = feature_lens.sum().item() + # averaged padding proportion over utterances + info["utt_pad_proportion"] = ( + ((feature.size(1) - feature_lens) / feature.size(1)).sum().item() + ) + # Note: We use reduction=sum while computing the loss. 
info["loss"] = loss.detach().cpu().item() info["simple_loss"] = simple_loss.detach().cpu().item() diff --git a/egs/librispeech/ASR/pruned_transducer_stateless3/train.py b/egs/librispeech/ASR/pruned_transducer_stateless3/train.py index 3b9fb710c..371bf21d9 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless3/train.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless3/train.py @@ -652,6 +652,15 @@ def compute_loss( (feature_lens // params.subsampling_factor).sum().item() ) + # `utt_duration` and `utt_pad_proportion` would be normalized by `utterances` # noqa + info["utterances"] = feature.size(0) + # averaged input duration in frames over utterances + info["utt_duration"] = feature_lens.sum().item() + # averaged padding proportion over utterances + info["utt_pad_proportion"] = ( + ((feature.size(1) - feature_lens) / feature.size(1)).sum().item() + ) + # Note: We use reduction=sum while computing the loss. info["loss"] = loss.detach().cpu().item() info["simple_loss"] = simple_loss.detach().cpu().item() diff --git a/egs/librispeech/ASR/pruned_transducer_stateless4/train.py b/egs/librispeech/ASR/pruned_transducer_stateless4/train.py index 47e2ae1c1..893a6a749 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless4/train.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless4/train.py @@ -657,6 +657,15 @@ def compute_loss( (feature_lens // params.subsampling_factor).sum().item() ) + # `utt_duration` and `utt_pad_proportion` would be normalized by `utterances` # noqa + info["utterances"] = feature.size(0) + # averaged input duration in frames over utterances + info["utt_duration"] = feature_lens.sum().item() + # averaged padding proportion over utterances + info["utt_pad_proportion"] = ( + ((feature.size(1) - feature_lens) / feature.size(1)).sum().item() + ) + # Note: We use reduction=sum while computing the loss. info["loss"] = loss.detach().cpu().item() info["simple_loss"] = simple_loss.detach().cpu().item() diff --git a/egs/librispeech/ASR/pruned_transducer_stateless5/train.py b/egs/librispeech/ASR/pruned_transducer_stateless5/train.py index eaf893997..8f20eedc9 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless5/train.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless5/train.py @@ -644,6 +644,15 @@ def compute_loss( (feature_lens // params.subsampling_factor).sum().item() ) + # `utt_duration` and `utt_pad_proportion` would be normalized by `utterances` # noqa + info["utterances"] = feature.size(0) + # averaged input duration in frames over utterances + info["utt_duration"] = feature_lens.sum().item() + # averaged padding proportion over utterances + info["utt_pad_proportion"] = ( + ((feature.size(1) - feature_lens) / feature.size(1)).sum().item() + ) + # Note: We use reduction=sum while computing the loss. 
info["loss"] = loss.detach().cpu().item() info["simple_loss"] = simple_loss.detach().cpu().item() diff --git a/egs/librispeech/ASR/pruned_transducer_stateless6/train.py b/egs/librispeech/ASR/pruned_transducer_stateless6/train.py index c054527ca..596f8f7d9 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless6/train.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless6/train.py @@ -661,6 +661,15 @@ def compute_loss( (feature_lens // params.subsampling_factor).sum().item() ) + # `utt_duration` and `utt_pad_proportion` would be normalized by `utterances` # noqa + info["utterances"] = feature.size(0) + # averaged input duration in frames over utterances + info["utt_duration"] = feature_lens.sum().item() + # averaged padding proportion over utterances + info["utt_pad_proportion"] = ( + ((feature.size(1) - feature_lens) / feature.size(1)).sum().item() + ) + # Note: We use reduction=sum while computing the loss. info["loss"] = loss.detach().cpu().item() info["simple_loss"] = simple_loss.detach().cpu().item() diff --git a/icefall/utils.py b/icefall/utils.py index 3bfd5e5b1..417ca1766 100644 --- a/icefall/utils.py +++ b/icefall/utils.py @@ -529,13 +529,26 @@ class MetricsTracker(collections.defaultdict): return ans def __str__(self) -> str: - ans = "" + ans_frames = "" + ans_utterances = "" for k, v in self.norm_items(): norm_value = "%.4g" % v - ans += str(k) + "=" + str(norm_value) + ", " + if "utt_" not in k: + ans_frames += str(k) + "=" + str(norm_value) + ", " + else: + ans_utterances += str(k) + "=" + str(norm_value) + if k == "utt_duration": + ans_utterances += " frames, " + elif k == "utt_pad_proportion": + ans_utterances += ", " + else: + raise ValueError(f"Unexpected key: {k}") frames = "%.2f" % self["frames"] - ans += "over " + str(frames) + " frames." - return ans + ans_frames += "over " + str(frames) + " frames; " + utterances = "%.2f" % self["utterances"] + ans_utterances += "over " + str(utterances) + " utterances." 
+ + return ans_frames + ans_utterances def norm_items(self) -> List[Tuple[str, float]]: """ @@ -543,11 +556,17 @@ class MetricsTracker(collections.defaultdict): [('ctc_loss', 0.1), ('att_loss', 0.07)] """ num_frames = self["frames"] if "frames" in self else 1 + num_utterances = self["utterances"] if "utterances" in self else 1 ans = [] for k, v in self.items(): - if k != "frames": - norm_value = float(v) / num_frames - ans.append((k, norm_value)) + if k == "frames" or k == "utterances": + continue + norm_value = ( + float(v) / num_frames + if "utt_" not in k + else float(v) / num_utterances + ) + ans.append((k, norm_value)) return ans def reduce(self, device): From b1d0956855d5a113a1f3b25eadf7bd60f8a61935 Mon Sep 17 00:00:00 2001 From: Wei Kang Date: Mon, 25 Jul 2022 16:53:23 +0800 Subject: [PATCH 12/38] Add modified_beam_search for streaming decode (#489) * Add modified_beam_search for pruned_transducer_stateless/streaming_decode.py * refactor * modified beam search for stateless3,4 * Fix comments * Add real streamng ci --- ...pruned-transducer-stateless2-2022-06-26.sh | 16 +- .../beam_search.py | 4 +- .../decode_stream.py | 29 +- .../streaming_beam_search.py | 280 +++++++++++++++++ .../streaming_decode.py | 202 ++++-------- .../streaming_beam_search.py | 288 ++++++++++++++++++ .../streaming_decode.py | 202 ++++-------- .../streaming_beam_search.py | 1 + .../streaming_decode.py | 206 ++++--------- .../streaming_beam_search.py | 1 + .../streaming_decode.py | 206 ++++--------- 11 files changed, 843 insertions(+), 592 deletions(-) create mode 100644 egs/librispeech/ASR/pruned_transducer_stateless/streaming_beam_search.py create mode 100644 egs/librispeech/ASR/pruned_transducer_stateless2/streaming_beam_search.py create mode 120000 egs/librispeech/ASR/pruned_transducer_stateless3/streaming_beam_search.py create mode 120000 egs/librispeech/ASR/pruned_transducer_stateless4/streaming_beam_search.py diff --git a/.github/scripts/run-librispeech-streaming-pruned-transducer-stateless2-2022-06-26.sh b/.github/scripts/run-librispeech-streaming-pruned-transducer-stateless2-2022-06-26.sh index 85bbb919f..d9dc34e48 100755 --- a/.github/scripts/run-librispeech-streaming-pruned-transducer-stateless2-2022-06-26.sh +++ b/.github/scripts/run-librispeech-streaming-pruned-transducer-stateless2-2022-06-26.sh @@ -70,7 +70,7 @@ if [[ x"${GITHUB_EVENT_NAME}" == x"schedule" || x"${GITHUB_EVENT_LABEL_NAME}" == max_duration=100 for method in greedy_search fast_beam_search modified_beam_search; do - log "Decoding with $method" + log "Simulate streaming decoding with $method" ./pruned_transducer_stateless2/decode.py \ --decoding-method $method \ @@ -82,5 +82,19 @@ if [[ x"${GITHUB_EVENT_NAME}" == x"schedule" || x"${GITHUB_EVENT_LABEL_NAME}" == --causal-convolution 1 done + for method in greedy_search fast_beam_search modified_beam_search; do + log "Real streaming decoding with $method" + + ./pruned_transducer_stateless2/streaming_decode.py \ + --decoding-method $method \ + --epoch 999 \ + --avg 1 \ + --num-decode-streams 100 \ + --exp-dir pruned_transducer_stateless2/exp \ + --left-context 32 \ + --decode-chunk-size 8 \ + --right-context 0 + done + rm pruned_transducer_stateless2/exp/*.pt fi diff --git a/egs/librispeech/ASR/pruned_transducer_stateless/beam_search.py b/egs/librispeech/ASR/pruned_transducer_stateless/beam_search.py index 40c442e7a..7af9cc3d7 100644 --- a/egs/librispeech/ASR/pruned_transducer_stateless/beam_search.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless/beam_search.py @@ -751,7 +751,7 @@ 
class HypothesisList(object): return ", ".join(s) -def _get_hyps_shape(hyps: List[HypothesisList]) -> k2.RaggedShape: +def get_hyps_shape(hyps: List[HypothesisList]) -> k2.RaggedShape: """Return a ragged shape with axes [utt][num_hyps]. Args: @@ -847,7 +847,7 @@ def modified_beam_search( finalized_B = B[batch_size:] + finalized_B B = B[:batch_size] - hyps_shape = _get_hyps_shape(B).to(device) + hyps_shape = get_hyps_shape(B).to(device) A = [list(b) for b in B] B = [HypothesisList() for _ in range(batch_size)] diff --git a/egs/librispeech/ASR/pruned_transducer_stateless/decode_stream.py b/egs/librispeech/ASR/pruned_transducer_stateless/decode_stream.py index ba5e80555..6c0e9ba19 100644 --- a/egs/librispeech/ASR/pruned_transducer_stateless/decode_stream.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless/decode_stream.py @@ -19,6 +19,7 @@ from typing import List, Optional, Tuple import k2 import torch +from beam_search import Hypothesis, HypothesisList from icefall.utils import AttributeDict @@ -42,7 +43,8 @@ class DecodeStream(object): device: The device to run this stream. """ - if decoding_graph is not None: + if params.decoding_method == "fast_beam_search": + assert decoding_graph is not None assert device == decoding_graph.device self.params = params @@ -77,15 +79,23 @@ class DecodeStream(object): if params.decoding_method == "greedy_search": self.hyp = [params.blank_id] * params.context_size + elif params.decoding_method == "modified_beam_search": + self.hyps = HypothesisList() + self.hyps.add( + Hypothesis( + ys=[params.blank_id] * params.context_size, + log_prob=torch.zeros(1, dtype=torch.float32, device=device), + ) + ) elif params.decoding_method == "fast_beam_search": # The rnnt_decoding_stream for fast_beam_search. self.rnnt_decoding_stream: k2.RnntDecodingStream = ( k2.RnntDecodingStream(decoding_graph) ) else: - assert ( - False - ), f"Decoding method :{params.decoding_method} do not support." + raise ValueError( + f"Unsupported decoding method: {params.decoding_method}" + ) @property def done(self) -> bool: @@ -124,3 +134,14 @@ class DecodeStream(object): self._done = True return ret_features, ret_length + + def decoding_result(self) -> List[int]: + """Obtain current decoding result.""" + if self.params.decoding_method == "greedy_search": + return self.hyp[self.params.context_size :] # noqa + elif self.params.decoding_method == "modified_beam_search": + best_hyp = self.hyps.get_most_probable(length_norm=True) + return best_hyp.ys[self.params.context_size :] # noqa + else: + assert self.params.decoding_method == "fast_beam_search" + return self.hyp diff --git a/egs/librispeech/ASR/pruned_transducer_stateless/streaming_beam_search.py b/egs/librispeech/ASR/pruned_transducer_stateless/streaming_beam_search.py new file mode 100644 index 000000000..dcf6dc42f --- /dev/null +++ b/egs/librispeech/ASR/pruned_transducer_stateless/streaming_beam_search.py @@ -0,0 +1,280 @@ +# Copyright 2022 Xiaomi Corp. (authors: Wei Kang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import warnings +from typing import List + +import k2 +import torch +import torch.nn as nn +from beam_search import Hypothesis, HypothesisList, get_hyps_shape +from decode_stream import DecodeStream + +from icefall.decode import one_best_decoding +from icefall.utils import get_texts + + +def greedy_search( + model: nn.Module, + encoder_out: torch.Tensor, + streams: List[DecodeStream], +) -> None: + """Greedy search in batch mode. It hardcodes --max-sym-per-frame=1. + + Args: + model: + The transducer model. + encoder_out: + Output from the encoder. Its shape is (N, T, C), where N >= 1. + streams: + A list of Stream objects. + """ + assert len(streams) == encoder_out.size(0) + assert encoder_out.ndim == 3 + + blank_id = model.decoder.blank_id + context_size = model.decoder.context_size + device = model.device + T = encoder_out.size(1) + + decoder_input = torch.tensor( + [stream.hyp[-context_size:] for stream in streams], + device=device, + dtype=torch.int64, + ) + # decoder_out is of shape (N, 1, decoder_out_dim) + decoder_out = model.decoder(decoder_input, need_pad=False) + + for t in range(T): + # current_encoder_out's shape: (batch_size, 1, encoder_out_dim) + current_encoder_out = encoder_out[:, t : t + 1, :] # noqa + + logits = model.joiner( + current_encoder_out.unsqueeze(2), + decoder_out.unsqueeze(1), + ) + # logits'shape (batch_size, vocab_size) + logits = logits.squeeze(1).squeeze(1) + + assert logits.ndim == 2, logits.shape + y = logits.argmax(dim=1).tolist() + emitted = False + for i, v in enumerate(y): + if v != blank_id: + streams[i].hyp.append(v) + emitted = True + if emitted: + # update decoder output + decoder_input = torch.tensor( + [stream.hyp[-context_size:] for stream in streams], + device=device, + dtype=torch.int64, + ) + decoder_out = model.decoder( + decoder_input, + need_pad=False, + ) + + +def modified_beam_search( + model: nn.Module, + encoder_out: torch.Tensor, + streams: List[DecodeStream], + num_active_paths: int = 4, +) -> None: + """Beam search in batch mode with --max-sym-per-frame=1 being hardcoded. + + Args: + model: + The RNN-T model. + encoder_out: + A 3-D tensor of shape (N, T, encoder_out_dim) containing the output of + the encoder model. + streams: + A list of stream objects. + num_active_paths: + Number of active paths during the beam search. 
+ """ + assert encoder_out.ndim == 3, encoder_out.shape + assert len(streams) == encoder_out.size(0) + + blank_id = model.decoder.blank_id + context_size = model.decoder.context_size + device = next(model.parameters()).device + batch_size = len(streams) + T = encoder_out.size(1) + + B = [stream.hyps for stream in streams] + + for t in range(T): + current_encoder_out = encoder_out[:, t].unsqueeze(1).unsqueeze(1) + # current_encoder_out's shape: (batch_size, 1, 1, encoder_out_dim) + + hyps_shape = get_hyps_shape(B).to(device) + + A = [list(b) for b in B] + B = [HypothesisList() for _ in range(batch_size)] + + ys_log_probs = torch.stack( + [hyp.log_prob.reshape(1) for hyps in A for hyp in hyps], dim=0 + ) # (num_hyps, 1) + + decoder_input = torch.tensor( + [hyp.ys[-context_size:] for hyps in A for hyp in hyps], + device=device, + dtype=torch.int64, + ) # (num_hyps, context_size) + + decoder_out = model.decoder(decoder_input, need_pad=False).unsqueeze(1) + # decoder_out is of shape (num_hyps, 1, 1, decoder_output_dim) + + # Note: For torch 1.7.1 and below, it requires a torch.int64 tensor + # as index, so we use `to(torch.int64)` below. + current_encoder_out = torch.index_select( + current_encoder_out, + dim=0, + index=hyps_shape.row_ids(1).to(torch.int64), + ) # (num_hyps, encoder_out_dim) + + logits = model.joiner(current_encoder_out, decoder_out) + # logits is of shape (num_hyps, 1, 1, vocab_size) + + logits = logits.squeeze(1).squeeze(1) + + log_probs = logits.log_softmax(dim=-1) # (num_hyps, vocab_size) + + log_probs.add_(ys_log_probs) + + vocab_size = log_probs.size(-1) + + log_probs = log_probs.reshape(-1) + + row_splits = hyps_shape.row_splits(1) * vocab_size + log_probs_shape = k2.ragged.create_ragged_shape2( + row_splits=row_splits, cached_tot_size=log_probs.numel() + ) + ragged_log_probs = k2.RaggedTensor( + shape=log_probs_shape, value=log_probs + ) + + for i in range(batch_size): + topk_log_probs, topk_indexes = ragged_log_probs[i].topk( + num_active_paths + ) + + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + topk_hyp_indexes = (topk_indexes // vocab_size).tolist() + topk_token_indexes = (topk_indexes % vocab_size).tolist() + + for k in range(len(topk_hyp_indexes)): + hyp_idx = topk_hyp_indexes[k] + hyp = A[i][hyp_idx] + + new_ys = hyp.ys[:] + new_token = topk_token_indexes[k] + if new_token != blank_id: + new_ys.append(new_token) + + new_log_prob = topk_log_probs[k] + new_hyp = Hypothesis(ys=new_ys, log_prob=new_log_prob) + B[i].add(new_hyp) + + for i in range(batch_size): + streams[i].hyps = B[i] + + +def fast_beam_search_one_best( + model: nn.Module, + encoder_out: torch.Tensor, + processed_lens: torch.Tensor, + streams: List[DecodeStream], + beam: float, + max_states: int, + max_contexts: int, +) -> None: + """It limits the maximum number of symbols per frame to 1. + + A lattice is first generated by Fsa-based beam search, then we get the + recognition by applying shortest path on the lattice. + + Args: + model: + An instance of `Transducer`. + encoder_out: + A tensor of shape (N, T, C) from the encoder. + processed_lens: + A tensor of shape (N,) containing the number of processed frames + in `encoder_out` before padding. + streams: + A list of stream objects. + beam: + Beam value, similar to the beam used in Kaldi.. + max_states: + Max states per stream per frame. + max_contexts: + Max contexts pre stream per frame. 
+ """ + assert encoder_out.ndim == 3 + B, T, C = encoder_out.shape + assert B == len(streams) + + context_size = model.decoder.context_size + vocab_size = model.decoder.vocab_size + + config = k2.RnntDecodingConfig( + vocab_size=vocab_size, + decoder_history_len=context_size, + beam=beam, + max_contexts=max_contexts, + max_states=max_states, + ) + individual_streams = [] + for i in range(B): + individual_streams.append(streams[i].rnnt_decoding_stream) + decoding_streams = k2.RnntDecodingStreams(individual_streams, config) + + for t in range(T): + # shape is a RaggedShape of shape (B, context) + # contexts is a Tensor of shape (shape.NumElements(), context_size) + shape, contexts = decoding_streams.get_contexts() + # `nn.Embedding()` in torch below v1.7.1 supports only torch.int64 + contexts = contexts.to(torch.int64) + # decoder_out is of shape (shape.NumElements(), 1, decoder_out_dim) + decoder_out = model.decoder(contexts, need_pad=False) + # current_encoder_out is of shape + # (shape.NumElements(), 1, joiner_dim) + # fmt: off + current_encoder_out = torch.index_select( + encoder_out[:, t:t + 1, :], 0, shape.row_ids(1).to(torch.int64) + ) + # fmt: on + logits = model.joiner( + current_encoder_out.unsqueeze(2), + decoder_out.unsqueeze(1), + ) + logits = logits.squeeze(1).squeeze(1) + log_probs = logits.log_softmax(dim=-1) + decoding_streams.advance(log_probs) + + decoding_streams.terminate_and_flush_to_streams() + + lattice = decoding_streams.format_output(processed_lens.tolist()) + best_path = one_best_decoding(lattice) + hyp_tokens = get_texts(best_path) + + for i in range(B): + streams[i].hyp = hyp_tokens[i] diff --git a/egs/librispeech/ASR/pruned_transducer_stateless/streaming_decode.py b/egs/librispeech/ASR/pruned_transducer_stateless/streaming_decode.py index f05cf7a91..e455627f3 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless/streaming_decode.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless/streaming_decode.py @@ -17,13 +17,13 @@ """ Usage: -./pruned_transducer_stateless2/streaming_decode.py \ +./pruned_transducer_stateless/streaming_decode.py \ --epoch 28 \ --avg 15 \ --decode-chunk-size 8 \ --left-context 32 \ --right-context 0 \ - --exp-dir ./pruned_transducer_stateless2/exp \ + --exp-dir ./pruned_transducer_stateless/exp \ --decoding_method greedy_search \ --num-decode-streams 1000 """ @@ -43,6 +43,11 @@ from asr_datamodule import LibriSpeechAsrDataModule from decode_stream import DecodeStream from kaldifeat import Fbank, FbankOptions from lhotse import CutSet +from streaming_beam_search import ( + fast_beam_search_one_best, + greedy_search, + modified_beam_search, +) from torch.nn.utils.rnn import pad_sequence from train import add_model_arguments, get_params, get_transducer_model @@ -51,10 +56,8 @@ from icefall.checkpoint import ( find_checkpoints, load_checkpoint, ) -from icefall.decode import one_best_decoding from icefall.utils import ( AttributeDict, - get_texts, setup_logger, store_transcripts, write_error_stats, @@ -114,10 +117,21 @@ def get_parser(): "--decoding-method", type=str, default="greedy_search", - help="""Support only greedy_search and fast_beam_search now. + help="""Supported decoding methods are: + greedy_search + modified_beam_search + fast_beam_search """, ) + parser.add_argument( + "--num-active-paths", + type=int, + default=4, + help="""An interger indicating how many candidates we will keep for each + frame. 
Used only when --decoding-method is modified_beam_search.""", + ) + parser.add_argument( "--beam", type=float, @@ -185,103 +199,6 @@ def get_parser(): return parser -def greedy_search( - model: nn.Module, - encoder_out: torch.Tensor, - streams: List[DecodeStream], -) -> List[List[int]]: - - assert len(streams) == encoder_out.size(0) - assert encoder_out.ndim == 3 - - blank_id = model.decoder.blank_id - context_size = model.decoder.context_size - device = model.device - T = encoder_out.size(1) - - decoder_input = torch.tensor( - [stream.hyp[-context_size:] for stream in streams], - device=device, - dtype=torch.int64, - ) - # decoder_out is of shape (N, decoder_out_dim) - decoder_out = model.decoder(decoder_input, need_pad=False) - - for t in range(T): - # current_encoder_out's shape: (batch_size, 1, encoder_out_dim) - current_encoder_out = encoder_out[:, t : t + 1, :] # noqa - - logits = model.joiner( - current_encoder_out.unsqueeze(2), - decoder_out.unsqueeze(1), - ) - # logits'shape (batch_size, vocab_size) - logits = logits.squeeze(1).squeeze(1) - - assert logits.ndim == 2, logits.shape - y = logits.argmax(dim=1).tolist() - emitted = False - for i, v in enumerate(y): - if v != blank_id: - streams[i].hyp.append(v) - emitted = True - if emitted: - # update decoder output - decoder_input = torch.tensor( - [stream.hyp[-context_size:] for stream in streams], - device=device, - dtype=torch.int64, - ) - decoder_out = model.decoder( - decoder_input, - need_pad=False, - ) - - hyp_tokens = [] - for stream in streams: - hyp_tokens.append(stream.hyp) - return hyp_tokens - - -def fast_beam_search( - model: nn.Module, - encoder_out: torch.Tensor, - processed_lens: torch.Tensor, - decoding_streams: k2.RnntDecodingStreams, -) -> List[List[int]]: - - B, T, C = encoder_out.shape - for t in range(T): - # shape is a RaggedShape of shape (B, context) - # contexts is a Tensor of shape (shape.NumElements(), context_size) - shape, contexts = decoding_streams.get_contexts() - # `nn.Embedding()` in torch below v1.7.1 supports only torch.int64 - contexts = contexts.to(torch.int64) - # decoder_out is of shape (shape.NumElements(), 1, decoder_out_dim) - decoder_out = model.decoder(contexts, need_pad=False) - # current_encoder_out is of shape - # (shape.NumElements(), 1, joiner_dim) - # fmt: off - current_encoder_out = torch.index_select( - encoder_out[:, t:t + 1, :], 0, shape.row_ids(1).to(torch.int64) - ) - # fmt: on - logits = model.joiner( - current_encoder_out.unsqueeze(2), - decoder_out.unsqueeze(1), - ) - logits = logits.squeeze(1).squeeze(1) - log_probs = logits.log_softmax(dim=-1) - decoding_streams.advance(log_probs) - - decoding_streams.terminate_and_flush_to_streams() - - lattice = decoding_streams.format_output(processed_lens.tolist()) - best_path = one_best_decoding(lattice) - hyp_tokens = get_texts(best_path) - return hyp_tokens - - def decode_one_chunk( params: AttributeDict, model: nn.Module, @@ -305,8 +222,6 @@ def decode_one_chunk( features = [] feature_lens = [] states = [] - - rnnt_stream_list = [] processed_lens = [] for stream in decode_streams: @@ -317,8 +232,6 @@ def decode_one_chunk( feature_lens.append(feat_len) states.append(stream.states) processed_lens.append(stream.done_frames) - if params.decoding_method == "fast_beam_search": - rnnt_stream_list.append(stream.rnnt_decoding_stream) feature_lens = torch.tensor(feature_lens, device=device) features = pad_sequence(features, batch_first=True, padding_value=LOG_EPS) @@ -330,19 +243,13 @@ def decode_one_chunk( # frames. 
tail_length = 7 + (2 + params.right_context) * params.subsampling_factor if features.size(1) < tail_length: - feature_lens += tail_length - features.size(1) - features = torch.cat( - [ - features, - torch.tensor( - LOG_EPS, dtype=features.dtype, device=device - ).expand( - features.size(0), - tail_length - features.size(1), - features.size(2), - ), - ], - dim=1, + pad_length = tail_length - features.size(1) + feature_lens += pad_length + features = torch.nn.functional.pad( + features, + (0, 0, 0, pad_length), + mode="constant", + value=LOG_EPS, ) states = [ @@ -362,22 +269,31 @@ def decode_one_chunk( ) if params.decoding_method == "greedy_search": - hyp_tokens = greedy_search(model, encoder_out, decode_streams) - elif params.decoding_method == "fast_beam_search": - config = k2.RnntDecodingConfig( - vocab_size=params.vocab_size, - decoder_history_len=params.context_size, - beam=params.beam, - max_contexts=params.max_contexts, - max_states=params.max_states, + greedy_search( + model=model, encoder_out=encoder_out, streams=decode_streams ) - decoding_streams = k2.RnntDecodingStreams(rnnt_stream_list, config) + elif params.decoding_method == "fast_beam_search": processed_lens = processed_lens + encoder_out_lens - hyp_tokens = fast_beam_search( - model, encoder_out, processed_lens, decoding_streams + fast_beam_search_one_best( + model=model, + encoder_out=encoder_out, + processed_lens=processed_lens, + streams=decode_streams, + beam=params.beam, + max_states=params.max_states, + max_contexts=params.max_contexts, + ) + elif params.decoding_method == "modified_beam_search": + modified_beam_search( + model=model, + streams=decode_streams, + encoder_out=encoder_out, + num_active_paths=params.num_active_paths, ) else: - assert False + raise ValueError( + f"Unsupported decoding method: {params.decoding_method}" + ) states = [torch.unbind(states[0], dim=2), torch.unbind(states[1], dim=2)] @@ -385,8 +301,6 @@ def decode_one_chunk( for i in range(len(decode_streams)): decode_streams[i].states = [states[0][i], states[1][i]] decode_streams[i].done_frames += encoder_out_lens[i] - if params.decoding_method == "fast_beam_search": - decode_streams[i].hyp = hyp_tokens[i] if decode_streams[i].done: finished_streams.append(i) @@ -469,13 +383,10 @@ def decode_dataset( params=params, model=model, decode_streams=decode_streams ) for i in sorted(finished_streams, reverse=True): - hyp = decode_streams[i].hyp - if params.decoding_method == "greedy_search": - hyp = hyp[params.context_size :] # noqa decode_results.append( ( decode_streams[i].ground_truth.split(), - sp.decode(hyp).split(), + sp.decode(decode_streams[i].decoding_result()).split(), ) ) del decode_streams[i] @@ -489,24 +400,29 @@ def decode_dataset( params=params, model=model, decode_streams=decode_streams ) for i in sorted(finished_streams, reverse=True): - hyp = decode_streams[i].hyp - if params.decoding_method == "greedy_search": - hyp = hyp[params.context_size :] # noqa decode_results.append( ( decode_streams[i].ground_truth.split(), - sp.decode(hyp).split(), + sp.decode(decode_streams[i].decoding_result()).split(), ) ) del decode_streams[i] - key = "greedy_search" - if params.decoding_method == "fast_beam_search": + if params.decoding_method == "greedy_search": + key = "greedy_search" + elif params.decoding_method == "fast_beam_search": key = ( f"beam_{params.beam}_" f"max_contexts_{params.max_contexts}_" f"max_states_{params.max_states}" ) + elif params.decoding_method == "modified_beam_search": + key = f"num_active_paths_{params.num_active_paths}" + 
else: + raise ValueError( + f"Unsupported decoding method: {params.decoding_method}" + ) + return {key: decode_results} diff --git a/egs/librispeech/ASR/pruned_transducer_stateless2/streaming_beam_search.py b/egs/librispeech/ASR/pruned_transducer_stateless2/streaming_beam_search.py new file mode 100644 index 000000000..9bcd2f9f9 --- /dev/null +++ b/egs/librispeech/ASR/pruned_transducer_stateless2/streaming_beam_search.py @@ -0,0 +1,288 @@ +# Copyright 2022 Xiaomi Corp. (authors: Wei Kang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import warnings +from typing import List + +import k2 +import torch +import torch.nn as nn +from beam_search import Hypothesis, HypothesisList, get_hyps_shape +from decode_stream import DecodeStream + +from icefall.decode import one_best_decoding +from icefall.utils import get_texts + + +def greedy_search( + model: nn.Module, + encoder_out: torch.Tensor, + streams: List[DecodeStream], +) -> None: + """Greedy search in batch mode. It hardcodes --max-sym-per-frame=1. + + Args: + model: + The transducer model. + encoder_out: + Output from the encoder. Its shape is (N, T, C), where N >= 1. + streams: + A list of Stream objects. + """ + assert len(streams) == encoder_out.size(0) + assert encoder_out.ndim == 3 + + blank_id = model.decoder.blank_id + context_size = model.decoder.context_size + device = model.device + T = encoder_out.size(1) + + decoder_input = torch.tensor( + [stream.hyp[-context_size:] for stream in streams], + device=device, + dtype=torch.int64, + ) + # decoder_out is of shape (N, 1, decoder_out_dim) + decoder_out = model.decoder(decoder_input, need_pad=False) + decoder_out = model.joiner.decoder_proj(decoder_out) + + for t in range(T): + # current_encoder_out's shape: (batch_size, 1, encoder_out_dim) + current_encoder_out = encoder_out[:, t : t + 1, :] # noqa + + logits = model.joiner( + current_encoder_out.unsqueeze(2), + decoder_out.unsqueeze(1), + project_input=False, + ) + # logits'shape (batch_size, vocab_size) + logits = logits.squeeze(1).squeeze(1) + + assert logits.ndim == 2, logits.shape + y = logits.argmax(dim=1).tolist() + emitted = False + for i, v in enumerate(y): + if v != blank_id: + streams[i].hyp.append(v) + emitted = True + if emitted: + # update decoder output + decoder_input = torch.tensor( + [stream.hyp[-context_size:] for stream in streams], + device=device, + dtype=torch.int64, + ) + decoder_out = model.decoder( + decoder_input, + need_pad=False, + ) + decoder_out = model.joiner.decoder_proj(decoder_out) + + +def modified_beam_search( + model: nn.Module, + encoder_out: torch.Tensor, + streams: List[DecodeStream], + num_active_paths: int = 4, +) -> None: + """Beam search in batch mode with --max-sym-per-frame=1 being hardcoded. + + Args: + model: + The RNN-T model. + encoder_out: + A 3-D tensor of shape (N, T, encoder_out_dim) containing the output of + the encoder model. + streams: + A list of stream objects. 
+ num_active_paths: + Number of active paths during the beam search. + """ + assert encoder_out.ndim == 3, encoder_out.shape + assert len(streams) == encoder_out.size(0) + + blank_id = model.decoder.blank_id + context_size = model.decoder.context_size + device = next(model.parameters()).device + batch_size = len(streams) + T = encoder_out.size(1) + + B = [stream.hyps for stream in streams] + + for t in range(T): + current_encoder_out = encoder_out[:, t].unsqueeze(1).unsqueeze(1) + # current_encoder_out's shape: (batch_size, 1, 1, encoder_out_dim) + + hyps_shape = get_hyps_shape(B).to(device) + + A = [list(b) for b in B] + B = [HypothesisList() for _ in range(batch_size)] + + ys_log_probs = torch.stack( + [hyp.log_prob.reshape(1) for hyps in A for hyp in hyps], dim=0 + ) # (num_hyps, 1) + + decoder_input = torch.tensor( + [hyp.ys[-context_size:] for hyps in A for hyp in hyps], + device=device, + dtype=torch.int64, + ) # (num_hyps, context_size) + + decoder_out = model.decoder(decoder_input, need_pad=False).unsqueeze(1) + decoder_out = model.joiner.decoder_proj(decoder_out) + # decoder_out is of shape (num_hyps, 1, 1, decoder_output_dim) + + # Note: For torch 1.7.1 and below, it requires a torch.int64 tensor + # as index, so we use `to(torch.int64)` below. + current_encoder_out = torch.index_select( + current_encoder_out, + dim=0, + index=hyps_shape.row_ids(1).to(torch.int64), + ) # (num_hyps, encoder_out_dim) + + logits = model.joiner( + current_encoder_out, decoder_out, project_input=False + ) + # logits is of shape (num_hyps, 1, 1, vocab_size) + + logits = logits.squeeze(1).squeeze(1) + + log_probs = logits.log_softmax(dim=-1) # (num_hyps, vocab_size) + + log_probs.add_(ys_log_probs) + + vocab_size = log_probs.size(-1) + + log_probs = log_probs.reshape(-1) + + row_splits = hyps_shape.row_splits(1) * vocab_size + log_probs_shape = k2.ragged.create_ragged_shape2( + row_splits=row_splits, cached_tot_size=log_probs.numel() + ) + ragged_log_probs = k2.RaggedTensor( + shape=log_probs_shape, value=log_probs + ) + + for i in range(batch_size): + topk_log_probs, topk_indexes = ragged_log_probs[i].topk( + num_active_paths + ) + + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + topk_hyp_indexes = (topk_indexes // vocab_size).tolist() + topk_token_indexes = (topk_indexes % vocab_size).tolist() + + for k in range(len(topk_hyp_indexes)): + hyp_idx = topk_hyp_indexes[k] + hyp = A[i][hyp_idx] + + new_ys = hyp.ys[:] + new_token = topk_token_indexes[k] + if new_token != blank_id: + new_ys.append(new_token) + + new_log_prob = topk_log_probs[k] + new_hyp = Hypothesis(ys=new_ys, log_prob=new_log_prob) + B[i].add(new_hyp) + + for i in range(batch_size): + streams[i].hyps = B[i] + + +def fast_beam_search_one_best( + model: nn.Module, + encoder_out: torch.Tensor, + processed_lens: torch.Tensor, + streams: List[DecodeStream], + beam: float, + max_states: int, + max_contexts: int, +) -> None: + """It limits the maximum number of symbols per frame to 1. + + A lattice is first generated by Fsa-based beam search, then we get the + recognition by applying shortest path on the lattice. + + Args: + model: + An instance of `Transducer`. + encoder_out: + A tensor of shape (N, T, C) from the encoder. + processed_lens: + A tensor of shape (N,) containing the number of processed frames + in `encoder_out` before padding. + streams: + A list of stream objects. + beam: + Beam value, similar to the beam used in Kaldi.. + max_states: + Max states per stream per frame. 
+ max_contexts: + Max contexts pre stream per frame. + """ + assert encoder_out.ndim == 3 + B, T, C = encoder_out.shape + assert B == len(streams) + + context_size = model.decoder.context_size + vocab_size = model.decoder.vocab_size + + config = k2.RnntDecodingConfig( + vocab_size=vocab_size, + decoder_history_len=context_size, + beam=beam, + max_contexts=max_contexts, + max_states=max_states, + ) + individual_streams = [] + for i in range(B): + individual_streams.append(streams[i].rnnt_decoding_stream) + decoding_streams = k2.RnntDecodingStreams(individual_streams, config) + + for t in range(T): + # shape is a RaggedShape of shape (B, context) + # contexts is a Tensor of shape (shape.NumElements(), context_size) + shape, contexts = decoding_streams.get_contexts() + # `nn.Embedding()` in torch below v1.7.1 supports only torch.int64 + contexts = contexts.to(torch.int64) + # decoder_out is of shape (shape.NumElements(), 1, decoder_out_dim) + decoder_out = model.decoder(contexts, need_pad=False) + decoder_out = model.joiner.decoder_proj(decoder_out) + # current_encoder_out is of shape + # (shape.NumElements(), 1, joiner_dim) + # fmt: off + current_encoder_out = torch.index_select( + encoder_out[:, t:t + 1, :], 0, shape.row_ids(1).to(torch.int64) + ) + # fmt: on + logits = model.joiner( + current_encoder_out.unsqueeze(2), + decoder_out.unsqueeze(1), + project_input=False, + ) + logits = logits.squeeze(1).squeeze(1) + log_probs = logits.log_softmax(dim=-1) + decoding_streams.advance(log_probs) + + decoding_streams.terminate_and_flush_to_streams() + + lattice = decoding_streams.format_output(processed_lens.tolist()) + best_path = one_best_decoding(lattice) + hyp_tokens = get_texts(best_path) + + for i in range(B): + streams[i].hyp = hyp_tokens[i] diff --git a/egs/librispeech/ASR/pruned_transducer_stateless2/streaming_decode.py b/egs/librispeech/ASR/pruned_transducer_stateless2/streaming_decode.py index b3e1f04c3..79963c968 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless2/streaming_decode.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless2/streaming_decode.py @@ -43,6 +43,11 @@ from asr_datamodule import LibriSpeechAsrDataModule from decode_stream import DecodeStream from kaldifeat import Fbank, FbankOptions from lhotse import CutSet +from streaming_beam_search import ( + fast_beam_search_one_best, + greedy_search, + modified_beam_search, +) from torch.nn.utils.rnn import pad_sequence from train import add_model_arguments, get_params, get_transducer_model @@ -51,10 +56,8 @@ from icefall.checkpoint import ( find_checkpoints, load_checkpoint, ) -from icefall.decode import one_best_decoding from icefall.utils import ( AttributeDict, - get_texts, setup_logger, store_transcripts, write_error_stats, @@ -114,10 +117,21 @@ def get_parser(): "--decoding-method", type=str, default="greedy_search", - help="""Support only greedy_search and fast_beam_search now. + help="""Supported decoding methods are: + greedy_search + modified_beam_search + fast_beam_search """, ) + parser.add_argument( + "--num_active_paths", + type=int, + default=4, + help="""An interger indicating how many candidates we will keep for each + frame. 
Used only when --decoding-method is modified_beam_search.""", + ) + parser.add_argument( "--beam", type=float, @@ -185,109 +199,6 @@ def get_parser(): return parser -def greedy_search( - model: nn.Module, - encoder_out: torch.Tensor, - streams: List[DecodeStream], -) -> List[List[int]]: - - assert len(streams) == encoder_out.size(0) - assert encoder_out.ndim == 3 - - blank_id = model.decoder.blank_id - context_size = model.decoder.context_size - device = model.device - T = encoder_out.size(1) - - decoder_input = torch.tensor( - [stream.hyp[-context_size:] for stream in streams], - device=device, - dtype=torch.int64, - ) - # decoder_out is of shape (N, decoder_out_dim) - decoder_out = model.decoder(decoder_input, need_pad=False) - decoder_out = model.joiner.decoder_proj(decoder_out) - # logging.info(f"decoder_out shape : {decoder_out.shape}") - - for t in range(T): - # current_encoder_out's shape: (batch_size, 1, encoder_out_dim) - current_encoder_out = encoder_out[:, t : t + 1, :] # noqa - - logits = model.joiner( - current_encoder_out.unsqueeze(2), - decoder_out.unsqueeze(1), - project_input=False, - ) - # logits'shape (batch_size, vocab_size) - logits = logits.squeeze(1).squeeze(1) - - assert logits.ndim == 2, logits.shape - y = logits.argmax(dim=1).tolist() - emitted = False - for i, v in enumerate(y): - if v != blank_id: - streams[i].hyp.append(v) - emitted = True - if emitted: - # update decoder output - decoder_input = torch.tensor( - [stream.hyp[-context_size:] for stream in streams], - device=device, - dtype=torch.int64, - ) - decoder_out = model.decoder( - decoder_input, - need_pad=False, - ) - decoder_out = model.joiner.decoder_proj(decoder_out) - - hyp_tokens = [] - for stream in streams: - hyp_tokens.append(stream.hyp) - return hyp_tokens - - -def fast_beam_search( - model: nn.Module, - encoder_out: torch.Tensor, - processed_lens: torch.Tensor, - decoding_streams: k2.RnntDecodingStreams, -) -> List[List[int]]: - - B, T, C = encoder_out.shape - for t in range(T): - # shape is a RaggedShape of shape (B, context) - # contexts is a Tensor of shape (shape.NumElements(), context_size) - shape, contexts = decoding_streams.get_contexts() - # `nn.Embedding()` in torch below v1.7.1 supports only torch.int64 - contexts = contexts.to(torch.int64) - # decoder_out is of shape (shape.NumElements(), 1, decoder_out_dim) - decoder_out = model.decoder(contexts, need_pad=False) - decoder_out = model.joiner.decoder_proj(decoder_out) - # current_encoder_out is of shape - # (shape.NumElements(), 1, joiner_dim) - # fmt: off - current_encoder_out = torch.index_select( - encoder_out[:, t:t + 1, :], 0, shape.row_ids(1).to(torch.int64) - ) - # fmt: on - logits = model.joiner( - current_encoder_out.unsqueeze(2), - decoder_out.unsqueeze(1), - project_input=False, - ) - logits = logits.squeeze(1).squeeze(1) - log_probs = logits.log_softmax(dim=-1) - decoding_streams.advance(log_probs) - - decoding_streams.terminate_and_flush_to_streams() - - lattice = decoding_streams.format_output(processed_lens.tolist()) - best_path = one_best_decoding(lattice) - hyp_tokens = get_texts(best_path) - return hyp_tokens - - def decode_one_chunk( params: AttributeDict, model: nn.Module, @@ -312,7 +223,6 @@ def decode_one_chunk( feature_lens = [] states = [] - rnnt_stream_list = [] processed_lens = [] for stream in decode_streams: @@ -323,8 +233,6 @@ def decode_one_chunk( feature_lens.append(feat_len) states.append(stream.states) processed_lens.append(stream.done_frames) - if params.decoding_method == "fast_beam_search": - 
rnnt_stream_list.append(stream.rnnt_decoding_stream) feature_lens = torch.tensor(feature_lens, device=device) features = pad_sequence(features, batch_first=True, padding_value=LOG_EPS) @@ -336,19 +244,13 @@ def decode_one_chunk( # frames. tail_length = 7 + (2 + params.right_context) * params.subsampling_factor if features.size(1) < tail_length: - feature_lens += tail_length - features.size(1) - features = torch.cat( - [ - features, - torch.tensor( - LOG_EPS, dtype=features.dtype, device=device - ).expand( - features.size(0), - tail_length - features.size(1), - features.size(2), - ), - ], - dim=1, + pad_length = tail_length - features.size(1) + feature_lens += pad_length + features = torch.nn.functional.pad( + features, + (0, 0, 0, pad_length), + mode="constant", + value=LOG_EPS, ) states = [ @@ -369,22 +271,31 @@ def decode_one_chunk( encoder_out = model.joiner.encoder_proj(encoder_out) if params.decoding_method == "greedy_search": - hyp_tokens = greedy_search(model, encoder_out, decode_streams) - elif params.decoding_method == "fast_beam_search": - config = k2.RnntDecodingConfig( - vocab_size=params.vocab_size, - decoder_history_len=params.context_size, - beam=params.beam, - max_contexts=params.max_contexts, - max_states=params.max_states, + greedy_search( + model=model, encoder_out=encoder_out, streams=decode_streams ) - decoding_streams = k2.RnntDecodingStreams(rnnt_stream_list, config) + elif params.decoding_method == "fast_beam_search": processed_lens = processed_lens + encoder_out_lens - hyp_tokens = fast_beam_search( - model, encoder_out, processed_lens, decoding_streams + fast_beam_search_one_best( + model=model, + encoder_out=encoder_out, + processed_lens=processed_lens, + streams=decode_streams, + beam=params.beam, + max_states=params.max_states, + max_contexts=params.max_contexts, + ) + elif params.decoding_method == "modified_beam_search": + modified_beam_search( + model=model, + streams=decode_streams, + encoder_out=encoder_out, + num_active_paths=params.num_active_paths, ) else: - assert False + raise ValueError( + f"Unsupported decoding method: {params.decoding_method}" + ) states = [torch.unbind(states[0], dim=2), torch.unbind(states[1], dim=2)] @@ -392,8 +303,6 @@ def decode_one_chunk( for i in range(len(decode_streams)): decode_streams[i].states = [states[0][i], states[1][i]] decode_streams[i].done_frames += encoder_out_lens[i] - if params.decoding_method == "fast_beam_search": - decode_streams[i].hyp = hyp_tokens[i] if decode_streams[i].done: finished_streams.append(i) @@ -477,13 +386,10 @@ def decode_dataset( params=params, model=model, decode_streams=decode_streams ) for i in sorted(finished_streams, reverse=True): - hyp = decode_streams[i].hyp - if params.decoding_method == "greedy_search": - hyp = hyp[params.context_size :] # noqa decode_results.append( ( decode_streams[i].ground_truth.split(), - sp.decode(hyp).split(), + sp.decode(decode_streams[i].decoding_result()).split(), ) ) del decode_streams[i] @@ -497,24 +403,28 @@ def decode_dataset( params=params, model=model, decode_streams=decode_streams ) for i in sorted(finished_streams, reverse=True): - hyp = decode_streams[i].hyp - if params.decoding_method == "greedy_search": - hyp = hyp[params.context_size :] # noqa decode_results.append( ( decode_streams[i].ground_truth.split(), - sp.decode(hyp).split(), + sp.decode(decode_streams[i].decoding_result()).split(), ) ) del decode_streams[i] - key = "greedy_search" - if params.decoding_method == "fast_beam_search": + if params.decoding_method == "greedy_search": + key 
= "greedy_search" + elif params.decoding_method == "fast_beam_search": key = ( f"beam_{params.beam}_" f"max_contexts_{params.max_contexts}_" f"max_states_{params.max_states}" ) + elif params.decoding_method == "modified_beam_search": + key = f"num_active_paths_{params.num_active_paths}" + else: + raise ValueError( + f"Unsupported decoding method: {params.decoding_method}" + ) return {key: decode_results} diff --git a/egs/librispeech/ASR/pruned_transducer_stateless3/streaming_beam_search.py b/egs/librispeech/ASR/pruned_transducer_stateless3/streaming_beam_search.py new file mode 120000 index 000000000..3a5f89833 --- /dev/null +++ b/egs/librispeech/ASR/pruned_transducer_stateless3/streaming_beam_search.py @@ -0,0 +1 @@ +../pruned_transducer_stateless2/streaming_beam_search.py \ No newline at end of file diff --git a/egs/librispeech/ASR/pruned_transducer_stateless3/streaming_decode.py b/egs/librispeech/ASR/pruned_transducer_stateless3/streaming_decode.py index 8af2788be..1976d19a6 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless3/streaming_decode.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless3/streaming_decode.py @@ -17,13 +17,13 @@ """ Usage: -./pruned_transducer_stateless2/streaming_decode.py \ +./pruned_transducer_stateless3/streaming_decode.py \ --epoch 28 \ --avg 15 \ --left-context 32 \ --decode-chunk-size 8 \ --right-context 0 \ - --exp-dir ./pruned_transducer_stateless2/exp \ + --exp-dir ./pruned_transducer_stateless3/exp \ --decoding_method greedy_search \ --num-decode-streams 1000 """ @@ -44,6 +44,11 @@ from decode_stream import DecodeStream from kaldifeat import Fbank, FbankOptions from lhotse import CutSet from librispeech import LibriSpeech +from streaming_beam_search import ( + fast_beam_search_one_best, + greedy_search, + modified_beam_search, +) from torch.nn.utils.rnn import pad_sequence from train import add_model_arguments, get_params, get_transducer_model @@ -52,10 +57,8 @@ from icefall.checkpoint import ( find_checkpoints, load_checkpoint, ) -from icefall.decode import one_best_decoding from icefall.utils import ( AttributeDict, - get_texts, setup_logger, store_transcripts, write_error_stats, @@ -115,10 +118,21 @@ def get_parser(): "--decoding-method", type=str, default="greedy_search", - help="""Support only greedy_search and fast_beam_search now. + help="""Supported decoding methods are: + greedy_search + modified_beam_search + fast_beam_search """, ) + parser.add_argument( + "--num_active_paths", + type=int, + default=4, + help="""An interger indicating how many candidates we will keep for each + frame. 
Used only when --decoding-method is modified_beam_search.""", + ) + parser.add_argument( "--beam", type=float, @@ -186,109 +200,6 @@ def get_parser(): return parser -def greedy_search( - model: nn.Module, - encoder_out: torch.Tensor, - streams: List[DecodeStream], -) -> List[List[int]]: - - assert len(streams) == encoder_out.size(0) - assert encoder_out.ndim == 3 - - blank_id = model.decoder.blank_id - context_size = model.decoder.context_size - device = model.device - T = encoder_out.size(1) - - decoder_input = torch.tensor( - [stream.hyp[-context_size:] for stream in streams], - device=device, - dtype=torch.int64, - ) - # decoder_out is of shape (N, decoder_out_dim) - decoder_out = model.decoder(decoder_input, need_pad=False) - decoder_out = model.joiner.decoder_proj(decoder_out) - # logging.info(f"decoder_out shape : {decoder_out.shape}") - - for t in range(T): - # current_encoder_out's shape: (batch_size, 1, encoder_out_dim) - current_encoder_out = encoder_out[:, t : t + 1, :] # noqa - - logits = model.joiner( - current_encoder_out.unsqueeze(2), - decoder_out.unsqueeze(1), - project_input=False, - ) - # logits'shape (batch_size, vocab_size) - logits = logits.squeeze(1).squeeze(1) - - assert logits.ndim == 2, logits.shape - y = logits.argmax(dim=1).tolist() - emitted = False - for i, v in enumerate(y): - if v != blank_id: - streams[i].hyp.append(v) - emitted = True - if emitted: - # update decoder output - decoder_input = torch.tensor( - [stream.hyp[-context_size:] for stream in streams], - device=device, - dtype=torch.int64, - ) - decoder_out = model.decoder( - decoder_input, - need_pad=False, - ) - decoder_out = model.joiner.decoder_proj(decoder_out) - - hyp_tokens = [] - for stream in streams: - hyp_tokens.append(stream.hyp) - return hyp_tokens - - -def fast_beam_search( - model: nn.Module, - encoder_out: torch.Tensor, - processed_lens: torch.Tensor, - decoding_streams: k2.RnntDecodingStreams, -) -> List[List[int]]: - - B, T, C = encoder_out.shape - for t in range(T): - # shape is a RaggedShape of shape (B, context) - # contexts is a Tensor of shape (shape.NumElements(), context_size) - shape, contexts = decoding_streams.get_contexts() - # `nn.Embedding()` in torch below v1.7.1 supports only torch.int64 - contexts = contexts.to(torch.int64) - # decoder_out is of shape (shape.NumElements(), 1, decoder_out_dim) - decoder_out = model.decoder(contexts, need_pad=False) - decoder_out = model.joiner.decoder_proj(decoder_out) - # current_encoder_out is of shape - # (shape.NumElements(), 1, joiner_dim) - # fmt: off - current_encoder_out = torch.index_select( - encoder_out[:, t:t + 1, :], 0, shape.row_ids(1).to(torch.int64) - ) - # fmt: on - logits = model.joiner( - current_encoder_out.unsqueeze(2), - decoder_out.unsqueeze(1), - project_input=False, - ) - logits = logits.squeeze(1).squeeze(1) - log_probs = logits.log_softmax(dim=-1) - decoding_streams.advance(log_probs) - - decoding_streams.terminate_and_flush_to_streams() - - lattice = decoding_streams.format_output(processed_lens.tolist()) - best_path = one_best_decoding(lattice) - hyp_tokens = get_texts(best_path) - return hyp_tokens - - def decode_one_chunk( params: AttributeDict, model: nn.Module, @@ -313,7 +224,6 @@ def decode_one_chunk( feature_lens = [] states = [] - rnnt_stream_list = [] processed_lens = [] for stream in decode_streams: @@ -324,8 +234,6 @@ def decode_one_chunk( feature_lens.append(feat_len) states.append(stream.states) processed_lens.append(stream.done_frames) - if params.decoding_method == "fast_beam_search": - 
rnnt_stream_list.append(stream.rnnt_decoding_stream) feature_lens = torch.tensor(feature_lens, device=device) features = pad_sequence(features, batch_first=True, padding_value=LOG_EPS) @@ -337,19 +245,13 @@ def decode_one_chunk( # frames. tail_length = 7 + (2 + params.right_context) * params.subsampling_factor if features.size(1) < tail_length: - feature_lens += tail_length - features.size(1) - features = torch.cat( - [ - features, - torch.tensor( - LOG_EPS, dtype=features.dtype, device=device - ).expand( - features.size(0), - tail_length - features.size(1), - features.size(2), - ), - ], - dim=1, + pad_length = tail_length - features.size(1) + feature_lens += pad_length + features = torch.nn.functional.pad( + features, + (0, 0, 0, pad_length), + mode="constant", + value=LOG_EPS, ) states = [ @@ -370,22 +272,31 @@ def decode_one_chunk( encoder_out = model.joiner.encoder_proj(encoder_out) if params.decoding_method == "greedy_search": - hyp_tokens = greedy_search(model, encoder_out, decode_streams) - elif params.decoding_method == "fast_beam_search": - config = k2.RnntDecodingConfig( - vocab_size=params.vocab_size, - decoder_history_len=params.context_size, - beam=params.beam, - max_contexts=params.max_contexts, - max_states=params.max_states, + greedy_search( + model=model, encoder_out=encoder_out, streams=decode_streams ) - decoding_streams = k2.RnntDecodingStreams(rnnt_stream_list, config) + elif params.decoding_method == "fast_beam_search": processed_lens = processed_lens + encoder_out_lens - hyp_tokens = fast_beam_search( - model, encoder_out, processed_lens, decoding_streams + fast_beam_search_one_best( + model=model, + encoder_out=encoder_out, + processed_lens=processed_lens, + streams=decode_streams, + beam=params.beam, + max_states=params.max_states, + max_contexts=params.max_contexts, + ) + elif params.decoding_method == "modified_beam_search": + modified_beam_search( + model=model, + streams=decode_streams, + encoder_out=encoder_out, + num_active_paths=params.num_active_paths, ) else: - assert False + raise ValueError( + f"Unsupported decoding method: {params.decoding_method}" + ) states = [torch.unbind(states[0], dim=2), torch.unbind(states[1], dim=2)] @@ -393,8 +304,6 @@ def decode_one_chunk( for i in range(len(decode_streams)): decode_streams[i].states = [states[0][i], states[1][i]] decode_streams[i].done_frames += encoder_out_lens[i] - if params.decoding_method == "fast_beam_search": - decode_streams[i].hyp = hyp_tokens[i] if decode_streams[i].done: finished_streams.append(i) @@ -478,13 +387,10 @@ def decode_dataset( params=params, model=model, decode_streams=decode_streams ) for i in sorted(finished_streams, reverse=True): - hyp = decode_streams[i].hyp - if params.decoding_method == "greedy_search": - hyp = hyp[params.context_size :] # noqa decode_results.append( ( decode_streams[i].ground_truth.split(), - sp.decode(hyp).split(), + sp.decode(decode_streams[i].decoding_result()).split(), ) ) del decode_streams[i] @@ -498,24 +404,28 @@ def decode_dataset( params=params, model=model, decode_streams=decode_streams ) for i in sorted(finished_streams, reverse=True): - hyp = decode_streams[i].hyp - if params.decoding_method == "greedy_search": - hyp = hyp[params.context_size :] # noqa decode_results.append( ( decode_streams[i].ground_truth.split(), - sp.decode(hyp).split(), + sp.decode(decode_streams[i].decoding_result()).split(), ) ) del decode_streams[i] - key = "greedy_search" - if params.decoding_method == "fast_beam_search": + if params.decoding_method == "greedy_search": + key 
= "greedy_search" + elif params.decoding_method == "fast_beam_search": key = ( f"beam_{params.beam}_" f"max_contexts_{params.max_contexts}_" f"max_states_{params.max_states}" ) + elif params.decoding_method == "modified_beam_search": + key = f"num_active_paths_{params.num_active_paths}" + else: + raise ValueError( + f"Unsupported decoding method: {params.decoding_method}" + ) return {key: decode_results} diff --git a/egs/librispeech/ASR/pruned_transducer_stateless4/streaming_beam_search.py b/egs/librispeech/ASR/pruned_transducer_stateless4/streaming_beam_search.py new file mode 120000 index 000000000..3a5f89833 --- /dev/null +++ b/egs/librispeech/ASR/pruned_transducer_stateless4/streaming_beam_search.py @@ -0,0 +1 @@ +../pruned_transducer_stateless2/streaming_beam_search.py \ No newline at end of file diff --git a/egs/librispeech/ASR/pruned_transducer_stateless4/streaming_decode.py b/egs/librispeech/ASR/pruned_transducer_stateless4/streaming_decode.py index 57fd06980..de89d41c2 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless4/streaming_decode.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless4/streaming_decode.py @@ -17,13 +17,13 @@ """ Usage: -./pruned_transducer_stateless2/streaming_decode.py \ +./pruned_transducer_stateless4/streaming_decode.py \ --epoch 28 \ --avg 15 \ --left-context 32 \ --decode-chunk-size 8 \ --right-context 0 \ - --exp-dir ./pruned_transducer_stateless2/exp \ + --exp-dir ./pruned_transducer_stateless4/exp \ --decoding_method greedy_search \ --num-decode-streams 200 """ @@ -43,6 +43,11 @@ from asr_datamodule import LibriSpeechAsrDataModule from decode_stream import DecodeStream from kaldifeat import Fbank, FbankOptions from lhotse import CutSet +from streaming_beam_search import ( + fast_beam_search_one_best, + greedy_search, + modified_beam_search, +) from torch.nn.utils.rnn import pad_sequence from train import add_model_arguments, get_params, get_transducer_model @@ -52,10 +57,8 @@ from icefall.checkpoint import ( find_checkpoints, load_checkpoint, ) -from icefall.decode import one_best_decoding from icefall.utils import ( AttributeDict, - get_texts, setup_logger, store_transcripts, str2bool, @@ -127,10 +130,21 @@ def get_parser(): "--decoding-method", type=str, default="greedy_search", - help="""Support only greedy_search and fast_beam_search now. + help="""Supported decoding methods are: + greedy_search + modified_beam_search + fast_beam_search """, ) + parser.add_argument( + "--num_active_paths", + type=int, + default=4, + help="""An interger indicating how many candidates we will keep for each + frame. 
Used only when --decoding-method is modified_beam_search.""", + ) + parser.add_argument( "--beam", type=float, @@ -198,109 +212,6 @@ def get_parser(): return parser -def greedy_search( - model: nn.Module, - encoder_out: torch.Tensor, - streams: List[DecodeStream], -) -> List[List[int]]: - - assert len(streams) == encoder_out.size(0) - assert encoder_out.ndim == 3 - - blank_id = model.decoder.blank_id - context_size = model.decoder.context_size - device = model.device - T = encoder_out.size(1) - - decoder_input = torch.tensor( - [stream.hyp[-context_size:] for stream in streams], - device=device, - dtype=torch.int64, - ) - # decoder_out is of shape (N, decoder_out_dim) - decoder_out = model.decoder(decoder_input, need_pad=False) - decoder_out = model.joiner.decoder_proj(decoder_out) - # logging.info(f"decoder_out shape : {decoder_out.shape}") - - for t in range(T): - # current_encoder_out's shape: (batch_size, 1, encoder_out_dim) - current_encoder_out = encoder_out[:, t : t + 1, :] # noqa - - logits = model.joiner( - current_encoder_out.unsqueeze(2), - decoder_out.unsqueeze(1), - project_input=False, - ) - # logits'shape (batch_size, vocab_size) - logits = logits.squeeze(1).squeeze(1) - - assert logits.ndim == 2, logits.shape - y = logits.argmax(dim=1).tolist() - emitted = False - for i, v in enumerate(y): - if v != blank_id: - streams[i].hyp.append(v) - emitted = True - if emitted: - # update decoder output - decoder_input = torch.tensor( - [stream.hyp[-context_size:] for stream in streams], - device=device, - dtype=torch.int64, - ) - decoder_out = model.decoder( - decoder_input, - need_pad=False, - ) - decoder_out = model.joiner.decoder_proj(decoder_out) - - hyp_tokens = [] - for stream in streams: - hyp_tokens.append(stream.hyp) - return hyp_tokens - - -def fast_beam_search( - model: nn.Module, - encoder_out: torch.Tensor, - processed_lens: torch.Tensor, - decoding_streams: k2.RnntDecodingStreams, -) -> List[List[int]]: - - B, T, C = encoder_out.shape - for t in range(T): - # shape is a RaggedShape of shape (B, context) - # contexts is a Tensor of shape (shape.NumElements(), context_size) - shape, contexts = decoding_streams.get_contexts() - # `nn.Embedding()` in torch below v1.7.1 supports only torch.int64 - contexts = contexts.to(torch.int64) - # decoder_out is of shape (shape.NumElements(), 1, decoder_out_dim) - decoder_out = model.decoder(contexts, need_pad=False) - decoder_out = model.joiner.decoder_proj(decoder_out) - # current_encoder_out is of shape - # (shape.NumElements(), 1, joiner_dim) - # fmt: off - current_encoder_out = torch.index_select( - encoder_out[:, t:t + 1, :], 0, shape.row_ids(1).to(torch.int64) - ) - # fmt: on - logits = model.joiner( - current_encoder_out.unsqueeze(2), - decoder_out.unsqueeze(1), - project_input=False, - ) - logits = logits.squeeze(1).squeeze(1) - log_probs = logits.log_softmax(dim=-1) - decoding_streams.advance(log_probs) - - decoding_streams.terminate_and_flush_to_streams() - - lattice = decoding_streams.format_output(processed_lens.tolist()) - best_path = one_best_decoding(lattice) - hyp_tokens = get_texts(best_path) - return hyp_tokens - - def decode_one_chunk( params: AttributeDict, model: nn.Module, @@ -325,7 +236,6 @@ def decode_one_chunk( feature_lens = [] states = [] - rnnt_stream_list = [] processed_lens = [] for stream in decode_streams: @@ -336,8 +246,6 @@ def decode_one_chunk( feature_lens.append(feat_len) states.append(stream.states) processed_lens.append(stream.done_frames) - if params.decoding_method == "fast_beam_search": - 
rnnt_stream_list.append(stream.rnnt_decoding_stream) feature_lens = torch.tensor(feature_lens, device=device) features = pad_sequence(features, batch_first=True, padding_value=LOG_EPS) @@ -349,19 +257,13 @@ def decode_one_chunk( # frames. tail_length = 7 + (2 + params.right_context) * params.subsampling_factor if features.size(1) < tail_length: - feature_lens += tail_length - features.size(1) - features = torch.cat( - [ - features, - torch.tensor( - LOG_EPS, dtype=features.dtype, device=device - ).expand( - features.size(0), - tail_length - features.size(1), - features.size(2), - ), - ], - dim=1, + pad_length = tail_length - features.size(1) + feature_lens += pad_length + features = torch.nn.functional.pad( + features, + (0, 0, 0, pad_length), + mode="constant", + value=LOG_EPS, ) states = [ @@ -382,22 +284,31 @@ def decode_one_chunk( encoder_out = model.joiner.encoder_proj(encoder_out) if params.decoding_method == "greedy_search": - hyp_tokens = greedy_search(model, encoder_out, decode_streams) - elif params.decoding_method == "fast_beam_search": - config = k2.RnntDecodingConfig( - vocab_size=params.vocab_size, - decoder_history_len=params.context_size, - beam=params.beam, - max_contexts=params.max_contexts, - max_states=params.max_states, + greedy_search( + model=model, encoder_out=encoder_out, streams=decode_streams ) - decoding_streams = k2.RnntDecodingStreams(rnnt_stream_list, config) + elif params.decoding_method == "fast_beam_search": processed_lens = processed_lens + encoder_out_lens - hyp_tokens = fast_beam_search( - model, encoder_out, processed_lens, decoding_streams + fast_beam_search_one_best( + model=model, + encoder_out=encoder_out, + processed_lens=processed_lens, + streams=decode_streams, + beam=params.beam, + max_states=params.max_states, + max_contexts=params.max_contexts, + ) + elif params.decoding_method == "modified_beam_search": + modified_beam_search( + model=model, + streams=decode_streams, + encoder_out=encoder_out, + num_active_paths=params.num_active_paths, ) else: - assert False + raise ValueError( + f"Unsupported decoding method: {params.decoding_method}" + ) states = [torch.unbind(states[0], dim=2), torch.unbind(states[1], dim=2)] @@ -405,8 +316,6 @@ def decode_one_chunk( for i in range(len(decode_streams)): decode_streams[i].states = [states[0][i], states[1][i]] decode_streams[i].done_frames += encoder_out_lens[i] - if params.decoding_method == "fast_beam_search": - decode_streams[i].hyp = hyp_tokens[i] if decode_streams[i].done: finished_streams.append(i) @@ -490,13 +399,10 @@ def decode_dataset( params=params, model=model, decode_streams=decode_streams ) for i in sorted(finished_streams, reverse=True): - hyp = decode_streams[i].hyp - if params.decoding_method == "greedy_search": - hyp = hyp[params.context_size :] # noqa decode_results.append( ( decode_streams[i].ground_truth.split(), - sp.decode(hyp).split(), + sp.decode(decode_streams[i].decoding_result()).split(), ) ) del decode_streams[i] @@ -510,24 +416,28 @@ def decode_dataset( params=params, model=model, decode_streams=decode_streams ) for i in sorted(finished_streams, reverse=True): - hyp = decode_streams[i].hyp - if params.decoding_method == "greedy_search": - hyp = hyp[params.context_size :] # noqa decode_results.append( ( decode_streams[i].ground_truth.split(), - sp.decode(hyp).split(), + sp.decode(decode_streams[i].decoding_result()).split(), ) ) del decode_streams[i] - key = "greedy_search" - if params.decoding_method == "fast_beam_search": + if params.decoding_method == "greedy_search": + key 
= "greedy_search" + elif params.decoding_method == "fast_beam_search": key = ( f"beam_{params.beam}_" f"max_contexts_{params.max_contexts}_" f"max_states_{params.max_states}" ) + elif params.decoding_method == "modified_beam_search": + key = f"num_active_paths_{params.num_active_paths}" + else: + raise ValueError( + f"Unsupported decoding method: {params.decoding_method}" + ) return {key: decode_results} From 4612b0394711cd0d34eef50a3591b726e4af5b9b Mon Sep 17 00:00:00 2001 From: Fangjun Kuang Date: Tue, 26 Jul 2022 10:37:02 +0800 Subject: [PATCH 13/38] Fix using G before assignment in pruned_transducer_stateless/decode.py (#494) --- egs/librispeech/ASR/pruned_transducer_stateless3/decode.py | 1 + 1 file changed, 1 insertion(+) diff --git a/egs/librispeech/ASR/pruned_transducer_stateless3/decode.py b/egs/librispeech/ASR/pruned_transducer_stateless3/decode.py index c3a03f2e1..d2605c072 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless3/decode.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless3/decode.py @@ -999,6 +999,7 @@ def main(): model.device = device model.unk_id = params.unk_id + G = None if "fast_beam_search" in params.decoding_method: if params.decoding_method == "fast_beam_search_nbest_LG": lexicon = Lexicon(params.lang_dir) From d3fc4b031e1d8bcc4bba244813fac9af4473841d Mon Sep 17 00:00:00 2001 From: Fangjun Kuang Date: Tue, 26 Jul 2022 11:25:01 +0800 Subject: [PATCH 14/38] Support using aidatatang_200zh optionally in aishell training (#495) * Use aidatatang_200zh optionally in aishell training. --- .../ASR/pruned_transducer_stateless3/train.py | 105 ++++++++++++------ 1 file changed, 70 insertions(+), 35 deletions(-) diff --git a/egs/aishell/ASR/pruned_transducer_stateless3/train.py b/egs/aishell/ASR/pruned_transducer_stateless3/train.py index 02efe94fe..0e5291b21 100755 --- a/egs/aishell/ASR/pruned_transducer_stateless3/train.py +++ b/egs/aishell/ASR/pruned_transducer_stateless3/train.py @@ -62,6 +62,7 @@ import optim import torch import torch.multiprocessing as mp import torch.nn as nn + from aidatatang_200zh import AIDatatang200zh from aishell import AIShell from asr_datamodule import AsrDataModule @@ -344,8 +345,11 @@ def get_parser(): "--datatang-prob", type=float, default=0.2, - help="The probability to select a batch from the " - "aidatatang_200zh dataset", + help="""The probability to select a batch from the + aidatatang_200zh dataset. + If it is set to 0, you don't need to download the data + for aidatatang_200zh. 
+ """, ) add_model_arguments(parser) @@ -457,8 +461,12 @@ def get_transducer_model(params: AttributeDict) -> nn.Module: decoder = get_decoder_model(params) joiner = get_joiner_model(params) - decoder_datatang = get_decoder_model(params) - joiner_datatang = get_joiner_model(params) + if params.datatang_prob > 0: + decoder_datatang = get_decoder_model(params) + joiner_datatang = get_joiner_model(params) + else: + decoder_datatang = None + joiner_datatang = None model = Transducer( encoder=encoder, @@ -726,7 +734,7 @@ def train_one_epoch( scheduler: LRSchedulerType, graph_compiler: CharCtcTrainingGraphCompiler, train_dl: torch.utils.data.DataLoader, - datatang_train_dl: torch.utils.data.DataLoader, + datatang_train_dl: Optional[torch.utils.data.DataLoader], valid_dl: torch.utils.data.DataLoader, rng: random.Random, scaler: GradScaler, @@ -778,13 +786,17 @@ def train_one_epoch( dl_weights = [1 - params.datatang_prob, params.datatang_prob] iter_aishell = iter(train_dl) - iter_datatang = iter(datatang_train_dl) + if datatang_train_dl is not None: + iter_datatang = iter(datatang_train_dl) batch_idx = 0 while True: - idx = rng.choices((0, 1), weights=dl_weights, k=1)[0] - dl = iter_aishell if idx == 0 else iter_datatang + if datatang_train_dl is not None: + idx = rng.choices((0, 1), weights=dl_weights, k=1)[0] + dl = iter_aishell if idx == 0 else iter_datatang + else: + dl = iter_aishell try: batch = next(dl) @@ -808,7 +820,11 @@ def train_one_epoch( warmup=(params.batch_idx_train / params.model_warm_step), ) # summary stats - tot_loss = (tot_loss * (1 - 1 / params.reset_interval)) + loss_info + if datatang_train_dl is not None: + tot_loss = ( + tot_loss * (1 - 1 / params.reset_interval) + ) + loss_info + if aishell: aishell_tot_loss = ( aishell_tot_loss * (1 - 1 / params.reset_interval) @@ -871,12 +887,21 @@ def train_one_epoch( if batch_idx % params.log_interval == 0: cur_lr = scheduler.get_last_lr()[0] + if datatang_train_dl is not None: + datatang_str = f"datatang_tot_loss[{datatang_tot_loss}], " + tot_loss_str = ( + f"tot_loss[{tot_loss}], batch size: {batch_size}, " + ) + else: + tot_loss_str = "" + datatang_str = "" + logging.info( f"Epoch {params.cur_epoch}, " f"batch {batch_idx}, {prefix}_loss[{loss_info}], " - f"tot_loss[{tot_loss}], batch size: {batch_size}, " + f"{tot_loss_str}" f"aishell_tot_loss[{aishell_tot_loss}], " - f"datatang_tot_loss[{datatang_tot_loss}], " + f"{datatang_str}" f"batch size: {batch_size}, " f"lr: {cur_lr:.2e}" ) @@ -891,15 +916,18 @@ def train_one_epoch( f"train/current_{prefix}_", params.batch_idx_train, ) - tot_loss.write_summary( - tb_writer, "train/tot_", params.batch_idx_train - ) + if datatang_train_dl is not None: + # If it is None, tot_loss is the same as aishell_tot_loss. 
+ tot_loss.write_summary( + tb_writer, "train/tot_", params.batch_idx_train + ) aishell_tot_loss.write_summary( tb_writer, "train/aishell_tot_", params.batch_idx_train ) - datatang_tot_loss.write_summary( - tb_writer, "train/datatang_tot_", params.batch_idx_train - ) + if datatang_train_dl is not None: + datatang_tot_loss.write_summary( + tb_writer, "train/datatang_tot_", params.batch_idx_train + ) if batch_idx > 0 and batch_idx % params.valid_interval == 0: logging.info("Computing validation loss") @@ -1032,11 +1060,6 @@ def run(rank, world_size, args): train_cuts = aishell.train_cuts() train_cuts = filter_short_and_long_utterances(train_cuts) - datatang = AIDatatang200zh(manifest_dir=args.manifest_dir) - train_datatang_cuts = datatang.train_cuts() - train_datatang_cuts = filter_short_and_long_utterances(train_datatang_cuts) - train_datatang_cuts = train_datatang_cuts.repeat(times=None) - if args.enable_musan: cuts_musan = load_manifest( Path(args.manifest_dir) / "musan_cuts.jsonl.gz" @@ -1052,11 +1075,21 @@ def run(rank, world_size, args): cuts_musan=cuts_musan, ) - datatang_train_dl = asr_datamodule.train_dataloaders( - train_datatang_cuts, - on_the_fly_feats=False, - cuts_musan=cuts_musan, - ) + if params.datatang_prob > 0: + datatang = AIDatatang200zh(manifest_dir=args.manifest_dir) + train_datatang_cuts = datatang.train_cuts() + train_datatang_cuts = filter_short_and_long_utterances( + train_datatang_cuts + ) + train_datatang_cuts = train_datatang_cuts.repeat(times=None) + datatang_train_dl = asr_datamodule.train_dataloaders( + train_datatang_cuts, + on_the_fly_feats=False, + cuts_musan=cuts_musan, + ) + else: + datatang_train_dl = None + logging.info("Not using aidatatang_200zh for training") valid_cuts = aishell.valid_cuts() valid_dl = asr_datamodule.valid_dataloaders(valid_cuts) @@ -1065,13 +1098,14 @@ def run(rank, world_size, args): train_dl, # datatang_train_dl ]: - scan_pessimistic_batches_for_oom( - model=model, - train_dl=dl, - optimizer=optimizer, - graph_compiler=graph_compiler, - params=params, - ) + if dl is not None: + scan_pessimistic_batches_for_oom( + model=model, + train_dl=dl, + optimizer=optimizer, + graph_compiler=graph_compiler, + params=params, + ) scaler = GradScaler(enabled=params.use_fp16) if checkpoints and "grad_scaler" in checkpoints: @@ -1083,7 +1117,8 @@ def run(rank, world_size, args): scheduler.step_epoch(epoch - 1) fix_random_seed(params.seed + epoch - 1) train_dl.sampler.set_epoch(epoch - 1) - datatang_train_dl.sampler.set_epoch(epoch) + if datatang_train_dl is not None: + datatang_train_dl.sampler.set_epoch(epoch) if tb_writer is not None: tb_writer.add_scalar("train/epoch", epoch, params.batch_idx_train) From 385645d5333058d728bed5f8845d598d5f34dae0 Mon Sep 17 00:00:00 2001 From: Fangjun Kuang Date: Tue, 26 Jul 2022 15:42:21 +0800 Subject: [PATCH 15/38] Fix get_transducer_model() for aishell. (#497) PR #495 introduces an error. This commit fixes it. 
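To make the fix above concrete: after the previous commit, `get_transducer_model()` in the aishell `pruned_transducer_stateless3` recipe builds the auxiliary aidatatang_200zh decoder/joiner only when `params.datatang_prob > 0`, so every script that calls it must define that attribute. The inference scripts patched below do not parse `--datatang-prob`, hence the added `params.datatang_prob = 0`. A minimal, self-contained sketch of the pattern (toy stand-in code for illustration, not the actual icefall implementation):

```python
from types import SimpleNamespace


def build_model(params):
    # Mirrors the guard added to get_transducer_model() in train.py:
    # the extra aidatatang_200zh decoder/joiner are created only when
    # params.datatang_prob > 0, so the attribute must always be present.
    use_datatang_branch = params.datatang_prob > 0
    return {"use_datatang_branch": use_datatang_branch}


# decode.py / export.py / pretrained.py never add a --datatang-prob flag,
# so they set it explicitly before constructing the model (the fix here).
params = SimpleNamespace(datatang_prob=0)
model = build_model(params)
assert model["use_datatang_branch"] is False
```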
--- egs/aishell/ASR/pruned_transducer_stateless3/decode.py | 1 + egs/aishell/ASR/pruned_transducer_stateless3/export.py | 1 + egs/aishell/ASR/pruned_transducer_stateless3/pretrained.py | 1 + 3 files changed, 3 insertions(+) diff --git a/egs/aishell/ASR/pruned_transducer_stateless3/decode.py b/egs/aishell/ASR/pruned_transducer_stateless3/decode.py index f686174f3..6aea306c8 100755 --- a/egs/aishell/ASR/pruned_transducer_stateless3/decode.py +++ b/egs/aishell/ASR/pruned_transducer_stateless3/decode.py @@ -464,6 +464,7 @@ def main(): params = get_params() params.update(vars(args)) + params.datatang_prob = 0 assert params.decoding_method in ( "greedy_search", diff --git a/egs/aishell/ASR/pruned_transducer_stateless3/export.py b/egs/aishell/ASR/pruned_transducer_stateless3/export.py index 307895a76..566902a85 100755 --- a/egs/aishell/ASR/pruned_transducer_stateless3/export.py +++ b/egs/aishell/ASR/pruned_transducer_stateless3/export.py @@ -157,6 +157,7 @@ def main(): params.blank_id = 0 params.vocab_size = max(lexicon.tokens) + 1 + params.datatang_prob = 0 logging.info(params) diff --git a/egs/aishell/ASR/pruned_transducer_stateless3/pretrained.py b/egs/aishell/ASR/pruned_transducer_stateless3/pretrained.py index 5cda411bc..04a0a882a 100755 --- a/egs/aishell/ASR/pruned_transducer_stateless3/pretrained.py +++ b/egs/aishell/ASR/pruned_transducer_stateless3/pretrained.py @@ -223,6 +223,7 @@ def main(): params.blank_id = 0 params.vocab_size = max(lexicon.tokens) + 1 + params.datatang_prob = 0 logging.info(params) From f26b62ac006c6486a01c4af1542b7ef5eed61730 Mon Sep 17 00:00:00 2001 From: Mingshuang Luo <37799481+luomingshuang@users.noreply.github.com> Date: Thu, 28 Jul 2022 12:54:27 +0800 Subject: [PATCH 16/38] [WIP] Pruned-transducer-stateless5-for-WenetSpeech (offline and streaming) (#447) * pruned-rnnt5-for-wenetspeech * style check * style check * add streaming conformer * add streaming decode * changes codes for fast_beam_search and export cpu jit * add modified-beam-search for streaming decoding * add modified-beam-search for streaming decoding * change for streaming_beam_search.py * add README.md and RESULTS.md * change for style_check.yml * do some changes * do some changes for export.py * add some decode commands for usage * add streaming results on README.md --- .github/workflows/style_check.yml | 2 +- README.md | 15 +- egs/wenetspeech/ASR/README.md | 1 + egs/wenetspeech/ASR/RESULTS.md | 78 +- .../ASR/pruned_transducer_stateless2/train.py | 2 - .../pruned_transducer_stateless5/__init__.py | 0 .../asr_datamodule.py | 1 + .../beam_search.py | 1 + .../pruned_transducer_stateless5/conformer.py | 1555 +++++++++++++++++ .../pruned_transducer_stateless5/decode.py | 778 +++++++++ .../decode_stream.py | 147 ++ .../pruned_transducer_stateless5/decoder.py | 1 + .../encoder_interface.py | 1 + .../pruned_transducer_stateless5/export.py | 209 +++ .../pruned_transducer_stateless5/joiner.py | 1 + .../ASR/pruned_transducer_stateless5/model.py | 1 + .../ASR/pruned_transducer_stateless5/optim.py | 1 + .../pretrained.py | 343 ++++ .../pruned_transducer_stateless5/scaling.py | 1 + .../streaming_beam_search.py | 287 +++ .../streaming_decode.py | 678 +++++++ .../ASR/pruned_transducer_stateless5/train.py | 1217 +++++++++++++ 22 files changed, 5311 insertions(+), 9 deletions(-) create mode 100644 egs/wenetspeech/ASR/pruned_transducer_stateless5/__init__.py create mode 120000 egs/wenetspeech/ASR/pruned_transducer_stateless5/asr_datamodule.py create mode 120000 
egs/wenetspeech/ASR/pruned_transducer_stateless5/beam_search.py create mode 100644 egs/wenetspeech/ASR/pruned_transducer_stateless5/conformer.py create mode 100755 egs/wenetspeech/ASR/pruned_transducer_stateless5/decode.py create mode 100644 egs/wenetspeech/ASR/pruned_transducer_stateless5/decode_stream.py create mode 120000 egs/wenetspeech/ASR/pruned_transducer_stateless5/decoder.py create mode 120000 egs/wenetspeech/ASR/pruned_transducer_stateless5/encoder_interface.py create mode 100644 egs/wenetspeech/ASR/pruned_transducer_stateless5/export.py create mode 120000 egs/wenetspeech/ASR/pruned_transducer_stateless5/joiner.py create mode 120000 egs/wenetspeech/ASR/pruned_transducer_stateless5/model.py create mode 120000 egs/wenetspeech/ASR/pruned_transducer_stateless5/optim.py create mode 100644 egs/wenetspeech/ASR/pruned_transducer_stateless5/pretrained.py create mode 120000 egs/wenetspeech/ASR/pruned_transducer_stateless5/scaling.py create mode 100644 egs/wenetspeech/ASR/pruned_transducer_stateless5/streaming_beam_search.py create mode 100644 egs/wenetspeech/ASR/pruned_transducer_stateless5/streaming_decode.py create mode 100755 egs/wenetspeech/ASR/pruned_transducer_stateless5/train.py diff --git a/.github/workflows/style_check.yml b/.github/workflows/style_check.yml index 6b3d856df..239a0280c 100644 --- a/.github/workflows/style_check.yml +++ b/.github/workflows/style_check.yml @@ -29,7 +29,7 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ubuntu-18.04, macos-10.15] + os: [ubuntu-18.04, macos-latest] python-version: [3.7, 3.9] fail-fast: false diff --git a/README.md b/README.md index 27398e712..fcba0723b 100644 --- a/README.md +++ b/README.md @@ -250,9 +250,9 @@ We provide a Colab notebook to run a pre-trained Pruned Transducer Stateless mod ### WenetSpeech -We provide one model for this recipe: [Pruned stateless RNN-T: Conformer encoder + Embedding decoder + k2 pruned RNN-T loss][WenetSpeech_pruned_transducer_stateless2]. +We provide some models for this recipe: [Pruned stateless RNN-T_2: Conformer encoder + Embedding decoder + k2 pruned RNN-T loss][WenetSpeech_pruned_transducer_stateless2] and [Pruned stateless RNN-T_5: Conformer encoder + Embedding decoder + k2 pruned RNN-T loss][WenetSpeech_pruned_transducer_stateless5]. 
-#### Pruned stateless RNN-T: Conformer encoder + Embedding decoder + k2 pruned RNN-T loss (trained with L subset) +#### Pruned stateless RNN-T_2: Conformer encoder + Embedding decoder + k2 pruned RNN-T loss (trained with L subset, offline ASR) | | Dev | Test-Net | Test-Meeting | |----------------------|-------|----------|--------------| @@ -260,7 +260,15 @@ We provide one model for this recipe: [Pruned stateless RNN-T: Conformer encoder | fast beam search | 7.94 | 8.74 | 13.80 | | modified beam search | 7.76 | 8.71 | 13.41 | -We provide a Colab notebook to run a pre-trained Pruned Transducer Stateless model: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1EV4e1CHa1GZgEF-bZgizqI9RyFFehIiN?usp=sharing) +#### Pruned stateless RNN-T_5: Conformer encoder + Embedding decoder + k2 pruned RNN-T loss (trained with L subset) +**Streaming**: +| | Dev | Test-Net | Test-Meeting | +|----------------------|-------|----------|--------------| +| greedy_search | 8.78 | 10.12 | 16.16 | +| modified_beam_search | 8.53| 9.95 | 15.81 | +| fast_beam_search| 9.01 | 10.47 | 16.28 | + +We provide a Colab notebook to run a pre-trained Pruned Transducer Stateless2 model: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1EV4e1CHa1GZgEF-bZgizqI9RyFFehIiN?usp=sharing) ### Alimeeting @@ -333,6 +341,7 @@ Please see: [![Open In Colab](https://colab.research.google.com/assets/colab-bad [GigaSpeech_pruned_transducer_stateless2]: egs/gigaspeech/ASR/pruned_transducer_stateless2 [Aidatatang_200zh_pruned_transducer_stateless2]: egs/aidatatang_200zh/ASR/pruned_transducer_stateless2 [WenetSpeech_pruned_transducer_stateless2]: egs/wenetspeech/ASR/pruned_transducer_stateless2 +[WenetSpeech_pruned_transducer_stateless5]: egs/wenetspeech/ASR/pruned_transducer_stateless5 [Alimeeting_pruned_transducer_stateless2]: egs/alimeeting/ASR/pruned_transducer_stateless2 [Aishell4_pruned_transducer_stateless5]: egs/aishell4/ASR/pruned_transducer_stateless5 [TAL_CSASR_pruned_transducer_stateless5]: egs/tal_csasr/ASR/pruned_transducer_stateless5 diff --git a/egs/wenetspeech/ASR/README.md b/egs/wenetspeech/ASR/README.md index c92f1b4e6..44e631b4a 100644 --- a/egs/wenetspeech/ASR/README.md +++ b/egs/wenetspeech/ASR/README.md @@ -13,6 +13,7 @@ The following table lists the differences among them. | | Encoder | Decoder | Comment | |---------------------------------------|---------------------|--------------------|-----------------------------| | `pruned_transducer_stateless2` | Conformer(modified) | Embedding + Conv1d | Using k2 pruned RNN-T loss | | +| `pruned_transducer_stateless5` | Conformer(modified) | Embedding + Conv1d | Using k2 pruned RNN-T loss | | The decoder in `transducer_stateless` is modified from the paper [Rnn-Transducer with Stateless Prediction Network](https://ieeexplore.ieee.org/document/9054419/). diff --git a/egs/wenetspeech/ASR/RESULTS.md b/egs/wenetspeech/ASR/RESULTS.md index ea6658ddb..cc36ae4f2 100644 --- a/egs/wenetspeech/ASR/RESULTS.md +++ b/egs/wenetspeech/ASR/RESULTS.md @@ -1,12 +1,84 @@ ## Results +### WenetSpeech char-based training results (offline and streaming) (Pruned Transducer 5) + +#### 2022-07-22 + +Using the codes from this PR https://github.com/k2-fsa/icefall/pull/447. 
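The streaming variant reported in this section is trained with dynamic chunk masking: the `--dynamic-chunk-training True`, `--short-chunk-size 25` and `--num-left-chunks 4` flags in the streaming training command further below limit how large a chunk (and how much left context) the encoder may attend to. The sketch below is a simplified, self-contained illustration of that idea; `toy_chunk_mask` is a toy stand-in for `icefall.utils.subsequent_chunk_mask`, and the chunk-size sampling mirrors the logic this patch adds to `pruned_transducer_stateless5/conformer.py`.

```python
import torch


def toy_chunk_mask(size: int, chunk_size: int, num_left_chunks: int) -> torch.Tensor:
    """Boolean mask of shape (size, size); True means frame j is visible to frame i.

    Each frame attends to every frame in its own chunk plus up to
    `num_left_chunks` chunks to its left (-1 means unlimited left context).
    """
    mask = torch.zeros(size, size, dtype=torch.bool)
    for i in range(size):
        cur_chunk = i // chunk_size
        start = 0 if num_left_chunks < 0 else max(
            0, (cur_chunk - num_left_chunks) * chunk_size
        )
        end = min(size, (cur_chunk + 1) * chunk_size)
        mask[i, start:end] = True
    return mask


# Chunk-size sampling as in the Conformer.forward() added by this patch
# (defaults: short_chunk_threshold=0.75, short_chunk_size=25):
max_len, short_chunk_threshold, short_chunk_size = 100, 0.75, 25
chunk_size = torch.randint(1, max_len, (1,)).item()
if chunk_size > max_len * short_chunk_threshold:
    chunk_size = max_len  # occasionally train with full context
else:
    chunk_size = chunk_size % short_chunk_size + 1  # small streaming chunks

mask = toy_chunk_mask(size=max_len, chunk_size=chunk_size, num_left_chunks=4)
```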
+ +When training with the L subset, the CERs are + +**Offline**: +|decoding-method| epoch | avg | use-averaged-model | DEV | TEST-NET | TEST-MEETING| +|-- | -- | -- | -- | -- | -- | --| +|greedy_search | 4 | 1 | True | 8.22 | 9.03 | 14.54| +|modified_beam_search | 4 | 1 | True | **8.17** | **9.04** | **14.44**| +|fast_beam_search | 4 | 1 | True | 8.29 | 9.00 | 14.93| + +The offline training command for reproducing is given below: +``` +export CUDA_VISIBLE_DEVICES="0,1,2,3,4,5,6,7" + +./pruned_transducer_stateless5/train.py \ + --lang-dir data/lang_char \ + --exp-dir pruned_transducer_stateless5/exp_L_offline \ + --world-size 8 \ + --num-epochs 15 \ + --start-epoch 2 \ + --max-duration 120 \ + --valid-interval 3000 \ + --model-warm-step 3000 \ + --save-every-n 8000 \ + --average-period 1000 \ + --training-subset L +``` + +The tensorboard training log can be found at https://tensorboard.dev/experiment/SvnN2jfyTB2Hjqu22Z7ZoQ/#scalars . + + +A pre-trained offline model and decoding logs can be found at + +**Streaming**: +|decoding-method| epoch | avg | use-averaged-model | DEV | TEST-NET | TEST-MEETING| +|--|--|--|--|--|--|--| +| greedy_search | 7| 1| True | 8.78 | 10.12 | 16.16 | +| modified_beam_search | 7| 1| True| **8.53**| **9.95** | **15.81** | +| fast_beam_search | 7 | 1| True | 9.01 | 10.47 | 16.28 | + +The streaming training command for reproducing is given below: +``` +export CUDA_VISIBLE_DEVICES="0,1,2,3,4,5,6,7" + +./pruned_transducer_stateless5/train.py \ + --lang-dir data/lang_char \ + --exp-dir pruned_transducer_stateless5/exp_L_streaming \ + --world-size 8 \ + --num-epochs 15 \ + --start-epoch 1 \ + --max-duration 140 \ + --valid-interval 3000 \ + --model-warm-step 3000 \ + --save-every-n 8000 \ + --average-period 1000 \ + --training-subset L \ + --dynamic-chunk-training True \ + --causal-convolution True \ + --short-chunk-size 25 \ + --num-left-chunks 4 +``` + +The tensorboard training log can be found at https://tensorboard.dev/experiment/E2NXPVflSOKWepzJ1a1uDQ/#scalars . + + +A pre-trained offline model and decoding logs can be found at + ### WenetSpeech char-based training results (Pruned Transducer 2) #### 2022-05-19 Using the codes from this PR https://github.com/k2-fsa/icefall/pull/349. 
-When training with the L subset, the WERs are +When training with the L subset, the CERs are | | dev | test-net | test-meeting | comment | |------------------------------------|-------|----------|--------------|------------------------------------------| @@ -72,7 +144,7 @@ avg=2 --max-states 8 ``` -When training with the M subset, the WERs are +When training with the M subset, the CERs are | | dev | test-net | test-meeting | comment | |------------------------------------|--------|-----------|---------------|-------------------------------------------| @@ -81,7 +153,7 @@ When training with the M subset, the WERs are | fast beam search (set as default) | 10.18 | 11.10 | 19.32 | --epoch 29, --avg 11, --max-duration 1500 | -When training with the S subset, the WERs are +When training with the S subset, the CERs are | | dev | test-net | test-meeting | comment | |------------------------------------|--------|-----------|---------------|-------------------------------------------| diff --git a/egs/wenetspeech/ASR/pruned_transducer_stateless2/train.py b/egs/wenetspeech/ASR/pruned_transducer_stateless2/train.py index 4db874c8d..faf25eda1 100644 --- a/egs/wenetspeech/ASR/pruned_transducer_stateless2/train.py +++ b/egs/wenetspeech/ASR/pruned_transducer_stateless2/train.py @@ -348,7 +348,6 @@ def get_params() -> AttributeDict: epochs. - log_interval: Print training loss if batch_idx % log_interval` is 0 - reset_interval: Reset statistics if batch_idx % reset_interval is 0 - - valid_interval: Run validation if batch_idx % valid_interval is 0 - feature_dim: The model input dim. It has to match the one used in computing features. - subsampling_factor: The subsampling factor for the model. @@ -376,7 +375,6 @@ def get_params() -> AttributeDict: "decoder_dim": 512, # parameters for joiner "joiner_dim": 512, - # parameters for Noam "env_info": get_env_info(), } ) diff --git a/egs/wenetspeech/ASR/pruned_transducer_stateless5/__init__.py b/egs/wenetspeech/ASR/pruned_transducer_stateless5/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/egs/wenetspeech/ASR/pruned_transducer_stateless5/asr_datamodule.py b/egs/wenetspeech/ASR/pruned_transducer_stateless5/asr_datamodule.py new file mode 120000 index 000000000..a074d6085 --- /dev/null +++ b/egs/wenetspeech/ASR/pruned_transducer_stateless5/asr_datamodule.py @@ -0,0 +1 @@ +../pruned_transducer_stateless2/asr_datamodule.py \ No newline at end of file diff --git a/egs/wenetspeech/ASR/pruned_transducer_stateless5/beam_search.py b/egs/wenetspeech/ASR/pruned_transducer_stateless5/beam_search.py new file mode 120000 index 000000000..02d01b343 --- /dev/null +++ b/egs/wenetspeech/ASR/pruned_transducer_stateless5/beam_search.py @@ -0,0 +1 @@ +../../../librispeech/ASR/pruned_transducer_stateless5/beam_search.py \ No newline at end of file diff --git a/egs/wenetspeech/ASR/pruned_transducer_stateless5/conformer.py b/egs/wenetspeech/ASR/pruned_transducer_stateless5/conformer.py new file mode 100644 index 000000000..78baa2b78 --- /dev/null +++ b/egs/wenetspeech/ASR/pruned_transducer_stateless5/conformer.py @@ -0,0 +1,1555 @@ +#!/usr/bin/env python3 +# Copyright (c) 2021 University of Chinese Academy of Sciences (author: Han Zhu) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy +import math +import warnings +from typing import List, Optional, Tuple + +import torch +from encoder_interface import EncoderInterface +from scaling import ( + ActivationBalancer, + BasicNorm, + DoubleSwish, + ScaledConv1d, + ScaledConv2d, + ScaledLinear, +) +from torch import Tensor, nn + +from icefall.utils import make_pad_mask, subsequent_chunk_mask + + +class Conformer(EncoderInterface): + """ + Args: + num_features (int): Number of input features + subsampling_factor (int): subsampling factor of encoder (the convolution layers before transformers) + d_model (int): attention dimension, also the output dimension + nhead (int): number of head + dim_feedforward (int): feedforward dimention + num_encoder_layers (int): number of encoder layers + dropout (float): dropout rate + layer_dropout (float): layer-dropout rate. + cnn_module_kernel (int): Kernel size of convolution module + vgg_frontend (bool): whether to use vgg frontend. + dynamic_chunk_training (bool): whether to use dynamic chunk training, if + you want to train a streaming model, this is expected to be True. + When setting True, it will use a masking strategy to make the attention + see only limited left and right context. + short_chunk_threshold (float): a threshold to determinize the chunk size + to be used in masking training, if the randomly generated chunk size + is greater than ``max_len * short_chunk_threshold`` (max_len is the + max sequence length of current batch) then it will use + full context in training (i.e. with chunk size equals to max_len). + This will be used only when dynamic_chunk_training is True. + short_chunk_size (int): see docs above, if the randomly generated chunk + size equals to or less than ``max_len * short_chunk_threshold``, the + chunk size will be sampled uniformly from 1 to short_chunk_size. + This also will be used only when dynamic_chunk_training is True. + num_left_chunks (int): the left context (in chunks) attention can see, the + chunk size is decided by short_chunk_threshold and short_chunk_size. + A minus value means seeing full left context. + This also will be used only when dynamic_chunk_training is True. + causal (bool): Whether to use causal convolution in conformer encoder + layer. This MUST be True when using dynamic_chunk_training. + """ + + def __init__( + self, + num_features: int, + subsampling_factor: int = 4, + d_model: int = 256, + nhead: int = 4, + dim_feedforward: int = 2048, + num_encoder_layers: int = 12, + dropout: float = 0.1, + layer_dropout: float = 0.075, + cnn_module_kernel: int = 31, + dynamic_chunk_training: bool = False, + short_chunk_threshold: float = 0.75, + short_chunk_size: int = 25, + num_left_chunks: int = -1, + causal: bool = False, + ) -> None: + super(Conformer, self).__init__() + + self.num_features = num_features + self.subsampling_factor = subsampling_factor + if subsampling_factor != 4: + raise NotImplementedError("Support only 'subsampling_factor=4'.") + + # self.encoder_embed converts the input of shape (N, T, num_features) + # to the shape (N, T//subsampling_factor, d_model). 
+ # That is, it does two things simultaneously: + # (1) subsampling: T -> T//subsampling_factor + # (2) embedding: num_features -> d_model + self.encoder_embed = Conv2dSubsampling(num_features, d_model) + + self.encoder_layers = num_encoder_layers + self.d_model = d_model + self.cnn_module_kernel = cnn_module_kernel + self.causal = causal + self.dynamic_chunk_training = dynamic_chunk_training + self.short_chunk_threshold = short_chunk_threshold + self.short_chunk_size = short_chunk_size + self.num_left_chunks = num_left_chunks + + self.encoder_pos = RelPositionalEncoding(d_model, dropout) + + encoder_layer = ConformerEncoderLayer( + d_model, + nhead, + dim_feedforward, + dropout, + layer_dropout, + cnn_module_kernel, + causal, + ) + self.encoder = ConformerEncoder(encoder_layer, num_encoder_layers) + self._init_state: List[torch.Tensor] = [torch.empty(0)] + + def forward( + self, x: torch.Tensor, x_lens: torch.Tensor, warmup: float = 1.0 + ) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Args: + x: + The input tensor. Its shape is (batch_size, seq_len, feature_dim). + x_lens: + A tensor of shape (batch_size,) containing the number of frames in + `x` before padding. + warmup: + A floating point value that gradually increases from 0 throughout + training; when it is >= 1.0 we are "fully warmed up". It is used + to turn modules on sequentially. + Returns: + Return a tuple containing 2 tensors: + - embeddings: its shape is (batch_size, output_seq_len, d_model) + - lengths, a tensor of shape (batch_size,) containing the number + of frames in `embeddings` before padding. + """ + x = self.encoder_embed(x) + x, pos_emb = self.encoder_pos(x) + x = x.permute(1, 0, 2) # (N, T, C) -> (T, N, C) + + # Caution: We assume the subsampling factor is 4! + + # lengths = ((x_lens - 1) // 2 - 1) // 2 # issue an warning + # + # Note: rounding_mode in torch.div() is available only in torch >= 1.8.0 + lengths = (((x_lens - 1) >> 1) - 1) >> 1 + + assert x.size(0) == lengths.max().item() + + src_key_padding_mask = make_pad_mask(lengths) + + if self.dynamic_chunk_training: + assert ( + self.causal + ), "Causal convolution is required for streaming conformer." + max_len = x.size(0) + chunk_size = torch.randint(1, max_len, (1,)).item() + if chunk_size > (max_len * self.short_chunk_threshold): + chunk_size = max_len + else: + chunk_size = chunk_size % self.short_chunk_size + 1 + + mask = ~subsequent_chunk_mask( + size=x.size(0), + chunk_size=chunk_size, + num_left_chunks=self.num_left_chunks, + device=x.device, + ) + x = self.encoder( + x, + pos_emb, + mask=mask, + src_key_padding_mask=src_key_padding_mask, + warmup=warmup, + ) # (T, N, C) + else: + x = self.encoder( + x, + pos_emb, + mask=None, + src_key_padding_mask=src_key_padding_mask, + warmup=warmup, + ) # (T, N, C) + + x = x.permute(1, 0, 2) # (T, N, C) ->(N, T, C) + return x, lengths + + @torch.jit.export + def get_init_state( + self, left_context: int, device: torch.device + ) -> List[torch.Tensor]: + """Return the initial cache state of the model. + Args: + left_context: The left context size (in frames after subsampling). + Returns: + Return the initial state of the model, it is a list containing two + tensors, the first one is the cache for attentions which has a shape + of (num_encoder_layers, left_context, encoder_dim), the second one + is the cache of conv_modules which has a shape of + (num_encoder_layers, cnn_module_kernel - 1, encoder_dim). + NOTE: the returned tensors are on the given device. 
+ """ + if ( + len(self._init_state) == 2 + and self._init_state[0].size(1) == left_context + ): + # Note: It is OK to share the init state as it is + # not going to be modified by the model + return self._init_state + + init_states: List[torch.Tensor] = [ + torch.zeros( + ( + self.encoder_layers, + left_context, + self.d_model, + ), + device=device, + ), + torch.zeros( + ( + self.encoder_layers, + self.cnn_module_kernel - 1, + self.d_model, + ), + device=device, + ), + ] + + self._init_state = init_states + + return init_states + + @torch.jit.export + def streaming_forward( + self, + x: torch.Tensor, + x_lens: torch.Tensor, + states: Optional[List[Tensor]] = None, + processed_lens: Optional[Tensor] = None, + left_context: int = 64, + right_context: int = 4, + chunk_size: int = 16, + simulate_streaming: bool = False, + warmup: float = 1.0, + ) -> Tuple[torch.Tensor, torch.Tensor, List[torch.Tensor]]: + """ + Args: + x: + The input tensor. Its shape is (batch_size, seq_len, feature_dim). + x_lens: + A tensor of shape (batch_size,) containing the number of frames in + `x` before padding. + states: + The decode states for previous frames which contains the cached data. + It has two elements, the first element is the attn_cache which has + a shape of (encoder_layers, left_context, batch, attention_dim), + the second element is the conv_cache which has a shape of + (encoder_layers, cnn_module_kernel-1, batch, conv_dim). + Note: states will be modified in this function. + processed_lens: + How many frames (after subsampling) have been processed for each sequence. + left_context: + How many previous frames the attention can see in current chunk. + Note: It's not that each individual frame has `left_context` frames + of left context, some have more. + right_context: + How many future frames the attention can see in current chunk. + Note: It's not that each individual frame has `right_context` frames + of right context, some have more. + chunk_size: + The chunk size for decoding, this will be used to simulate streaming + decoding using masking. + simulate_streaming: + If setting True, it will use a masking strategy to simulate streaming + fashion (i.e. every chunk data only see limited left context and + right context). The whole sequence is supposed to be send at a time + When using simulate_streaming. + warmup: + A floating point value that gradually increases from 0 throughout + training; when it is >= 1.0 we are "fully warmed up". It is used + to turn modules on sequentially. + Returns: + Return a tuple containing 2 tensors: + - logits, its shape is (batch_size, output_seq_len, output_dim) + - logit_lens, a tensor of shape (batch_size,) containing the number + of frames in `logits` before padding. + - decode_states, the updated states including the information + of current chunk. + """ + + # x: [N, T, C] + # Caution: We assume the subsampling factor is 4! 
+ + # lengths = ((x_lens - 1) // 2 - 1) // 2 # issue an warning + # + # Note: rounding_mode in torch.div() is available only in torch >= 1.8.0 + lengths = (((x_lens - 1) >> 1) - 1) >> 1 + + if not simulate_streaming: + assert states is not None + assert processed_lens is not None + assert ( + len(states) == 2 + and states[0].shape + == (self.encoder_layers, left_context, x.size(0), self.d_model) + and states[1].shape + == ( + self.encoder_layers, + self.cnn_module_kernel - 1, + x.size(0), + self.d_model, + ) + ), f"""The length of states MUST be equal to 2, and the shape of + first element should be {(self.encoder_layers, left_context, x.size(0), self.d_model)}, + given {states[0].shape}. the shape of second element should be + {(self.encoder_layers, self.cnn_module_kernel - 1, x.size(0), self.d_model)}, + given {states[1].shape}.""" + + lengths -= 2 # we will cut off 1 frame on each side of encoder_embed output + + src_key_padding_mask = make_pad_mask(lengths) + + processed_mask = torch.arange(left_context, device=x.device).expand( + x.size(0), left_context + ) + processed_lens = processed_lens.view(x.size(0), 1) + processed_mask = (processed_lens <= processed_mask).flip(1) + + src_key_padding_mask = torch.cat( + [processed_mask, src_key_padding_mask], dim=1 + ) + + embed = self.encoder_embed(x) + + # cut off 1 frame on each size of embed as they see the padding + # value which causes a training and decoding mismatch. + embed = embed[:, 1:-1, :] + + embed, pos_enc = self.encoder_pos(embed, left_context) + embed = embed.permute(1, 0, 2) # (B, T, F) -> (T, B, F) + + x, states = self.encoder.chunk_forward( + embed, + pos_enc, + src_key_padding_mask=src_key_padding_mask, + warmup=warmup, + states=states, + left_context=left_context, + right_context=right_context, + ) # (T, B, F) + if right_context > 0: + x = x[0:-right_context, ...] + lengths -= right_context + else: + assert states is None + states = [] # just to make torch.script.jit happy + # this branch simulates streaming decoding using mask as we are + # using in training time. + src_key_padding_mask = make_pad_mask(lengths) + x = self.encoder_embed(x) + x, pos_emb = self.encoder_pos(x) + x = x.permute(1, 0, 2) # (N, T, C) -> (T, N, C) + + assert x.size(0) == lengths.max().item() + + num_left_chunks = -1 + if left_context >= 0: + assert left_context % chunk_size == 0 + num_left_chunks = left_context // chunk_size + + mask = ~subsequent_chunk_mask( + size=x.size(0), + chunk_size=chunk_size, + num_left_chunks=num_left_chunks, + device=x.device, + ) + x = self.encoder( + x, + pos_emb, + mask=mask, + src_key_padding_mask=src_key_padding_mask, + warmup=warmup, + ) # (T, N, C) + + x = x.permute(1, 0, 2) # (T, N, C) ->(N, T, C) + + return x, lengths, states + + +class ConformerEncoderLayer(nn.Module): + """ + ConformerEncoderLayer is made up of self-attn, feedforward and convolution networks. + See: "Conformer: Convolution-augmented Transformer for Speech Recognition" + Args: + d_model: the number of expected features in the input (required). + nhead: the number of heads in the multiheadattention models (required). + dim_feedforward: the dimension of the feedforward network model (default=2048). + dropout: the dropout value (default=0.1). + cnn_module_kernel (int): Kernel size of convolution module. + causal (bool): Whether to use causal convolution in conformer encoder + layer. This MUST be True when using dynamic_chunk_training and streaming decoding. 
+ Examples:: + >>> encoder_layer = ConformerEncoderLayer(d_model=512, nhead=8) + >>> src = torch.rand(10, 32, 512) + >>> pos_emb = torch.rand(32, 19, 512) + >>> out = encoder_layer(src, pos_emb) + """ + + def __init__( + self, + d_model: int, + nhead: int, + dim_feedforward: int = 2048, + dropout: float = 0.1, + layer_dropout: float = 0.075, + cnn_module_kernel: int = 31, + causal: bool = False, + ) -> None: + super(ConformerEncoderLayer, self).__init__() + + self.layer_dropout = layer_dropout + + self.d_model = d_model + + self.self_attn = RelPositionMultiheadAttention( + d_model, nhead, dropout=0.0 + ) + + self.feed_forward = nn.Sequential( + ScaledLinear(d_model, dim_feedforward), + ActivationBalancer(channel_dim=-1), + DoubleSwish(), + nn.Dropout(dropout), + ScaledLinear(dim_feedforward, d_model, initial_scale=0.25), + ) + + self.feed_forward_macaron = nn.Sequential( + ScaledLinear(d_model, dim_feedforward), + ActivationBalancer(channel_dim=-1), + DoubleSwish(), + nn.Dropout(dropout), + ScaledLinear(dim_feedforward, d_model, initial_scale=0.25), + ) + + self.conv_module = ConvolutionModule( + d_model, cnn_module_kernel, causal=causal + ) + + self.norm_final = BasicNorm(d_model) + + # try to ensure the output is close to zero-mean (or at least, zero-median). + self.balancer = ActivationBalancer( + channel_dim=-1, min_positive=0.45, max_positive=0.55, max_abs=6.0 + ) + + self.dropout = nn.Dropout(dropout) + + def forward( + self, + src: Tensor, + pos_emb: Tensor, + src_mask: Optional[Tensor] = None, + src_key_padding_mask: Optional[Tensor] = None, + warmup: float = 1.0, + ) -> Tensor: + """ + Pass the input through the encoder layer. + Args: + src: the sequence to the encoder layer (required). + pos_emb: Positional embedding tensor (required). + src_mask: the mask for the src sequence (optional). + src_key_padding_mask: the mask for the src keys per batch (optional). + warmup: controls selective bypass of of layers; if < 1.0, we will + bypass layers more frequently. + Shape: + src: (S, N, E). + pos_emb: (N, 2*S-1, E) + src_mask: (S, S). + src_key_padding_mask: (N, S). + S is the source sequence length, N is the batch size, E is the feature number + """ + src_orig = src + + warmup_scale = min(0.1 + warmup, 1.0) + # alpha = 1.0 means fully use this encoder layer, 0.0 would mean + # completely bypass it. + if self.training: + alpha = ( + warmup_scale + if torch.rand(()).item() <= (1.0 - self.layer_dropout) + else 0.1 + ) + else: + alpha = 1.0 + + # macaron style feed forward module + src = src + self.dropout(self.feed_forward_macaron(src)) + + # multi-headed self-attention module + src_att = self.self_attn( + src, + src, + src, + pos_emb=pos_emb, + attn_mask=src_mask, + key_padding_mask=src_key_padding_mask, + )[0] + + src = src + self.dropout(src_att) + + # convolution module + conv, _ = self.conv_module(src) + src = src + self.dropout(conv) + + # feed forward module + src = src + self.dropout(self.feed_forward(src)) + + src = self.norm_final(self.balancer(src)) + + if alpha != 1.0: + src = alpha * src + (1 - alpha) * src_orig + + return src + + @torch.jit.export + def chunk_forward( + self, + src: Tensor, + pos_emb: Tensor, + states: List[Tensor], + src_mask: Optional[Tensor] = None, + src_key_padding_mask: Optional[Tensor] = None, + warmup: float = 1.0, + left_context: int = 0, + right_context: int = 0, + ) -> Tuple[Tensor, List[Tensor]]: + """ + Pass the input through the encoder layer. + Args: + src: the sequence to the encoder layer (required). 
+ pos_emb: Positional embedding tensor (required). + states: + The decode states for previous frames which contains the cached data. + It has two elements, the first element is the attn_cache which has + a shape of (left_context, batch, attention_dim), + the second element is the conv_cache which has a shape of + (cnn_module_kernel-1, batch, conv_dim). + Note: states will be modified in this function. + src_mask: the mask for the src sequence (optional). + src_key_padding_mask: the mask for the src keys per batch (optional). + warmup: controls selective bypass of of layers; if < 1.0, we will + bypass layers more frequently. + left_context: + How many previous frames the attention can see in current chunk. + Note: It's not that each individual frame has `left_context` frames + of left context, some have more. + right_context: + How many future frames the attention can see in current chunk. + Note: It's not that each individual frame has `right_context` frames + of right context, some have more. + Shape: + src: (S, N, E). + pos_emb: (N, 2*(S+left_context)-1, E). + src_mask: (S, S). + src_key_padding_mask: (N, S). + S is the source sequence length, N is the batch size, E is the feature number + """ + + assert not self.training + assert len(states) == 2 + assert states[0].shape == (left_context, src.size(1), src.size(2)) + + # macaron style feed forward module + src = src + self.dropout(self.feed_forward_macaron(src)) + + # We put the attention cache this level (i.e. before linear transformation) + # to save memory consumption, when decoding in streaming fashion, the + # batch size would be thousands (for 32GB machine), if we cache key & val + # separately, it needs extra several GB memory. + # TODO(WeiKang): Move cache to self_attn level (i.e. cache key & val + # separately) if needed. + key = torch.cat([states[0], src], dim=0) + val = key + if right_context > 0: + states[0] = key[ + -(left_context + right_context) : -right_context, ... # noqa + ] + else: + states[0] = key[-left_context:, ...] + + # multi-headed self-attention module + src_att = self.self_attn( + src, + key, + val, + pos_emb=pos_emb, + attn_mask=src_mask, + key_padding_mask=src_key_padding_mask, + left_context=left_context, + )[0] + + src = src + self.dropout(src_att) + + # convolution module + conv, conv_cache = self.conv_module(src, states[1], right_context) + states[1] = conv_cache + + src = src + self.dropout(conv) + + # feed forward module + src = src + self.dropout(self.feed_forward(src)) + + src = self.norm_final(self.balancer(src)) + + return src, states + + +class ConformerEncoder(nn.Module): + r"""ConformerEncoder is a stack of N encoder layers + Args: + encoder_layer: an instance of the ConformerEncoderLayer() class (required). + num_layers: the number of sub-encoder-layers in the encoder (required). 
+ Examples:: + >>> encoder_layer = ConformerEncoderLayer(d_model=512, nhead=8) + >>> conformer_encoder = ConformerEncoder(encoder_layer, num_layers=6) + >>> src = torch.rand(10, 32, 512) + >>> pos_emb = torch.rand(32, 19, 512) + >>> out = conformer_encoder(src, pos_emb) + """ + + def __init__(self, encoder_layer: nn.Module, num_layers: int) -> None: + super().__init__() + self.layers = nn.ModuleList( + [copy.deepcopy(encoder_layer) for i in range(num_layers)] + ) + self.num_layers = num_layers + + def forward( + self, + src: Tensor, + pos_emb: Tensor, + mask: Optional[Tensor] = None, + src_key_padding_mask: Optional[Tensor] = None, + warmup: float = 1.0, + ) -> Tensor: + r"""Pass the input through the encoder layers in turn. + Args: + src: the sequence to the encoder (required). + pos_emb: Positional embedding tensor (required). + mask: the mask for the src sequence (optional). + src_key_padding_mask: the mask for the src keys per batch (optional). + warmup: controls selective bypass of of layers; if < 1.0, we will + bypass layers more frequently. + Shape: + src: (S, N, E). + pos_emb: (N, 2*S-1, E) + mask: (S, S). + src_key_padding_mask: (N, S). + S is the source sequence length, T is the target sequence length, N is the batch size, E is the feature number + """ + output = src + + for layer_index, mod in enumerate(self.layers): + output = mod( + output, + pos_emb, + src_mask=mask, + src_key_padding_mask=src_key_padding_mask, + warmup=warmup, + ) + + return output + + @torch.jit.export + def chunk_forward( + self, + src: Tensor, + pos_emb: Tensor, + states: List[Tensor], + mask: Optional[Tensor] = None, + src_key_padding_mask: Optional[Tensor] = None, + warmup: float = 1.0, + left_context: int = 0, + right_context: int = 0, + ) -> Tuple[Tensor, List[Tensor]]: + r"""Pass the input through the encoder layers in turn. + Args: + src: the sequence to the encoder (required). + pos_emb: Positional embedding tensor (required). + states: + The decode states for previous frames which contains the cached data. + It has two elements, the first element is the attn_cache which has + a shape of (encoder_layers, left_context, batch, attention_dim), + the second element is the conv_cache which has a shape of + (encoder_layers, cnn_module_kernel-1, batch, conv_dim). + Note: states will be modified in this function. + mask: the mask for the src sequence (optional). + src_key_padding_mask: the mask for the src keys per batch (optional). + warmup: controls selective bypass of of layers; if < 1.0, we will + bypass layers more frequently. + left_context: + How many previous frames the attention can see in current chunk. + Note: It's not that each individual frame has `left_context` frames + of left context, some have more. + right_context: + How many future frames the attention can see in current chunk. + Note: It's not that each individual frame has `right_context` frames + of right context, some have more. + Shape: + src: (S, N, E). + pos_emb: (N, 2*(S+left_context)-1, E). + mask: (S, S). + src_key_padding_mask: (N, S). 
+ S is the source sequence length, T is the target sequence length, N is the batch size, E is the feature number + """ + assert not self.training + assert len(states) == 2 + assert states[0].shape == ( + self.num_layers, + left_context, + src.size(1), + src.size(2), + ) + assert states[1].size(0) == self.num_layers + + output = src + + for layer_index, mod in enumerate(self.layers): + cache = [states[0][layer_index], states[1][layer_index]] + output, cache = mod.chunk_forward( + output, + pos_emb, + states=cache, + src_mask=mask, + src_key_padding_mask=src_key_padding_mask, + warmup=warmup, + left_context=left_context, + right_context=right_context, + ) + states[0][layer_index] = cache[0] + states[1][layer_index] = cache[1] + + return output, states + + +class RelPositionalEncoding(torch.nn.Module): + """Relative positional encoding module. + See : Appendix B in "Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context" + Modified from https://github.com/espnet/espnet/blob/master/espnet/nets/pytorch_backend/transformer/embedding.py + Args: + d_model: Embedding dimension. + dropout_rate: Dropout rate. + max_len: Maximum input length. + """ + + def __init__( + self, d_model: int, dropout_rate: float, max_len: int = 5000 + ) -> None: + """Construct an PositionalEncoding object.""" + super(RelPositionalEncoding, self).__init__() + self.d_model = d_model + self.dropout = torch.nn.Dropout(p=dropout_rate) + self.pe = None + self.extend_pe(torch.tensor(0.0).expand(1, max_len)) + + def extend_pe(self, x: Tensor, left_context: int = 0) -> None: + """Reset the positional encodings.""" + x_size_1 = x.size(1) + left_context + if self.pe is not None: + # self.pe contains both positive and negative parts + # the length of self.pe is 2 * input_len - 1 + if self.pe.size(1) >= x_size_1 * 2 - 1: + # Note: TorchScript doesn't implement operator== for torch.Device + if self.pe.dtype != x.dtype or str(self.pe.device) != str( + x.device + ): + self.pe = self.pe.to(dtype=x.dtype, device=x.device) + return + # Suppose `i` means to the position of query vector and `j` means the + # position of key vector. We use position relative positions when keys + # are to the left (i>j) and negative relative positions otherwise (i Tuple[Tensor, Tensor]: + """Add positional encoding. + Args: + x (torch.Tensor): Input tensor (batch, time, `*`). + left_context (int): left context (in frames) used during streaming decoding. + this is used only in real streaming decoding, in other circumstances, + it MUST be 0. + Returns: + torch.Tensor: Encoded tensor (batch, time, `*`). + torch.Tensor: Encoded tensor (batch, 2*time-1, `*`). + """ + self.extend_pe(x, left_context) + x_size_1 = x.size(1) + left_context + pos_emb = self.pe[ + :, + self.pe.size(1) // 2 + - x_size_1 + + 1 : self.pe.size(1) // 2 # noqa E203 + + x.size(1), + ] + return self.dropout(x), self.dropout(pos_emb) + + +class RelPositionMultiheadAttention(nn.Module): + r"""Multi-Head Attention layer with relative position encoding + See reference: "Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context" + Args: + embed_dim: total dimension of the model. + num_heads: parallel attention heads. + dropout: a Dropout layer on attn_output_weights. Default: 0.0. 
+ Examples:: + >>> rel_pos_multihead_attn = RelPositionMultiheadAttention(embed_dim, num_heads) + >>> attn_output, attn_output_weights = multihead_attn(query, key, value, pos_emb) + """ + + def __init__( + self, + embed_dim: int, + num_heads: int, + dropout: float = 0.0, + ) -> None: + super(RelPositionMultiheadAttention, self).__init__() + self.embed_dim = embed_dim + self.num_heads = num_heads + self.dropout = dropout + self.head_dim = embed_dim // num_heads + assert ( + self.head_dim * num_heads == self.embed_dim + ), "embed_dim must be divisible by num_heads" + + self.in_proj = ScaledLinear(embed_dim, 3 * embed_dim, bias=True) + self.out_proj = ScaledLinear( + embed_dim, embed_dim, bias=True, initial_scale=0.25 + ) + + # linear transformation for positional encoding. + self.linear_pos = ScaledLinear(embed_dim, embed_dim, bias=False) + # these two learnable bias are used in matrix c and matrix d + # as described in "Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context" Section 3.3 + self.pos_bias_u = nn.Parameter(torch.Tensor(num_heads, self.head_dim)) + self.pos_bias_v = nn.Parameter(torch.Tensor(num_heads, self.head_dim)) + self.pos_bias_u_scale = nn.Parameter(torch.zeros(()).detach()) + self.pos_bias_v_scale = nn.Parameter(torch.zeros(()).detach()) + self._reset_parameters() + + def _pos_bias_u(self): + return self.pos_bias_u * self.pos_bias_u_scale.exp() + + def _pos_bias_v(self): + return self.pos_bias_v * self.pos_bias_v_scale.exp() + + def _reset_parameters(self) -> None: + nn.init.normal_(self.pos_bias_u, std=0.01) + nn.init.normal_(self.pos_bias_v, std=0.01) + + def forward( + self, + query: Tensor, + key: Tensor, + value: Tensor, + pos_emb: Tensor, + key_padding_mask: Optional[Tensor] = None, + need_weights: bool = True, + attn_mask: Optional[Tensor] = None, + left_context: int = 0, + ) -> Tuple[Tensor, Optional[Tensor]]: + r""" + Args: + query, key, value: map a query and a set of key-value pairs to an output. + pos_emb: Positional embedding tensor + key_padding_mask: if provided, specified padding elements in the key will + be ignored by the attention. When given a binary mask and a value is True, + the corresponding value on the attention layer will be ignored. When given + a byte mask and a value is non-zero, the corresponding value on the attention + layer will be ignored + need_weights: output attn_output_weights. + attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all + the batches while a 3D mask allows to specify a different mask for the entries of each batch. + left_context (int): left context (in frames) used during streaming decoding. + this is used only in real streaming decoding, in other circumstances, + it MUST be 0. + Shape: + - Inputs: + - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is + the embedding dimension. + - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - pos_emb: :math:`(N, 2*L-1, E)` where L is the target sequence length, N is the batch size, E is + the embedding dimension. + - key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length. + If a ByteTensor is provided, the non-zero positions will be ignored while the position + with the zero positions will be unchanged. 
If a BoolTensor is provided, the positions with the + value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged. + - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length. + 3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length, + S is the source sequence length. attn_mask ensure that position i is allowed to attend the unmasked + positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend + while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True`` + is not allowed to attend while ``False`` values will be unchanged. If a FloatTensor + is provided, it will be added to the attention weight. + - Outputs: + - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, + E is the embedding dimension. + - attn_output_weights: :math:`(N, L, S)` where N is the batch size, + L is the target sequence length, S is the source sequence length. + """ + return self.multi_head_attention_forward( + query, + key, + value, + pos_emb, + self.embed_dim, + self.num_heads, + self.in_proj.get_weight(), + self.in_proj.get_bias(), + self.dropout, + self.out_proj.get_weight(), + self.out_proj.get_bias(), + training=self.training, + key_padding_mask=key_padding_mask, + need_weights=need_weights, + attn_mask=attn_mask, + left_context=left_context, + ) + + def rel_shift(self, x: Tensor, left_context: int = 0) -> Tensor: + """Compute relative positional encoding. + Args: + x: Input tensor (batch, head, time1, 2*time1-1). + time1 means the length of query vector. + left_context (int): left context (in frames) used during streaming decoding. + this is used only in real streaming decoding, in other circumstances, + it MUST be 0. + Returns: + Tensor: tensor of shape (batch, head, time1, time2) + (note: time2 has the same value as time1, but it is for + the key, while time1 is for the query). + """ + (batch_size, num_heads, time1, n) = x.shape + + time2 = time1 + left_context + assert ( + n == left_context + 2 * time1 - 1 + ), f"{n} == {left_context} + 2 * {time1} - 1" + + # Note: TorchScript requires explicit arg for stride() + batch_stride = x.stride(0) + head_stride = x.stride(1) + time1_stride = x.stride(2) + n_stride = x.stride(3) + return x.as_strided( + (batch_size, num_heads, time1, time2), + (batch_stride, head_stride, time1_stride - n_stride, n_stride), + storage_offset=n_stride * (time1 - 1), + ) + + def multi_head_attention_forward( + self, + query: Tensor, + key: Tensor, + value: Tensor, + pos_emb: Tensor, + embed_dim_to_check: int, + num_heads: int, + in_proj_weight: Tensor, + in_proj_bias: Tensor, + dropout_p: float, + out_proj_weight: Tensor, + out_proj_bias: Tensor, + training: bool = True, + key_padding_mask: Optional[Tensor] = None, + need_weights: bool = True, + attn_mask: Optional[Tensor] = None, + left_context: int = 0, + ) -> Tuple[Tensor, Optional[Tensor]]: + r""" + Args: + query, key, value: map a query and a set of key-value pairs to an output. + pos_emb: Positional embedding tensor + embed_dim_to_check: total dimension of the model. + num_heads: parallel attention heads. + in_proj_weight, in_proj_bias: input projection weight and bias. + dropout_p: probability of an element to be zeroed. + out_proj_weight, out_proj_bias: the output projection weight and bias. + training: apply dropout if is ``True``. 
+ key_padding_mask: if provided, specified padding elements in the key will + be ignored by the attention. This is an binary mask. When the value is True, + the corresponding value on the attention layer will be filled with -inf. + need_weights: output attn_output_weights. + attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all + the batches while a 3D mask allows to specify a different mask for the entries of each batch. + left_context (int): left context (in frames) used during streaming decoding. + this is used only in real streaming decoding, in other circumstances, + it MUST be 0. + Shape: + Inputs: + - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is + the embedding dimension. + - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is + the embedding dimension. + - pos_emb: :math:`(N, 2*L-1, E)` or :math:`(1, 2*L-1, E)` where L is the target sequence + length, N is the batch size, E is the embedding dimension. + - key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length. + If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions + will be unchanged. If a BoolTensor is provided, the positions with the + value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged. + - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length. + 3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length, + S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked + positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend + while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True`` + are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor + is provided, it will be added to the attention weight. + Outputs: + - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, + E is the embedding dimension. + - attn_output_weights: :math:`(N, L, S)` where N is the batch size, + L is the target sequence length, S is the source sequence length. 
+ """ + + tgt_len, bsz, embed_dim = query.size() + assert embed_dim == embed_dim_to_check + assert key.size(0) == value.size(0) and key.size(1) == value.size(1) + + head_dim = embed_dim // num_heads + assert ( + head_dim * num_heads == embed_dim + ), "embed_dim must be divisible by num_heads" + + scaling = float(head_dim) ** -0.5 + + if torch.equal(query, key) and torch.equal(key, value): + # self-attention + q, k, v = nn.functional.linear( + query, in_proj_weight, in_proj_bias + ).chunk(3, dim=-1) + + elif torch.equal(key, value): + # encoder-decoder attention + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = 0 + _end = embed_dim + _w = in_proj_weight[_start:_end, :] + if _b is not None: + _b = _b[_start:_end] + q = nn.functional.linear(query, _w, _b) + + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = embed_dim + _end = None + _w = in_proj_weight[_start:, :] + if _b is not None: + _b = _b[_start:] + k, v = nn.functional.linear(key, _w, _b).chunk(2, dim=-1) + + else: + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = 0 + _end = embed_dim + _w = in_proj_weight[_start:_end, :] + if _b is not None: + _b = _b[_start:_end] + q = nn.functional.linear(query, _w, _b) + + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = embed_dim + _end = embed_dim * 2 + _w = in_proj_weight[_start:_end, :] + if _b is not None: + _b = _b[_start:_end] + k = nn.functional.linear(key, _w, _b) + + # This is inline in_proj function with in_proj_weight and in_proj_bias + _b = in_proj_bias + _start = embed_dim * 2 + _end = None + _w = in_proj_weight[_start:, :] + if _b is not None: + _b = _b[_start:] + v = nn.functional.linear(value, _w, _b) + + if attn_mask is not None: + assert ( + attn_mask.dtype == torch.float32 + or attn_mask.dtype == torch.float64 + or attn_mask.dtype == torch.float16 + or attn_mask.dtype == torch.uint8 + or attn_mask.dtype == torch.bool + ), "Only float, byte, and bool types are supported for attn_mask, not {}".format( + attn_mask.dtype + ) + if attn_mask.dtype == torch.uint8: + warnings.warn( + "Byte tensor for attn_mask is deprecated. Use bool tensor instead." + ) + attn_mask = attn_mask.to(torch.bool) + + if attn_mask.dim() == 2: + attn_mask = attn_mask.unsqueeze(0) + if list(attn_mask.size()) != [1, query.size(0), key.size(0)]: + raise RuntimeError( + "The size of the 2D attn_mask is not correct." + ) + elif attn_mask.dim() == 3: + if list(attn_mask.size()) != [ + bsz * num_heads, + query.size(0), + key.size(0), + ]: + raise RuntimeError( + "The size of the 3D attn_mask is not correct." + ) + else: + raise RuntimeError( + "attn_mask's dimension {} is not supported".format( + attn_mask.dim() + ) + ) + # attn_mask's dim is 3 now. + + # convert ByteTensor key_padding_mask to bool + if ( + key_padding_mask is not None + and key_padding_mask.dtype == torch.uint8 + ): + warnings.warn( + "Byte tensor for key_padding_mask is deprecated. Use bool tensor instead." 
+ ) + key_padding_mask = key_padding_mask.to(torch.bool) + + q = (q * scaling).contiguous().view(tgt_len, bsz, num_heads, head_dim) + k = k.contiguous().view(-1, bsz, num_heads, head_dim) + v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1) + + src_len = k.size(0) + + if key_padding_mask is not None: + assert key_padding_mask.size(0) == bsz, "{} == {}".format( + key_padding_mask.size(0), bsz + ) + assert key_padding_mask.size(1) == src_len, "{} == {}".format( + key_padding_mask.size(1), src_len + ) + + q = q.transpose(0, 1) # (batch, time1, head, d_k) + + pos_emb_bsz = pos_emb.size(0) + assert pos_emb_bsz in (1, bsz) # actually it is 1 + p = self.linear_pos(pos_emb).view(pos_emb_bsz, -1, num_heads, head_dim) + # (batch, 2*time1, head, d_k) --> (batch, head, d_k, 2*time -1) + p = p.permute(0, 2, 3, 1) + + q_with_bias_u = (q + self._pos_bias_u()).transpose( + 1, 2 + ) # (batch, head, time1, d_k) + + q_with_bias_v = (q + self._pos_bias_v()).transpose( + 1, 2 + ) # (batch, head, time1, d_k) + + # compute attention score + # first compute matrix a and matrix c + # as described in "Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context" Section 3.3 + k = k.permute(1, 2, 3, 0) # (batch, head, d_k, time2) + matrix_ac = torch.matmul( + q_with_bias_u, k + ) # (batch, head, time1, time2) + + # compute matrix b and matrix d + matrix_bd = torch.matmul( + q_with_bias_v, p + ) # (batch, head, time1, 2*time1-1) + matrix_bd = self.rel_shift(matrix_bd, left_context) + + attn_output_weights = ( + matrix_ac + matrix_bd + ) # (batch, head, time1, time2) + + attn_output_weights = attn_output_weights.view( + bsz * num_heads, tgt_len, -1 + ) + + assert list(attn_output_weights.size()) == [ + bsz * num_heads, + tgt_len, + src_len, + ] + + if attn_mask is not None: + if attn_mask.dtype == torch.bool: + attn_output_weights.masked_fill_(attn_mask, float("-inf")) + else: + attn_output_weights += attn_mask + + if key_padding_mask is not None: + attn_output_weights = attn_output_weights.view( + bsz, num_heads, tgt_len, src_len + ) + attn_output_weights = attn_output_weights.masked_fill( + key_padding_mask.unsqueeze(1).unsqueeze(2), + float("-inf"), + ) + attn_output_weights = attn_output_weights.view( + bsz * num_heads, tgt_len, src_len + ) + + attn_output_weights = nn.functional.softmax(attn_output_weights, dim=-1) + + # If we are using dynamic_chunk_training and setting a limited + # num_left_chunks, the attention may only see the padding values which + # will also be masked out by `key_padding_mask`, at this circumstances, + # the whole column of `attn_output_weights` will be `-inf` + # (i.e. be `nan` after softmax), so, we fill `0.0` at the masking + # positions to avoid invalid loss value below. 
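The zero-filling branch that follows can be checked in isolation. Below is a standalone toy example (separate from the recipe code, with tiny hand-made masks) showing why a fully masked row becomes NaN after softmax and how filling the masked positions with 0.0 removes it:

import torch

# (batch * heads, tgt_len, src_len) attention scores, all zero for simplicity
scores = torch.zeros(1, 2, 4)
# attn_mask hides keys 2 and 3 for every query; key_padding_mask pads keys 0 and 1,
# so together every key of every query is masked out.
attn_mask = torch.tensor([[[False, False, True, True],
                           [False, False, True, True]]])
key_padding_mask = torch.tensor([[True, True, False, False]])

scores = scores.masked_fill(attn_mask, float("-inf"))
scores = scores.masked_fill(key_padding_mask.unsqueeze(1), float("-inf"))
probs = scores.softmax(dim=-1)
print(torch.isnan(probs).any())   # tensor(True): softmax over a row of all -inf is NaN

combined_mask = attn_mask | key_padding_mask.unsqueeze(1)
probs = probs.masked_fill(combined_mask, 0.0)
print(torch.isnan(probs).any())   # tensor(False): the NaN positions are zeroed, as in the code below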
+ if ( + attn_mask is not None + and attn_mask.dtype == torch.bool + and key_padding_mask is not None + ): + if attn_mask.size(0) != 1: + attn_mask = attn_mask.view(bsz, num_heads, tgt_len, src_len) + combined_mask = attn_mask | key_padding_mask.unsqueeze( + 1 + ).unsqueeze(2) + else: + # attn_mask.shape == (1, tgt_len, src_len) + combined_mask = attn_mask.unsqueeze( + 0 + ) | key_padding_mask.unsqueeze(1).unsqueeze(2) + + attn_output_weights = attn_output_weights.view( + bsz, num_heads, tgt_len, src_len + ) + attn_output_weights = attn_output_weights.masked_fill( + combined_mask, 0.0 + ) + attn_output_weights = attn_output_weights.view( + bsz * num_heads, tgt_len, src_len + ) + + attn_output_weights = nn.functional.dropout( + attn_output_weights, p=dropout_p, training=training + ) + + attn_output = torch.bmm(attn_output_weights, v) + assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim] + attn_output = ( + attn_output.transpose(0, 1) + .contiguous() + .view(tgt_len, bsz, embed_dim) + ) + attn_output = nn.functional.linear( + attn_output, out_proj_weight, out_proj_bias + ) + + if need_weights: + # average attention weights over heads + attn_output_weights = attn_output_weights.view( + bsz, num_heads, tgt_len, src_len + ) + return attn_output, attn_output_weights.sum(dim=1) / num_heads + else: + return attn_output, None + + +class ConvolutionModule(nn.Module): + """ConvolutionModule in Conformer model. + Modified from https://github.com/espnet/espnet/blob/master/espnet/nets/pytorch_backend/conformer/convolution.py + Args: + channels (int): The number of channels of conv layers. + kernel_size (int): Kernerl size of conv layers. + bias (bool): Whether to use bias in conv layers (default=True). + causal (bool): Whether to use causal convolution. + """ + + def __init__( + self, + channels: int, + kernel_size: int, + bias: bool = True, + causal: bool = False, + ) -> None: + """Construct an ConvolutionModule object.""" + super(ConvolutionModule, self).__init__() + # kernerl_size should be a odd number for 'SAME' padding + assert (kernel_size - 1) % 2 == 0 + self.causal = causal + + self.pointwise_conv1 = ScaledConv1d( + channels, + 2 * channels, + kernel_size=1, + stride=1, + padding=0, + bias=bias, + ) + + # after pointwise_conv1 we put x through a gated linear unit (nn.functional.glu). + # For most layers the normal rms value of channels of x seems to be in the range 1 to 4, + # but sometimes, for some reason, for layer 0 the rms ends up being very large, + # between 50 and 100 for different channels. This will cause very peaky and + # sparse derivatives for the sigmoid gating function, which will tend to make + # the loss function not learn effectively. (for most layers the average absolute values + # are in the range 0.5..9.0, and the average p(x>0), i.e. positive proportion, + # at the output of pointwise_conv1.output is around 0.35 to 0.45 for different + # layers, which likely breaks down as 0.5 for the "linear" half and + # 0.2 to 0.3 for the part that goes into the sigmoid. The idea is that if we + # constrain the rms values to a reasonable range via a constraint of max_abs=10.0, + # it will be in a better position to start learning something, i.e. to latch onto + # the correct range. 
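The causal branch of this module (set up a few lines below via `self.lorder = kernel_size - 1`, and realized in `forward` by left-padding or caching `lorder` frames) can be illustrated with a standalone sketch. The kernel size, channel count and length here are arbitrary example values, not taken from the recipe:

import torch
import torch.nn as nn

kernel_size, channels, time = 31, 8, 100
lorder = kernel_size - 1              # frames of left context a causal depthwise conv needs

conv = nn.Conv1d(channels, channels, kernel_size, groups=channels, padding=0)
x = torch.randn(1, channels, time)    # (batch, channels, time)
x_padded = nn.functional.pad(x, (lorder, 0))   # pad only on the left, so no look-ahead
y = conv(x_padded)
print(y.shape)                        # torch.Size([1, 8, 100]): output length equals input length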
+ self.deriv_balancer1 = ActivationBalancer( + channel_dim=1, max_abs=10.0, min_positive=0.05, max_positive=1.0 + ) + + self.lorder = kernel_size - 1 + padding = (kernel_size - 1) // 2 + if self.causal: + padding = 0 + + self.depthwise_conv = ScaledConv1d( + channels, + channels, + kernel_size, + stride=1, + padding=padding, + groups=channels, + bias=bias, + ) + + self.deriv_balancer2 = ActivationBalancer( + channel_dim=1, min_positive=0.05, max_positive=1.0 + ) + + self.activation = DoubleSwish() + + self.pointwise_conv2 = ScaledConv1d( + channels, + channels, + kernel_size=1, + stride=1, + padding=0, + bias=bias, + initial_scale=0.25, + ) + + def forward( + self, + x: Tensor, + cache: Optional[Tensor] = None, + right_context: int = 0, + ) -> Tuple[Tensor, Tensor]: + """Compute convolution module. + Args: + x: Input tensor (#time, batch, channels). + cache: The cache of depthwise_conv, only used in real streaming + decoding. + right_context: + How many future frames the attention can see in current chunk. + Note: It's not that each individual frame has `right_context` frames + of right context, some have more. + Returns: + If cache is None return the output tensor (#time, batch, channels). + If cache is not None, return a tuple of Tensor, the first one is + the output tensor (#time, batch, channels), the second one is the + new cache for next chunk (#kernel_size - 1, batch, channels). + """ + # exchange the temporal dimension and the feature dimension + x = x.permute(1, 2, 0) # (#batch, channels, time). + + # GLU mechanism + x = self.pointwise_conv1(x) # (batch, 2*channels, time) + + x = self.deriv_balancer1(x) + x = nn.functional.glu(x, dim=1) # (batch, channels, time) + + # 1D Depthwise Conv + if self.causal and self.lorder > 0: + if cache is None: + # Make depthwise_conv causal by + # manualy padding self.lorder zeros to the left + x = nn.functional.pad(x, (self.lorder, 0), "constant", 0.0) + else: + assert ( + not self.training + ), "Cache should be None in training time" + assert cache.size(0) == self.lorder + x = torch.cat([cache.permute(1, 2, 0), x], dim=2) + if right_context > 0: + cache = x.permute(2, 0, 1)[ + -(self.lorder + right_context) : ( # noqa + -right_context + ), + ..., + ] + else: + cache = x.permute(2, 0, 1)[-self.lorder :, ...] # noqa + x = self.depthwise_conv(x) + + x = self.deriv_balancer2(x) + x = self.activation(x) + + x = self.pointwise_conv2(x) # (batch, channel, time) + + # torch.jit.script requires return types be the same as annotated above + if cache is None: + cache = torch.empty(0) + + return x.permute(2, 0, 1), cache + + +class Conv2dSubsampling(nn.Module): + """Convolutional 2D subsampling (to 1/4 length). + Convert an input of shape (N, T, idim) to an output + with shape (N, T', odim), where + T' = ((T-1)//2 - 1)//2, which approximates T' == T//4 + It is based on + https://github.com/espnet/espnet/blob/master/espnet/nets/pytorch_backend/transformer/subsampling.py # noqa + """ + + def __init__( + self, + in_channels: int, + out_channels: int, + layer1_channels: int = 8, + layer2_channels: int = 32, + layer3_channels: int = 128, + ) -> None: + """ + Args: + in_channels: + Number of channels in. The input shape is (N, T, in_channels). + Caution: It requires: T >=7, in_channels >=7 + out_channels + Output dim. 
The output shape is (N, ((T-1)//2 - 1)//2, out_channels) + layer1_channels: + Number of channels in layer1 + layer1_channels: + Number of channels in layer2 + """ + assert in_channels >= 7 + super().__init__() + + self.conv = nn.Sequential( + ScaledConv2d( + in_channels=1, + out_channels=layer1_channels, + kernel_size=3, + padding=1, + ), + ActivationBalancer(channel_dim=1), + DoubleSwish(), + ScaledConv2d( + in_channels=layer1_channels, + out_channels=layer2_channels, + kernel_size=3, + stride=2, + ), + ActivationBalancer(channel_dim=1), + DoubleSwish(), + ScaledConv2d( + in_channels=layer2_channels, + out_channels=layer3_channels, + kernel_size=3, + stride=2, + ), + ActivationBalancer(channel_dim=1), + DoubleSwish(), + ) + self.out = ScaledLinear( + layer3_channels * (((in_channels - 1) // 2 - 1) // 2), out_channels + ) + # set learn_eps=False because out_norm is preceded by `out`, and `out` + # itself has learned scale, so the extra degree of freedom is not + # needed. + self.out_norm = BasicNorm(out_channels, learn_eps=False) + # constrain median of output to be close to zero. + self.out_balancer = ActivationBalancer( + channel_dim=-1, min_positive=0.45, max_positive=0.55 + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """Subsample x. + Args: + x: + Its shape is (N, T, idim). + Returns: + Return a tensor of shape (N, ((T-1)//2 - 1)//2, odim) + """ + # On entry, x is (N, T, idim) + x = x.unsqueeze(1) # (N, T, idim) -> (N, 1, T, idim) i.e., (N, C, H, W) + x = self.conv(x) + # Now x is of shape (N, odim, ((T-1)//2 - 1)//2, ((idim-1)//2 - 1)//2) + b, c, t, f = x.size() + x = self.out(x.transpose(1, 2).contiguous().view(b, t, c * f)) + # Now x is of shape (N, ((T-1)//2 - 1))//2, odim) + x = self.out_norm(x) + x = self.out_balancer(x) + return x + + +if __name__ == "__main__": + feature_dim = 50 + c = Conformer(num_features=feature_dim, d_model=128, nhead=4) + batch_size = 5 + seq_len = 20 + # Just make sure the forward pass runs. + f = c( + torch.randn(batch_size, seq_len, feature_dim), + torch.full((batch_size,), seq_len, dtype=torch.int64), + warmup=0.5, + ) diff --git a/egs/wenetspeech/ASR/pruned_transducer_stateless5/decode.py b/egs/wenetspeech/ASR/pruned_transducer_stateless5/decode.py new file mode 100755 index 000000000..ca997456f --- /dev/null +++ b/egs/wenetspeech/ASR/pruned_transducer_stateless5/decode.py @@ -0,0 +1,778 @@ +#!/usr/bin/env python3 +# +# Copyright 2021 Xiaomi Corporation (Author: Fangjun Kuang) +# Copyright 2022 Xiaomi Corporation (Author: Mingshuang Luo) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
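Before the usage examples below, it may help to spell out the subsampling arithmetic from `Conv2dSubsampling` above, since streaming options such as `--decode-chunk-size` and `--left-context` are counted in frames after subsampling. A minimal check of the length formula:

def subsampled_length(t: int) -> int:
    # Two stride-2, kernel-3, no-padding convolutions: T' = ((T - 1) // 2 - 1) // 2
    return ((t - 1) // 2 - 1) // 2

for t in (20, 100, 103):
    print(t, "->", subsampled_length(t))
# 20 -> 4, 100 -> 24, 103 -> 25, i.e. roughly T // 4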
+""" +When training with the L subset, the offline usage: +(1) greedy search +./pruned_transducer_stateless5/decode.py \ + --epoch 4 \ + --avg 1 \ + --exp-dir ./pruned_transducer_stateless5/exp_L_offline \ + --lang-dir data/lang_char \ + --max-duration 100 \ + --decoding-method greedy_search + +(2) modified beam search +./pruned_transducer_stateless5/decode.py \ + --epoch 4 \ + --avg 1 \ + --exp-dir ./pruned_transducer_stateless5/exp_L_offline \ + --lang-dir data/lang_char \ + --max-duration 100 \ + --decoding-method modified_beam_search \ + --beam-size 4 + +(3) fast beam search +./pruned_transducer_stateless5/decode.py \ + --epoch 4 \ + --avg 1 \ + --exp-dir ./pruned_transducer_stateless5/exp_L_offline \ + --lang-dir data/lang_char \ + --max-duration 1500 \ + --decoding-method fast_beam_search \ + --beam 4 \ + --max-contexts 4 \ + --max-states 8 + +When training with the L subset, the streaming usage: +(1) greedy search +./pruned_transducer_stateless5/decode.py \ + --lang-dir data/lang_char \ + --exp-dir pruned_transducer_stateless5/exp_L_streaming \ + --use-averaged-model True \ + --max-duration 600 \ + --epoch 7 \ + --avg 1 \ + --decoding-method greedy_search \ + --simulate-streaming 1 \ + --causal-convolution 1 \ + --decode-chunk-size 16 \ + --left-context 64 + +(2) modified beam search +./pruned_transducer_stateless5/decode.py \ + --lang-dir data/lang_char \ + --exp-dir pruned_transducer_stateless5/exp_L_streaming \ + --use-averaged-model True \ + --max-duration 600 \ + --epoch 7 \ + --avg 1 \ + --decoding-method modified_beam_search \ + --simulate-streaming 1 \ + --causal-convolution 1 \ + --decode-chunk-size 16 \ + --left-context 64 + +(3) fast beam search +./pruned_transducer_stateless5/decode.py \ + --lang-dir data/lang_char \ + --exp-dir pruned_transducer_stateless5/exp_L_streaming \ + --use-averaged-model True \ + --max-duration 600 \ + --epoch 7 \ + --avg 1 \ + --decoding-method fast_beam_search \ + --simulate-streaming 1 \ + --causal-convolution 1 \ + --decode-chunk-size 16 \ + --left-context 64 +""" + + +import argparse +import logging +import math +from collections import defaultdict +from pathlib import Path +from typing import Dict, List, Optional, Tuple + +import k2 +import torch +import torch.nn as nn +from asr_datamodule import WenetSpeechAsrDataModule +from beam_search import ( + beam_search, + fast_beam_search_one_best, + greedy_search, + greedy_search_batch, + modified_beam_search, +) +from train import add_model_arguments, get_params, get_transducer_model + +from icefall.checkpoint import ( + average_checkpoints, + average_checkpoints_with_averaged_model, + find_checkpoints, + load_checkpoint, +) +from icefall.lexicon import Lexicon +from icefall.utils import ( + AttributeDict, + setup_logger, + store_transcripts, + str2bool, + write_error_stats, +) + +LOG_EPS = math.log(1e-10) + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--epoch", + type=int, + default=30, + help="""It specifies the checkpoint to use for decoding. + Note: Epoch counts from 1. + You can specify --avg to use more checkpoints for model averaging.""", + ) + + parser.add_argument( + "--iter", + type=int, + default=0, + help="""If positive, --epoch is ignored and it + will use the checkpoint exp_dir/checkpoint-iter.pt. + You can specify --avg to use more checkpoints for model averaging. + """, + ) + + parser.add_argument( + "--avg", + type=int, + default=15, + help="Number of checkpoints to average. 
Automatically select " + "consecutive checkpoints before the checkpoint specified by " + "'--epoch' and '--iter'", + ) + + parser.add_argument( + "--use-averaged-model", + type=str2bool, + default=True, + help="Whether to load averaged model. Currently it only supports " + "using --epoch. If True, it would decode with the averaged model " + "over the epoch range from `epoch-avg` (excluded) to `epoch`." + "Actually only the models with epoch number of `epoch-avg` and " + "`epoch` are loaded for averaging. ", + ) + + parser.add_argument( + "--exp-dir", + type=str, + default="pruned_transducer_stateless5/exp", + help="The experiment dir", + ) + + parser.add_argument( + "--lang-dir", + type=str, + default="data/lang_char", + help="""The lang dir + It contains language related input files such as + "lexicon.txt" + """, + ) + + parser.add_argument( + "--decoding-method", + type=str, + default="greedy_search", + help="""Possible values are: + - greedy_search + - beam_search + - modified_beam_search + - fast_beam_search + """, + ) + + parser.add_argument( + "--beam-size", + type=int, + default=4, + help="""An interger indicating how many candidates we will keep for each + frame. Used only when --decoding-method is beam_search or + modified_beam_search.""", + ) + + parser.add_argument( + "--beam", + type=float, + default=4, + help="""A floating point value to calculate the cutoff score during beam + search (i.e., `cutoff = max-score - beam`), which is the same as the + `beam` in Kaldi. + Used only when --decoding-method is fast_beam_search""", + ) + + parser.add_argument( + "--max-contexts", + type=int, + default=4, + help="""Used only when --decoding-method is + fast_beam_search""", + ) + + parser.add_argument( + "--max-states", + type=int, + default=8, + help="""Used only when --decoding-method is + fast_beam_search""", + ) + + parser.add_argument( + "--context-size", + type=int, + default=2, + help="The context size in the decoder. 1 means bigram; " + "2 means tri-gram", + ) + parser.add_argument( + "--max-sym-per-frame", + type=int, + default=1, + help="""Maximum number of symbols per frame. + Used only when --decoding_method is greedy_search""", + ) + + parser.add_argument( + "--simulate-streaming", + type=str2bool, + default=False, + help="""Whether to simulate streaming in decoding, this is a good way to + test a streaming model. + """, + ) + + parser.add_argument( + "--decode-chunk-size", + type=int, + default=16, + help="The chunk size for decoding (in frames after subsampling)", + ) + + parser.add_argument( + "--left-context", + type=int, + default=64, + help="left context can be seen during decoding (in frames after subsampling)", + ) + + add_model_arguments(parser) + + return parser + + +def decode_one_batch( + params: AttributeDict, + model: nn.Module, + lexicon: Lexicon, + batch: dict, + decoding_graph: Optional[k2.Fsa] = None, +) -> Dict[str, List[List[str]]]: + """Decode one batch and return the result in a dict. The dict has the + following format: + + - key: It indicates the setting used for decoding. For example, + if greedy_search is used, it would be "greedy_search" + If beam search with a beam size of 7 is used, it would be + "beam_7" + - value: It contains the decoding result. `len(value)` equals to + batch size. `value[i]` is the decoding result for the i-th + utterance in the given batch. + Args: + params: + It's the return value of :func:`get_params`. + model: + The neural model. 
+ batch: + It is the return value from iterating + `lhotse.dataset.K2SpeechRecognitionDataset`. See its documentation + for the format of the `batch`. + decoding_graph: + The decoding graph. Can be either a `k2.trivial_graph` or HLG, Used + only when --decoding_method is fast_beam_search. + Returns: + Return the decoding result. See above description for the format of + the returned dict. + """ + device = next(model.parameters()).device + feature = batch["inputs"] + assert feature.ndim == 3 + + feature = feature.to(device) + # at entry, feature is (N, T, C) + + supervisions = batch["supervisions"] + feature_lens = supervisions["num_frames"].to(device) + + feature_lens += params.left_context + feature = torch.nn.functional.pad( + feature, + pad=(0, 0, 0, params.left_context), + value=LOG_EPS, + ) + + if params.simulate_streaming: + encoder_out, encoder_out_lens, _ = model.encoder.streaming_forward( + x=feature, + x_lens=feature_lens, + chunk_size=params.decode_chunk_size, + left_context=params.left_context, + simulate_streaming=True, + ) + else: + encoder_out, encoder_out_lens = model.encoder( + x=feature, x_lens=feature_lens + ) + + hyps = [] + + if params.decoding_method == "fast_beam_search": + hyp_tokens = fast_beam_search_one_best( + model=model, + decoding_graph=decoding_graph, + encoder_out=encoder_out, + encoder_out_lens=encoder_out_lens, + beam=params.beam, + max_contexts=params.max_contexts, + max_states=params.max_states, + ) + for i in range(encoder_out.size(0)): + hyps.append([lexicon.token_table[idx] for idx in hyp_tokens[i]]) + elif ( + params.decoding_method == "greedy_search" + and params.max_sym_per_frame == 1 + ): + hyp_tokens = greedy_search_batch( + model=model, + encoder_out=encoder_out, + encoder_out_lens=encoder_out_lens, + ) + for i in range(encoder_out.size(0)): + hyps.append([lexicon.token_table[idx] for idx in hyp_tokens[i]]) + elif params.decoding_method == "modified_beam_search": + hyp_tokens = modified_beam_search( + model=model, + encoder_out=encoder_out, + beam=params.beam_size, + encoder_out_lens=encoder_out_lens, + ) + for i in range(encoder_out.size(0)): + hyps.append([lexicon.token_table[idx] for idx in hyp_tokens[i]]) + else: + batch_size = encoder_out.size(0) + + for i in range(batch_size): + # fmt: off + encoder_out_i = encoder_out[i:i+1, :encoder_out_lens[i]] + # fmt: on + if params.decoding_method == "greedy_search": + hyp = greedy_search( + model=model, + encoder_out=encoder_out_i, + max_sym_per_frame=params.max_sym_per_frame, + ) + elif params.decoding_method == "beam_search": + hyp = beam_search( + model=model, + encoder_out=encoder_out_i, + beam=params.beam_size, + ) + else: + raise ValueError( + f"Unsupported decoding method: {params.decoding_method}" + ) + hyps.append([lexicon.token_table[idx] for idx in hyp]) + + if params.decoding_method == "greedy_search": + return {"greedy_search": hyps} + elif params.decoding_method == "fast_beam_search": + return { + ( + f"beam_{params.beam}_" + f"max_contexts_{params.max_contexts}_" + f"max_states_{params.max_states}" + ): hyps + } + else: + return {f"beam_size_{params.beam_size}": hyps} + + +def decode_dataset( + dl: torch.utils.data.DataLoader, + params: AttributeDict, + model: nn.Module, + lexicon: Lexicon, + decoding_graph: Optional[k2.Fsa] = None, +) -> Dict[str, List[Tuple[List[str], List[str]]]]: + """Decode dataset. + + Args: + dl: + PyTorch's dataloader containing the dataset to decode. + params: + It is returned by :func:`get_params`. + model: + The neural model. 
+ decoding_graph: + The decoding graph. Can be either a `k2.trivial_graph` or HLG, Used + only when --decoding_method is fast_beam_search. + Returns: + Return a dict, whose key may be "greedy_search" if greedy search + is used, or it may be "beam_7" if beam size of 7 is used. + Its value is a list of tuples. Each tuple contains two elements: + The first is the reference transcript, and the second is the + predicted result. + """ + num_cuts = 0 + + try: + num_batches = len(dl) + except TypeError: + num_batches = "?" + + if params.decoding_method == "greedy_search": + log_interval = 100 + else: + log_interval = 2 + + results = defaultdict(list) + for batch_idx, batch in enumerate(dl): + texts = batch["supervisions"]["text"] + texts = [list(str(text)) for text in texts] + + hyps_dict = decode_one_batch( + params=params, + model=model, + lexicon=lexicon, + decoding_graph=decoding_graph, + batch=batch, + ) + + for name, hyps in hyps_dict.items(): + this_batch = [] + assert len(hyps) == len(texts) + for hyp_words, ref_text in zip(hyps, texts): + this_batch.append((ref_text, hyp_words)) + + results[name].extend(this_batch) + + num_cuts += len(texts) + + if batch_idx % log_interval == 0: + batch_str = f"{batch_idx}/{num_batches}" + + logging.info( + f"batch {batch_str}, cuts processed until now is {num_cuts}" + ) + return results + + +def save_results( + params: AttributeDict, + test_set_name: str, + results_dict: Dict[str, List[Tuple[List[int], List[int]]]], +): + test_set_wers = dict() + for key, results in results_dict.items(): + recog_path = ( + params.res_dir / f"recogs-{test_set_name}-{key}-{params.suffix}.txt" + ) + store_transcripts(filename=recog_path, texts=results) + logging.info(f"The transcripts are stored in {recog_path}") + + # The following prints out WERs, per-word error statistics and aligned + # ref/hyp pairs. 
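For reference, a hypothetical example of the `results_dict` layout that `decode_dataset()` above accumulates and this function writes out for the char-based WenetSpeech setup; the key and the transcripts are made up purely for illustration:

results_dict = {
    "greedy_search": [
        (list("好久不见"), list("好久不见")),   # (reference chars, hypothesis tokens)
        (list("今天天气"), list("今天天汽")),   # one substitution error
    ]
}
for key, pairs in results_dict.items():
    print(key, "utterances:", len(pairs))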
+ errs_filename = ( + params.res_dir / f"errs-{test_set_name}-{key}-{params.suffix}.txt" + ) + with open(errs_filename, "w") as f: + wer = write_error_stats( + f, f"{test_set_name}-{key}", results, enable_log=True + ) + test_set_wers[key] = wer + + logging.info("Wrote detailed error stats to {}".format(errs_filename)) + + test_set_wers = sorted(test_set_wers.items(), key=lambda x: x[1]) + errs_info = ( + params.res_dir + / f"wer-summary-{test_set_name}-{key}-{params.suffix}.txt" + ) + with open(errs_info, "w") as f: + print("settings\tWER", file=f) + for key, val in test_set_wers: + print("{}\t{}".format(key, val), file=f) + + s = "\nFor {}, WER of different settings are:\n".format(test_set_name) + note = "\tbest for {}".format(test_set_name) + for key, val in test_set_wers: + s += "{}\t{}{}\n".format(key, val, note) + note = "" + logging.info(s) + + +@torch.no_grad() +def main(): + parser = get_parser() + WenetSpeechAsrDataModule.add_arguments(parser) + args = parser.parse_args() + args.exp_dir = Path(args.exp_dir) + + params = get_params() + params.update(vars(args)) + + assert params.decoding_method in ( + "greedy_search", + "beam_search", + "fast_beam_search", + "modified_beam_search", + ) + params.res_dir = params.exp_dir / params.decoding_method + + params.suffix = f"epoch-{params.epoch}-avg-{params.avg}" + if "fast_beam_search" in params.decoding_method: + params.suffix += f"-beam-{params.beam}" + params.suffix += f"-max-contexts-{params.max_contexts}" + params.suffix += f"-max-states-{params.max_states}" + elif "beam_search" in params.decoding_method: + params.suffix += f"-beam-{params.beam_size}" + else: + params.suffix += f"-context-{params.context_size}" + params.suffix += f"-max-sym-per-frame-{params.max_sym_per_frame}" + + setup_logger(f"{params.res_dir}/log-decode-{params.suffix}") + logging.info("Decoding started") + + device = torch.device("cpu") + if torch.cuda.is_available(): + device = torch.device("cuda", 0) + + logging.info(f"Device: {device}") + + lexicon = Lexicon(params.lang_dir) + params.blank_id = lexicon.token_table[""] + params.vocab_size = max(lexicon.tokens) + 1 + + if params.simulate_streaming: + assert ( + params.causal_convolution + ), "Decoding in streaming requires causal convolution" + + logging.info(params) + + logging.info("About to create model") + model = get_transducer_model(params) + + if not params.use_averaged_model: + if params.iter > 0: + filenames = find_checkpoints( + params.exp_dir, iteration=-params.iter + )[: params.avg] + if len(filenames) == 0: + raise ValueError( + f"No checkpoints found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + elif len(filenames) < params.avg: + raise ValueError( + f"Not enough checkpoints ({len(filenames)}) found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + logging.info(f"averaging {filenames}") + model.to(device) + model.load_state_dict(average_checkpoints(filenames, device=device)) + elif params.avg == 1: + load_checkpoint(f"{params.exp_dir}/epoch-{params.epoch}.pt", model) + else: + start = params.epoch - params.avg + 1 + filenames = [] + for i in range(start, params.epoch + 1): + if i >= 1: + filenames.append(f"{params.exp_dir}/epoch-{i}.pt") + logging.info(f"averaging {filenames}") + model.to(device) + model.load_state_dict(average_checkpoints(filenames, device=device)) + else: + if params.iter > 0: + filenames = find_checkpoints( + params.exp_dir, iteration=-params.iter + )[: params.avg + 1] + if len(filenames) == 0: + raise ValueError( + f"No checkpoints found for" + f" --iter 
{params.iter}, --avg {params.avg}" + ) + elif len(filenames) < params.avg + 1: + raise ValueError( + f"Not enough checkpoints ({len(filenames)}) found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + filename_start = filenames[-1] + filename_end = filenames[0] + logging.info( + "Calculating the averaged model over iteration checkpoints" + f" from {filename_start} (excluded) to {filename_end}" + ) + model.to(device) + model.load_state_dict( + average_checkpoints_with_averaged_model( + filename_start=filename_start, + filename_end=filename_end, + device=device, + ) + ) + else: + assert params.avg > 0, params.avg + start = params.epoch - params.avg + assert start >= 1, start + filename_start = f"{params.exp_dir}/epoch-{start}.pt" + filename_end = f"{params.exp_dir}/epoch-{params.epoch}.pt" + logging.info( + f"Calculating the averaged model over epoch range from " + f"{start} (excluded) to {params.epoch}" + ) + model.to(device) + model.load_state_dict( + average_checkpoints_with_averaged_model( + filename_start=filename_start, + filename_end=filename_end, + device=device, + ) + ) + + model.to(device) + model.eval() + model.device = device + + if params.decoding_method == "fast_beam_search": + decoding_graph = k2.trivial_graph(params.vocab_size - 1, device=device) + else: + decoding_graph = None + + num_param = sum([p.numel() for p in model.parameters()]) + logging.info(f"Number of model parameters: {num_param}") + + # Note: Please use "pip install webdataset==0.1.103" + # for installing the webdataset. + import glob + import os + + from lhotse import CutSet + from lhotse.dataset.webdataset import export_to_webdataset + + wenetspeech = WenetSpeechAsrDataModule(args) + + dev = "dev" + test_net = "test_net" + test_meeting = "test_meeting" + + if not os.path.exists(f"{dev}/shared-0.tar"): + os.makedirs(dev) + dev_cuts = wenetspeech.valid_cuts() + export_to_webdataset( + dev_cuts, + output_path=f"{dev}/shared-%d.tar", + shard_size=300, + ) + + if not os.path.exists(f"{test_net}/shared-0.tar"): + os.makedirs(test_net) + test_net_cuts = wenetspeech.test_net_cuts() + export_to_webdataset( + test_net_cuts, + output_path=f"{test_net}/shared-%d.tar", + shard_size=300, + ) + + if not os.path.exists(f"{test_meeting}/shared-0.tar"): + os.makedirs(test_meeting) + test_meeting_cuts = wenetspeech.test_meeting_cuts() + export_to_webdataset( + test_meeting_cuts, + output_path=f"{test_meeting}/shared-%d.tar", + shard_size=300, + ) + + dev_shards = [ + str(path) + for path in sorted(glob.glob(os.path.join(dev, "shared-*.tar"))) + ] + cuts_dev_webdataset = CutSet.from_webdataset( + dev_shards, + split_by_worker=True, + split_by_node=True, + shuffle_shards=True, + ) + + test_net_shards = [ + str(path) + for path in sorted(glob.glob(os.path.join(test_net, "shared-*.tar"))) + ] + cuts_test_net_webdataset = CutSet.from_webdataset( + test_net_shards, + split_by_worker=True, + split_by_node=True, + shuffle_shards=True, + ) + + test_meeting_shards = [ + str(path) + for path in sorted( + glob.glob(os.path.join(test_meeting, "shared-*.tar")) + ) + ] + cuts_test_meeting_webdataset = CutSet.from_webdataset( + test_meeting_shards, + split_by_worker=True, + split_by_node=True, + shuffle_shards=True, + ) + + dev_dl = wenetspeech.valid_dataloaders(cuts_dev_webdataset) + test_net_dl = wenetspeech.test_dataloaders(cuts_test_net_webdataset) + test_meeting_dl = wenetspeech.test_dataloaders(cuts_test_meeting_webdataset) + + test_sets = ["DEV", "TEST_NET", "TEST_MEETING"] + test_dl = [dev_dl, test_net_dl, test_meeting_dl] + + 
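As a quick illustration of the `--use-averaged-model` epoch range handled earlier in `main()` (the averaged model covers the epochs after the excluded starting checkpoint), assuming example values `--epoch 7 --avg 3`:

epoch, avg = 7, 3
start = epoch - avg                       # 4, used only as the excluded starting point
filename_start = f"exp/epoch-{start}.pt"
filename_end = f"exp/epoch-{epoch}.pt"
print(f"average over epochs {start + 1}..{epoch}: {filename_start} (excluded) -> {filename_end}")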
for test_set, test_dl in zip(test_sets, test_dl): + results_dict = decode_dataset( + dl=test_dl, + params=params, + model=model, + lexicon=lexicon, + decoding_graph=decoding_graph, + ) + save_results( + params=params, + test_set_name=test_set, + results_dict=results_dict, + ) + + logging.info("Done!") + + +if __name__ == "__main__": + main() diff --git a/egs/wenetspeech/ASR/pruned_transducer_stateless5/decode_stream.py b/egs/wenetspeech/ASR/pruned_transducer_stateless5/decode_stream.py new file mode 100644 index 000000000..6c0e9ba19 --- /dev/null +++ b/egs/wenetspeech/ASR/pruned_transducer_stateless5/decode_stream.py @@ -0,0 +1,147 @@ +# Copyright 2022 Xiaomi Corp. (authors: Wei Kang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math +from typing import List, Optional, Tuple + +import k2 +import torch +from beam_search import Hypothesis, HypothesisList + +from icefall.utils import AttributeDict + + +class DecodeStream(object): + def __init__( + self, + params: AttributeDict, + initial_states: List[torch.Tensor], + decoding_graph: Optional[k2.Fsa] = None, + device: torch.device = torch.device("cpu"), + ) -> None: + """ + Args: + initial_states: + Initial decode states of the model, e.g. the return value of + `get_init_state` in conformer.py + decoding_graph: + Decoding graph used for decoding, may be a TrivialGraph or a HLG. + Used only when decoding_method is fast_beam_search. + device: + The device to run this stream. + """ + if params.decoding_method == "fast_beam_search": + assert decoding_graph is not None + assert device == decoding_graph.device + + self.params = params + self.LOG_EPS = math.log(1e-10) + + self.states = initial_states + + # It contains a 2-D tensors representing the feature frames. + self.features: torch.Tensor = None + + self.num_frames: int = 0 + # how many frames have been processed. (before subsampling). + # we only modify this value in `func:get_feature_frames`. + self.num_processed_frames: int = 0 + + self._done: bool = False + + # The transcript of current utterance. + self.ground_truth: str = "" + + # The decoding result (partial or final) of current utterance. + self.hyp: List = [] + + # how many frames have been processed, after subsampling (i.e. a + # cumulative sum of the second return value of + # encoder.streaming_forward + self.done_frames: int = 0 + + self.pad_length = ( + params.right_context + 2 + ) * params.subsampling_factor + 3 + + if params.decoding_method == "greedy_search": + self.hyp = [params.blank_id] * params.context_size + elif params.decoding_method == "modified_beam_search": + self.hyps = HypothesisList() + self.hyps.add( + Hypothesis( + ys=[params.blank_id] * params.context_size, + log_prob=torch.zeros(1, dtype=torch.float32, device=device), + ) + ) + elif params.decoding_method == "fast_beam_search": + # The rnnt_decoding_stream for fast_beam_search. 
+ self.rnnt_decoding_stream: k2.RnntDecodingStream = ( + k2.RnntDecodingStream(decoding_graph) + ) + else: + raise ValueError( + f"Unsupported decoding method: {params.decoding_method}" + ) + + @property + def done(self) -> bool: + """Return True if all the features are processed.""" + return self._done + + def set_features( + self, + features: torch.Tensor, + ) -> None: + """Set features tensor of current utterance.""" + assert features.dim() == 2, features.dim() + self.features = torch.nn.functional.pad( + features, + (0, 0, 0, self.pad_length), + mode="constant", + value=self.LOG_EPS, + ) + self.num_frames = self.features.size(0) + + def get_feature_frames(self, chunk_size: int) -> Tuple[torch.Tensor, int]: + """Consume chunk_size frames of features""" + chunk_length = chunk_size + self.pad_length + + ret_length = min( + self.num_frames - self.num_processed_frames, chunk_length + ) + + ret_features = self.features[ + self.num_processed_frames : self.num_processed_frames # noqa + + ret_length + ] + + self.num_processed_frames += chunk_size + if self.num_processed_frames >= self.num_frames: + self._done = True + + return ret_features, ret_length + + def decoding_result(self) -> List[int]: + """Obtain current decoding result.""" + if self.params.decoding_method == "greedy_search": + return self.hyp[self.params.context_size :] # noqa + elif self.params.decoding_method == "modified_beam_search": + best_hyp = self.hyps.get_most_probable(length_norm=True) + return best_hyp.ys[self.params.context_size :] # noqa + else: + assert self.params.decoding_method == "fast_beam_search" + return self.hyp diff --git a/egs/wenetspeech/ASR/pruned_transducer_stateless5/decoder.py b/egs/wenetspeech/ASR/pruned_transducer_stateless5/decoder.py new file mode 120000 index 000000000..6775ee67e --- /dev/null +++ b/egs/wenetspeech/ASR/pruned_transducer_stateless5/decoder.py @@ -0,0 +1 @@ +../../../librispeech/ASR/pruned_transducer_stateless5/decoder.py \ No newline at end of file diff --git a/egs/wenetspeech/ASR/pruned_transducer_stateless5/encoder_interface.py b/egs/wenetspeech/ASR/pruned_transducer_stateless5/encoder_interface.py new file mode 120000 index 000000000..972e44ca4 --- /dev/null +++ b/egs/wenetspeech/ASR/pruned_transducer_stateless5/encoder_interface.py @@ -0,0 +1 @@ +../../../librispeech/ASR/pruned_transducer_stateless5/encoder_interface.py \ No newline at end of file diff --git a/egs/wenetspeech/ASR/pruned_transducer_stateless5/export.py b/egs/wenetspeech/ASR/pruned_transducer_stateless5/export.py new file mode 100644 index 000000000..d0a7fd69f --- /dev/null +++ b/egs/wenetspeech/ASR/pruned_transducer_stateless5/export.py @@ -0,0 +1,209 @@ +# Copyright 2021 Xiaomi Corporation (Author: Fangjun Kuang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script converts several saved checkpoints +# to a single one using model averaging. 
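Before the export usage below, here is a small numeric sketch of how `DecodeStream.get_feature_frames()` above walks through an utterance: each call advances by `chunk_size` raw frames but returns up to `chunk_size + pad_length` frames so the encoder sees enough right context. The concrete numbers are example values, not the recipe defaults:

right_context, subsampling_factor = 4, 4
chunk_size = 64                                   # raw feature frames consumed per call
pad_length = (right_context + 2) * subsampling_factor + 3   # 27, as in DecodeStream
num_frames = 200 + pad_length                     # 200 raw frames plus the one-time tail padding
processed = 0

while processed < num_frames:
    ret_length = min(num_frames - processed, chunk_size + pad_length)
    print(f"offset {processed}: return {ret_length} frames")
    processed += chunk_size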
+""" +Usage for offline: +./pruned_transducer_stateless5/export.py \ + --exp-dir ./pruned_transducer_stateless5/exp_L_offline \ + --lang-dir data/lang_char \ + --epoch 4 \ + --avg 1 + +It will generate a file exp_dir/pretrained.pt for offline ASR. + +./pruned_transducer_stateless5/export.py \ + --exp-dir ./pruned_transducer_stateless5/exp_L_offline \ + --lang-dir data/lang_char \ + --epoch 4 \ + --avg 1 \ + --jit True + +It will generate a file exp_dir/cpu_jit.pt for offline ASR. + +Usage for streaming: +./pruned_transducer_stateless5/export.py \ + --exp-dir ./pruned_transducer_stateless5/exp_L_streaming \ + --lang-dir data/lang_char \ + --epoch 7 \ + --avg 1 + +It will generate a file exp_dir/pretrained.pt for streaming ASR. + +./pruned_transducer_stateless5/export.py \ + --exp-dir ./pruned_transducer_stateless5/exp_L_streaming \ + --lang-dir data/lang_char \ + --epoch 7 \ + --avg 1 \ + --jit True + +It will generate a file exp_dir/cpu_jit.pt for streaming ASR. + +To use the generated file with `pruned_transducer_stateless5/decode.py`, +you can do: + + cd /path/to/exp_dir + ln -s pretrained.pt epoch-9999.pt + + cd /path/to/egs/wenetspeech/ASR + ./pruned_transducer_stateless5/decode.py \ + --exp-dir ./pruned_transducer_stateless5/exp \ + --epoch 4 \ + --avg 1 \ + --decoding-method greedy_search \ + --max-duration 100 \ + --lang-dir data/lang_char +""" + +import argparse +import logging +from pathlib import Path + +import torch +from train import add_model_arguments, get_params, get_transducer_model + +from icefall.checkpoint import average_checkpoints, load_checkpoint +from icefall.lexicon import Lexicon +from icefall.utils import str2bool + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--epoch", + type=int, + default=28, + help="It specifies the checkpoint to use for decoding." + "Note: Epoch counts from 0.", + ) + + parser.add_argument( + "--avg", + type=int, + default=15, + help="Number of checkpoints to average. Automatically select " + "consecutive checkpoints before the checkpoint specified by " + "'--epoch'. ", + ) + + parser.add_argument( + "--exp-dir", + type=str, + default="pruned_transducer_stateless5/exp", + help="""It specifies the directory where all training related + files, e.g., checkpoints, log, etc, are saved + """, + ) + + parser.add_argument( + "--lang-dir", + type=str, + default="data/lang_char", + help="The lang dir", + ) + + parser.add_argument( + "--jit", + type=str2bool, + default=False, + help="""True to save a model after applying torch.jit.script. + """, + ) + + parser.add_argument( + "--context-size", + type=int, + default=2, + help="The context size in the decoder. 
1 means bigram; " + "2 means tri-gram", + ) + add_model_arguments(parser) + + return parser + + +def main(): + args = get_parser().parse_args() + args.exp_dir = Path(args.exp_dir) + + params = get_params() + params.update(vars(args)) + + device = torch.device("cpu") + if torch.cuda.is_available(): + device = torch.device("cuda", 0) + + logging.info(f"device: {device}") + + lexicon = Lexicon(params.lang_dir) + + params.blank_id = 0 + params.vocab_size = max(lexicon.tokens) + 1 + + logging.info(params) + + logging.info("About to create model") + model = get_transducer_model(params) + + model.to(device) + if params.avg == 1: + load_checkpoint(f"{params.exp_dir}/epoch-{params.epoch}.pt", model) + else: + start = params.epoch - params.avg + 1 + filenames = [] + for i in range(start, params.epoch + 1): + if start >= 0: + filenames.append(f"{params.exp_dir}/epoch-{i}.pt") + logging.info(f"averaging {filenames}") + model.to(device) + model.load_state_dict(average_checkpoints(filenames, device=device)) + + model.eval() + + model.to("cpu") + model.eval() + + if params.jit: + # We won't use the forward() method of the model in C++, so just ignore + # it here. + # Otherwise, one of its arguments is a ragged tensor and is not + # torch scriptabe. + model.__class__.forward = torch.jit.ignore(model.__class__.forward) + logging.info("Using torch.jit.script") + model = torch.jit.script(model) + filename = params.exp_dir / "cpu_jit.pt" + model.save(str(filename)) + logging.info(f"Saved to {filename}") + else: + logging.info("Not using torch.jit.script") + # Save it using a format so that it can be loaded + # by :func:`load_checkpoint` + filename = params.exp_dir / "pretrained.pt" + torch.save({"model": model.state_dict()}, str(filename)) + logging.info(f"Saved to {filename}") + + +if __name__ == "__main__": + formatter = ( + "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s" + ) + + logging.basicConfig(format=formatter, level=logging.INFO) + main() diff --git a/egs/wenetspeech/ASR/pruned_transducer_stateless5/joiner.py b/egs/wenetspeech/ASR/pruned_transducer_stateless5/joiner.py new file mode 120000 index 000000000..f5279e151 --- /dev/null +++ b/egs/wenetspeech/ASR/pruned_transducer_stateless5/joiner.py @@ -0,0 +1 @@ +../../../librispeech/ASR/pruned_transducer_stateless5/joiner.py \ No newline at end of file diff --git a/egs/wenetspeech/ASR/pruned_transducer_stateless5/model.py b/egs/wenetspeech/ASR/pruned_transducer_stateless5/model.py new file mode 120000 index 000000000..7b417fd89 --- /dev/null +++ b/egs/wenetspeech/ASR/pruned_transducer_stateless5/model.py @@ -0,0 +1 @@ +../../../librispeech/ASR/pruned_transducer_stateless5/model.py \ No newline at end of file diff --git a/egs/wenetspeech/ASR/pruned_transducer_stateless5/optim.py b/egs/wenetspeech/ASR/pruned_transducer_stateless5/optim.py new file mode 120000 index 000000000..210374f22 --- /dev/null +++ b/egs/wenetspeech/ASR/pruned_transducer_stateless5/optim.py @@ -0,0 +1 @@ +../../../librispeech/ASR/pruned_transducer_stateless5/optim.py \ No newline at end of file diff --git a/egs/wenetspeech/ASR/pruned_transducer_stateless5/pretrained.py b/egs/wenetspeech/ASR/pruned_transducer_stateless5/pretrained.py new file mode 100644 index 000000000..1b064c874 --- /dev/null +++ b/egs/wenetspeech/ASR/pruned_transducer_stateless5/pretrained.py @@ -0,0 +1,343 @@ +#!/usr/bin/env python3 +# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang) +# 2022 Xiaomi Crop. 
(authors: Mingshuang Luo) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Offline Usage: +(1) greedy search +./pruned_transducer_stateless5/pretrained.py \ + --checkpoint ./pruned_transducer_stateless5/exp_L_offline/pretrained.pt \ + --lang-dir ./data/lang_char \ + --method greedy_search \ + --max-sym-per-frame 1 \ + /path/to/foo.wav \ + /path/to/bar.wav +(2) modified beam search +./pruned_transducer_stateless5/pretrained.py \ + --checkpoint ./pruned_transducer_stateless5/exp_L_offline/pretrained.pt \ + --lang-dir ./data/lang_char \ + --method modified_beam_search \ + --beam-size 4 \ + /path/to/foo.wav \ + /path/to/bar.wav +(3) fast beam search +./pruned_transducer_stateless5/pretrained.py \ + --checkpoint ./pruned_transducer_stateless/exp_L_offline/pretrained.pt \ + --lang-dir ./data/lang_char \ + --method fast_beam_search \ + --beam 4 \ + --max-contexts 4 \ + --max-states 8 \ + /path/to/foo.wav \ + /path/to/bar.wav +You can also use `./pruned_transducer_stateless5/exp_L_offline/epoch-xx.pt`. +Note: ./pruned_transducer_stateless5/exp_L_offline/pretrained.pt is generated by +./pruned_transducer_stateless5/export.py +""" + + +import argparse +import logging +import math +from typing import List + +import k2 +import kaldifeat +import torch +import torchaudio +from beam_search import ( + beam_search, + fast_beam_search_one_best, + greedy_search, + greedy_search_batch, + modified_beam_search, +) +from torch.nn.utils.rnn import pad_sequence +from train import add_model_arguments, get_params, get_transducer_model + +from icefall.lexicon import Lexicon + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--checkpoint", + type=str, + required=True, + help="Path to the checkpoint. " + "The checkpoint is assumed to be saved by " + "icefall.checkpoint.save_checkpoint().", + ) + + parser.add_argument( + "--lang-dir", + type=str, + help="""Path to lang. + """, + ) + + parser.add_argument( + "--decoding-method", + type=str, + default="greedy_search", + help="""Possible values are: + - greedy_search + - modified_beam_search + - fast_beam_search + """, + ) + + parser.add_argument( + "sound_files", + type=str, + nargs="+", + help="The input sound file(s) to transcribe. " + "Supported formats are those supported by torchaudio.load(). " + "For example, wav and flac are supported. " + "The sample rate has to be 16kHz.", + ) + + parser.add_argument( + "--sample-rate", + type=int, + default=48000, + help="The sample rate of the input sound file", + ) + + parser.add_argument( + "--beam-size", + type=int, + default=4, + help="Used only when --method is beam_search and modified_beam_search ", + ) + + parser.add_argument( + "--beam", + type=float, + default=4, + help="""A floating point value to calculate the cutoff score during beam + search (i.e., `cutoff = max-score - beam`), which is the same as the + `beam` in Kaldi. 
+ Used only when --decoding-method is fast_beam_search""", + ) + + parser.add_argument( + "--max-contexts", + type=int, + default=4, + help="""Used only when --decoding-method is + fast_beam_search""", + ) + + parser.add_argument( + "--max-states", + type=int, + default=8, + help="""Used only when --decoding-method is + fast_beam_search""", + ) + + parser.add_argument( + "--context-size", + type=int, + default=2, + help="The context size in the decoder. 1 means bigram; " + "2 means tri-gram", + ) + + parser.add_argument( + "--max-sym-per-frame", + type=int, + default=1, + help="""Maximum number of symbols per frame. Used only when + --method is greedy_search. + """, + ) + add_model_arguments(parser) + + return parser + + +def read_sound_files( + filenames: List[str], expected_sample_rate: float +) -> List[torch.Tensor]: + """Read a list of sound files into a list 1-D float32 torch tensors. + Args: + filenames: + A list of sound filenames. + expected_sample_rate: + The expected sample rate of the sound files. + Returns: + Return a list of 1-D float32 torch tensors. + """ + ans = [] + for f in filenames: + wave, sample_rate = torchaudio.load(f) + assert sample_rate == expected_sample_rate, ( + f"expected sample rate: {expected_sample_rate}. " + f"Given: {sample_rate}" + ) + # We use only the first channel + ans.append(wave[0]) + return ans + + +@torch.no_grad() +def main(): + parser = get_parser() + args = parser.parse_args() + + params = get_params() + + params.update(vars(args)) + + lexicon = Lexicon(params.lang_dir) + params.blank_id = lexicon.token_table[""] + params.vocab_size = max(lexicon.tokens) + 1 + + logging.info(f"{params}") + + device = torch.device("cpu") + if torch.cuda.is_available(): + device = torch.device("cuda", 0) + + logging.info(f"device: {device}") + + logging.info("Creating model") + model = get_transducer_model(params) + + checkpoint = torch.load(args.checkpoint, map_location="cpu") + model.load_state_dict(checkpoint["model"], strict=False) + model.to(device) + model.eval() + model.device = device + + if params.decoding_method == "fast_beam_search": + decoding_graph = k2.trivial_graph(params.vocab_size - 1, device=device) + else: + decoding_graph = None + + logging.info("Constructing Fbank computer") + opts = kaldifeat.FbankOptions() + opts.device = device + opts.frame_opts.dither = 0 + opts.frame_opts.snip_edges = False + opts.frame_opts.samp_freq = params.sample_rate + opts.mel_opts.num_bins = params.feature_dim + + fbank = kaldifeat.Fbank(opts) + + logging.info(f"Reading sound files: {params.sound_files}") + waves = read_sound_files( + filenames=params.sound_files, expected_sample_rate=params.sample_rate + ) + waves = [w.to(device) for w in waves] + + logging.info("Decoding started") + features = fbank(waves) + feature_lengths = [f.size(0) for f in features] + + features = pad_sequence( + features, batch_first=True, padding_value=math.log(1e-10) + ) + + feature_lengths = torch.tensor(feature_lengths, device=device) + + with torch.no_grad(): + encoder_out, encoder_out_lens = model.encoder( + x=features, x_lens=feature_lengths + ) + + hyps = [] + msg = f"Using {params.decoding_method}" + logging.info(msg) + + if params.decoding_method == "fast_beam_search": + hyp_tokens = fast_beam_search_one_best( + model=model, + decoding_graph=decoding_graph, + encoder_out=encoder_out, + encoder_out_lens=encoder_out_lens, + beam=params.beam, + max_contexts=params.max_contexts, + max_states=params.max_states, + ) + for i in range(encoder_out.size(0)): + 
hyps.append([lexicon.token_table[idx] for idx in hyp_tokens[i]]) + elif ( + params.decoding_method == "greedy_search" + and params.max_sym_per_frame == 1 + ): + hyp_tokens = greedy_search_batch( + model=model, + encoder_out=encoder_out, + encoder_out_lens=encoder_out_lens, + ) + for i in range(encoder_out.size(0)): + hyps.append([lexicon.token_table[idx] for idx in hyp_tokens[i]]) + elif params.decoding_method == "modified_beam_search": + hyp_tokens = modified_beam_search( + model=model, + encoder_out=encoder_out, + encoder_out_lens=encoder_out_lens, + beam=params.beam_size, + ) + for i in range(encoder_out.size(0)): + hyps.append([lexicon.token_table[idx] for idx in hyp_tokens[i]]) + else: + batch_size = encoder_out.size(0) + + for i in range(batch_size): + # fmt: off + encoder_out_i = encoder_out[i:i+1, :encoder_out_lens[i]] + # fmt: on + if params.decoding_method == "greedy_search": + hyp = greedy_search( + model=model, + encoder_out=encoder_out_i, + max_sym_per_frame=params.max_sym_per_frame, + ) + elif params.decoding_method == "beam_search": + hyp = beam_search( + model=model, + encoder_out=encoder_out_i, + beam=params.beam_size, + ) + else: + raise ValueError( + f"Unsupported decoding method: {params.decoding_method}" + ) + hyps.append([lexicon.token_table[idx] for idx in hyp]) + + s = "\n" + for filename, hyp in zip(params.sound_files, hyps): + words = " ".join(hyp) + s += f"{filename}:\n{words}\n\n" + logging.info(s) + + logging.info("Decoding Done") + + +if __name__ == "__main__": + formatter = ( + "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s" + ) + + logging.basicConfig(format=formatter, level=logging.INFO) + main() diff --git a/egs/wenetspeech/ASR/pruned_transducer_stateless5/scaling.py b/egs/wenetspeech/ASR/pruned_transducer_stateless5/scaling.py new file mode 120000 index 000000000..ff7bfeda9 --- /dev/null +++ b/egs/wenetspeech/ASR/pruned_transducer_stateless5/scaling.py @@ -0,0 +1 @@ +../../../librispeech/ASR/pruned_transducer_stateless5/scaling.py \ No newline at end of file diff --git a/egs/wenetspeech/ASR/pruned_transducer_stateless5/streaming_beam_search.py b/egs/wenetspeech/ASR/pruned_transducer_stateless5/streaming_beam_search.py new file mode 100644 index 000000000..651aff6c9 --- /dev/null +++ b/egs/wenetspeech/ASR/pruned_transducer_stateless5/streaming_beam_search.py @@ -0,0 +1,287 @@ +# Copyright 2022 Xiaomi Corp. (authors: Wei Kang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import warnings +from typing import List + +import k2 +import torch +import torch.nn as nn +from beam_search import Hypothesis, HypothesisList, get_hyps_shape +from decode_stream import DecodeStream + +from icefall.decode import one_best_decoding +from icefall.utils import get_texts + + +def greedy_search( + model: nn.Module, + encoder_out: torch.Tensor, + streams: List[DecodeStream], +) -> None: + """Greedy search in batch mode. It hardcodes --max-sym-per-frame=1. 
+ Args: + model: + The transducer model. + encoder_out: + Output from the encoder. Its shape is (N, T, C), where N >= 1. + streams: + A list of Stream objects. + """ + assert len(streams) == encoder_out.size(0) + assert encoder_out.ndim == 3 + + blank_id = model.decoder.blank_id + context_size = model.decoder.context_size + device = model.device + T = encoder_out.size(1) + + decoder_input = torch.tensor( + [stream.hyp[-context_size:] for stream in streams], + device=device, + dtype=torch.int64, + ) + # decoder_out is of shape (N, 1, decoder_out_dim) + decoder_out = model.decoder(decoder_input, need_pad=False) + decoder_out = model.joiner.decoder_proj(decoder_out) + + for t in range(T): + # current_encoder_out's shape: (batch_size, 1, encoder_out_dim) + current_encoder_out = encoder_out[:, t : t + 1, :] # noqa + # print(current_encoder_out.shape) + + logits = model.joiner( + current_encoder_out.unsqueeze(2), + decoder_out.unsqueeze(1), + project_input=False, + ) + # logits'shape (batch_size, vocab_size) + logits = logits.squeeze(1).squeeze(1) + + assert logits.ndim == 2, logits.shape + y = logits.argmax(dim=1).tolist() + emitted = False + for i, v in enumerate(y): + if v != blank_id: + streams[i].hyp.append(v) + emitted = True + if emitted: + # update decoder output + decoder_input = torch.tensor( + [stream.hyp[-context_size:] for stream in streams], + device=device, + dtype=torch.int64, + ) + decoder_out = model.decoder( + decoder_input, + need_pad=False, + ) + decoder_out = model.joiner.decoder_proj(decoder_out) + + +def modified_beam_search( + model: nn.Module, + encoder_out: torch.Tensor, + streams: List[DecodeStream], + num_active_paths: int = 4, +) -> None: + """Beam search in batch mode with --max-sym-per-frame=1 being hardcoded. + Args: + model: + The RNN-T model. + encoder_out: + A 3-D tensor of shape (N, T, encoder_out_dim) containing the output of + the encoder model. + streams: + A list of stream objects. + num_active_paths: + Number of active paths during the beam search. + """ + assert encoder_out.ndim == 3, encoder_out.shape + assert len(streams) == encoder_out.size(0) + + blank_id = model.decoder.blank_id + context_size = model.decoder.context_size + device = next(model.parameters()).device + batch_size = len(streams) + T = encoder_out.size(1) + + B = [stream.hyps for stream in streams] + + for t in range(T): + current_encoder_out = encoder_out[:, t].unsqueeze(1).unsqueeze(1) + # current_encoder_out's shape: (batch_size, 1, 1, encoder_out_dim) + + hyps_shape = get_hyps_shape(B).to(device) + + A = [list(b) for b in B] + B = [HypothesisList() for _ in range(batch_size)] + + ys_log_probs = torch.stack( + [hyp.log_prob.reshape(1) for hyps in A for hyp in hyps], dim=0 + ) # (num_hyps, 1) + + decoder_input = torch.tensor( + [hyp.ys[-context_size:] for hyps in A for hyp in hyps], + device=device, + dtype=torch.int64, + ) # (num_hyps, context_size) + + decoder_out = model.decoder(decoder_input, need_pad=False) + decoder_out = model.joiner.decoder_proj(decoder_out) + # decoder_out is of shape (num_hyps, 1, 1, decoder_output_dim) + + # Note: For torch 1.7.1 and below, it requires a torch.int64 tensor + # as index, so we use `to(torch.int64)` below. 
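+        # For example, if hyps_shape.row_ids(1) is [0, 0, 1, 1, 1], stream 0
+        # currently has two hypotheses and stream 1 has three, so index_select
+        # repeats each stream's encoder frame once per active hypothesis,
+        # producing num_hyps rows.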
+ current_encoder_out = torch.index_select( + current_encoder_out, + dim=0, + index=hyps_shape.row_ids(1).to(torch.int64), + ) # (num_hyps, encoder_out_dim) + + logits = model.joiner( + current_encoder_out, + decoder_out.unsqueeze(1), + project_input=False, + ) + # logits is of shape (num_hyps, 1, 1, vocab_size) + + logits = logits.squeeze(1).squeeze(1) + + log_probs = logits.log_softmax(dim=-1) # (num_hyps, vocab_size) + + log_probs.add_(ys_log_probs) + + vocab_size = log_probs.size(-1) + + log_probs = log_probs.reshape(-1) + + row_splits = hyps_shape.row_splits(1) * vocab_size + log_probs_shape = k2.ragged.create_ragged_shape2( + row_splits=row_splits, cached_tot_size=log_probs.numel() + ) + ragged_log_probs = k2.RaggedTensor( + shape=log_probs_shape, value=log_probs + ) + + for i in range(batch_size): + topk_log_probs, topk_indexes = ragged_log_probs[i].topk( + num_active_paths + ) + + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + topk_hyp_indexes = (topk_indexes // vocab_size).tolist() + topk_token_indexes = (topk_indexes % vocab_size).tolist() + + for k in range(len(topk_hyp_indexes)): + hyp_idx = topk_hyp_indexes[k] + hyp = A[i][hyp_idx] + + new_ys = hyp.ys[:] + new_token = topk_token_indexes[k] + if new_token != blank_id: + new_ys.append(new_token) + + new_log_prob = topk_log_probs[k] + new_hyp = Hypothesis(ys=new_ys, log_prob=new_log_prob) + B[i].add(new_hyp) + + for i in range(batch_size): + streams[i].hyps = B[i] + + +def fast_beam_search_one_best( + model: nn.Module, + encoder_out: torch.Tensor, + processed_lens: torch.Tensor, + streams: List[DecodeStream], + beam: float, + max_states: int, + max_contexts: int, +) -> None: + """It limits the maximum number of symbols per frame to 1. + A lattice is first generated by Fsa-based beam search, then we get the + recognition by applying shortest path on the lattice. + Args: + model: + An instance of `Transducer`. + encoder_out: + A tensor of shape (N, T, C) from the encoder. + processed_lens: + A tensor of shape (N,) containing the number of processed frames + in `encoder_out` before padding. + streams: + A list of stream objects. + beam: + Beam value, similar to the beam used in Kaldi.. + max_states: + Max states per stream per frame. + max_contexts: + Max contexts pre stream per frame. 
+ """ + assert encoder_out.ndim == 3 + B, T, C = encoder_out.shape + assert B == len(streams) + + context_size = model.decoder.context_size + vocab_size = model.decoder.vocab_size + + config = k2.RnntDecodingConfig( + vocab_size=vocab_size, + decoder_history_len=context_size, + beam=beam, + max_contexts=max_contexts, + max_states=max_states, + ) + individual_streams = [] + for i in range(B): + individual_streams.append(streams[i].rnnt_decoding_stream) + decoding_streams = k2.RnntDecodingStreams(individual_streams, config) + + for t in range(T): + # shape is a RaggedShape of shape (B, context) + # contexts is a Tensor of shape (shape.NumElements(), context_size) + shape, contexts = decoding_streams.get_contexts() + # `nn.Embedding()` in torch below v1.7.1 supports only torch.int64 + contexts = contexts.to(torch.int64) + # decoder_out is of shape (shape.NumElements(), 1, decoder_out_dim) + decoder_out = model.decoder(contexts, need_pad=False) + decoder_out = model.joiner.decoder_proj(decoder_out) + # current_encoder_out is of shape + # (shape.NumElements(), 1, joiner_dim) + # fmt: off + current_encoder_out = torch.index_select( + encoder_out[:, t:t + 1, :], 0, shape.row_ids(1).to(torch.int64) + ) + # fmt: on + logits = model.joiner( + current_encoder_out.unsqueeze(2), + decoder_out.unsqueeze(1), + project_input=False, + ) + logits = logits.squeeze(1).squeeze(1) + log_probs = logits.log_softmax(dim=-1) + decoding_streams.advance(log_probs) + + decoding_streams.terminate_and_flush_to_streams() + + lattice = decoding_streams.format_output(processed_lens.tolist()) + best_path = one_best_decoding(lattice) + hyp_tokens = get_texts(best_path) + + for i in range(B): + streams[i].hyp = hyp_tokens[i] diff --git a/egs/wenetspeech/ASR/pruned_transducer_stateless5/streaming_decode.py b/egs/wenetspeech/ASR/pruned_transducer_stateless5/streaming_decode.py new file mode 100644 index 000000000..2a383ca46 --- /dev/null +++ b/egs/wenetspeech/ASR/pruned_transducer_stateless5/streaming_decode.py @@ -0,0 +1,678 @@ +#!/usr/bin/env python3 +# Copyright 2022 Xiaomi Corporation (Authors: Wei Kang, Fangjun Kuang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +""" +Usage: +(1) greedy search +python pruned_transducer_stateless5/streaming_decode.py \ + --epoch 7 \ + --avg 1 \ + --decode-chunk-size 16 \ + --left-context 64 \ + --right-context 0 \ + --exp-dir ./pruned_transducer_stateless5/exp_L_streaming \ + --decoding-method greedy_search \ + --num-decode-streams 2000 + +(2) modified beam search +python pruned_transducer_stateless5/streaming_decode.py \ + --epoch 7 \ + --avg 1 \ + --decode-chunk-size 16 \ + --left-context 64 \ + --right-context 0 \ + --exp-dir ./pruned_transducer_stateless5/exp_L_streaming \ + --decoding-method modified_beam_search \ + --num-decode-streams 2000 + +(3) fast beam search +python pruned_transducer_stateless5/streaming_decode.py \ + --epoch 7 \ + --avg 1 \ + --decode-chunk-size 16 \ + --left-context 64 \ + --right-context 0 \ + --exp-dir ./pruned_transducer_stateless5/exp_L_streaming \ + --decoding-method fast_beam_search \ + --num-decode-streams 2000 +""" + +import argparse +import logging +import math +from pathlib import Path +from typing import Dict, List, Optional, Tuple + +import k2 +import numpy as np +import torch +import torch.nn as nn +from asr_datamodule import WenetSpeechAsrDataModule +from decode_stream import DecodeStream +from kaldifeat import Fbank, FbankOptions +from lhotse import CutSet +from streaming_beam_search import ( + fast_beam_search_one_best, + greedy_search, + modified_beam_search, +) +from torch.nn.utils.rnn import pad_sequence +from train import add_model_arguments, get_params, get_transducer_model + +from icefall.checkpoint import ( + average_checkpoints, + average_checkpoints_with_averaged_model, + find_checkpoints, + load_checkpoint, +) +from icefall.lexicon import Lexicon +from icefall.utils import ( + AttributeDict, + setup_logger, + store_transcripts, + str2bool, + write_error_stats, +) + +LOG_EPS = math.log(1e-10) + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--epoch", + type=int, + default=28, + help="""It specifies the checkpoint to use for decoding. + Note: Epoch counts from 0. + You can specify --avg to use more checkpoints for model averaging.""", + ) + + parser.add_argument( + "--iter", + type=int, + default=0, + help="""If positive, --epoch is ignored and it + will use the checkpoint exp_dir/checkpoint-iter.pt. + You can specify --avg to use more checkpoints for model averaging. + """, + ) + + parser.add_argument( + "--avg", + type=int, + default=15, + help="Number of checkpoints to average. Automatically select " + "consecutive checkpoints before the checkpoint specified by " + "'--epoch' and '--iter'", + ) + + parser.add_argument( + "--use-averaged-model", + type=str2bool, + default=True, + help="Whether to load averaged model. Currently it only supports " + "using --epoch. If True, it would decode with the averaged model " + "over the epoch range from `epoch-avg` (excluded) to `epoch`." + "Actually only the models with epoch number of `epoch-avg` and " + "`epoch` are loaded for averaging. 
", + ) + + parser.add_argument( + "--exp-dir", + type=str, + default="pruned_transducer_stateless5/exp", + help="The experiment dir", + ) + + parser.add_argument( + "--lang-dir", + type=str, + default="data/lang_char", + help="""The lang dir + It contains language related input files such as + "lexicon.txt" + """, + ) + + parser.add_argument( + "--decoding-method", + type=str, + default="greedy_search", + help="""Supported decoding methods are: + greedy_search + modified_beam_search + fast_beam_search + """, + ) + + parser.add_argument( + "--num-active-paths", + type=int, + default=4, + help="""An interger indicating how many candidates we will keep for each + frame. Used only when --decoding-method is modified_beam_search.""", + ) + + parser.add_argument( + "--beam", + type=float, + default=4, + help="""A floating point value to calculate the cutoff score during beam + search (i.e., `cutoff = max-score - beam`), which is the same as the + `beam` in Kaldi. + Used only when --decoding-method is fast_beam_search""", + ) + + parser.add_argument( + "--max-contexts", + type=int, + default=4, + help="""Used only when --decoding-method is + fast_beam_search""", + ) + + parser.add_argument( + "--max-states", + type=int, + default=32, + help="""Used only when --decoding-method is + fast_beam_search""", + ) + + parser.add_argument( + "--context-size", + type=int, + default=2, + help="The context size in the decoder. 1 means bigram; " + "2 means tri-gram", + ) + + parser.add_argument( + "--decode-chunk-size", + type=int, + default=16, + help="The chunk size for decoding (in frames after subsampling)", + ) + + parser.add_argument( + "--left-context", + type=int, + default=64, + help="left context can be seen during decoding (in frames after subsampling)", + ) + + parser.add_argument( + "--right-context", + type=int, + default=0, + help="right context can be seen during decoding (in frames after subsampling)", + ) + + parser.add_argument( + "--num-decode-streams", + type=int, + default=2000, + help="The number of streams that can be decoded parallel.", + ) + + add_model_arguments(parser) + + return parser + + +def decode_one_chunk( + params: AttributeDict, + model: nn.Module, + decode_streams: List[DecodeStream], +) -> List[int]: + """Decode one chunk frames of features for each decode_streams and + return the indexes of finished streams in a List. + + Args: + params: + It's the return value of :func:`get_params`. + model: + The neural model. + decode_streams: + A List of DecodeStream, each belonging to a utterance. + Returns: + Return a List containing which DecodeStreams are finished. + """ + device = model.device + + features = [] + feature_lens = [] + states = [] + + processed_lens = [] + + for stream in decode_streams: + feat, feat_len = stream.get_feature_frames( + params.decode_chunk_size * params.subsampling_factor + ) + features.append(feat) + feature_lens.append(feat_len) + states.append(stream.states) + processed_lens.append(stream.done_frames) + + feature_lens = torch.tensor(feature_lens, device=device) + features = pad_sequence(features, batch_first=True, padding_value=LOG_EPS) + + # if T is less than 7 there will be an error in time reduction layer, + # because we subsample features with ((x_len - 1) // 2 - 1) // 2 + # we plus 2 here because we will cut off one frame on each size of + # encoder_embed output as they see invalid paddings. so we need extra 2 + # frames. 
+ tail_length = 7 + (2 + params.right_context) * params.subsampling_factor + if features.size(1) < tail_length: + pad_length = tail_length - features.size(1) + feature_lens += pad_length + features = torch.nn.functional.pad( + features, + (0, 0, 0, pad_length), + mode="constant", + value=LOG_EPS, + ) + + states = [ + torch.stack([x[0] for x in states], dim=2), + torch.stack([x[1] for x in states], dim=2), + ] + + processed_lens = torch.tensor(processed_lens, device=device) + + encoder_out, encoder_out_lens, states = model.encoder.streaming_forward( + x=features, + x_lens=feature_lens, + states=states, + left_context=params.left_context, + right_context=params.right_context, + processed_lens=processed_lens, + ) + + encoder_out = model.joiner.encoder_proj(encoder_out) + + if params.decoding_method == "greedy_search": + greedy_search( + model=model, encoder_out=encoder_out, streams=decode_streams + ) + elif params.decoding_method == "fast_beam_search": + processed_lens = processed_lens + encoder_out_lens + fast_beam_search_one_best( + model=model, + encoder_out=encoder_out, + processed_lens=processed_lens, + streams=decode_streams, + beam=params.beam, + max_contexts=params.max_contexts, + max_states=params.max_states, + ) + elif params.decoding_method == "modified_beam_search": + modified_beam_search( + model=model, + streams=decode_streams, + encoder_out=encoder_out, + num_active_paths=params.num_active_paths, + ) + else: + raise ValueError( + f"Unsupported decoding method: {params.decoding_method}" + ) + + states = [torch.unbind(states[0], dim=2), torch.unbind(states[1], dim=2)] + + finished_streams = [] + for i in range(len(decode_streams)): + decode_streams[i].states = [states[0][i], states[1][i]] + decode_streams[i].done_frames += encoder_out_lens[i] + if decode_streams[i].done: + finished_streams.append(i) + + return finished_streams + + +def decode_dataset( + cuts: CutSet, + params: AttributeDict, + model: nn.Module, + lexicon: Lexicon, + decoding_graph: Optional[k2.Fsa] = None, +) -> Dict[str, List[Tuple[List[str], List[str]]]]: + """Decode dataset. + + Args: + cuts: + Lhotse Cutset containing the dataset to decode. + params: + It is returned by :func:`get_params`. + model: + The neural model. + decoding_graph: + The decoding graph. Can be either a `k2.trivial_graph` or HLG, Used + only when --decoding_method is fast_beam_search. + Returns: + Return a dict, whose key may be "greedy_search" if greedy search + is used, or it may be "beam_7" if beam size of 7 is used. + Its value is a list of tuples. Each tuple contains two elements: + The first is the reference transcript, and the second is the + predicted result. + """ + device = model.device + + opts = FbankOptions() + opts.device = device + opts.frame_opts.dither = 0 + opts.frame_opts.snip_edges = False + opts.frame_opts.samp_freq = 16000 + opts.mel_opts.num_bins = 80 + + log_interval = 100 + + decode_results = [] + # Contain decode streams currently running. + decode_streams = [] + initial_states = model.encoder.get_init_state( + params.left_context, device=device + ) + for num, cut in enumerate(cuts): + # each utterance has a DecodeStream. 
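+        # (each stream carries its own features, encoder states and partial
+        # hypothesis; decode_one_chunk() advances all active streams in
+        # parallel, one chunk at a time)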
+ decode_stream = DecodeStream( + params=params, + initial_states=initial_states, + decoding_graph=decoding_graph, + device=device, + ) + + audio: np.ndarray = cut.load_audio() + # audio.shape: (1, num_samples) + assert len(audio.shape) == 2 + assert audio.shape[0] == 1, "Should be single channel" + assert audio.dtype == np.float32, audio.dtype + + samples = torch.from_numpy(audio).squeeze(0) + + fbank = Fbank(opts) + decode_stream.set_features(fbank(samples.to(device))) + decode_stream.ground_truth = cut.supervisions[0].text + + decode_streams.append(decode_stream) + + while len(decode_streams) >= params.num_decode_streams: + finished_streams = decode_one_chunk( + params=params, model=model, decode_streams=decode_streams + ) + for i in sorted(finished_streams, reverse=True): + hyp = decode_streams[i].decoding_result() + decode_results.append( + ( + list(decode_streams[i].ground_truth), + [lexicon.token_table[idx] for idx in hyp], + ) + ) + del decode_streams[i] + + if num % log_interval == 0: + logging.info(f"Cuts processed until now is {num}.") + + # decode final chunks of last sequences + while len(decode_streams): + finished_streams = decode_one_chunk( + params=params, model=model, decode_streams=decode_streams + ) + for i in sorted(finished_streams, reverse=True): + hyp = decode_streams[i].decoding_result() + decode_results.append( + ( + list(decode_streams[i].ground_truth), + [lexicon.token_table[idx] for idx in hyp], + ) + ) + del decode_streams[i] + + if params.decoding_method == "greedy_search": + key = "greedy_search" + elif params.decoding_method == "fast_beam_search": + key = ( + f"beam_{params.beam}_" + f"max_contexts_{params.max_contexts}_" + f"max_states_{params.max_states}" + ) + elif params.decoding_method == "modified_beam_search": + key = f"num_active_paths_{params.num_active_paths}" + else: + raise ValueError( + f"Unsupported decoding method: {params.decoding_method}" + ) + + return {key: decode_results} + + +def save_results( + params: AttributeDict, + test_set_name: str, + results_dict: Dict[str, List[Tuple[List[str], List[str]]]], +): + test_set_wers = dict() + for key, results in results_dict.items(): + recog_path = ( + params.res_dir / f"recogs-{test_set_name}-{key}-{params.suffix}.txt" + ) + # sort results so we can easily compare the difference between two + # recognition results + results = sorted(results) + store_transcripts(filename=recog_path, texts=results) + logging.info(f"The transcripts are stored in {recog_path}") + + # The following prints out WERs, per-word error statistics and aligned + # ref/hyp pairs. 
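+        # write_error_stats() returns the WER for this decoding setting and
+        # writes the per-word statistics and aligned ref/hyp pairs into the
+        # errs-* file opened below.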
+ errs_filename = ( + params.res_dir / f"errs-{test_set_name}-{key}-{params.suffix}.txt" + ) + with open(errs_filename, "w") as f: + wer = write_error_stats( + f, f"{test_set_name}-{key}", results, enable_log=True + ) + test_set_wers[key] = wer + + logging.info("Wrote detailed error stats to {}".format(errs_filename)) + + test_set_wers = sorted(test_set_wers.items(), key=lambda x: x[1]) + errs_info = ( + params.res_dir + / f"wer-summary-{test_set_name}-{key}-{params.suffix}.txt" + ) + with open(errs_info, "w") as f: + print("settings\tWER", file=f) + for key, val in test_set_wers: + print("{}\t{}".format(key, val), file=f) + + s = "\nFor {}, WER of different settings are:\n".format(test_set_name) + note = "\tbest for {}".format(test_set_name) + for key, val in test_set_wers: + s += "{}\t{}{}\n".format(key, val, note) + note = "" + logging.info(s) + + +@torch.no_grad() +def main(): + parser = get_parser() + WenetSpeechAsrDataModule.add_arguments(parser) + args = parser.parse_args() + args.exp_dir = Path(args.exp_dir) + + params = get_params() + params.update(vars(args)) + + params.res_dir = params.exp_dir / "streaming" / params.decoding_method + + if params.iter > 0: + params.suffix = f"iter-{params.iter}-avg-{params.avg}" + else: + params.suffix = f"epoch-{params.epoch}-avg-{params.avg}" + + # for streaming + params.suffix += f"-streaming-chunk-size-{params.decode_chunk_size}" + params.suffix += f"-left-context-{params.left_context}" + params.suffix += f"-right-context-{params.right_context}" + + # for fast_beam_search + if params.decoding_method == "fast_beam_search": + params.suffix += f"-beam-{params.beam}" + params.suffix += f"-max-contexts-{params.max_contexts}" + params.suffix += f"-max-states-{params.max_states}" + + setup_logger(f"{params.res_dir}/log-decode-{params.suffix}") + logging.info("Decoding started") + + device = torch.device("cpu") + if torch.cuda.is_available(): + device = torch.device("cuda", 0) + + logging.info(f"Device: {device}") + + lexicon = Lexicon(params.lang_dir) + params.blank_id = lexicon.token_table[""] + params.vocab_size = max(lexicon.tokens) + 1 + + params.causal_convolution = True + + logging.info(params) + + logging.info("About to create model") + model = get_transducer_model(params) + + if not params.use_averaged_model: + if params.iter > 0: + filenames = find_checkpoints( + params.exp_dir, iteration=-params.iter + )[: params.avg] + if len(filenames) == 0: + raise ValueError( + f"No checkpoints found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + elif len(filenames) < params.avg: + raise ValueError( + f"Not enough checkpoints ({len(filenames)}) found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + logging.info(f"averaging {filenames}") + model.to(device) + model.load_state_dict(average_checkpoints(filenames, device=device)) + elif params.avg == 1: + load_checkpoint(f"{params.exp_dir}/epoch-{params.epoch}.pt", model) + else: + start = params.epoch - params.avg + 1 + filenames = [] + for i in range(start, params.epoch + 1): + if i >= 1: + filenames.append(f"{params.exp_dir}/epoch-{i}.pt") + logging.info(f"averaging {filenames}") + model.to(device) + model.load_state_dict(average_checkpoints(filenames, device=device)) + else: + if params.iter > 0: + filenames = find_checkpoints( + params.exp_dir, iteration=-params.iter + )[: params.avg + 1] + if len(filenames) == 0: + raise ValueError( + f"No checkpoints found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + elif len(filenames) < params.avg + 1: + raise ValueError( + f"Not 
enough checkpoints ({len(filenames)}) found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + filename_start = filenames[-1] + filename_end = filenames[0] + logging.info( + "Calculating the averaged model over iteration checkpoints" + f" from {filename_start} (excluded) to {filename_end}" + ) + model.to(device) + model.load_state_dict( + average_checkpoints_with_averaged_model( + filename_start=filename_start, + filename_end=filename_end, + device=device, + ) + ) + else: + assert params.avg > 0, params.avg + start = params.epoch - params.avg + assert start >= 1, start + filename_start = f"{params.exp_dir}/epoch-{start}.pt" + filename_end = f"{params.exp_dir}/epoch-{params.epoch}.pt" + logging.info( + f"Calculating the averaged model over epoch range from " + f"{start} (excluded) to {params.epoch}" + ) + model.to(device) + model.load_state_dict( + average_checkpoints_with_averaged_model( + filename_start=filename_start, + filename_end=filename_end, + device=device, + ) + ) + + model.to(device) + model.eval() + model.device = device + + decoding_graph = None + if params.decoding_method == "fast_beam_search": + decoding_graph = k2.trivial_graph(params.vocab_size - 1, device=device) + + num_param = sum([p.numel() for p in model.parameters()]) + logging.info(f"Number of model parameters: {num_param}") + + wenetspeech = WenetSpeechAsrDataModule(args) + + dev_cuts = wenetspeech.valid_cuts() + test_net_cuts = wenetspeech.test_net_cuts() + test_meeting_cuts = wenetspeech.test_meeting_cuts() + + test_sets = ["DEV", "TEST_NET", "TEST_MEETING"] + test_cuts = [dev_cuts, test_net_cuts, test_meeting_cuts] + + for test_set, test_cut in zip(test_sets, test_cuts): + results_dict = decode_dataset( + cuts=test_cut, + params=params, + model=model, + lexicon=lexicon, + decoding_graph=decoding_graph, + ) + save_results( + params=params, + test_set_name=test_set, + results_dict=results_dict, + ) + + logging.info("Done!") + + +if __name__ == "__main__": + main() diff --git a/egs/wenetspeech/ASR/pruned_transducer_stateless5/train.py b/egs/wenetspeech/ASR/pruned_transducer_stateless5/train.py new file mode 100755 index 000000000..7d09acc39 --- /dev/null +++ b/egs/wenetspeech/ASR/pruned_transducer_stateless5/train.py @@ -0,0 +1,1217 @@ +#!/usr/bin/env python3 +# Copyright 2021-2022 Xiaomi Corp. (authors: Fangjun Kuang, +# Wei Kang, +# Mingshuang Luo, +# Zengwei Yao) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
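+#
+# Note: for a streaming model, --dynamic-chunk-training True trains the
+# encoder on randomly sized chunks (see --short-chunk-size and
+# --num-left-chunks below), and --causal-convolution True is required so the
+# convolution module does not look at future frames.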
+""" +Usage for offline ASR: + +export CUDA_VISIBLE_DEVICES="0,1,2,3,4,5,6,7" + +./pruned_transducer_stateless5/train.py \ + --lang-dir data/lang_char \ + --exp-dir pruned_transducer_stateless5/exp_L_offline \ + --world-size 8 \ + --num-epochs 15 \ + --start-epoch 2 \ + --max-duration 120 \ + --valid-interval 3000 \ + --model-warm-step 3000 \ + --save-every-n 8000 \ + --average-period 1000 \ + --training-subset L + +Usage for streaming ASR: + +export CUDA_VISIBLE_DEVICES="0,1,2,3,4,5,6,7" + +./pruned_transducer_stateless5/train.py \ + --lang-dir data/lang_char \ + --exp-dir pruned_transducer_stateless5/exp_L_streaming \ + --world-size 8 \ + --num-epochs 15 \ + --start-epoch 1 \ + --max-duration 140 \ + --valid-interval 3000 \ + --model-warm-step 3000 \ + --save-every-n 8000 \ + --average-period 1000 \ + --training-subset L \ + --dynamic-chunk-training True \ + --causal-convolution True \ + --short-chunk-size 25 \ + --num-left-chunks 4 +""" + + +import argparse +import copy +import logging +import os +import warnings +from pathlib import Path +from shutil import copyfile +from typing import Any, Dict, Optional, Tuple, Union + +import k2 +import optim +import torch +import torch.multiprocessing as mp +import torch.nn as nn +from asr_datamodule import WenetSpeechAsrDataModule +from conformer import Conformer +from decoder import Decoder +from joiner import Joiner +from lhotse.cut import Cut +from lhotse.dataset.sampling.base import CutSampler +from lhotse.utils import fix_random_seed +from model import Transducer +from optim import Eden, Eve +from torch import Tensor +from torch.cuda.amp import GradScaler +from torch.nn.parallel import DistributedDataParallel as DDP +from torch.utils.tensorboard import SummaryWriter + +from icefall import diagnostics +from icefall.char_graph_compiler import CharCtcTrainingGraphCompiler +from icefall.checkpoint import load_checkpoint, remove_checkpoints +from icefall.checkpoint import save_checkpoint as save_checkpoint_impl +from icefall.checkpoint import ( + save_checkpoint_with_global_batch_idx, + update_averaged_model, +) +from icefall.dist import cleanup_dist, setup_dist +from icefall.env import get_env_info +from icefall.lexicon import Lexicon +from icefall.utils import AttributeDict, MetricsTracker, setup_logger, str2bool + +LRSchedulerType = Union[ + torch.optim.lr_scheduler._LRScheduler, optim.LRScheduler +] + +os.environ["CUDA_LAUNCH_BLOCKING"] = "1" + + +def add_model_arguments(parser: argparse.ArgumentParser): + parser.add_argument( + "--num-encoder-layers", + type=int, + default=24, + help="Number of conformer encoder layers..", + ) + + parser.add_argument( + "--dim-feedforward", + type=int, + default=1536, + help="Feedforward dimension of the conformer encoder layer.", + ) + + parser.add_argument( + "--nhead", + type=int, + default=8, + help="Number of attention heads in the conformer encoder layer.", + ) + + parser.add_argument( + "--encoder-dim", + type=int, + default=384, + help="Attention dimension in the conformer encoder layer.", + ) + + parser.add_argument( + "--decoder-dim", + type=int, + default=512, + help="Embedding dimension in the decoder model.", + ) + + parser.add_argument( + "--joiner-dim", + type=int, + default=512, + help="""Dimension used in the joiner model. + Outputs from the encoder and decoder model are projected + to this dimension before adding. 
+ """, + ) + + parser.add_argument( + "--dynamic-chunk-training", + type=str2bool, + default=False, + help="""Whether to use dynamic_chunk_training, if you want a streaming + model, this requires to be True. + """, + ) + + parser.add_argument( + "--causal-convolution", + type=str2bool, + default=False, + help="""Whether to use causal convolution, this requires to be True when + using dynamic_chunk_training. + """, + ) + + parser.add_argument( + "--short-chunk-size", + type=int, + default=25, + help="""Chunk length of dynamic training, the chunk size would be either + max sequence length of current batch or uniformly sampled from (1, short_chunk_size). + """, + ) + + parser.add_argument( + "--num-left-chunks", + type=int, + default=4, + help="How many left context can be seen in chunks when calculating attention.", + ) + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--world-size", + type=int, + default=1, + help="Number of GPUs for DDP training.", + ) + + parser.add_argument( + "--master-port", + type=int, + default=12354, + help="Master port to use for DDP training.", + ) + + parser.add_argument( + "--tensorboard", + type=str2bool, + default=True, + help="Should various information be logged in tensorboard.", + ) + + parser.add_argument( + "--num-epochs", + type=int, + default=30, + help="Number of epochs to train.", + ) + + parser.add_argument( + "--start-epoch", + type=int, + default=1, + help="""Resume training from this epoch. It should be positive. + If larger than 1, it will load checkpoint from + exp-dir/epoch-{start_epoch-1}.pt + """, + ) + + parser.add_argument( + "--start-batch", + type=int, + default=0, + help="""If positive, --start-epoch is ignored and + it loads the checkpoint from exp-dir/checkpoint-{start_batch}.pt + """, + ) + + parser.add_argument( + "--exp-dir", + type=str, + default="pruned_transducer_stateless5/exp", + help="""The experiment dir. + It specifies the directory where all training related + files, e.g., checkpoints, log, etc, are saved + """, + ) + + parser.add_argument( + "--lang-dir", + type=str, + default="data/lang_char", + help="""The lang dir + It contains language related input files such as + "lexicon.txt" + """, + ) + + parser.add_argument( + "--initial-lr", + type=float, + default=0.003, + help="The initial learning rate. This value should not need " + "to be changed.", + ) + + parser.add_argument( + "--lr-batches", + type=float, + default=5000, + help="""Number of steps that affects how rapidly the learning rate + decreases. We suggest not to change this.""", + ) + + parser.add_argument( + "--lr-epochs", + type=float, + default=6, + help="""Number of epochs that affects how rapidly the learning rate decreases. + """, + ) + + parser.add_argument( + "--context-size", + type=int, + default=2, + help="The context size in the decoder. 
1 means bigram; " + "2 means tri-gram", + ) + + parser.add_argument( + "--prune-range", + type=int, + default=5, + help="The prune range for rnnt loss, it means how many symbols(context)" + "we are using to compute the loss", + ) + + parser.add_argument( + "--lm-scale", + type=float, + default=0.25, + help="The scale to smooth the loss with lm " + "(output of prediction network) part.", + ) + + parser.add_argument( + "--am-scale", + type=float, + default=0.0, + help="The scale to smooth the loss with am (output of encoder network)" + "part.", + ) + + parser.add_argument( + "--simple-loss-scale", + type=float, + default=0.5, + help="To get pruning ranges, we will calculate a simple version" + "loss(joiner is just addition), this simple loss also uses for" + "training (as a regularization item). We will scale the simple loss" + "with this parameter before adding to the final loss.", + ) + + parser.add_argument( + "--seed", + type=int, + default=42, + help="The seed for random generators intended for reproducibility", + ) + + parser.add_argument( + "--print-diagnostics", + type=str2bool, + default=False, + help="Accumulate stats on activations, print them and exit.", + ) + + parser.add_argument( + "--save-every-n", + type=int, + default=8000, + help="""Save checkpoint after processing this number of batches" + periodically. We save checkpoint to exp-dir/ whenever + params.batch_idx_train % save_every_n == 0. The checkpoint filename + has the form: f'exp-dir/checkpoint-{params.batch_idx_train}.pt' + Note: It also saves checkpoint to `exp-dir/epoch-xxx.pt` at the + end of each epoch where `xxx` is the epoch number counting from 0. + """, + ) + + parser.add_argument( + "--keep-last-k", + type=int, + default=30, + help="""Only keep this number of checkpoints on disk. + For instance, if it is 3, there are only 3 checkpoints + in the exp-dir with filenames `checkpoint-xxx.pt`. + It does not affect checkpoints with name `epoch-xxx.pt`. + """, + ) + + parser.add_argument( + "--average-period", + type=int, + default=200, + help="""Update the averaged model, namely `model_avg`, after processing + this number of batches. `model_avg` is a separate version of model, + in which each floating-point parameter is the average of all the + parameters from the start of training. Each time we take the average, + we do: `model_avg = model * (average_period / batch_idx_train) + + model_avg * ((batch_idx_train - average_period) / batch_idx_train)`. + """, + ) + + parser.add_argument( + "--use-fp16", + type=str2bool, + default=False, + help="Whether to use half precision training.", + ) + + parser.add_argument( + "--valid-interval", + type=int, + default=3000, + help="""When training_subset is L, set the valid_interval to 3000. + When training_subset is M, set the valid_interval to 1000. + When training_subset is S, set the valid_interval to 400. + """, + ) + + parser.add_argument( + "--model-warm-step", + type=int, + default=3000, + help="""When training_subset is L, set the model_warm_step to 3000. + When training_subset is M, set the model_warm_step to 500. + When training_subset is S, set the model_warm_step to 100. + """, + ) + + add_model_arguments(parser) + + return parser + + +def get_params() -> AttributeDict: + """Return a dict containing training parameters. + + All training related parameters that are not passed from the commandline + are saved in the variable `params`. + + Commandline options are merged into `params` after they are parsed, so + you can also access them via `params`. 
+ + Explanation of options saved in `params`: + + - best_train_loss: Best training loss so far. It is used to select + the model that has the lowest training loss. It is + updated during the training. + + - best_valid_loss: Best validation loss so far. It is used to select + the model that has the lowest validation loss. It is + updated during the training. + + - best_train_epoch: It is the epoch that has the best training loss. + + - best_valid_epoch: It is the epoch that has the best validation loss. + + - batch_idx_train: Used to writing statistics to tensorboard. It + contains number of batches trained so far across + epochs. + + - log_interval: Print training loss if batch_idx % log_interval` is 0 + + - reset_interval: Reset statistics if batch_idx % reset_interval is 0 + + - feature_dim: The model input dim. It has to match the one used + in computing features. + + - subsampling_factor: The subsampling factor for the model. + + - encoder_dim: Hidden dim for multi-head attention model. + + - num_decoder_layers: Number of decoder layer of transformer decoder. + + - warm_step: The warm_step for Noam optimizer. + """ + params = AttributeDict( + { + "best_train_loss": float("inf"), + "best_valid_loss": float("inf"), + "best_train_epoch": -1, + "best_valid_epoch": -1, + "batch_idx_train": 0, + "log_interval": 50, + "reset_interval": 200, + # parameters for conformer + "feature_dim": 80, + "subsampling_factor": 4, + "env_info": get_env_info(), + } + ) + + return params + + +def get_encoder_model(params: AttributeDict) -> nn.Module: + # TODO: We can add an option to switch between Conformer and Transformer + encoder = Conformer( + num_features=params.feature_dim, + subsampling_factor=params.subsampling_factor, + d_model=params.encoder_dim, + nhead=params.nhead, + dim_feedforward=params.dim_feedforward, + num_encoder_layers=params.num_encoder_layers, + dynamic_chunk_training=params.dynamic_chunk_training, + short_chunk_size=params.short_chunk_size, + num_left_chunks=params.num_left_chunks, + causal=params.causal_convolution, + ) + return encoder + + +def get_decoder_model(params: AttributeDict) -> nn.Module: + decoder = Decoder( + vocab_size=params.vocab_size, + decoder_dim=params.decoder_dim, + blank_id=params.blank_id, + context_size=params.context_size, + ) + return decoder + + +def get_joiner_model(params: AttributeDict) -> nn.Module: + joiner = Joiner( + encoder_dim=params.encoder_dim, + decoder_dim=params.decoder_dim, + joiner_dim=params.joiner_dim, + vocab_size=params.vocab_size, + ) + return joiner + + +def get_transducer_model(params: AttributeDict) -> nn.Module: + encoder = get_encoder_model(params) + decoder = get_decoder_model(params) + joiner = get_joiner_model(params) + + model = Transducer( + encoder=encoder, + decoder=decoder, + joiner=joiner, + encoder_dim=params.encoder_dim, + decoder_dim=params.decoder_dim, + joiner_dim=params.joiner_dim, + vocab_size=params.vocab_size, + ) + return model + + +def load_checkpoint_if_available( + params: AttributeDict, + model: nn.Module, + model_avg: nn.Module = None, + optimizer: Optional[torch.optim.Optimizer] = None, + scheduler: Optional[LRSchedulerType] = None, +) -> Optional[Dict[str, Any]]: + """Load checkpoint from file. + + If params.start_batch is positive, it will load the checkpoint from + `params.exp_dir/checkpoint-{params.start_batch}.pt`. Otherwise, if + params.start_epoch is larger than 1, it will load the checkpoint from + `params.start_epoch - 1`. 
+ + Apart from loading state dict for `model` and `optimizer` it also updates + `best_train_epoch`, `best_train_loss`, `best_valid_epoch`, + and `best_valid_loss` in `params`. + + Args: + params: + The return value of :func:`get_params`. + model: + The training model. + model_avg: + The stored model averaged from the start of training. + optimizer: + The optimizer that we are using. + scheduler: + The scheduler that we are using. + Returns: + Return a dict containing previously saved training info. + """ + if params.start_batch > 0: + filename = params.exp_dir / f"checkpoint-{params.start_batch}.pt" + elif params.start_epoch > 1: + filename = params.exp_dir / f"epoch-{params.start_epoch-1}.pt" + else: + return None + + assert filename.is_file(), f"{filename} does not exist!" + + saved_params = load_checkpoint( + filename, + model=model, + model_avg=model_avg, + optimizer=optimizer, + scheduler=scheduler, + ) + + keys = [ + "best_train_epoch", + "best_valid_epoch", + "batch_idx_train", + "best_train_loss", + "best_valid_loss", + ] + for k in keys: + params[k] = saved_params[k] + + if params.start_batch > 0: + if "cur_epoch" in saved_params: + params["start_epoch"] = saved_params["cur_epoch"] + + if "cur_batch_idx" in saved_params: + params["cur_batch_idx"] = saved_params["cur_batch_idx"] + + return saved_params + + +def save_checkpoint( + params: AttributeDict, + model: Union[nn.Module, DDP], + model_avg: Optional[nn.Module] = None, + optimizer: Optional[torch.optim.Optimizer] = None, + scheduler: Optional[LRSchedulerType] = None, + sampler: Optional[CutSampler] = None, + scaler: Optional[GradScaler] = None, + rank: int = 0, +) -> None: + """Save model, optimizer, scheduler and training stats to file. + + Args: + params: + It is returned by :func:`get_params`. + model: + The training model. + model_avg: + The stored model averaged from the start of training. + optimizer: + The optimizer used in the training. + sampler: + The sampler for the training dataset. + scaler: + The scaler used for mix precision training. + """ + if rank != 0: + return + filename = params.exp_dir / f"epoch-{params.cur_epoch}.pt" + save_checkpoint_impl( + filename=filename, + model=model, + model_avg=model_avg, + params=params, + optimizer=optimizer, + scheduler=scheduler, + sampler=sampler, + scaler=scaler, + rank=rank, + ) + + if params.best_train_epoch == params.cur_epoch: + best_train_filename = params.exp_dir / "best-train-loss.pt" + copyfile(src=filename, dst=best_train_filename) + + if params.best_valid_epoch == params.cur_epoch: + best_valid_filename = params.exp_dir / "best-valid-loss.pt" + copyfile(src=filename, dst=best_valid_filename) + + +def compute_loss( + params: AttributeDict, + model: Union[nn.Module, DDP], + graph_compiler: CharCtcTrainingGraphCompiler, + batch: dict, + is_training: bool, + warmup: float = 1.0, +) -> Tuple[Tensor, MetricsTracker]: + """ + Compute RNN-T loss given the model and its inputs. + + Args: + params: + Parameters for training. See :func:`get_params`. + model: + The model for training. It is an instance of Conformer in our case. + batch: + A batch of data. See `lhotse.dataset.K2SpeechRecognitionDataset()` + for the content in it. + is_training: + True for training. False for validation. When it is True, this + function enables autograd during computation; when it is False, it + disables autograd. + warmup: a floating point value which increases throughout training; + values >= 1.0 are fully warmed up and have all modules present. 
+ """ + device = ( + model.device + if isinstance(model, DDP) + else next(model.parameters()).device + ) + feature = batch["inputs"] + # at entry, feature is (N, T, C) + assert feature.ndim == 3 + feature = feature.to(device) + + supervisions = batch["supervisions"] + feature_lens = supervisions["num_frames"].to(device) + + texts = batch["supervisions"]["text"] + + y = graph_compiler.texts_to_ids(texts) + if type(y) == list: + y = k2.RaggedTensor(y).to(device) + else: + y = y.to(device) + + with torch.set_grad_enabled(is_training): + simple_loss, pruned_loss = model( + x=feature, + x_lens=feature_lens, + y=y, + prune_range=params.prune_range, + am_scale=params.am_scale, + lm_scale=params.lm_scale, + warmup=warmup, + ) + # after the main warmup step, we keep pruned_loss_scale small + # for the same amount of time (model_warm_step), to avoid + # overwhelming the simple_loss and causing it to diverge, + # in case it had not fully learned the alignment yet. + pruned_loss_scale = ( + 0.0 + if warmup < 1.0 + else (0.1 if warmup > 1.0 and warmup < 2.0 else 1.0) + ) + loss = ( + params.simple_loss_scale * simple_loss + + pruned_loss_scale * pruned_loss + ) + + assert loss.requires_grad == is_training + + info = MetricsTracker() + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + info["frames"] = ( + (feature_lens // params.subsampling_factor).sum().item() + ) + + # Note: We use reduction=sum while computing the loss. + info["loss"] = loss.detach().cpu().item() + info["simple_loss"] = simple_loss.detach().cpu().item() + info["pruned_loss"] = pruned_loss.detach().cpu().item() + + return loss, info + + +def compute_validation_loss( + params: AttributeDict, + model: Union[nn.Module, DDP], + graph_compiler: CharCtcTrainingGraphCompiler, + valid_dl: torch.utils.data.DataLoader, + world_size: int = 1, +) -> MetricsTracker: + """Run the validation process.""" + model.eval() + + tot_loss = MetricsTracker() + + for batch_idx, batch in enumerate(valid_dl): + loss, loss_info = compute_loss( + params=params, + model=model, + graph_compiler=graph_compiler, + batch=batch, + is_training=False, + ) + assert loss.requires_grad is False + tot_loss = tot_loss + loss_info + + if world_size > 1: + tot_loss.reduce(loss.device) + + loss_value = tot_loss["loss"] / tot_loss["frames"] + if loss_value < params.best_valid_loss: + params.best_valid_epoch = params.cur_epoch + params.best_valid_loss = loss_value + + return tot_loss + + +def train_one_epoch( + params: AttributeDict, + model: Union[nn.Module, DDP], + optimizer: torch.optim.Optimizer, + scheduler: LRSchedulerType, + graph_compiler: CharCtcTrainingGraphCompiler, + train_dl: torch.utils.data.DataLoader, + valid_dl: torch.utils.data.DataLoader, + scaler: GradScaler, + model_avg: Optional[nn.Module] = None, + tb_writer: Optional[SummaryWriter] = None, + world_size: int = 1, + rank: int = 0, +) -> None: + """Train the model for one epoch. + + The training loss from the mean of all frames is saved in + `params.train_loss`. It runs the validation process every + `params.valid_interval` batches. + + Args: + params: + It is returned by :func:`get_params`. + model: + The model for training. + optimizer: + The optimizer we are using. + scheduler: + The learning rate scheduler, we call step() every step. + train_dl: + Dataloader for the training dataset. + valid_dl: + Dataloader for the validation dataset. + scaler: + The scaler used for mix precision training. + model_avg: + The stored model averaged from the start of training. 
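The `warmup` value documented above is produced in `train_one_epoch` further down as `batch_idx_train / model_warm_step`, and `compute_loss` maps it onto the pruned-loss scale shown above. A small sketch of that schedule with the default `--model-warm-step 3000` (the function name is illustrative):

```python
def pruned_loss_scale_sketch(batch_idx_train: int, model_warm_step: int = 3000) -> float:
    # train_one_epoch passes warmup = batch_idx_train / model_warm_step;
    # compute_loss then scales the pruned loss as in the code above.
    warmup = batch_idx_train / model_warm_step
    if warmup < 1.0:
        return 0.0   # before warm-up only the simple loss is trained
    elif 1.0 < warmup < 2.0:
        return 0.1   # keep the pruned loss small for another model_warm_step batches
    else:
        return 1.0   # fully warmed up


print([pruned_loss_scale_sketch(b) for b in (1000, 4000, 7000)])  # [0.0, 0.1, 1.0]
```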
+ tb_writer: + Writer to write log messages to tensorboard. + world_size: + Number of nodes in DDP training. If it is 1, DDP is disabled. + rank: + The rank of the node in DDP training. If no DDP is used, it should + be set to 0. + """ + model.train() + + tot_loss = MetricsTracker() + + cur_batch_idx = params.get("cur_batch_idx", 0) + + for batch_idx, batch in enumerate(train_dl): + if batch_idx < cur_batch_idx: + continue + cur_batch_idx = batch_idx + + params.batch_idx_train += 1 + batch_size = len(batch["supervisions"]["text"]) + + try: + with torch.cuda.amp.autocast(enabled=params.use_fp16): + loss, loss_info = compute_loss( + params=params, + model=model, + graph_compiler=graph_compiler, + batch=batch, + is_training=True, + warmup=(params.batch_idx_train / params.model_warm_step), + ) + # summary stats + tot_loss = (tot_loss * (1 - 1 / params.reset_interval)) + loss_info + + # NOTE: We use reduction==sum and loss is computed over utterances + # in the batch and there is no normalization to it so far. + scaler.scale(loss).backward() + scheduler.step_batch(params.batch_idx_train) + scaler.step(optimizer) + scaler.update() + optimizer.zero_grad() + except: # noqa + display_and_save_batch( + batch, params=params, graph_compiler=graph_compiler + ) + raise + + if params.print_diagnostics and batch_idx == 5: + return + + if ( + rank == 0 + and params.batch_idx_train > 0 + and params.batch_idx_train % params.average_period == 0 + ): + update_averaged_model( + params=params, + model_cur=model, + model_avg=model_avg, + ) + + if ( + params.batch_idx_train > 0 + and params.batch_idx_train % params.save_every_n == 0 + ): + params.cur_batch_idx = batch_idx + save_checkpoint_with_global_batch_idx( + out_dir=params.exp_dir, + global_batch_idx=params.batch_idx_train, + model=model, + model_avg=model_avg, + params=params, + optimizer=optimizer, + scheduler=scheduler, + sampler=train_dl.sampler, + scaler=scaler, + rank=rank, + ) + del params.cur_batch_idx + remove_checkpoints( + out_dir=params.exp_dir, + topk=params.keep_last_k, + rank=rank, + ) + + if batch_idx % params.log_interval == 0: + cur_lr = scheduler.get_last_lr()[0] + logging.info( + f"Epoch {params.cur_epoch}, " + f"batch {batch_idx}, loss[{loss_info}], " + f"tot_loss[{tot_loss}], batch size: {batch_size}, " + f"lr: {cur_lr:.2e}" + ) + + if tb_writer is not None: + tb_writer.add_scalar( + "train/learning_rate", cur_lr, params.batch_idx_train + ) + + loss_info.write_summary( + tb_writer, "train/current_", params.batch_idx_train + ) + tot_loss.write_summary( + tb_writer, "train/tot_", params.batch_idx_train + ) + + if batch_idx > 0 and batch_idx % params.valid_interval == 0: + logging.info("Computing validation loss") + valid_info = compute_validation_loss( + params=params, + model=model, + graph_compiler=graph_compiler, + valid_dl=valid_dl, + world_size=world_size, + ) + model.train() + logging.info(f"Epoch {params.cur_epoch}, validation: {valid_info}") + if tb_writer is not None: + valid_info.write_summary( + tb_writer, "train/valid_", params.batch_idx_train + ) + + loss_value = tot_loss["loss"] / tot_loss["frames"] + params.train_loss = loss_value + if params.train_loss < params.best_train_loss: + params.best_train_epoch = params.cur_epoch + params.best_train_loss = params.train_loss + + +def run(rank, world_size, args): + """ + Args: + rank: + It is a value between 0 and `world_size-1`, which is + passed automatically by `mp.spawn()` in :func:`main`. + The node with rank 0 is responsible for saving checkpoint. 
+ world_size: + Number of GPUs for DDP training. + args: + The return value of get_parser().parse_args() + """ + params = get_params() + params.update(vars(args)) + + fix_random_seed(params.seed) + if world_size > 1: + setup_dist(rank, world_size, params.master_port) + + setup_logger(f"{params.exp_dir}/log/log-train") + logging.info("Training started") + + if args.tensorboard and rank == 0: + tb_writer = SummaryWriter(log_dir=f"{params.exp_dir}/tensorboard") + else: + tb_writer = None + + device = torch.device("cpu") + if torch.cuda.is_available(): + device = torch.device("cuda", rank) + logging.info(f"Device: {device}") + + lexicon = Lexicon(params.lang_dir) + graph_compiler = CharCtcTrainingGraphCompiler( + lexicon=lexicon, + device=device, + ) + + params.blank_id = lexicon.token_table[""] + params.vocab_size = max(lexicon.tokens) + 1 + + if params.dynamic_chunk_training: + assert ( + params.causal_convolution + ), "dynamic_chunk_training requires causal convolution" + + logging.info(params) + + logging.info("About to create model") + model = get_transducer_model(params) + + num_param = sum([p.numel() for p in model.parameters()]) + logging.info(f"Number of model parameters: {num_param}") + + assert params.save_every_n >= params.average_period + model_avg: Optional[nn.Module] = None + if rank == 0: + # model_avg is only used with rank 0 + model_avg = copy.deepcopy(model) + + assert params.start_epoch > 0, params.start_epoch + checkpoints = load_checkpoint_if_available( + params=params, model=model, model_avg=model_avg + ) + + model.to(device) + if world_size > 1: + logging.info("Using DDP") + model = DDP(model, device_ids=[rank]) + + optimizer = Eve(model.parameters(), lr=params.initial_lr) + + scheduler = Eden(optimizer, params.lr_batches, params.lr_epochs) + + if checkpoints and "optimizer" in checkpoints: + logging.info("Loading optimizer state dict") + optimizer.load_state_dict(checkpoints["optimizer"]) + + if ( + checkpoints + and "scheduler" in checkpoints + and checkpoints["scheduler"] is not None + ): + logging.info("Loading scheduler state dict") + scheduler.load_state_dict(checkpoints["scheduler"]) + + if params.print_diagnostics: + opts = diagnostics.TensorDiagnosticOptions( + 2 ** 22 + ) # allow 4 megabytes per sub-module + diagnostic = diagnostics.attach_diagnostics(model, opts) + + wenetspeech = WenetSpeechAsrDataModule(args) + + train_cuts = wenetspeech.train_cuts() + valid_cuts = wenetspeech.valid_cuts() + + def remove_short_and_long_utt(c: Cut): + # Keep only utterances with duration between 1 second and 15.0 seconds + # + # Caution: There is a reason to select 15.0 here. 
Please see + # ../local/display_manifest_statistics.py + # + # You should use ../local/display_manifest_statistics.py to get + # an utterance duration distribution for your dataset to select + # the threshold + return 1.0 <= c.duration <= 15.0 + + train_cuts = train_cuts.filter(remove_short_and_long_utt) + + valid_dl = wenetspeech.valid_dataloaders(valid_cuts) + + if params.start_batch > 0 and checkpoints and "sampler" in checkpoints: + # We only load the sampler's state dict when it loads a checkpoint + # saved in the middle of an epoch + sampler_state_dict = checkpoints["sampler"] + else: + sampler_state_dict = None + + train_dl = wenetspeech.train_dataloaders( + train_cuts, sampler_state_dict=sampler_state_dict + ) + + if not params.print_diagnostics and params.start_batch == 0: + scan_pessimistic_batches_for_oom( + model=model, + train_dl=train_dl, + optimizer=optimizer, + graph_compiler=graph_compiler, + params=params, + warmup=0.0 if params.start_epoch == 1 else 1.0, + ) + + scaler = GradScaler(enabled=params.use_fp16) + if checkpoints and "grad_scaler" in checkpoints: + logging.info("Loading grad scaler state dict") + scaler.load_state_dict(checkpoints["grad_scaler"]) + + for epoch in range(params.start_epoch, params.num_epochs + 1): + scheduler.step_epoch(epoch - 1) + fix_random_seed(params.seed + epoch - 1) + train_dl.sampler.set_epoch(epoch - 1) + + if tb_writer is not None: + tb_writer.add_scalar("train/epoch", epoch, params.batch_idx_train) + + params.cur_epoch = epoch + + train_one_epoch( + params=params, + model=model, + model_avg=model_avg, + optimizer=optimizer, + scheduler=scheduler, + graph_compiler=graph_compiler, + train_dl=train_dl, + valid_dl=valid_dl, + scaler=scaler, + tb_writer=tb_writer, + world_size=world_size, + rank=rank, + ) + + if params.print_diagnostics: + diagnostic.print_diagnostics() + break + + save_checkpoint( + params=params, + model=model, + model_avg=model_avg, + optimizer=optimizer, + scheduler=scheduler, + sampler=train_dl.sampler, + scaler=scaler, + rank=rank, + ) + + logging.info("Done!") + + if world_size > 1: + torch.distributed.barrier() + cleanup_dist() + + +def display_and_save_batch( + batch: dict, + params: AttributeDict, + graph_compiler: CharCtcTrainingGraphCompiler, +) -> None: + """Display the batch statistics and save the batch into disk. + + Args: + batch: + A batch of data. See `lhotse.dataset.K2SpeechRecognitionDataset()` + for the content in it. + params: + Parameters for training. See :func:`get_params`. + """ + from lhotse.utils import uuid4 + + filename = f"{params.exp_dir}/batch-{uuid4()}.pt" + logging.info(f"Saving batch to {filename}") + torch.save(batch, filename) + + texts = batch["supervisions"]["text"] + features = batch["inputs"] + + logging.info(f"features shape: {features.shape}") + + y = graph_compiler.texts_to_ids(texts) + if type(y) == list: + y = k2.RaggedTensor(y) + else: + y = y + + num_tokens = sum(len(i) for i in y) + logging.info(f"num tokens: {num_tokens}") + + +def scan_pessimistic_batches_for_oom( + model: Union[nn.Module, DDP], + train_dl: torch.utils.data.DataLoader, + optimizer: torch.optim.Optimizer, + graph_compiler: CharCtcTrainingGraphCompiler, + params: AttributeDict, + warmup: float, +): + from lhotse.dataset import find_pessimistic_batches + + logging.info( + "Sanity check -- see if any of the batches in epoch 1 would cause OOM." 
+ ) + batches, crit_values = find_pessimistic_batches(train_dl.sampler) + for criterion, cuts in batches.items(): + batch = train_dl.dataset[cuts] + try: + with torch.cuda.amp.autocast(enabled=params.use_fp16): + loss, _ = compute_loss( + params=params, + model=model, + graph_compiler=graph_compiler, + batch=batch, + is_training=True, + warmup=warmup, + ) + loss.backward() + optimizer.step() + optimizer.zero_grad() + except Exception as e: + if "CUDA out of memory" in str(e): + logging.error( + "Your GPU ran out of memory with the current " + "max_duration setting. We recommend decreasing " + "max_duration and trying again.\n" + f"Failing criterion: {criterion} " + f"(={crit_values[criterion]}) ..." + ) + display_and_save_batch( + batch, params=params, graph_compiler=graph_compiler + ) + raise + + +def main(): + parser = get_parser() + WenetSpeechAsrDataModule.add_arguments(parser) + args = parser.parse_args() + args.lang_dir = Path(args.lang_dir) + args.exp_dir = Path(args.exp_dir) + + world_size = args.world_size + assert world_size >= 1 + if world_size > 1: + mp.spawn(run, args=(world_size, args), nprocs=world_size, join=True) + else: + run(rank=0, world_size=1, args=args) + + +torch.set_num_threads(1) +torch.set_num_interop_threads(1) + +if __name__ == "__main__": + main() From 3c9e7f733b92c844d4dcac18f137f962ae8737be Mon Sep 17 00:00:00 2001 From: boji123 Date: Thu, 28 Jul 2022 16:17:49 +0800 Subject: [PATCH 17/38] [debug] raise remind when git-lfs not available (#504) * [debug] raise remind when git-lfs not available * modify comment --- egs/aishell/ASR/prepare.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/egs/aishell/ASR/prepare.sh b/egs/aishell/ASR/prepare.sh index da0a1470e..f86dd8de3 100755 --- a/egs/aishell/ASR/prepare.sh +++ b/egs/aishell/ASR/prepare.sh @@ -48,6 +48,8 @@ if [ $stage -le -1 ] && [ $stop_stage -ge -1 ]; then log "stage -1: Download LM" # We assume that you have installed the git-lfs, if not, you could install it # using: `sudo apt-get install git-lfs && git-lfs install` + git lfs 1>/dev/null 2>&1 || (echo "please install git-lfs, consider using: sudo apt-get install git-lfs && git-lfs install" && exit 1) + if [ ! -f $dl_dir/lm/3-gram.unpruned.arpa ]; then git clone https://huggingface.co/pkufool/aishell_lm $dl_dir/lm fi From 389f9c77e549f7acfcaa61405d31efcb5a358282 Mon Sep 17 00:00:00 2001 From: Mingshuang Luo <37799481+luomingshuang@users.noreply.github.com> Date: Thu, 28 Jul 2022 17:01:46 +0800 Subject: [PATCH 18/38] correction for prepare.sh (#506) --- egs/wenetspeech/ASR/prepare.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/egs/wenetspeech/ASR/prepare.sh b/egs/wenetspeech/ASR/prepare.sh index 6ce4734a7..9449e5d1e 100755 --- a/egs/wenetspeech/ASR/prepare.sh +++ b/egs/wenetspeech/ASR/prepare.sh @@ -99,7 +99,7 @@ fi if [ $stage -le 5 ] && [ $stop_stage -ge 5 ]; then log "Stage 5: Split S subset into ${num_splits} pieces" - split_dir=data/fbank/S_split_${num_splits}_test + split_dir=data/fbank/S_split_${num_splits} if [ ! -f $split_dir/.split_completed ]; then lhotse split $num_splits ./data/fbank/cuts_S_raw.jsonl.gz $split_dir touch $split_dir/.split_completed From ec699675845455c855fa16b5790f1ff1690c035e Mon Sep 17 00:00:00 2001 From: Fangjun Kuang Date: Fri, 29 Jul 2022 11:17:19 +0800 Subject: [PATCH 19/38] Set overwrite=True when extracting features in batches. 
(#487) --- .../ASR/local/compute_fbank_gigaspeech_dev_test.py | 7 ++----- .../ASR/local/compute_fbank_gigaspeech_splits.py | 7 ++----- .../ASR/local/compute_fbank_gigaspeech_dev_test.py | 1 + .../ASR/local/compute_fbank_gigaspeech_splits.py | 1 + egs/spgispeech/ASR/local/compute_fbank_musan.py | 1 + egs/spgispeech/ASR/local/compute_fbank_spgispeech.py | 2 ++ .../ASR/local/compute_fbank_wenetspeech_dev_test.py | 1 + .../ASR/local/compute_fbank_wenetspeech_splits.py | 1 + 8 files changed, 11 insertions(+), 10 deletions(-) diff --git a/egs/gigaspeech/ASR/local/compute_fbank_gigaspeech_dev_test.py b/egs/gigaspeech/ASR/local/compute_fbank_gigaspeech_dev_test.py index 9f1039893..8209ee3ec 100755 --- a/egs/gigaspeech/ASR/local/compute_fbank_gigaspeech_dev_test.py +++ b/egs/gigaspeech/ASR/local/compute_fbank_gigaspeech_dev_test.py @@ -20,11 +20,7 @@ import logging from pathlib import Path import torch -from lhotse import ( - CutSet, - KaldifeatFbank, - KaldifeatFbankConfig, -) +from lhotse import CutSet, KaldifeatFbank, KaldifeatFbankConfig # Torch's multithreaded behavior needs to be disabled or # it wastes a lot of CPU and slow things down. @@ -69,6 +65,7 @@ def compute_fbank_gigaspeech_dev_test(): storage_path=f"{in_out_dir}/feats_{partition}", num_workers=num_workers, batch_duration=batch_duration, + overwrite=True, ) cut_set = cut_set.trim_to_supervisions( keep_overlapping=False, min_duration=None diff --git a/egs/gigaspeech/ASR/local/compute_fbank_gigaspeech_splits.py b/egs/gigaspeech/ASR/local/compute_fbank_gigaspeech_splits.py index 9dd3c046d..6410249db 100755 --- a/egs/gigaspeech/ASR/local/compute_fbank_gigaspeech_splits.py +++ b/egs/gigaspeech/ASR/local/compute_fbank_gigaspeech_splits.py @@ -22,11 +22,7 @@ from datetime import datetime from pathlib import Path import torch -from lhotse import ( - CutSet, - KaldifeatFbank, - KaldifeatFbankConfig, -) +from lhotse import CutSet, KaldifeatFbank, KaldifeatFbankConfig # Torch's multithreaded behavior needs to be disabled or # it wastes a lot of CPU and slow things down. 
@@ -120,6 +116,7 @@ def compute_fbank_gigaspeech_splits(args): storage_path=f"{output_dir}/feats_XL_{idx}", num_workers=args.num_workers, batch_duration=args.batch_duration, + overwrite=True, ) logging.info("About to split cuts into smaller chunks.") diff --git a/egs/librispeech/ASR/local/compute_fbank_gigaspeech_dev_test.py b/egs/librispeech/ASR/local/compute_fbank_gigaspeech_dev_test.py index 68d93d2c5..c0c7ef8c5 100644 --- a/egs/librispeech/ASR/local/compute_fbank_gigaspeech_dev_test.py +++ b/egs/librispeech/ASR/local/compute_fbank_gigaspeech_dev_test.py @@ -68,6 +68,7 @@ def compute_fbank_gigaspeech_dev_test(): storage_path=f"{in_out_dir}/{prefix}_feats_{partition}", num_workers=num_workers, batch_duration=batch_duration, + overwrite=True, ) cut_set = cut_set.trim_to_supervisions( keep_overlapping=False, min_duration=None diff --git a/egs/librispeech/ASR/local/compute_fbank_gigaspeech_splits.py b/egs/librispeech/ASR/local/compute_fbank_gigaspeech_splits.py index f826f064e..5587106e5 100644 --- a/egs/librispeech/ASR/local/compute_fbank_gigaspeech_splits.py +++ b/egs/librispeech/ASR/local/compute_fbank_gigaspeech_splits.py @@ -126,6 +126,7 @@ def compute_fbank_gigaspeech_splits(args): storage_path=f"{output_dir}/{prefix}_feats_XL_{idx}", num_workers=args.num_workers, batch_duration=args.batch_duration, + overwrite=True, ) logging.info("About to split cuts into smaller chunks.") diff --git a/egs/spgispeech/ASR/local/compute_fbank_musan.py b/egs/spgispeech/ASR/local/compute_fbank_musan.py index b88286c41..70372af2b 100755 --- a/egs/spgispeech/ASR/local/compute_fbank_musan.py +++ b/egs/spgispeech/ASR/local/compute_fbank_musan.py @@ -92,6 +92,7 @@ def compute_fbank_musan(): batch_duration=500, num_workers=4, storage_type=LilcomChunkyWriter, + overwrite=True, ) ) diff --git a/egs/spgispeech/ASR/local/compute_fbank_spgispeech.py b/egs/spgispeech/ASR/local/compute_fbank_spgispeech.py index b67754e2a..8116e7605 100755 --- a/egs/spgispeech/ASR/local/compute_fbank_spgispeech.py +++ b/egs/spgispeech/ASR/local/compute_fbank_spgispeech.py @@ -119,6 +119,7 @@ def compute_fbank_spgispeech(args): batch_duration=500, num_workers=4, storage_type=LilcomChunkyWriter, + overwrite=True, ) cs.to_file(cuts_train_idx_path) @@ -138,6 +139,7 @@ def compute_fbank_spgispeech(args): batch_duration=500, num_workers=4, storage_type=LilcomChunkyWriter, + overwrite=True, ) diff --git a/egs/wenetspeech/ASR/local/compute_fbank_wenetspeech_dev_test.py b/egs/wenetspeech/ASR/local/compute_fbank_wenetspeech_dev_test.py index c10a84d21..8a9f6ed30 100755 --- a/egs/wenetspeech/ASR/local/compute_fbank_wenetspeech_dev_test.py +++ b/egs/wenetspeech/ASR/local/compute_fbank_wenetspeech_dev_test.py @@ -75,6 +75,7 @@ def compute_fbank_wenetspeech_dev_test(): num_workers=num_workers, batch_duration=batch_duration, storage_type=LilcomHdf5Writer, + overwrite=True, ) logging.info(f"Saving to {cuts_path}") diff --git a/egs/wenetspeech/ASR/local/compute_fbank_wenetspeech_splits.py b/egs/wenetspeech/ASR/local/compute_fbank_wenetspeech_splits.py index bf9a03b20..a882b6113 100755 --- a/egs/wenetspeech/ASR/local/compute_fbank_wenetspeech_splits.py +++ b/egs/wenetspeech/ASR/local/compute_fbank_wenetspeech_splits.py @@ -140,6 +140,7 @@ def compute_fbank_wenetspeech_splits(args): num_workers=args.num_workers, batch_duration=args.batch_duration, storage_type=LilcomChunkyWriter, + overwrite=True, ) logging.info(f"Saving to {cuts_path}") From 34b4356bad189b0a9403af423b3917e0b4fd7502 Mon Sep 17 00:00:00 2001 From: Lucky Wong Date: Fri, 29 Jul 2022 
11:28:52 +0800 Subject: [PATCH 20/38] correction for get rank id. (#507) * Fix no attribute 'data' error. * minor fixes * correction for get rank id. --- icefall/dist.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/icefall/dist.py b/icefall/dist.py index 6334f9c13..7016beafb 100644 --- a/icefall/dist.py +++ b/icefall/dist.py @@ -57,9 +57,9 @@ def get_rank(): if "RANK" in os.environ: return int(os.environ["RANK"]) elif dist.is_available() and dist.is_initialized(): - return dist.rank() + return dist.get_rank() else: - return 1 + return 0 def get_local_rank(): From 1b478d3ac34f037b8d6fbafce4fe0da36ac1df64 Mon Sep 17 00:00:00 2001 From: Mingshuang Luo <37799481+luomingshuang@users.noreply.github.com> Date: Fri, 29 Jul 2022 12:03:08 +0800 Subject: [PATCH 21/38] Add other decoding methods (nbest, nbest oracle, nbest LG) for wenetspeech pruned rnnt2 (#482) * add other decoding methods for wenetspeech * changes for RESULTS.md * add ngram-lm-scale=0.35 results * set ngram-lm-scale=0.35 as default * Update README.md * add nbest-scale for flie name --- README.md | 2 +- egs/wenetspeech/ASR/RESULTS.md | 48 +++++- egs/wenetspeech/ASR/local/compile_lg.py | 1 + egs/wenetspeech/ASR/prepare.sh | 31 ++++ .../pruned_transducer_stateless2/decode.py | 156 +++++++++++++++++- 5 files changed, 232 insertions(+), 6 deletions(-) create mode 120000 egs/wenetspeech/ASR/local/compile_lg.py diff --git a/README.md b/README.md index fcba0723b..7213d8460 100644 --- a/README.md +++ b/README.md @@ -257,8 +257,8 @@ We provide some models for this recipe: [Pruned stateless RNN-T_2: Conformer enc | | Dev | Test-Net | Test-Meeting | |----------------------|-------|----------|--------------| | greedy search | 7.80 | 8.75 | 13.49 | +| modified beam search| 7.76 | 8.71 | 13.41 | | fast beam search | 7.94 | 8.74 | 13.80 | -| modified beam search | 7.76 | 8.71 | 13.41 | #### Pruned stateless RNN-T_5: Conformer encoder + Embedding decoder + k2 pruned RNN-T loss (trained with L subset) **Streaming**: diff --git a/egs/wenetspeech/ASR/RESULTS.md b/egs/wenetspeech/ASR/RESULTS.md index cc36ae4f2..658ad4a9b 100644 --- a/egs/wenetspeech/ASR/RESULTS.md +++ b/egs/wenetspeech/ASR/RESULTS.md @@ -84,7 +84,10 @@ When training with the L subset, the CERs are |------------------------------------|-------|----------|--------------|------------------------------------------| | greedy search | 7.80 | 8.75 | 13.49 | --epoch 10, --avg 2, --max-duration 100 | | modified beam search (beam size 4) | 7.76 | 8.71 | 13.41 | --epoch 10, --avg 2, --max-duration 100 | -| fast beam search (set as default) | 7.94 | 8.74 | 13.80 | --epoch 10, --avg 2, --max-duration 1500 | +| fast beam search (1best) | 7.94 | 8.74 | 13.80 | --epoch 10, --avg 2, --max-duration 1500 | +| fast beam search (nbest) | 9.82 | 10.98 | 16.37 | --epoch 10, --avg 2, --max-duration 600 | +| fast beam search (nbest oracle) | 6.88 | 7.18 | 11.77 | --epoch 10, --avg 2, --max-duration 600 | +| fast beam search (nbest LG, ngram_lm_scale=0.35) | 8.83 | 9.88 | 15.47 | --epoch 10, --avg 2, --max-duration 600 | The training command for reproducing is given below: @@ -131,7 +134,7 @@ avg=2 --decoding-method modified_beam_search \ --beam-size 4 -## fast beam search +## fast beam search (1best) ./pruned_transducer_stateless2/decode.py \ --epoch $epoch \ --avg $avg \ @@ -142,6 +145,47 @@ avg=2 --beam 4 \ --max-contexts 4 \ --max-states 8 + +## fast beam search (nbest) +./pruned_transducer_stateless2/decode.py \ + --epoch 10 \ + --avg 2 \ + --exp-dir 
./pruned_transducer_stateless2/exp \ + --lang-dir data/lang_char \ + --max-duration 600 \ + --decoding-method fast_beam_search_nbest \ + --beam 20.0 \ + --max-contexts 8 \ + --max-states 64 \ + --num-paths 200 \ + --nbest-scale 0.5 + +## fast beam search (nbest oracle WER) +./pruned_transducer_stateless2/decode.py \ + --epoch 10 \ + --avg 2 \ + --exp-dir ./pruned_transducer_stateless2/exp \ + --lang-dir data/lang_char \ + --max-duration 600 \ + --decoding-method fast_beam_search_nbest_oracle \ + --beam 20.0 \ + --max-contexts 8 \ + --max-states 64 \ + --num-paths 200 \ + --nbest-scale 0.5 + +## fast beam search (with LG) +./pruned_transducer_stateless2/decode.py \ + --epoch 10 \ + --avg 2 \ + --exp-dir ./pruned_transducer_stateless2/exp \ + --lang-dir data/lang_char \ + --max-duration 600 \ + --decoding-method fast_beam_search_nbest_LG \ + --ngram-lm-scale 0.35 \ + --beam 20.0 \ + --max-contexts 8 \ + --max-states 64 ``` When training with the M subset, the CERs are diff --git a/egs/wenetspeech/ASR/local/compile_lg.py b/egs/wenetspeech/ASR/local/compile_lg.py new file mode 120000 index 000000000..462d6d3fb --- /dev/null +++ b/egs/wenetspeech/ASR/local/compile_lg.py @@ -0,0 +1 @@ +../../../librispeech/ASR/local/compile_lg.py \ No newline at end of file diff --git a/egs/wenetspeech/ASR/prepare.sh b/egs/wenetspeech/ASR/prepare.sh index 9449e5d1e..6573a94ad 100755 --- a/egs/wenetspeech/ASR/prepare.sh +++ b/egs/wenetspeech/ASR/prepare.sh @@ -225,3 +225,34 @@ if [ $stage -le 16 ] && [ $stop_stage -ge 16 ]; then --lang-dir data/lang_char fi fi + +# If you don't want to use LG for decoding, the following steps are not necessary. +if [ $stage -le 17 ] && [ $stop_stage -ge 17 ]; then + log "Stage 17: Prepare G" + # It will take about 20 minutes. + # We assume you have install kaldilm, if not, please install + # it using: pip install kaldilm + lang_char_dir=data/lang_char + if [ ! -f $lang_char_dir/3-gram.unpruned.arpa ]; then + python ./shared/make_kn_lm.py \ + -ngram-order 3 \ + -text $lang_char_dir/text_words_segmentation \ + -lm $lang_char_dir/3-gram.unpruned.arpa + fi + + mkdir -p data/lm + if [ ! 
-f data/lm/G_3_gram.fst.txt ]; then + # It is used in building LG + python3 -m kaldilm \ + --read-symbol-table="$lang_char_dir/words.txt" \ + --disambig-symbol='#0' \ + --max-order=3 \ + $lang_char_dir/3-gram.unpruned.arpa > data/lm/G_3_gram.fst.txt + fi +fi + +if [ $stage -le 18 ] && [ $stop_stage -ge 18 ]; then + log "Stage 18: Compile LG" + lang_char_dir=data/lang_char + python ./local/compile_lg.py --lang-dir $lang_char_dir +fi diff --git a/egs/wenetspeech/ASR/pruned_transducer_stateless2/decode.py b/egs/wenetspeech/ASR/pruned_transducer_stateless2/decode.py index 41e7a0f44..7c06cdb3d 100755 --- a/egs/wenetspeech/ASR/pruned_transducer_stateless2/decode.py +++ b/egs/wenetspeech/ASR/pruned_transducer_stateless2/decode.py @@ -37,7 +37,7 @@ When training with the L subset, usage: --decoding-method modified_beam_search \ --beam-size 4 -(3) fast beam search +(3) fast beam search (1best) ./pruned_transducer_stateless2/decode.py \ --epoch 10 \ --avg 2 \ @@ -48,6 +48,46 @@ When training with the L subset, usage: --beam 4 \ --max-contexts 4 \ --max-states 8 + +(4) fast beam search (nbest) +./pruned_transducer_stateless2/decode.py \ + --epoch 10 \ + --avg 2 \ + --exp-dir ./pruned_transducer_stateless2/exp \ + --lang-dir data/lang_char \ + --max-duration 600 \ + --decoding-method fast_beam_search_nbest \ + --beam 20.0 \ + --max-contexts 8 \ + --max-states 64 \ + --num-paths 200 \ + --nbest-scale 0.5 + +(5) fast beam search (nbest oracle WER) +./pruned_transducer_stateless2/decode.py \ + --epoch 10 \ + --avg 2 \ + --exp-dir ./pruned_transducer_stateless2/exp \ + --lang-dir data/lang_char \ + --max-duration 600 \ + --decoding-method fast_beam_search_nbest_oracle \ + --beam 20.0 \ + --max-contexts 8 \ + --max-states 64 \ + --num-paths 200 \ + --nbest-scale 0.5 + +(6) fast beam search (with LG) +./pruned_transducer_stateless2/decode.py \ + --epoch 10 \ + --avg 2 \ + --exp-dir ./pruned_transducer_stateless2/exp \ + --lang-dir data/lang_char \ + --max-duration 600 \ + --decoding-method fast_beam_search_nbest_LG \ + --beam 20.0 \ + --max-contexts 8 \ + --max-states 64 """ @@ -63,6 +103,9 @@ import torch.nn as nn from asr_datamodule import WenetSpeechAsrDataModule from beam_search import ( beam_search, + fast_beam_search_nbest, + fast_beam_search_nbest_LG, + fast_beam_search_nbest_oracle, fast_beam_search_one_best, greedy_search, greedy_search_batch, @@ -70,6 +113,7 @@ from beam_search import ( ) from train import get_params, get_transducer_model +from icefall.char_graph_compiler import CharCtcTrainingGraphCompiler from icefall.checkpoint import ( average_checkpoints, find_checkpoints, @@ -151,6 +195,11 @@ def get_parser(): - beam_search - modified_beam_search - fast_beam_search + - fast_beam_search_nbest + - fast_beam_search_nbest_oracle + - fast_beam_search_nbest_LG + If you use fast_beam_search_nbest_LG, you have to + specify `--lang-dir`, which should contain `LG.pt`. """, ) @@ -173,6 +222,16 @@ def get_parser(): Used only when --decoding-method is fast_beam_search""", ) + parser.add_argument( + "--ngram-lm-scale", + type=float, + default=0.35, + help=""" + Used only when --decoding_method is fast_beam_search_nbest_LG. + It specifies the scale for n-gram LM scores. + """, + ) + parser.add_argument( "--max-contexts", type=int, @@ -204,6 +263,24 @@ def get_parser(): Used only when --decoding_method is greedy_search""", ) + parser.add_argument( + "--num-paths", + type=int, + default=200, + help="""Number of paths for nbest decoding. 
+ Used only when the decoding method is fast_beam_search_nbest, + fast_beam_search_nbest_LG, and fast_beam_search_nbest_oracle""", + ) + + parser.add_argument( + "--nbest-scale", + type=float, + default=0.5, + help="""Scale applied to lattice scores when computing nbest paths. + Used only when the decoding method is fast_beam_search_nbest, + fast_beam_search_nbest_LG, and fast_beam_search_nbest_oracle""", + ) + return parser @@ -211,6 +288,7 @@ def decode_one_batch( params: AttributeDict, model: nn.Module, lexicon: Lexicon, + graph_compiler: CharCtcTrainingGraphCompiler, batch: dict, decoding_graph: Optional[k2.Fsa] = None, ) -> Dict[str, List[List[str]]]: @@ -267,6 +345,50 @@ def decode_one_batch( ) for i in range(encoder_out.size(0)): hyps.append([lexicon.token_table[idx] for idx in hyp_tokens[i]]) + elif params.decoding_method == "fast_beam_search_nbest_LG": + hyp_tokens = fast_beam_search_nbest_LG( + model=model, + decoding_graph=decoding_graph, + encoder_out=encoder_out, + encoder_out_lens=encoder_out_lens, + beam=params.beam, + max_contexts=params.max_contexts, + max_states=params.max_states, + num_paths=params.num_paths, + nbest_scale=params.nbest_scale, + ) + for hyp in hyp_tokens: + sentence = "".join([lexicon.word_table[i] for i in hyp]) + hyps.append(list(sentence)) + elif params.decoding_method == "fast_beam_search_nbest": + hyp_tokens = fast_beam_search_nbest( + model=model, + decoding_graph=decoding_graph, + encoder_out=encoder_out, + encoder_out_lens=encoder_out_lens, + beam=params.beam, + max_contexts=params.max_contexts, + max_states=params.max_states, + num_paths=params.num_paths, + nbest_scale=params.nbest_scale, + ) + for i in range(encoder_out.size(0)): + hyps.append([lexicon.token_table[idx] for idx in hyp_tokens[i]]) + elif params.decoding_method == "fast_beam_search_nbest_oracle": + hyp_tokens = fast_beam_search_nbest_oracle( + model=model, + decoding_graph=decoding_graph, + encoder_out=encoder_out, + encoder_out_lens=encoder_out_lens, + beam=params.beam, + max_contexts=params.max_contexts, + max_states=params.max_states, + num_paths=params.num_paths, + ref_texts=graph_compiler.texts_to_ids(supervisions["text"]), + nbest_scale=params.nbest_scale, + ) + for i in range(encoder_out.size(0)): + hyps.append([lexicon.token_table[idx] for idx in hyp_tokens[i]]) elif ( params.decoding_method == "greedy_search" and params.max_sym_per_frame == 1 @@ -331,6 +453,7 @@ def decode_dataset( params: AttributeDict, model: nn.Module, lexicon: Lexicon, + graph_compiler: CharCtcTrainingGraphCompiler, decoding_graph: Optional[k2.Fsa] = None, ) -> Dict[str, List[Tuple[List[str], List[str]]]]: """Decode dataset. 
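The `fast_beam_search_nbest_LG` branch above joins word-table entries into a string and then splits it back into characters, since WenetSpeech results are reported as CER. A toy illustration with made-up word-table entries (the real mapping comes from `data/lang_char/words.txt`):

```python
word_table = {1: "你好", 2: "世界"}            # hypothetical entries, for illustration only
hyp = [1, 2]                                   # word IDs from the LG decoding
sentence = "".join(word_table[i] for i in hyp)
print(list(sentence))                          # ['你', '好', '世', '界']
```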
@@ -373,6 +496,7 @@ def decode_dataset( params=params, model=model, lexicon=lexicon, + graph_compiler=graph_compiler, decoding_graph=decoding_graph, batch=batch, ) @@ -454,6 +578,9 @@ def main(): "greedy_search", "beam_search", "fast_beam_search", + "fast_beam_search_nbest", + "fast_beam_search_nbest_LG", + "fast_beam_search_nbest_oracle", "modified_beam_search", ) params.res_dir = params.exp_dir / params.decoding_method @@ -463,6 +590,13 @@ def main(): params.suffix += f"-beam-{params.beam}" params.suffix += f"-max-contexts-{params.max_contexts}" params.suffix += f"-max-states-{params.max_states}" + if params.decoding_method == "fast_beam_search_nbest_LG": + params.suffix += f"-ngram-lm-scale-{params.ngram_lm_scale}" + if ( + params.decoding_method == "fast_beam_search_nbest" + or params.decoding_method == "fast_beam_search_nbest_oracle" + ): + params.suffix += f"-nbest-scale-{params.nbest_scale}" elif "beam_search" in params.decoding_method: params.suffix += f"-beam-{params.beam_size}" else: @@ -482,6 +616,11 @@ def main(): params.blank_id = lexicon.token_table[""] params.vocab_size = max(lexicon.tokens) + 1 + graph_compiler = CharCtcTrainingGraphCompiler( + lexicon=lexicon, + device=device, + ) + logging.info(params) logging.info("About to create model") @@ -513,8 +652,18 @@ def main(): model.eval() model.device = device - if params.decoding_method == "fast_beam_search": - decoding_graph = k2.trivial_graph(params.vocab_size - 1, device=device) + if "fast_beam_search" in params.decoding_method: + if params.decoding_method == "fast_beam_search_nbest_LG": + lg_filename = params.lang_dir + "/LG.pt" + logging.info(f"Loading {lg_filename}") + decoding_graph = k2.Fsa.from_dict( + torch.load(lg_filename, map_location=device) + ) + decoding_graph.scores *= params.ngram_lm_scale + else: + decoding_graph = k2.trivial_graph( + params.vocab_size - 1, device=device + ) else: decoding_graph = None @@ -610,6 +759,7 @@ def main(): params=params, model=model, lexicon=lexicon, + graph_compiler=graph_compiler, decoding_graph=decoding_graph, ) save_results( From 2f75236c05d38fb52bceb14e77ac104952e856cc Mon Sep 17 00:00:00 2001 From: Wei Kang Date: Fri, 29 Jul 2022 16:40:06 +0800 Subject: [PATCH 22/38] Support dynamic chunk streaming training in pruned_transcuder_stateless5 (#454) * support dynamic chunk streaming training * Add simulate streaming decoding * Support streaming decoding * fix causal * Minor fixes * fix streaming decode; add results --- egs/librispeech/ASR/RESULTS.md | 74 ++ .../pruned_transducer_stateless5/conformer.py | 610 +++++++++++++++- .../pruned_transducer_stateless5/decode.py | 56 +- .../decode_stream.py | 1 + .../pruned_transducer_stateless5/export.py | 14 +- .../streaming_beam_search.py | 1 + .../streaming_decode.py | 660 ++++++++++++++++++ .../ASR/pruned_transducer_stateless5/train.py | 43 ++ 8 files changed, 1420 insertions(+), 39 deletions(-) create mode 120000 egs/librispeech/ASR/pruned_transducer_stateless5/decode_stream.py create mode 120000 egs/librispeech/ASR/pruned_transducer_stateless5/streaming_beam_search.py create mode 100755 egs/librispeech/ASR/pruned_transducer_stateless5/streaming_decode.py diff --git a/egs/librispeech/ASR/RESULTS.md b/egs/librispeech/ASR/RESULTS.md index b10ae98e6..496445fbf 100644 --- a/egs/librispeech/ASR/RESULTS.md +++ b/egs/librispeech/ASR/RESULTS.md @@ -618,6 +618,80 @@ done Pre-trained models, training and decoding logs, and decoding results are available at +#### [pruned_transducer_stateless5](./pruned_transducer_stateless5) + +See for more 
details. + +##### Training on full librispeech +The WERs are (the number in the table formatted as test-clean & test-other): + +We only trained 25 epochs for saving time, if you want to get better results you can train more epochs. + +| decoding method | left context | chunk size = 2 | chunk size = 4 | chunk size = 8 | chunk size = 16| +|----------------------|--------------|----------------|----------------|----------------|----------------| +| greedy search | 32 | 3.93 & 9.88 | 3.64 & 9.43 | 3.51 & 8.92 | 3.26 & 8.37 | +| greedy search | 64 | 4.84 & 9.81 | 3.59 & 9.27 | 3.44 & 8.83 | 3.23 & 8.33 | +| fast beam search | 32 | 3.86 & 9.77 | 3.67 & 9.3 | 3.5 & 8.83 | 3.27 & 8.33 | +| fast beam search | 64 | 3.79 & 9.68 | 3.57 & 9.21 | 3.41 & 8.72 | 3.25 & 8.27 | +| modified beam search | 32 | 3.84 & 9.71 | 3.66 & 9.38 | 3.47 & 8.86 | 3.26 & 8.42 | +| modified beam search | 64 | 3.81 & 9.59 | 3.58 & 9.2 | 3.44 & 8.74 | 3.23 & 8.35 | + + +**NOTE:** The WERs in table above were decoded with simulate streaming method (i.e. using masking strategy), see commands below. We also have [real streaming decoding](./pruned_transducer_stateless5/streaming_decode.py) script which should produce almost the same results. We tried adding right context in the real streaming decoding, but it seemed not to benefit the performance for all the models, the reasons might be the training and decoding mismatching. + +The training command is: + +```bash +./pruned_transducer_stateless5/train.py \ + --exp-dir pruned_transducer_stateless5/exp \ + --num-encoder-layers 18 \ + --dim-feedforward 2048 \ + --nhead 8 \ + --encoder-dim 512 \ + --decoder-dim 512 \ + --joiner-dim 512 \ + --full-libri 1 \ + --dynamic-chunk-training 1 \ + --causal-convolution 1 \ + --short-chunk-size 20 \ + --num-left-chunks 4 \ + --max-duration 300 \ + --world-size 4 \ + --start-epoch 1 \ + --num-epochs 25 +``` + +You can find the tensorboard log here + +The decoding command is: +```bash +decoding_method="greedy_search" # "fast_beam_search", "modified_beam_search" + +for chunk in 2 4 8 16; do + for left in 32 64; do + ./pruned_transducer_stateless5/decode.py \ + --num-encoder-layers 18 \ + --dim-feedforward 2048 \ + --nhead 8 \ + --encoder-dim 512 \ + --decoder-dim 512 \ + --joiner-dim 512 \ + --simulate-streaming 1 \ + --decode-chunk-size ${chunk} \ + --left-context ${left} \ + --causal-convolution 1 \ + --epoch 25 \ + --avg 3 \ + --exp-dir ./pruned_transducer_stateless5/exp \ + --max-sym-per-frame 1 \ + --max-duration 1000 \ + --decoding-method ${decoding_method} + done +done +``` + +Pre-trained models, training and decoding logs, and decoding results are available at + ### LibriSpeech BPE training results (Pruned Stateless Conv-Emformer RNN-T) diff --git a/egs/librispeech/ASR/pruned_transducer_stateless5/conformer.py b/egs/librispeech/ASR/pruned_transducer_stateless5/conformer.py index cb56bdffc..9d63cb123 100644 --- a/egs/librispeech/ASR/pruned_transducer_stateless5/conformer.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless5/conformer.py @@ -32,7 +32,7 @@ from scaling import ( ) from torch import Tensor, nn -from icefall.utils import make_pad_mask +from icefall.utils import make_pad_mask, subsequent_chunk_mask class Conformer(EncoderInterface): @@ -46,8 +46,27 @@ class Conformer(EncoderInterface): num_encoder_layers (int): number of encoder layers dropout (float): dropout rate layer_dropout (float): layer-dropout rate. - cnn_module_kernel (int): Kernel size of convolution module - vgg_frontend (bool): whether to use vgg frontend. 
+ cnn_module_kernel (int): Kernel size of convolution module. + dynamic_chunk_training (bool): whether to use dynamic chunk training, if + you want to train a streaming model, this is expected to be True. + When setting True, it will use a masking strategy to make the attention + see only limited left and right context. + short_chunk_threshold (float): a threshold to determinize the chunk size + to be used in masking training, if the randomly generated chunk size + is greater than ``max_len * short_chunk_threshold`` (max_len is the + max sequence length of current batch) then it will use + full context in training (i.e. with chunk size equals to max_len). + This will be used only when dynamic_chunk_training is True. + short_chunk_size (int): see docs above, if the randomly generated chunk + size equals to or less than ``max_len * short_chunk_threshold``, the + chunk size will be sampled uniformly from 1 to short_chunk_size. + This also will be used only when dynamic_chunk_training is True. + num_left_chunks (int): the left context (in chunks) attention can see, the + chunk size is decided by short_chunk_threshold and short_chunk_size. + A minus value means seeing full left context. + This also will be used only when dynamic_chunk_training is True. + causal (bool): Whether to use causal convolution in conformer encoder + layer. This MUST be True when using dynamic_chunk_training. """ def __init__( @@ -62,6 +81,11 @@ class Conformer(EncoderInterface): layer_dropout: float = 0.075, cnn_module_kernel: int = 31, aux_layer_period: int = 3, + dynamic_chunk_training: bool = False, + short_chunk_threshold: float = 0.75, + short_chunk_size: int = 25, + num_left_chunks: int = -1, + causal: bool = False, ) -> None: super(Conformer, self).__init__() @@ -79,18 +103,28 @@ class Conformer(EncoderInterface): self.encoder_pos = RelPositionalEncoding(d_model, dropout) + self.encoder_layers = num_encoder_layers + self.d_model = d_model + self.cnn_module_kernel = cnn_module_kernel + self.causal = causal + self.dynamic_chunk_training = dynamic_chunk_training + self.short_chunk_threshold = short_chunk_threshold + self.short_chunk_size = short_chunk_size + self.num_left_chunks = num_left_chunks + encoder_layer = ConformerEncoderLayer( - d_model, - nhead, - dim_feedforward, - dropout, - layer_dropout, - cnn_module_kernel, + d_model=d_model, + nhead=nhead, + dim_feedforward=dim_feedforward, + dropout=dropout, + layer_dropout=layer_dropout, + cnn_module_kernel=cnn_module_kernel, + causal=causal, ) # aux_layers from 1/3 self.encoder = ConformerEncoder( - encoder_layer, - num_encoder_layers, + encoder_layer=encoder_layer, + num_layers=num_encoder_layers, aux_layers=list( range( num_encoder_layers // 3, @@ -99,6 +133,7 @@ class Conformer(EncoderInterface): ) ), ) + self._init_state: List[torch.Tensor] = [torch.empty(0)] def forward( self, x: torch.Tensor, x_lens: torch.Tensor, warmup: float = 1.0 @@ -126,16 +161,246 @@ class Conformer(EncoderInterface): lengths = (((x_lens - 1) >> 1) - 1) >> 1 assert x.size(0) == lengths.max().item() - mask = make_pad_mask(lengths) + src_key_padding_mask = make_pad_mask(lengths) - x = self.encoder( - x, pos_emb, src_key_padding_mask=mask, warmup=warmup - ) # (T, N, C) + if self.dynamic_chunk_training: + assert ( + self.causal + ), "Causal convolution is required for streaming conformer." 
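The docstring above describes how training chunk sizes are drawn; the actual sampling appears a few lines below in `forward`. A rough numeric check of that rule with the defaults `short_chunk_threshold=0.75` and `short_chunk_size=25` (purely illustrative):

```python
import torch

torch.manual_seed(0)
max_len, threshold, short_chunk_size = 400, 0.75, 25
n, full = 10_000, 0
for _ in range(n):
    chunk_size = torch.randint(1, max_len, (1,)).item()
    if chunk_size > max_len * threshold:
        full += 1                                       # train on the full sequence
    else:
        chunk_size = chunk_size % short_chunk_size + 1  # short chunk in [1, 25]
print(full / n)  # roughly 0.25: about a quarter of batches see full context
```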
+ max_len = x.size(0) + chunk_size = torch.randint(1, max_len, (1,)).item() + if chunk_size > (max_len * self.short_chunk_threshold): + chunk_size = max_len + else: + chunk_size = chunk_size % self.short_chunk_size + 1 + + mask = ~subsequent_chunk_mask( + size=x.size(0), + chunk_size=chunk_size, + num_left_chunks=self.num_left_chunks, + device=x.device, + ) + x = self.encoder( + x, + pos_emb, + mask=mask, + src_key_padding_mask=src_key_padding_mask, + warmup=warmup, + ) # (T, N, C) + else: + x = self.encoder( + x, + pos_emb, + src_key_padding_mask=src_key_padding_mask, + warmup=warmup, + ) # (T, N, C) x = x.permute(1, 0, 2) # (T, N, C) ->(N, T, C) return x, lengths + @torch.jit.export + def get_init_state( + self, left_context: int, device: torch.device + ) -> List[torch.Tensor]: + """Return the initial cache state of the model. + Args: + left_context: The left context size (in frames after subsampling). + Returns: + Return the initial state of the model, it is a list containing two + tensors, the first one is the cache for attentions which has a shape + of (num_encoder_layers, left_context, encoder_dim), the second one + is the cache of conv_modules which has a shape of + (num_encoder_layers, cnn_module_kernel - 1, encoder_dim). + NOTE: the returned tensors are on the given device. + """ + if ( + len(self._init_state) == 2 + and self._init_state[0].size(1) == left_context + ): + # Note: It is OK to share the init state as it is + # not going to be modified by the model + return self._init_state + + init_states: List[torch.Tensor] = [ + torch.zeros( + ( + self.encoder_layers, + left_context, + self.d_model, + ), + device=device, + ), + torch.zeros( + ( + self.encoder_layers, + self.cnn_module_kernel - 1, + self.d_model, + ), + device=device, + ), + ] + + self._init_state = init_states + + return init_states + + @torch.jit.export + def streaming_forward( + self, + x: torch.Tensor, + x_lens: torch.Tensor, + states: Optional[List[Tensor]] = None, + processed_lens: Optional[Tensor] = None, + left_context: int = 64, + right_context: int = 4, + chunk_size: int = 16, + simulate_streaming: bool = False, + warmup: float = 1.0, + ) -> Tuple[torch.Tensor, torch.Tensor, List[torch.Tensor]]: + """ + Args: + x: + The input tensor. Its shape is (batch_size, seq_len, feature_dim). + x_lens: + A tensor of shape (batch_size,) containing the number of frames in + `x` before padding. + states: + The decode states for previous frames which contains the cached data. + It has two elements, the first element is the attn_cache which has + a shape of (encoder_layers, left_context, batch, attention_dim), + the second element is the conv_cache which has a shape of + (encoder_layers, cnn_module_kernel-1, batch, conv_dim). + Note: states will be modified in this function. + processed_lens: + How many frames (after subsampling) have been processed for each sequence. + left_context: + How many previous frames the attention can see in current chunk. + Note: It's not that each individual frame has `left_context` frames + of left context, some have more. + right_context: + How many future frames the attention can see in current chunk. + Note: It's not that each individual frame has `right_context` frames + of right context, some have more. + chunk_size: + The chunk size for decoding, this will be used to simulate streaming + decoding using masking. + simulate_streaming: + If setting True, it will use a masking strategy to simulate streaming + fashion (i.e. every chunk data only see limited left context and + right context). 
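The dynamic-chunk branch of `forward` above builds its attention mask with `subsequent_chunk_mask` from `icefall.utils`. Below is a simplified, self-contained re-implementation of that kind of mask, for illustration only (it is not the icefall function itself):

```python
import torch


def chunk_attention_mask(size: int, chunk_size: int, num_left_chunks: int = -1) -> torch.Tensor:
    # mask[i, j] is True when frame i may attend to frame j: frame i sees its own
    # chunk (including future frames inside it) plus up to num_left_chunks chunks
    # to its left; a negative num_left_chunks means unlimited left context.
    mask = torch.zeros(size, size, dtype=torch.bool)
    for i in range(size):
        cur_chunk = i // chunk_size
        start = 0 if num_left_chunks < 0 else max(0, (cur_chunk - num_left_chunks) * chunk_size)
        end = min(size, (cur_chunk + 1) * chunk_size)
        mask[i, start:end] = True
    return mask


print(chunk_attention_mask(size=6, chunk_size=2, num_left_chunks=1).int())
```

Note that `forward` above passes the *negation* of such a mask to the encoder, because a boolean attention mask in PyTorch marks the positions that must not be attended to.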
The whole sequence is supposed to be send at a time + When using simulate_streaming. + warmup: + A floating point value that gradually increases from 0 throughout + training; when it is >= 1.0 we are "fully warmed up". It is used + to turn modules on sequentially. + Returns: + Return a tuple containing 2 tensors: + - logits, its shape is (batch_size, output_seq_len, output_dim) + - logit_lens, a tensor of shape (batch_size,) containing the number + of frames in `logits` before padding. + - decode_states, the updated states including the information + of current chunk. + """ + + # x: [N, T, C] + # Caution: We assume the subsampling factor is 4! + + # lengths = ((x_lens - 1) // 2 - 1) // 2 # issue an warning + # + # Note: rounding_mode in torch.div() is available only in torch >= 1.8.0 + lengths = (((x_lens - 1) >> 1) - 1) >> 1 + + if not simulate_streaming: + assert states is not None + assert processed_lens is not None + assert ( + len(states) == 2 + and states[0].shape + == (self.encoder_layers, left_context, x.size(0), self.d_model) + and states[1].shape + == ( + self.encoder_layers, + self.cnn_module_kernel - 1, + x.size(0), + self.d_model, + ) + ), f"""The length of states MUST be equal to 2, and the shape of + first element should be {(self.encoder_layers, left_context, x.size(0), self.d_model)}, + given {states[0].shape}. the shape of second element should be + {(self.encoder_layers, self.cnn_module_kernel - 1, x.size(0), self.d_model)}, + given {states[1].shape}.""" + + lengths -= 2 # we will cut off 1 frame on each side of encoder_embed output + + src_key_padding_mask = make_pad_mask(lengths) + + processed_mask = torch.arange(left_context, device=x.device).expand( + x.size(0), left_context + ) + processed_lens = processed_lens.view(x.size(0), 1) + processed_mask = (processed_lens <= processed_mask).flip(1) + + src_key_padding_mask = torch.cat( + [processed_mask, src_key_padding_mask], dim=1 + ) + + embed = self.encoder_embed(x) + + # cut off 1 frame on each size of embed as they see the padding + # value which causes a training and decoding mismatch. + embed = embed[:, 1:-1, :] + + embed, pos_enc = self.encoder_pos(embed, left_context) + embed = embed.permute(1, 0, 2) # (B, T, F) -> (T, B, F) + + x, states = self.encoder.chunk_forward( + embed, + pos_enc, + src_key_padding_mask=src_key_padding_mask, + warmup=warmup, + states=states, + left_context=left_context, + right_context=right_context, + ) # (T, B, F) + if right_context > 0: + x = x[0:-right_context, ...] + lengths -= right_context + else: + assert states is None + states = [] # just to make torch.script.jit happy + # this branch simulates streaming decoding using mask as we are + # using in training time. 
+ src_key_padding_mask = make_pad_mask(lengths) + x = self.encoder_embed(x) + x, pos_emb = self.encoder_pos(x) + x = x.permute(1, 0, 2) # (N, T, C) -> (T, N, C) + + assert x.size(0) == lengths.max().item() + + num_left_chunks = -1 + if left_context >= 0: + assert left_context % chunk_size == 0 + num_left_chunks = left_context // chunk_size + + mask = ~subsequent_chunk_mask( + size=x.size(0), + chunk_size=chunk_size, + num_left_chunks=num_left_chunks, + device=x.device, + ) + x = self.encoder( + x, + pos_emb, + mask=mask, + src_key_padding_mask=src_key_padding_mask, + warmup=warmup, + ) # (T, N, C) + + x = x.permute(1, 0, 2) # (T, N, C) ->(N, T, C) + + return x, lengths, states + class ConformerEncoderLayer(nn.Module): """ @@ -148,6 +413,8 @@ class ConformerEncoderLayer(nn.Module): dim_feedforward: the dimension of the feedforward network model (default=2048). dropout: the dropout value (default=0.1). cnn_module_kernel (int): Kernel size of convolution module. + causal (bool): Whether to use causal convolution in conformer encoder + layer. This MUST be True when using dynamic_chunk_training and streaming decoding. Examples:: >>> encoder_layer = ConformerEncoderLayer(d_model=512, nhead=8) @@ -164,6 +431,7 @@ class ConformerEncoderLayer(nn.Module): dropout: float = 0.1, layer_dropout: float = 0.075, cnn_module_kernel: int = 31, + causal: bool = False, ) -> None: super(ConformerEncoderLayer, self).__init__() @@ -191,7 +459,9 @@ class ConformerEncoderLayer(nn.Module): ScaledLinear(dim_feedforward, d_model, initial_scale=0.25), ) - self.conv_module = ConvolutionModule(d_model, cnn_module_kernel) + self.conv_module = ConvolutionModule( + d_model, cnn_module_kernel, causal=causal + ) self.norm_final = BasicNorm(d_model) @@ -257,7 +527,8 @@ class ConformerEncoderLayer(nn.Module): src = src + self.dropout(src_att) # convolution module - src = src + self.dropout(self.conv_module(src)) + conv, _ = self.conv_module(src) + src = src + self.dropout(conv) # feed forward module src = src + self.dropout(self.feed_forward(src)) @@ -269,6 +540,98 @@ class ConformerEncoderLayer(nn.Module): return src + @torch.jit.export + def chunk_forward( + self, + src: Tensor, + pos_emb: Tensor, + states: List[Tensor], + src_mask: Optional[Tensor] = None, + src_key_padding_mask: Optional[Tensor] = None, + warmup: float = 1.0, + left_context: int = 0, + right_context: int = 0, + ) -> Tuple[Tensor, List[Tensor]]: + """ + Pass the input through the encoder layer. + Args: + src: the sequence to the encoder layer (required). + pos_emb: Positional embedding tensor (required). + states: + The decode states for previous frames which contains the cached data. + It has two elements, the first element is the attn_cache which has + a shape of (left_context, batch, attention_dim), + the second element is the conv_cache which has a shape of + (cnn_module_kernel-1, batch, conv_dim). + Note: states will be modified in this function. + src_mask: the mask for the src sequence (optional). + src_key_padding_mask: the mask for the src keys per batch (optional). + warmup: controls selective bypass of of layers; if < 1.0, we will + bypass layers more frequently. + left_context: + How many previous frames the attention can see in current chunk. + Note: It's not that each individual frame has `left_context` frames + of left context, some have more. + right_context: + How many future frames the attention can see in current chunk. + Note: It's not that each individual frame has `right_context` frames + of right context, some have more. 
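The convolution cache carried in `states[1]` holds `cnn_module_kernel - 1` frames per layer. The toy example below shows why that is enough for a causal 1-D convolution to produce chunk-wise outputs identical to full-utterance processing; it uses a bare depthwise convolution, not the full `ConvolutionModule`:

```python
import torch
import torch.nn.functional as F

torch.manual_seed(0)
kernel, channels, frames = 5, 1, 12
weight = torch.randn(channels, 1, kernel)             # depthwise conv weight
x = torch.randn(1, channels, frames)

# Full-utterance causal convolution: left-pad with kernel - 1 zeros.
full = F.conv1d(F.pad(x, (kernel - 1, 0)), weight, groups=channels)

# Streaming: process two chunks, carrying a (kernel - 1)-frame cache between them.
cache = torch.zeros(1, channels, kernel - 1)
outs = []
for chunk in x.split(6, dim=2):
    inp = torch.cat([cache, chunk], dim=2)
    outs.append(F.conv1d(inp, weight, groups=channels))
    cache = inp[:, :, -(kernel - 1):]
print(torch.allclose(full, torch.cat(outs, dim=2)))    # True
```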
+ Shape: + src: (S, N, E). + pos_emb: (N, 2*(S+left_context)-1, E). + src_mask: (S, S). + src_key_padding_mask: (N, S). + S is the source sequence length, N is the batch size, E is the feature number + """ + + assert not self.training + assert len(states) == 2 + assert states[0].shape == (left_context, src.size(1), src.size(2)) + + # macaron style feed forward module + src = src + self.dropout(self.feed_forward_macaron(src)) + + # We put the attention cache this level (i.e. before linear transformation) + # to save memory consumption, when decoding in streaming fashion, the + # batch size would be thousands (for 32GB machine), if we cache key & val + # separately, it needs extra several GB memory. + # TODO(WeiKang): Move cache to self_attn level (i.e. cache key & val + # separately) if needed. + key = torch.cat([states[0], src], dim=0) + val = key + if right_context > 0: + states[0] = key[ + -(left_context + right_context) : -right_context, ... # noqa + ] + else: + states[0] = key[-left_context:, ...] + + # multi-headed self-attention module + src_att = self.self_attn( + src, + key, + val, + pos_emb=pos_emb, + attn_mask=src_mask, + key_padding_mask=src_key_padding_mask, + left_context=left_context, + )[0] + + src = src + self.dropout(src_att) + + # convolution module + conv, conv_cache = self.conv_module(src, states[1], right_context) + states[1] = conv_cache + + src = src + self.dropout(conv) + + # feed forward module + src = src + self.dropout(self.feed_forward(src)) + + src = self.norm_final(self.balancer(src)) + + return src, states + class ConformerEncoder(nn.Module): r"""ConformerEncoder is a stack of N encoder layers @@ -352,6 +715,77 @@ class ConformerEncoder(nn.Module): return output + @torch.jit.export + def chunk_forward( + self, + src: Tensor, + pos_emb: Tensor, + states: List[Tensor], + mask: Optional[Tensor] = None, + src_key_padding_mask: Optional[Tensor] = None, + warmup: float = 1.0, + left_context: int = 0, + right_context: int = 0, + ) -> Tuple[Tensor, List[Tensor]]: + r"""Pass the input through the encoder layers in turn. + Args: + src: the sequence to the encoder (required). + pos_emb: Positional embedding tensor (required). + states: + The decode states for previous frames which contains the cached data. + It has two elements, the first element is the attn_cache which has + a shape of (encoder_layers, left_context, batch, attention_dim), + the second element is the conv_cache which has a shape of + (encoder_layers, cnn_module_kernel-1, batch, conv_dim). + Note: states will be modified in this function. + mask: the mask for the src sequence (optional). + src_key_padding_mask: the mask for the src keys per batch (optional). + warmup: controls selective bypass of of layers; if < 1.0, we will + bypass layers more frequently. + left_context: + How many previous frames the attention can see in current chunk. + Note: It's not that each individual frame has `left_context` frames + of left context, some have more. + right_context: + How many future frames the attention can see in current chunk. + Note: It's not that each individual frame has `right_context` frames + of right context, some have more. + Shape: + src: (S, N, E). + pos_emb: (N, 2*(S+left_context)-1, E). + mask: (S, S). + src_key_padding_mask: (N, S). 
+ S is the source sequence length, T is the target sequence length, N is the batch size, E is the feature number + """ + assert not self.training + assert len(states) == 2 + assert states[0].shape == ( + self.num_layers, + left_context, + src.size(1), + src.size(2), + ) + assert states[1].size(0) == self.num_layers + + output = src + + for layer_index, mod in enumerate(self.layers): + cache = [states[0][layer_index], states[1][layer_index]] + output, cache = mod.chunk_forward( + output, + pos_emb, + states=cache, + src_mask=mask, + src_key_padding_mask=src_key_padding_mask, + warmup=warmup, + left_context=left_context, + right_context=right_context, + ) + states[0][layer_index] = cache[0] + states[1][layer_index] = cache[1] + + return output, states + class RelPositionalEncoding(torch.nn.Module): """Relative positional encoding module. @@ -376,12 +810,13 @@ class RelPositionalEncoding(torch.nn.Module): self.pe = None self.extend_pe(torch.tensor(0.0).expand(1, max_len)) - def extend_pe(self, x: Tensor) -> None: + def extend_pe(self, x: Tensor, left_context: int = 0) -> None: """Reset the positional encodings.""" + x_size_1 = x.size(1) + left_context if self.pe is not None: # self.pe contains both positive and negative parts # the length of self.pe is 2 * input_len - 1 - if self.pe.size(1) >= x.size(1) * 2 - 1: + if self.pe.size(1) >= x_size_1 * 2 - 1: # Note: TorchScript doesn't implement operator== for torch.Device if self.pe.dtype != x.dtype or str(self.pe.device) != str( x.device @@ -391,9 +826,9 @@ class RelPositionalEncoding(torch.nn.Module): # Suppose `i` means to the position of query vector and `j` means the # position of key vector. We use position relative positions when keys # are to the left (i>j) and negative relative positions otherwise (i Tuple[Tensor, Tensor]: + def forward( + self, x: torch.Tensor, left_context: int = 0 + ) -> Tuple[Tensor, Tensor]: """Add positional encoding. Args: x (torch.Tensor): Input tensor (batch, time, `*`). + left_context (int): left context (in frames) used during streaming decoding. + this is used only in real streaming decoding, in other circumstances, + it MUST be 0. Returns: torch.Tensor: Encoded tensor (batch, time, `*`). torch.Tensor: Encoded tensor (batch, 2*time-1, `*`). """ - self.extend_pe(x) + self.extend_pe(x, left_context) + x_size_1 = x.size(1) + left_context pos_emb = self.pe[ :, self.pe.size(1) // 2 - - x.size(1) + - x_size_1 + 1 : self.pe.size(1) // 2 # noqa E203 + x.size(1), ] @@ -498,6 +939,7 @@ class RelPositionMultiheadAttention(nn.Module): key_padding_mask: Optional[Tensor] = None, need_weights: bool = True, attn_mask: Optional[Tensor] = None, + left_context: int = 0, ) -> Tuple[Tensor, Optional[Tensor]]: r""" Args: @@ -511,6 +953,9 @@ class RelPositionMultiheadAttention(nn.Module): need_weights: output attn_output_weights. attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all the batches while a 3D mask allows to specify a different mask for the entries of each batch. + left_context (int): left context (in frames) used during streaming decoding. + this is used only in real streaming decoding, in other circumstances, + it MUST be 0. 
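As a quick sanity check on the left-context bookkeeping (a standalone sketch, not icefall code): when a chunk of S query frames can also attend to `left_context` cached key frames, the attention needs `left_context + 2 * S - 1` distinct relative offsets, which matches the `n == left_context + 2 * time1 - 1` assertion in `rel_shift` below.

```python
import torch

# Query positions live in the current chunk [0, S); key positions span the
# cached left context plus the chunk, i.e. [-left_context, S). The offset
# key_pos - query_pos therefore ranges over [-(S - 1 + left_context), S - 1].
S, left_context = 16, 64
offsets = torch.arange(-(S - 1 + left_context), S)
assert offsets.numel() == left_context + 2 * S - 1  # 95 distinct offsets
print(offsets.numel())
```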
Shape: - Inputs: @@ -556,14 +1001,18 @@ class RelPositionMultiheadAttention(nn.Module): key_padding_mask=key_padding_mask, need_weights=need_weights, attn_mask=attn_mask, + left_context=left_context, ) - def rel_shift(self, x: Tensor) -> Tensor: + def rel_shift(self, x: Tensor, left_context: int = 0) -> Tensor: """Compute relative positional encoding. Args: x: Input tensor (batch, head, time1, 2*time1-1). time1 means the length of query vector. + left_context (int): left context (in frames) used during streaming decoding. + this is used only in real streaming decoding, in other circumstances, + it MUST be 0. Returns: Tensor: tensor of shape (batch, head, time1, time2) @@ -571,14 +1020,17 @@ class RelPositionMultiheadAttention(nn.Module): the key, while time1 is for the query). """ (batch_size, num_heads, time1, n) = x.shape - assert n == 2 * time1 - 1 + time2 = time1 + left_context + assert ( + n == left_context + 2 * time1 - 1 + ), f"{n} == {left_context} + 2 * {time1} - 1" # Note: TorchScript requires explicit arg for stride() batch_stride = x.stride(0) head_stride = x.stride(1) time1_stride = x.stride(2) n_stride = x.stride(3) return x.as_strided( - (batch_size, num_heads, time1, time1), + (batch_size, num_heads, time1, time2), (batch_stride, head_stride, time1_stride - n_stride, n_stride), storage_offset=n_stride * (time1 - 1), ) @@ -600,6 +1052,7 @@ class RelPositionMultiheadAttention(nn.Module): key_padding_mask: Optional[Tensor] = None, need_weights: bool = True, attn_mask: Optional[Tensor] = None, + left_context: int = 0, ) -> Tuple[Tensor, Optional[Tensor]]: r""" Args: @@ -617,6 +1070,9 @@ class RelPositionMultiheadAttention(nn.Module): need_weights: output attn_output_weights. attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all the batches while a 3D mask allows to specify a different mask for the entries of each batch. + left_context (int): left context (in frames) used during streaming decoding. + this is used only in real streaming decoding, in other circumstances, + it MUST be 0. Shape: Inputs: @@ -780,7 +1236,8 @@ class RelPositionMultiheadAttention(nn.Module): pos_emb_bsz = pos_emb.size(0) assert pos_emb_bsz in (1, bsz) # actually it is 1 p = self.linear_pos(pos_emb).view(pos_emb_bsz, -1, num_heads, head_dim) - p = p.transpose(1, 2) # (batch, head, 2*time1-1, d_k) + # (batch, 2*time1, head, d_k) --> (batch, head, d_k, 2*time -1) + p = p.permute(0, 2, 3, 1) q_with_bias_u = (q + self._pos_bias_u()).transpose( 1, 2 @@ -800,9 +1257,9 @@ class RelPositionMultiheadAttention(nn.Module): # compute matrix b and matrix d matrix_bd = torch.matmul( - q_with_bias_v, p.transpose(-2, -1) + q_with_bias_v, p ) # (batch, head, time1, 2*time1-1) - matrix_bd = self.rel_shift(matrix_bd) + matrix_bd = self.rel_shift(matrix_bd, left_context) attn_output_weights = ( matrix_ac + matrix_bd @@ -837,6 +1294,39 @@ class RelPositionMultiheadAttention(nn.Module): ) attn_output_weights = nn.functional.softmax(attn_output_weights, dim=-1) + + # If we are using dynamic_chunk_training and setting a limited + # num_left_chunks, the attention may only see the padding values which + # will also be masked out by `key_padding_mask`, at this circumstances, + # the whole column of `attn_output_weights` will be `-inf` + # (i.e. be `nan` after softmax), so, we fill `0.0` at the masking + # positions to avoid invalid loss value below. 
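A minimal standalone illustration of the failure mode described in the comment above (not the icefall code itself): a row whose positions are all masked becomes all `-inf`, softmax turns it into NaN, and zeroing it afterwards keeps the loss finite.

```python
import torch

scores = torch.zeros(1, 4)
mask = torch.tensor([[True, True, True, True]])  # every position masked out

masked = scores.masked_fill(mask, float("-inf"))
probs = torch.softmax(masked, dim=-1)
print(torch.isnan(probs).all())      # tensor(True)
print(probs.masked_fill(mask, 0.0))  # tensor([[0., 0., 0., 0.]])
```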
+ if ( + attn_mask is not None + and attn_mask.dtype == torch.bool + and key_padding_mask is not None + ): + if attn_mask.size(0) != 1: + attn_mask = attn_mask.view(bsz, num_heads, tgt_len, src_len) + combined_mask = attn_mask | key_padding_mask.unsqueeze( + 1 + ).unsqueeze(2) + else: + # attn_mask.shape == (1, tgt_len, src_len) + combined_mask = attn_mask.unsqueeze( + 0 + ) | key_padding_mask.unsqueeze(1).unsqueeze(2) + + attn_output_weights = attn_output_weights.view( + bsz, num_heads, tgt_len, src_len + ) + attn_output_weights = attn_output_weights.masked_fill( + combined_mask, 0.0 + ) + attn_output_weights = attn_output_weights.view( + bsz * num_heads, tgt_len, src_len + ) + attn_output_weights = nn.functional.dropout( attn_output_weights, p=dropout_p, training=training ) @@ -870,17 +1360,24 @@ class ConvolutionModule(nn.Module): channels (int): The number of channels of conv layers. kernel_size (int): Kernerl size of conv layers. bias (bool): Whether to use bias in conv layers (default=True). + causal (bool): Whether to use causal convolution. """ def __init__( - self, channels: int, kernel_size: int, bias: bool = True + self, + channels: int, + kernel_size: int, + bias: bool = True, + causal: bool = False, ) -> None: """Construct an ConvolutionModule object.""" super(ConvolutionModule, self).__init__() # kernerl_size should be a odd number for 'SAME' padding assert (kernel_size - 1) % 2 == 0 + self.causal = causal + self.pointwise_conv1 = ScaledConv1d( channels, 2 * channels, @@ -907,12 +1404,17 @@ class ConvolutionModule(nn.Module): channel_dim=1, max_abs=10.0, min_positive=0.05, max_positive=1.0 ) + self.lorder = kernel_size - 1 + padding = (kernel_size - 1) // 2 + if self.causal: + padding = 0 + self.depthwise_conv = ScaledConv1d( channels, channels, kernel_size, stride=1, - padding=(kernel_size - 1) // 2, + padding=padding, groups=channels, bias=bias, ) @@ -933,15 +1435,26 @@ class ConvolutionModule(nn.Module): initial_scale=0.25, ) - def forward(self, x: Tensor) -> Tensor: + def forward( + self, x: Tensor, cache: Optional[Tensor] = None, right_context: int = 0 + ) -> Tuple[Tensor, Tensor]: """Compute convolution module. Args: x: Input tensor (#time, batch, channels). + cache: The cache of depthwise_conv, only used in real streaming + decoding. + right_context: + How many future frames the attention can see in current chunk. + Note: It's not that each individual frame has `right_context` frames + of right context, some have more. Returns: Tensor: Output tensor (#time, batch, channels). - + If cache is None return the output tensor (#time, batch, channels). + If cache is not None, return a tuple of Tensor, the first one is + the output tensor (#time, batch, channels), the second one is the + new cache for next chunk (#kernel_size - 1, batch, channels). """ # exchange the temporal dimension and the feature dimension x = x.permute(1, 2, 0) # (#batch, channels, time). 
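To see why caching the last `kernel_size - 1` frames is sufficient for the causal branch below, here is a hedged sketch using a plain `nn.Conv1d` rather than the `ScaledConv1d` above: chunk-wise convolution with such a cache reproduces the output of running the causal convolution over the whole sequence at once.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

torch.manual_seed(0)
channels, kernel_size, chunk = 4, 5, 8
lorder = kernel_size - 1
conv = nn.Conv1d(channels, channels, kernel_size, groups=channels, bias=False)

x = torch.randn(1, channels, 2 * chunk)  # (batch, channels, time)

# Whole-sequence causal conv: pad lorder zeros on the left, none on the right.
full = conv(F.pad(x, (lorder, 0)))

# Chunk-wise: the first chunk sees zero padding, the second sees the cache.
x1, x2 = x[..., :chunk], x[..., chunk:]
y1 = conv(F.pad(x1, (lorder, 0)))
cache = x1[..., -lorder:]  # last kernel_size - 1 frames of chunk 1
y2 = conv(torch.cat([cache, x2], dim=2))

assert torch.allclose(torch.cat([y1, y2], dim=2), full, atol=1e-6)
print("chunk-wise causal conv matches the full-sequence result")
```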
@@ -953,6 +1466,27 @@ class ConvolutionModule(nn.Module): x = nn.functional.glu(x, dim=1) # (batch, channels, time) # 1D Depthwise Conv + if self.causal and self.lorder > 0: + if cache is None: + # Make depthwise_conv causal by + # manualy padding self.lorder zeros to the left + x = nn.functional.pad(x, (self.lorder, 0), "constant", 0.0) + else: + assert ( + not self.training + ), "Cache should be None in training time" + assert cache.size(0) == self.lorder + x = torch.cat([cache.permute(1, 2, 0), x], dim=2) + if right_context > 0: + cache = x.permute(2, 0, 1)[ + -(self.lorder + right_context) : ( # noqa + -right_context + ), + ..., + ] + else: + cache = x.permute(2, 0, 1)[-self.lorder :, ...] # noqa + x = self.depthwise_conv(x) x = self.deriv_balancer2(x) @@ -960,7 +1494,11 @@ class ConvolutionModule(nn.Module): x = self.pointwise_conv2(x) # (batch, channel, time) - return x.permute(2, 0, 1) + # torch.jit.script requires return types be the same as annotated above + if cache is None: + cache = torch.empty(0) + + return x.permute(2, 0, 1), cache class Conv2dSubsampling(nn.Module): diff --git a/egs/librispeech/ASR/pruned_transducer_stateless5/decode.py b/egs/librispeech/ASR/pruned_transducer_stateless5/decode.py index f87d23cc9..2d0965023 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless5/decode.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless5/decode.py @@ -96,6 +96,7 @@ Usage: import argparse import logging +import math from collections import defaultdict from pathlib import Path from typing import Dict, List, Optional, Tuple @@ -132,6 +133,8 @@ from icefall.utils import ( write_error_stats, ) +LOG_EPS = math.log(1e-10) + def get_parser(): parser = argparse.ArgumentParser( @@ -298,6 +301,29 @@ def get_parser(): fast_beam_search_nbest_LG, and fast_beam_search_nbest_oracle""", ) + parser.add_argument( + "--simulate-streaming", + type=str2bool, + default=False, + help="""Whether to simulate streaming in decoding, this is a good way to + test a streaming model. 
+ """, + ) + + parser.add_argument( + "--decode-chunk-size", + type=int, + default=16, + help="The chunk size for decoding (in frames after subsampling)", + ) + + parser.add_argument( + "--left-context", + type=int, + default=64, + help="left context can be seen during decoding (in frames after subsampling)", + ) + add_model_arguments(parser) return parser @@ -352,9 +378,26 @@ def decode_one_batch( supervisions = batch["supervisions"] feature_lens = supervisions["num_frames"].to(device) - encoder_out, encoder_out_lens = model.encoder( - x=feature, x_lens=feature_lens + feature_lens += params.left_context + feature = torch.nn.functional.pad( + feature, + pad=(0, 0, 0, params.left_context), + value=LOG_EPS, ) + + if params.simulate_streaming: + encoder_out, encoder_out_lens, _ = model.encoder.streaming_forward( + x=feature, + x_lens=feature_lens, + chunk_size=params.decode_chunk_size, + left_context=params.left_context, + simulate_streaming=True, + ) + else: + encoder_out, encoder_out_lens = model.encoder( + x=feature, x_lens=feature_lens + ) + hyps = [] if params.decoding_method == "fast_beam_search": @@ -621,6 +664,10 @@ def main(): else: params.suffix = f"epoch-{params.epoch}-avg-{params.avg}" + if params.simulate_streaming: + params.suffix += f"-streaming-chunk-size-{params.decode_chunk_size}" + params.suffix += f"-left-context-{params.left_context}" + if "fast_beam_search" in params.decoding_method: params.suffix += f"-beam-{params.beam}" params.suffix += f"-max-contexts-{params.max_contexts}" @@ -658,6 +705,11 @@ def main(): params.unk_id = sp.piece_to_id("") params.vocab_size = sp.get_piece_size() + if params.simulate_streaming: + assert ( + params.causal_convolution + ), "Decoding in streaming requires causal convolution" + logging.info(params) logging.info("About to create model") diff --git a/egs/librispeech/ASR/pruned_transducer_stateless5/decode_stream.py b/egs/librispeech/ASR/pruned_transducer_stateless5/decode_stream.py new file mode 120000 index 000000000..30f264813 --- /dev/null +++ b/egs/librispeech/ASR/pruned_transducer_stateless5/decode_stream.py @@ -0,0 +1 @@ +../pruned_transducer_stateless/decode_stream.py \ No newline at end of file diff --git a/egs/librispeech/ASR/pruned_transducer_stateless5/export.py b/egs/librispeech/ASR/pruned_transducer_stateless5/export.py index 936508900..b2e5b430e 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless5/export.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless5/export.py @@ -97,7 +97,7 @@ def get_parser(): parser.add_argument( "--use-averaged-model", type=str2bool, - default=False, + default=True, help="Whether to load averaged model. Currently it only supports " "using --epoch. If True, it would decode with the averaged model " "over the epoch range from `epoch-avg` (excluded) to `epoch`." @@ -137,6 +137,15 @@ def get_parser(): "2 means tri-gram", ) + parser.add_argument( + "--streaming-model", + type=str2bool, + default=False, + help="""Whether to export a streaming model, if the models in exp-dir + are streaming model, this should be True. 
+ """, + ) + add_model_arguments(parser) return parser @@ -162,6 +171,9 @@ def main(): params.blank_id = sp.piece_to_id("") params.vocab_size = sp.get_piece_size() + if params.streaming_model: + assert params.causal_convolution + logging.info(params) logging.info("About to create model") diff --git a/egs/librispeech/ASR/pruned_transducer_stateless5/streaming_beam_search.py b/egs/librispeech/ASR/pruned_transducer_stateless5/streaming_beam_search.py new file mode 120000 index 000000000..3a5f89833 --- /dev/null +++ b/egs/librispeech/ASR/pruned_transducer_stateless5/streaming_beam_search.py @@ -0,0 +1 @@ +../pruned_transducer_stateless2/streaming_beam_search.py \ No newline at end of file diff --git a/egs/librispeech/ASR/pruned_transducer_stateless5/streaming_decode.py b/egs/librispeech/ASR/pruned_transducer_stateless5/streaming_decode.py new file mode 100755 index 000000000..d47d57d1b --- /dev/null +++ b/egs/librispeech/ASR/pruned_transducer_stateless5/streaming_decode.py @@ -0,0 +1,660 @@ +#!/usr/bin/env python3 +# Copyright 2022 Xiaomi Corporation (Authors: Wei Kang, Fangjun Kuang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Usage: +./pruned_transducer_stateless5/streaming_decode.py \ + --epoch 28 \ + --avg 15 \ + --left-context 32 \ + --decode-chunk-size 8 \ + --right-context 0 \ + --exp-dir ./pruned_transducer_stateless5/exp \ + --decoding_method greedy_search \ + --num-decode-streams 200 +""" + +import argparse +import logging +import math +from pathlib import Path +from typing import Dict, List, Optional, Tuple + +import k2 +import numpy as np +import sentencepiece as spm +import torch +import torch.nn as nn +from asr_datamodule import LibriSpeechAsrDataModule +from decode_stream import DecodeStream +from kaldifeat import Fbank, FbankOptions +from lhotse import CutSet +from streaming_beam_search import ( + fast_beam_search_one_best, + greedy_search, + modified_beam_search, +) +from torch.nn.utils.rnn import pad_sequence +from train import add_model_arguments, get_params, get_transducer_model + +from icefall.checkpoint import ( + average_checkpoints, + average_checkpoints_with_averaged_model, + find_checkpoints, + load_checkpoint, +) +from icefall.utils import ( + AttributeDict, + setup_logger, + store_transcripts, + str2bool, + write_error_stats, +) + +LOG_EPS = math.log(1e-10) + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--epoch", + type=int, + default=28, + help="""It specifies the checkpoint to use for decoding. + Note: Epoch counts from 0. + You can specify --avg to use more checkpoints for model averaging.""", + ) + + parser.add_argument( + "--iter", + type=int, + default=0, + help="""If positive, --epoch is ignored and it + will use the checkpoint exp_dir/checkpoint-iter.pt. + You can specify --avg to use more checkpoints for model averaging. 
+ """, + ) + + parser.add_argument( + "--avg", + type=int, + default=15, + help="Number of checkpoints to average. Automatically select " + "consecutive checkpoints before the checkpoint specified by " + "'--epoch' and '--iter'", + ) + + parser.add_argument( + "--use-averaged-model", + type=str2bool, + default=True, + help="Whether to load averaged model. Currently it only supports " + "using --epoch. If True, it would decode with the averaged model " + "over the epoch range from `epoch-avg` (excluded) to `epoch`." + "Actually only the models with epoch number of `epoch-avg` and " + "`epoch` are loaded for averaging. ", + ) + + parser.add_argument( + "--exp-dir", + type=str, + default="pruned_transducer_stateless2/exp", + help="The experiment dir", + ) + + parser.add_argument( + "--bpe-model", + type=str, + default="data/lang_bpe_500/bpe.model", + help="Path to the BPE model", + ) + + parser.add_argument( + "--decoding-method", + type=str, + default="greedy_search", + help="""Supported decoding methods are: + greedy_search + modified_beam_search + fast_beam_search + """, + ) + + parser.add_argument( + "--num_active_paths", + type=int, + default=4, + help="""An interger indicating how many candidates we will keep for each + frame. Used only when --decoding-method is modified_beam_search.""", + ) + + parser.add_argument( + "--beam", + type=float, + default=4, + help="""A floating point value to calculate the cutoff score during beam + search (i.e., `cutoff = max-score - beam`), which is the same as the + `beam` in Kaldi. + Used only when --decoding-method is fast_beam_search""", + ) + + parser.add_argument( + "--max-contexts", + type=int, + default=4, + help="""Used only when --decoding-method is + fast_beam_search""", + ) + + parser.add_argument( + "--max-states", + type=int, + default=32, + help="""Used only when --decoding-method is + fast_beam_search""", + ) + + parser.add_argument( + "--context-size", + type=int, + default=2, + help="The context size in the decoder. 1 means bigram; " + "2 means tri-gram", + ) + + parser.add_argument( + "--decode-chunk-size", + type=int, + default=16, + help="The chunk size for decoding (in frames after subsampling)", + ) + + parser.add_argument( + "--left-context", + type=int, + default=64, + help="left context can be seen during decoding (in frames after subsampling)", + ) + + parser.add_argument( + "--right-context", + type=int, + default=0, + help="right context can be seen during decoding (in frames after subsampling)", + ) + + parser.add_argument( + "--num-decode-streams", + type=int, + default=2000, + help="The number of streams that can be decoded parallel.", + ) + + add_model_arguments(parser) + + return parser + + +def decode_one_chunk( + params: AttributeDict, + model: nn.Module, + decode_streams: List[DecodeStream], +) -> List[int]: + """Decode one chunk frames of features for each decode_streams and + return the indexes of finished streams in a List. + + Args: + params: + It's the return value of :func:`get_params`. + model: + The neural model. + decode_streams: + A List of DecodeStream, each belonging to a utterance. + Returns: + Return a List containing which DecodeStreams are finished. 
+ """ + device = model.device + + features = [] + feature_lens = [] + states = [] + + processed_lens = [] + + for stream in decode_streams: + feat, feat_len = stream.get_feature_frames( + params.decode_chunk_size * params.subsampling_factor + ) + features.append(feat) + feature_lens.append(feat_len) + states.append(stream.states) + processed_lens.append(stream.done_frames) + + feature_lens = torch.tensor(feature_lens, device=device) + features = pad_sequence(features, batch_first=True, padding_value=LOG_EPS) + + # if T is less than 7 there will be an error in time reduction layer, + # because we subsample features with ((x_len - 1) // 2 - 1) // 2 + # we plus 2 here because we will cut off one frame on each size of + # encoder_embed output as they see invalid paddings. so we need extra 2 + # frames. + tail_length = 7 + (2 + params.right_context) * params.subsampling_factor + if features.size(1) < tail_length: + pad_length = tail_length - features.size(1) + feature_lens += pad_length + features = torch.nn.functional.pad( + features, + (0, 0, 0, pad_length), + mode="constant", + value=LOG_EPS, + ) + + states = [ + torch.stack([x[0] for x in states], dim=2), + torch.stack([x[1] for x in states], dim=2), + ] + processed_lens = torch.tensor(processed_lens, device=device) + + encoder_out, encoder_out_lens, states = model.encoder.streaming_forward( + x=features, + x_lens=feature_lens, + states=states, + left_context=params.left_context, + right_context=params.right_context, + processed_lens=processed_lens, + ) + + encoder_out = model.joiner.encoder_proj(encoder_out) + + if params.decoding_method == "greedy_search": + greedy_search( + model=model, encoder_out=encoder_out, streams=decode_streams + ) + elif params.decoding_method == "fast_beam_search": + processed_lens = processed_lens + encoder_out_lens + fast_beam_search_one_best( + model=model, + encoder_out=encoder_out, + processed_lens=processed_lens, + streams=decode_streams, + beam=params.beam, + max_states=params.max_states, + max_contexts=params.max_contexts, + ) + elif params.decoding_method == "modified_beam_search": + modified_beam_search( + model=model, + streams=decode_streams, + encoder_out=encoder_out, + num_active_paths=params.num_active_paths, + ) + else: + raise ValueError( + f"Unsupported decoding method: {params.decoding_method}" + ) + + states = [torch.unbind(states[0], dim=2), torch.unbind(states[1], dim=2)] + + finished_streams = [] + for i in range(len(decode_streams)): + decode_streams[i].states = [states[0][i], states[1][i]] + decode_streams[i].done_frames += encoder_out_lens[i] + if decode_streams[i].done: + finished_streams.append(i) + + return finished_streams + + +def decode_dataset( + cuts: CutSet, + params: AttributeDict, + model: nn.Module, + sp: spm.SentencePieceProcessor, + decoding_graph: Optional[k2.Fsa] = None, +) -> Dict[str, List[Tuple[List[str], List[str]]]]: + """Decode dataset. + + Args: + cuts: + Lhotse Cutset containing the dataset to decode. + params: + It is returned by :func:`get_params`. + model: + The neural model. + sp: + The BPE model. + decoding_graph: + The decoding graph. Can be either a `k2.trivial_graph` or HLG, Used + only when --decoding_method is fast_beam_search. + Returns: + Return a dict, whose key may be "greedy_search" if greedy search + is used, or it may be "beam_7" if beam size of 7 is used. + Its value is a list of tuples. Each tuple contains two elements: + The first is the reference transcript, and the second is the + predicted result. 
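A quick check of the `tail_length` arithmetic used above (a standalone sketch; `subsampling_factor` is assumed to be 4, as elsewhere in this recipe).

```python
def subsampled_len(x_len: int) -> int:
    # mirrors lengths = (((x_lens - 1) >> 1) - 1) >> 1 in the encoder
    return (((x_len - 1) >> 1) - 1) >> 1


subsampling_factor = 4
right_context = 0
tail_length = 7 + (2 + right_context) * subsampling_factor  # 15

# 7 input frames are the minimum that still yield one subsampled frame ...
assert subsampled_len(7) == 1 and subsampled_len(6) == 0
# ... and padding to tail_length leaves enough subsampled frames to cut one
# frame on each side of encoder_embed and drop right_context frames.
assert subsampled_len(tail_length) >= 1 + 2 + right_context
print(subsampled_len(tail_length))  # 3
```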
+ """ + device = model.device + + opts = FbankOptions() + opts.device = device + opts.frame_opts.dither = 0 + opts.frame_opts.snip_edges = False + opts.frame_opts.samp_freq = 16000 + opts.mel_opts.num_bins = 80 + + log_interval = 50 + + decode_results = [] + # Contain decode streams currently running. + decode_streams = [] + initial_states = model.encoder.get_init_state( + params.left_context, device=device + ) + for num, cut in enumerate(cuts): + # each utterance has a DecodeStream. + decode_stream = DecodeStream( + params=params, + initial_states=initial_states, + decoding_graph=decoding_graph, + device=device, + ) + + audio: np.ndarray = cut.load_audio() + # audio.shape: (1, num_samples) + assert len(audio.shape) == 2 + assert audio.shape[0] == 1, "Should be single channel" + assert audio.dtype == np.float32, audio.dtype + + # The trained model is using normalized samples + assert audio.max() <= 1, "Should be normalized to [-1, 1])" + + samples = torch.from_numpy(audio).squeeze(0) + + fbank = Fbank(opts) + feature = fbank(samples.to(device)) + decode_stream.set_features(feature) + decode_stream.ground_truth = cut.supervisions[0].text + + decode_streams.append(decode_stream) + + while len(decode_streams) >= params.num_decode_streams: + finished_streams = decode_one_chunk( + params=params, model=model, decode_streams=decode_streams + ) + for i in sorted(finished_streams, reverse=True): + decode_results.append( + ( + decode_streams[i].ground_truth.split(), + sp.decode(decode_streams[i].decoding_result()).split(), + ) + ) + del decode_streams[i] + + if num % log_interval == 0: + logging.info(f"Cuts processed until now is {num}.") + + # decode final chunks of last sequences + while len(decode_streams): + finished_streams = decode_one_chunk( + params=params, model=model, decode_streams=decode_streams + ) + for i in sorted(finished_streams, reverse=True): + decode_results.append( + ( + decode_streams[i].ground_truth.split(), + sp.decode(decode_streams[i].decoding_result()).split(), + ) + ) + del decode_streams[i] + + if params.decoding_method == "greedy_search": + key = "greedy_search" + elif params.decoding_method == "fast_beam_search": + key = ( + f"beam_{params.beam}_" + f"max_contexts_{params.max_contexts}_" + f"max_states_{params.max_states}" + ) + elif params.decoding_method == "modified_beam_search": + key = f"num_active_paths_{params.num_active_paths}" + else: + raise ValueError( + f"Unsupported decoding method: {params.decoding_method}" + ) + return {key: decode_results} + + +def save_results( + params: AttributeDict, + test_set_name: str, + results_dict: Dict[str, List[Tuple[List[str], List[str]]]], +): + test_set_wers = dict() + for key, results in results_dict.items(): + recog_path = ( + params.res_dir / f"recogs-{test_set_name}-{key}-{params.suffix}.txt" + ) + results = sorted(results) + store_transcripts(filename=recog_path, texts=results) + logging.info(f"The transcripts are stored in {recog_path}") + + # The following prints out WERs, per-word error statistics and aligned + # ref/hyp pairs. 
+ errs_filename = ( + params.res_dir / f"errs-{test_set_name}-{key}-{params.suffix}.txt" + ) + with open(errs_filename, "w") as f: + wer = write_error_stats( + f, f"{test_set_name}-{key}", results, enable_log=True + ) + test_set_wers[key] = wer + + logging.info("Wrote detailed error stats to {}".format(errs_filename)) + + test_set_wers = sorted(test_set_wers.items(), key=lambda x: x[1]) + errs_info = ( + params.res_dir + / f"wer-summary-{test_set_name}-{key}-{params.suffix}.txt" + ) + with open(errs_info, "w") as f: + print("settings\tWER", file=f) + for key, val in test_set_wers: + print("{}\t{}".format(key, val), file=f) + + s = "\nFor {}, WER of different settings are:\n".format(test_set_name) + note = "\tbest for {}".format(test_set_name) + for key, val in test_set_wers: + s += "{}\t{}{}\n".format(key, val, note) + note = "" + logging.info(s) + + +@torch.no_grad() +def main(): + parser = get_parser() + LibriSpeechAsrDataModule.add_arguments(parser) + args = parser.parse_args() + args.exp_dir = Path(args.exp_dir) + + params = get_params() + params.update(vars(args)) + + params.res_dir = params.exp_dir / "streaming" / params.decoding_method + + if params.iter > 0: + params.suffix = f"iter-{params.iter}-avg-{params.avg}" + else: + params.suffix = f"epoch-{params.epoch}-avg-{params.avg}" + + # for streaming + params.suffix += f"-streaming-chunk-size-{params.decode_chunk_size}" + params.suffix += f"-left-context-{params.left_context}" + params.suffix += f"-right-context-{params.right_context}" + + # for fast_beam_search + if params.decoding_method == "fast_beam_search": + params.suffix += f"-beam-{params.beam}" + params.suffix += f"-max-contexts-{params.max_contexts}" + params.suffix += f"-max-states-{params.max_states}" + + if params.use_averaged_model: + params.suffix += "-use-averaged-model" + + setup_logger(f"{params.res_dir}/log-decode-{params.suffix}") + logging.info("Decoding started") + + device = torch.device("cpu") + if torch.cuda.is_available(): + device = torch.device("cuda", 0) + + logging.info(f"Device: {device}") + + sp = spm.SentencePieceProcessor() + sp.load(params.bpe_model) + + # and is defined in local/train_bpe_model.py + params.blank_id = sp.piece_to_id("") + params.unk_id = sp.piece_to_id("") + params.vocab_size = sp.get_piece_size() + + # Decoding in streaming requires causal convolution + params.causal_convolution = True + + logging.info(params) + + logging.info("About to create model") + model = get_transducer_model(params) + + if not params.use_averaged_model: + if params.iter > 0: + filenames = find_checkpoints( + params.exp_dir, iteration=-params.iter + )[: params.avg] + if len(filenames) == 0: + raise ValueError( + f"No checkpoints found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + elif len(filenames) < params.avg: + raise ValueError( + f"Not enough checkpoints ({len(filenames)}) found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + logging.info(f"averaging {filenames}") + model.to(device) + model.load_state_dict(average_checkpoints(filenames, device=device)) + elif params.avg == 1: + load_checkpoint(f"{params.exp_dir}/epoch-{params.epoch}.pt", model) + else: + start = params.epoch - params.avg + 1 + filenames = [] + for i in range(start, params.epoch + 1): + if start >= 0: + filenames.append(f"{params.exp_dir}/epoch-{i}.pt") + logging.info(f"averaging {filenames}") + model.to(device) + model.load_state_dict(average_checkpoints(filenames, device=device)) + else: + if params.iter > 0: + filenames = find_checkpoints( + params.exp_dir, 
iteration=-params.iter + )[: params.avg + 1] + if len(filenames) == 0: + raise ValueError( + f"No checkpoints found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + elif len(filenames) < params.avg + 1: + raise ValueError( + f"Not enough checkpoints ({len(filenames)}) found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + filename_start = filenames[-1] + filename_end = filenames[0] + logging.info( + "Calculating the averaged model over iteration checkpoints" + f" from {filename_start} (excluded) to {filename_end}" + ) + model.to(device) + model.load_state_dict( + average_checkpoints_with_averaged_model( + filename_start=filename_start, + filename_end=filename_end, + device=device, + ) + ) + else: + assert params.avg > 0, params.avg + start = params.epoch - params.avg + assert start >= 1, start + filename_start = f"{params.exp_dir}/epoch-{start}.pt" + filename_end = f"{params.exp_dir}/epoch-{params.epoch}.pt" + logging.info( + f"Calculating the averaged model over epoch range from " + f"{start} (excluded) to {params.epoch}" + ) + model.to(device) + model.load_state_dict( + average_checkpoints_with_averaged_model( + filename_start=filename_start, + filename_end=filename_end, + device=device, + ) + ) + + model.to(device) + model.eval() + model.device = device + + decoding_graph = None + if params.decoding_method == "fast_beam_search": + decoding_graph = k2.trivial_graph(params.vocab_size - 1, device=device) + + num_param = sum([p.numel() for p in model.parameters()]) + logging.info(f"Number of model parameters: {num_param}") + + librispeech = LibriSpeechAsrDataModule(args) + + test_clean_cuts = librispeech.test_clean_cuts() + test_other_cuts = librispeech.test_other_cuts() + + test_sets = ["test-clean", "test-other"] + test_cuts = [test_clean_cuts, test_other_cuts] + + for test_set, test_cut in zip(test_sets, test_cuts): + results_dict = decode_dataset( + cuts=test_cut, + params=params, + model=model, + sp=sp, + decoding_graph=decoding_graph, + ) + + save_results( + params=params, + test_set_name=test_set, + results_dict=results_dict, + ) + + logging.info("Done!") + + +if __name__ == "__main__": + main() diff --git a/egs/librispeech/ASR/pruned_transducer_stateless5/train.py b/egs/librispeech/ASR/pruned_transducer_stateless5/train.py index 8f20eedc9..44abdcd49 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless5/train.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless5/train.py @@ -134,6 +134,40 @@ def add_model_arguments(parser: argparse.ArgumentParser): """, ) + parser.add_argument( + "--dynamic-chunk-training", + type=str2bool, + default=False, + help="""Whether to use dynamic_chunk_training, if you want a streaming + model, this requires to be True. + """, + ) + + parser.add_argument( + "--causal-convolution", + type=str2bool, + default=False, + help="""Whether to use causal convolution, this requires to be True when + using dynamic_chunk_training. + """, + ) + + parser.add_argument( + "--short-chunk-size", + type=int, + default=25, + help="""Chunk length of dynamic training, the chunk size would be either + max sequence length of current batch or uniformly sampled from (1, short_chunk_size). 
+ """, + ) + + parser.add_argument( + "--num-left-chunks", + type=int, + default=4, + help="How many left context can be seen in chunks when calculating attention.", + ) + def get_parser(): parser = argparse.ArgumentParser( @@ -408,6 +442,10 @@ def get_encoder_model(params: AttributeDict) -> nn.Module: nhead=params.nhead, dim_feedforward=params.dim_feedforward, num_encoder_layers=params.num_encoder_layers, + dynamic_chunk_training=params.dynamic_chunk_training, + short_chunk_size=params.short_chunk_size, + num_left_chunks=params.num_left_chunks, + causal=params.causal_convolution, ) return encoder @@ -901,6 +939,11 @@ def run(rank, world_size, args): params.blank_id = sp.piece_to_id("") params.vocab_size = sp.get_piece_size() + if params.dynamic_chunk_training: + assert ( + params.causal_convolution + ), "dynamic_chunk_training requires causal convolution" + logging.info(params) logging.info("About to create model") From 132132f52a660f74dac552b432776d82aa1f5dab Mon Sep 17 00:00:00 2001 From: "LIyong.Guo" Date: Tue, 2 Aug 2022 22:28:12 +0800 Subject: [PATCH 23/38] liear_fst_with_self_loops (#512) --- icefall/decode.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/icefall/decode.py b/icefall/decode.py index e596876f4..3b64481c7 100644 --- a/icefall/decode.py +++ b/icefall/decode.py @@ -330,12 +330,14 @@ class Nbest(object): # We use a word fsa to intersect with k2.invert(lattice) word_fsa = k2.invert(self.fsa) + word_fsa.scores.zero_() if hasattr(lattice, "aux_labels"): # delete token IDs as it is not needed del word_fsa.aux_labels + word_fsa_with_epsilon_loops = k2.linear_fsa_with_self_loops(word_fsa) + else: + word_fsa_with_epsilon_loops = k2.linear_fst_with_self_loops(word_fsa) - word_fsa.scores.zero_() - word_fsa_with_epsilon_loops = k2.linear_fsa_with_self_loops(word_fsa) path_to_utt_map = self.shape.row_ids(1) From 58a96e5b681a1db8b0029f4627b4d160af78054a Mon Sep 17 00:00:00 2001 From: Fangjun Kuang Date: Wed, 3 Aug 2022 10:30:28 +0800 Subject: [PATCH 24/38] Support exporting to ONNX format (#501) * WIP: Support exporting to ONNX format * Minor fixes. * Combine encoder/decoder/joiner into a single file. * Revert merging three onnx models into a single one. It's quite time consuming to extract a sub-graph from the combined model. For instance, it takes more than one hour to extract the encoder model. * Update CI to test ONNX models. * Decode with exported models. * Fix typos. * Add more doc. * Remove ncnn as it is not fully tested yet. * Fix as_strided for streaming conformer. 
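Roughly, the consistency check performed by the new `onnx_check.py` looks like the sketch below (hypothetical file names; it assumes the exported encoder graph exposes `x` and `x_lens` as its first two inputs, and the real script also compares the decoder and joiner).

```python
import onnxruntime as ort
import torch

jit_model = torch.jit.load("cpu_jit.pt")  # produced with --jit 1
session = ort.InferenceSession(
    "encoder.onnx", providers=["CPUExecutionProvider"]
)  # produced with --onnx 1

x = torch.rand(1, 100, 80, dtype=torch.float32)
x_lens = torch.tensor([100], dtype=torch.int64)

torch_out, _ = jit_model.encoder(x, x_lens)

graph_inputs = session.get_inputs()
feed = {graph_inputs[0].name: x.numpy(), graph_inputs[1].name: x_lens.numpy()}
onnx_out = session.run(None, feed)[0]

assert torch.allclose(torch_out, torch.from_numpy(onnx_out), atol=1e-4)
print("torchscript and ONNX encoders agree")
```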
--- ...pruned-transducer-stateless3-2022-05-13.sh | 68 +++ ...runed-transducer-stateless3-2022-05-13.yml | 2 +- .../pruned_transducer_stateless2/conformer.py | 89 ++-- .../pruned_transducer_stateless2/decoder.py | 16 +- .../pruned_transducer_stateless2/joiner.py | 8 +- .../pruned_transducer_stateless2/scaling.py | 7 +- .../pruned_transducer_stateless3/export.py | 438 +++++++++++++++++- .../jit_pretrained.py | 338 ++++++++++++++ .../onnx_check.py | 199 ++++++++ .../onnx_pretrained.py | 337 ++++++++++++++ .../pretrained.py | 11 +- .../scaling_converter.py | 189 ++++++++ .../test_scaling_converter.py | 201 ++++++++ .../ASR/pruned_transducer_stateless3/train.py | 15 +- requirements-ci.txt | 3 + requirements.txt | 2 + 16 files changed, 1872 insertions(+), 51 deletions(-) create mode 100755 egs/librispeech/ASR/pruned_transducer_stateless3/jit_pretrained.py create mode 100755 egs/librispeech/ASR/pruned_transducer_stateless3/onnx_check.py create mode 100755 egs/librispeech/ASR/pruned_transducer_stateless3/onnx_pretrained.py create mode 100644 egs/librispeech/ASR/pruned_transducer_stateless3/scaling_converter.py create mode 100644 egs/librispeech/ASR/pruned_transducer_stateless3/test_scaling_converter.py diff --git a/.github/scripts/run-librispeech-pruned-transducer-stateless3-2022-05-13.sh b/.github/scripts/run-librispeech-pruned-transducer-stateless3-2022-05-13.sh index 3617bc369..2deab04b9 100755 --- a/.github/scripts/run-librispeech-pruned-transducer-stateless3-2022-05-13.sh +++ b/.github/scripts/run-librispeech-pruned-transducer-stateless3-2022-05-13.sh @@ -22,8 +22,76 @@ ls -lh $repo/test_wavs/*.wav pushd $repo/exp ln -s pretrained-iter-1224000-avg-14.pt pretrained.pt +ln -s pretrained-iter-1224000-avg-14.pt epoch-99.pt popd +log "Test exporting to ONNX format" + +./pruned_transducer_stateless3/export.py \ + --exp-dir $repo/exp \ + --bpe-model $repo/data/lang_bpe_500/bpe.model \ + --epoch 99 \ + --avg 1 \ + --onnx 1 + +log "Export to torchscript model" +./pruned_transducer_stateless3/export.py \ + --exp-dir $repo/exp \ + --bpe-model $repo/data/lang_bpe_500/bpe.model \ + --epoch 99 \ + --avg 1 \ + --jit 1 + +./pruned_transducer_stateless3/export.py \ + --exp-dir $repo/exp \ + --bpe-model $repo/data/lang_bpe_500/bpe.model \ + --epoch 99 \ + --avg 1 \ + --jit-trace 1 + +ls -lh $repo/exp/*.onnx +ls -lh $repo/exp/*.pt + +log "Decode with ONNX models" + +./pruned_transducer_stateless3/onnx_check.py \ + --jit-filename $repo/exp/cpu_jit.pt \ + --onnx-encoder-filename $repo/exp/encoder.onnx \ + --onnx-decoder-filename $repo/exp/decoder.onnx \ + --onnx-joiner-filename $repo/exp/joiner.onnx + +./pruned_transducer_stateless3/onnx_pretrained.py \ + --bpe-model $repo/data/lang_bpe_500/bpe.model \ + --encoder-model-filename $repo/exp/encoder.onnx \ + --decoder-model-filename $repo/exp/decoder.onnx \ + --joiner-model-filename $repo/exp/joiner.onnx \ + $repo/test_wavs/1089-134686-0001.wav \ + $repo/test_wavs/1221-135766-0001.wav \ + $repo/test_wavs/1221-135766-0002.wav + +log "Decode with models exported by torch.jit.trace()" + +./pruned_transducer_stateless3/jit_pretrained.py \ + --bpe-model $repo/data/lang_bpe_500/bpe.model \ + --encoder-model-filename $repo/exp/encoder_jit_trace.pt \ + --decoder-model-filename $repo/exp/decoder_jit_trace.pt \ + --joiner-model-filename $repo/exp/joiner_jit_trace.pt \ + $repo/test_wavs/1089-134686-0001.wav \ + $repo/test_wavs/1221-135766-0001.wav \ + $repo/test_wavs/1221-135766-0002.wav + +log "Decode with models exported by torch.jit.script()" + 
+./pruned_transducer_stateless3/jit_pretrained.py \ + --bpe-model $repo/data/lang_bpe_500/bpe.model \ + --encoder-model-filename $repo/exp/encoder_jit_script.pt \ + --decoder-model-filename $repo/exp/decoder_jit_script.pt \ + --joiner-model-filename $repo/exp/joiner_jit_script.pt \ + $repo/test_wavs/1089-134686-0001.wav \ + $repo/test_wavs/1221-135766-0001.wav \ + $repo/test_wavs/1221-135766-0002.wav + + for sym in 1 2 3; do log "Greedy search with --max-sym-per-frame $sym" diff --git a/.github/workflows/run-librispeech-pruned-transducer-stateless3-2022-05-13.yml b/.github/workflows/run-librispeech-pruned-transducer-stateless3-2022-05-13.yml index 47976fc2c..3b6e11a31 100644 --- a/.github/workflows/run-librispeech-pruned-transducer-stateless3-2022-05-13.yml +++ b/.github/workflows/run-librispeech-pruned-transducer-stateless3-2022-05-13.yml @@ -35,7 +35,7 @@ on: jobs: run_librispeech_pruned_transducer_stateless3_2022_05_13: - if: github.event.label.name == 'ready' || github.event.label.name == 'run-decode' || github.event_name == 'push' || github.event_name == 'schedule' + if: github.event.label.name == 'onnx' || github.event.label.name == 'ready' || github.event.label.name == 'run-decode' || github.event_name == 'push' || github.event_name == 'schedule' runs-on: ${{ matrix.os }} strategy: matrix: diff --git a/egs/librispeech/ASR/pruned_transducer_stateless2/conformer.py b/egs/librispeech/ASR/pruned_transducer_stateless2/conformer.py index fb8123838..e95360d1d 100644 --- a/egs/librispeech/ASR/pruned_transducer_stateless2/conformer.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless2/conformer.py @@ -155,7 +155,8 @@ class Conformer(EncoderInterface): # Note: rounding_mode in torch.div() is available only in torch >= 1.8.0 lengths = (((x_lens - 1) >> 1) - 1) >> 1 - assert x.size(0) == lengths.max().item() + if not torch.jit.is_tracing(): + assert x.size(0) == lengths.max().item() src_key_padding_mask = make_pad_mask(lengths) @@ -787,6 +788,14 @@ class RelPositionalEncoding(torch.nn.Module): ) -> None: """Construct an PositionalEncoding object.""" super(RelPositionalEncoding, self).__init__() + if torch.jit.is_tracing(): + # 10k frames correspond to ~100k ms, e.g., 100 seconds, i.e., + # It assumes that the maximum input won't have more than + # 10k frames. + # + # TODO(fangjun): Use torch.jit.script() for this module + max_len = 10000 + self.d_model = d_model self.dropout = torch.nn.Dropout(p=dropout_rate) self.pe = None @@ -992,7 +1001,7 @@ class RelPositionMultiheadAttention(nn.Module): """Compute relative positional encoding. Args: - x: Input tensor (batch, head, time1, 2*time1-1). + x: Input tensor (batch, head, time1, 2*time1-1+left_context). time1 means the length of query vector. left_context (int): left context (in frames) used during streaming decoding. 
this is used only in real streaming decoding, in other circumstances, @@ -1006,20 +1015,32 @@ class RelPositionMultiheadAttention(nn.Module): (batch_size, num_heads, time1, n) = x.shape time2 = time1 + left_context - assert ( - n == left_context + 2 * time1 - 1 - ), f"{n} == {left_context} + 2 * {time1} - 1" + if not torch.jit.is_tracing(): + assert ( + n == left_context + 2 * time1 - 1 + ), f"{n} == {left_context} + 2 * {time1} - 1" - # Note: TorchScript requires explicit arg for stride() - batch_stride = x.stride(0) - head_stride = x.stride(1) - time1_stride = x.stride(2) - n_stride = x.stride(3) - return x.as_strided( - (batch_size, num_heads, time1, time2), - (batch_stride, head_stride, time1_stride - n_stride, n_stride), - storage_offset=n_stride * (time1 - 1), - ) + if torch.jit.is_tracing(): + rows = torch.arange(start=time1 - 1, end=-1, step=-1) + cols = torch.arange(time2) + rows = rows.repeat(batch_size * num_heads).unsqueeze(-1) + indexes = rows + cols + + x = x.reshape(-1, n) + x = torch.gather(x, dim=1, index=indexes) + x = x.reshape(batch_size, num_heads, time1, time2) + return x + else: + # Note: TorchScript requires explicit arg for stride() + batch_stride = x.stride(0) + head_stride = x.stride(1) + time1_stride = x.stride(2) + n_stride = x.stride(3) + return x.as_strided( + (batch_size, num_heads, time1, time2), + (batch_stride, head_stride, time1_stride - n_stride, n_stride), + storage_offset=n_stride * (time1 - 1), + ) def multi_head_attention_forward( self, @@ -1090,13 +1111,15 @@ class RelPositionMultiheadAttention(nn.Module): """ tgt_len, bsz, embed_dim = query.size() - assert embed_dim == embed_dim_to_check - assert key.size(0) == value.size(0) and key.size(1) == value.size(1) + if not torch.jit.is_tracing(): + assert embed_dim == embed_dim_to_check + assert key.size(0) == value.size(0) and key.size(1) == value.size(1) head_dim = embed_dim // num_heads - assert ( - head_dim * num_heads == embed_dim - ), "embed_dim must be divisible by num_heads" + if not torch.jit.is_tracing(): + assert ( + head_dim * num_heads == embed_dim + ), "embed_dim must be divisible by num_heads" scaling = float(head_dim) ** -0.5 @@ -1209,7 +1232,7 @@ class RelPositionMultiheadAttention(nn.Module): src_len = k.size(0) - if key_padding_mask is not None: + if key_padding_mask is not None and not torch.jit.is_tracing(): assert key_padding_mask.size(0) == bsz, "{} == {}".format( key_padding_mask.size(0), bsz ) @@ -1220,7 +1243,9 @@ class RelPositionMultiheadAttention(nn.Module): q = q.transpose(0, 1) # (batch, time1, head, d_k) pos_emb_bsz = pos_emb.size(0) - assert pos_emb_bsz in (1, bsz) # actually it is 1 + if not torch.jit.is_tracing(): + assert pos_emb_bsz in (1, bsz) # actually it is 1 + p = self.linear_pos(pos_emb).view(pos_emb_bsz, -1, num_heads, head_dim) # (batch, 2*time1, head, d_k) --> (batch, head, d_k, 2*time -1) p = p.permute(0, 2, 3, 1) @@ -1255,11 +1280,12 @@ class RelPositionMultiheadAttention(nn.Module): bsz * num_heads, tgt_len, -1 ) - assert list(attn_output_weights.size()) == [ - bsz * num_heads, - tgt_len, - src_len, - ] + if not torch.jit.is_tracing(): + assert list(attn_output_weights.size()) == [ + bsz * num_heads, + tgt_len, + src_len, + ] if attn_mask is not None: if attn_mask.dtype == torch.bool: @@ -1318,7 +1344,14 @@ class RelPositionMultiheadAttention(nn.Module): ) attn_output = torch.bmm(attn_output_weights, v) - assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim] + + if not torch.jit.is_tracing(): + assert list(attn_output.size()) == [ + bsz * 
num_heads, + tgt_len, + head_dim, + ] + attn_output = ( attn_output.transpose(0, 1) .contiguous() diff --git a/egs/librispeech/ASR/pruned_transducer_stateless2/decoder.py b/egs/librispeech/ASR/pruned_transducer_stateless2/decoder.py index 1ddfce034..bd0df5d49 100644 --- a/egs/librispeech/ASR/pruned_transducer_stateless2/decoder.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless2/decoder.py @@ -14,6 +14,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +from typing import Union + import torch import torch.nn as nn import torch.nn.functional as F @@ -77,7 +79,9 @@ class Decoder(nn.Module): # It is to support torch script self.conv = nn.Identity() - def forward(self, y: torch.Tensor, need_pad: bool = True) -> torch.Tensor: + def forward( + self, y: torch.Tensor, need_pad: Union[bool, torch.Tensor] = True + ) -> torch.Tensor: """ Args: y: @@ -88,18 +92,24 @@ class Decoder(nn.Module): Returns: Return a tensor of shape (N, U, decoder_dim). """ + if isinstance(need_pad, torch.Tensor): + # This is for torch.jit.trace(), which cannot handle the case + # when the input argument is not a tensor. + need_pad = bool(need_pad) + y = y.to(torch.int64) embedding_out = self.embedding(y) if self.context_size > 1: embedding_out = embedding_out.permute(0, 2, 1) - if need_pad is True: + if need_pad: embedding_out = F.pad( embedding_out, pad=(self.context_size - 1, 0) ) else: # During inference time, there is no need to do extra padding # as we only need one output - assert embedding_out.size(-1) == self.context_size + if not torch.jit.is_tracing(): + assert embedding_out.size(-1) == self.context_size embedding_out = self.conv(embedding_out) embedding_out = embedding_out.permute(0, 2, 1) embedding_out = F.relu(embedding_out) diff --git a/egs/librispeech/ASR/pruned_transducer_stateless2/joiner.py b/egs/librispeech/ASR/pruned_transducer_stateless2/joiner.py index b916addf0..b2d6ed0f2 100644 --- a/egs/librispeech/ASR/pruned_transducer_stateless2/joiner.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless2/joiner.py @@ -52,10 +52,10 @@ class Joiner(nn.Module): Returns: Return a tensor of shape (N, T, s_range, C). 
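The `need_pad` change above exists because `torch.jit.trace()` only accepts tensors as example inputs, so the boolean flag is wrapped in a tensor and converted back inside `forward()`. A toy sketch of the same pattern (not the real `Decoder`); note that the Python branch on the flag is baked in at trace time, which is fine here since the exported decoder always uses `need_pad=False`.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F


class ToyDecoder(nn.Module):
    def forward(self, y: torch.Tensor, need_pad: torch.Tensor) -> torch.Tensor:
        if isinstance(need_pad, torch.Tensor):
            need_pad = bool(need_pad)  # unwrap the traced tensor flag
        if need_pad:
            y = F.pad(y, (1, 0))
        return y.float()


decoder = ToyDecoder()
y = torch.zeros(10, 2, dtype=torch.int64)
# torch.jit.trace(decoder, (y, False)) would fail: example inputs must be tensors.
traced = torch.jit.trace(decoder, (y, torch.tensor([False])))
print(traced(y, torch.tensor([False])).shape)  # torch.Size([10, 2])
```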
""" - - assert encoder_out.ndim == decoder_out.ndim - assert encoder_out.ndim in (2, 4) - assert encoder_out.shape == decoder_out.shape + if not torch.jit.is_tracing(): + assert encoder_out.ndim == decoder_out.ndim + assert encoder_out.ndim in (2, 4) + assert encoder_out.shape == decoder_out.shape if project_input: logit = self.encoder_proj(encoder_out) + self.decoder_proj( diff --git a/egs/librispeech/ASR/pruned_transducer_stateless2/scaling.py b/egs/librispeech/ASR/pruned_transducer_stateless2/scaling.py index 26a8cca44..2b44dc649 100644 --- a/egs/librispeech/ASR/pruned_transducer_stateless2/scaling.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless2/scaling.py @@ -152,7 +152,8 @@ class BasicNorm(torch.nn.Module): self.register_buffer("eps", torch.tensor(eps).log().detach()) def forward(self, x: Tensor) -> Tensor: - assert x.shape[self.channel_dim] == self.num_channels + if not torch.jit.is_tracing(): + assert x.shape[self.channel_dim] == self.num_channels scales = ( torch.mean(x ** 2, dim=self.channel_dim, keepdim=True) + self.eps.exp() @@ -423,7 +424,7 @@ class ActivationBalancer(torch.nn.Module): self.max_abs = max_abs def forward(self, x: Tensor) -> Tensor: - if torch.jit.is_scripting(): + if torch.jit.is_scripting() or torch.jit.is_tracing(): return x else: return ActivationBalancerFunction.apply( @@ -472,7 +473,7 @@ class DoubleSwish(torch.nn.Module): """Return double-swish activation function which is an approximation to Swish(Swish(x)), that we approximate closely with x * sigmoid(x-1). """ - if torch.jit.is_scripting(): + if torch.jit.is_scripting() or torch.jit.is_tracing(): return x * torch.sigmoid(x - 1.0) else: return DoubleSwishFunction.apply(x) diff --git a/egs/librispeech/ASR/pruned_transducer_stateless3/export.py b/egs/librispeech/ASR/pruned_transducer_stateless3/export.py index 53ea306ff..1485c6d6a 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless3/export.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless3/export.py @@ -19,14 +19,67 @@ # This script converts several saved checkpoints # to a single one using model averaging. """ + Usage: + +(1) Export to torchscript model using torch.jit.script() + +./pruned_transducer_stateless3/export.py \ + --exp-dir ./pruned_transducer_stateless3/exp \ + --bpe-model data/lang_bpe_500/bpe.model \ + --epoch 20 \ + --avg 10 \ + --jit 1 + +It will generate a file `cpu_jit.pt` in the given `exp_dir`. You can later +load it by `torch.jit.load("cpu_jit.pt")`. + +Note `cpu` in the name `cpu_jit.pt` means the parameters when loaded into Python +are on CPU. You can use `to("cuda")` to move them to a CUDA device. + +It will also generate 3 other files: `encoder_jit_script.pt`, +`decoder_jit_script.pt`, and `joiner_jit_script.pt`. + +(2) Export to torchscript model using torch.jit.trace() + +./pruned_transducer_stateless3/export.py \ + --exp-dir ./pruned_transducer_stateless3/exp \ + --bpe-model data/lang_bpe_500/bpe.model \ + --epoch 20 \ + --avg 10 \ + --jit-trace 1 + +It will generates 3 files: `encoder_jit_trace.pt`, +`decoder_jit_trace.pt`, and `joiner_jit_trace.pt`. + + +(3) Export to ONNX format + +./pruned_transducer_stateless3/export.py \ + --exp-dir ./pruned_transducer_stateless3/exp \ + --bpe-model data/lang_bpe_500/bpe.model \ + --epoch 20 \ + --avg 10 \ + --onnx 1 + +It will generate the following three files in the given `exp_dir`. +Check `onnx_check.py` for how to use them. 
+ + - encoder.onnx + - decoder.onnx + - joiner.onnx + + +(4) Export `model.state_dict()` + ./pruned_transducer_stateless3/export.py \ --exp-dir ./pruned_transducer_stateless3/exp \ --bpe-model data/lang_bpe_500/bpe.model \ --epoch 20 \ --avg 10 -It will generate a file exp_dir/pretrained.pt +It will generate a file `pretrained.pt` in the given `exp_dir`. You can later +load it by `icefall.checkpoint.load_checkpoint()`. To use the generated file with `pruned_transducer_stateless3/decode.py`, you can do: @@ -42,6 +95,20 @@ you can do: --max-duration 600 \ --decoding-method greedy_search \ --bpe-model data/lang_bpe_500/bpe.model + +Check ./pretrained.py for its usage. + +Note: If you don't want to train a model from scratch, we have +provided one for you. You can get it at + +https://huggingface.co/csukuangfj/icefall-asr-librispeech-pruned-transducer-stateless3-2022-05-13 + +with the following commands: + + sudo apt-get install git-lfs + git lfs install + git clone https://huggingface.co/csukuangfj/icefall-asr-librispeech-pruned-transducer-stateless3-2022-05-13 + # You will find the pre-trained model in icefall-asr-librispeech-pruned-transducer-stateless3-2022-05-13/exp """ import argparse @@ -50,6 +117,8 @@ from pathlib import Path import sentencepiece as spm import torch +import torch.nn as nn +from scaling_converter import convert_scaled_to_non_scaled from train import add_model_arguments, get_params, get_transducer_model from icefall.checkpoint import ( @@ -114,6 +183,42 @@ def get_parser(): type=str2bool, default=False, help="""True to save a model after applying torch.jit.script. + It will generate 4 files: + - encoder_jit_script.pt + - decoder_jit_script.pt + - joiner_jit_script.pt + - cpu_jit.pt (which combines the above 3 files) + + Check ./jit_pretrained.py for how to use them. + """, + ) + + parser.add_argument( + "--jit-trace", + type=str2bool, + default=False, + help="""True to save a model after applying torch.jit.trace. + It will generate 3 files: + - encoder_jit_trace.pt + - decoder_jit_trace.pt + - joiner_jit_trace.pt + + Check ./jit_pretrained.py for how to use them. + """, + ) + + parser.add_argument( + "--onnx", + type=str2bool, + default=False, + help="""If True, --jit is ignored and it exports the model + to onnx format. Three files will be generated: + + - encoder.onnx + - decoder.onnx + - joiner.onnx + + Check ./onnx_check.py and ./onnx_pretrained.py for how to use them. """, ) @@ -139,6 +244,275 @@ def get_parser(): return parser +def export_encoder_model_jit_script( + encoder_model: nn.Module, + encoder_filename: str, +) -> None: + """Export the given encoder model with torch.jit.script() + + Args: + encoder_model: + The input encoder model + encoder_filename: + The filename to save the exported model. + """ + script_model = torch.jit.script(encoder_model) + script_model.save(encoder_filename) + logging.info(f"Saved to {encoder_filename}") + + +def export_decoder_model_jit_script( + decoder_model: nn.Module, + decoder_filename: str, +) -> None: + """Export the given decoder model with torch.jit.script() + + Args: + decoder_model: + The input decoder model + decoder_filename: + The filename to save the exported model. 
+ """ + script_model = torch.jit.script(decoder_model) + script_model.save(decoder_filename) + logging.info(f"Saved to {decoder_filename}") + + +def export_joiner_model_jit_script( + joiner_model: nn.Module, + joiner_filename: str, +) -> None: + """Export the given joiner model with torch.jit.trace() + + Args: + joiner_model: + The input joiner model + joiner_filename: + The filename to save the exported model. + """ + script_model = torch.jit.script(joiner_model) + script_model.save(joiner_filename) + logging.info(f"Saved to {joiner_filename}") + + +def export_encoder_model_jit_trace( + encoder_model: nn.Module, + encoder_filename: str, +) -> None: + """Export the given encoder model with torch.jit.trace() + + Note: The warmup argument is fixed to 1. + + Args: + encoder_model: + The input encoder model + encoder_filename: + The filename to save the exported model. + """ + x = torch.zeros(1, 100, 80, dtype=torch.float32) + x_lens = torch.tensor([100], dtype=torch.int64) + + traced_model = torch.jit.trace(encoder_model, (x, x_lens)) + traced_model.save(encoder_filename) + logging.info(f"Saved to {encoder_filename}") + + +def export_decoder_model_jit_trace( + decoder_model: nn.Module, + decoder_filename: str, +) -> None: + """Export the given decoder model with torch.jit.trace() + + Note: The argument need_pad is fixed to False. + + Args: + decoder_model: + The input decoder model + decoder_filename: + The filename to save the exported model. + """ + y = torch.zeros(10, decoder_model.context_size, dtype=torch.int64) + need_pad = torch.tensor([False]) + + traced_model = torch.jit.trace(decoder_model, (y, need_pad)) + traced_model.save(decoder_filename) + logging.info(f"Saved to {decoder_filename}") + + +def export_joiner_model_jit_trace( + joiner_model: nn.Module, + joiner_filename: str, +) -> None: + """Export the given joiner model with torch.jit.trace() + + Note: The argument project_input is fixed to True. A user should not + project the encoder_out/decoder_out by himself/herself. The exported joiner + will do that for the user. + + Args: + joiner_model: + The input joiner model + joiner_filename: + The filename to save the exported model. + + """ + encoder_out_dim = joiner_model.encoder_proj.weight.shape[1] + decoder_out_dim = joiner_model.decoder_proj.weight.shape[1] + encoder_out = torch.rand(1, encoder_out_dim, dtype=torch.float32) + decoder_out = torch.rand(1, decoder_out_dim, dtype=torch.float32) + + traced_model = torch.jit.trace(joiner_model, (encoder_out, decoder_out)) + traced_model.save(joiner_filename) + logging.info(f"Saved to {joiner_filename}") + + +def export_encoder_model_onnx( + encoder_model: nn.Module, + encoder_filename: str, + opset_version: int = 11, +) -> None: + """Export the given encoder model to ONNX format. + The exported model has two inputs: + + - x, a tensor of shape (N, T, C); dtype is torch.float32 + - x_lens, a tensor of shape (N,); dtype is torch.int64 + + and it has two outputs: + + - encoder_out, a tensor of shape (N, T, C) + - encoder_out_lens, a tensor of shape (N,) + + Note: The warmup argument is fixed to 1. + + Args: + encoder_model: + The input encoder model + encoder_filename: + The filename to save the exported ONNX model. + opset_version: + The opset version to use. 
+ """ + x = torch.zeros(1, 100, 80, dtype=torch.float32) + x_lens = torch.tensor([100], dtype=torch.int64) + + # encoder_model = torch.jit.script(encoder_model) + # It throws the following error for the above statement + # + # RuntimeError: Exporting the operator __is_ to ONNX opset version + # 11 is not supported. Please feel free to request support or + # submit a pull request on PyTorch GitHub. + # + # I cannot find which statement causes the above error. + # torch.onnx.export() will use torch.jit.trace() internally, which + # works well for the current reworked model + warmup = 1.0 + torch.onnx.export( + encoder_model, + (x, x_lens, warmup), + encoder_filename, + verbose=False, + opset_version=opset_version, + input_names=["x", "x_lens", "warmup"], + output_names=["encoder_out", "encoder_out_lens"], + dynamic_axes={ + "x": {0: "N", 1: "T"}, + "x_lens": {0: "N"}, + "encoder_out": {0: "N", 1: "T"}, + "encoder_out_lens": {0: "N"}, + }, + ) + logging.info(f"Saved to {encoder_filename}") + + +def export_decoder_model_onnx( + decoder_model: nn.Module, + decoder_filename: str, + opset_version: int = 11, +) -> None: + """Export the decoder model to ONNX format. + + The exported model has one input: + + - y: a torch.int64 tensor of shape (N, decoder_model.context_size) + + and has one output: + + - decoder_out: a torch.float32 tensor of shape (N, 1, C) + + Note: The argument need_pad is fixed to False. + + Args: + decoder_model: + The decoder model to be exported. + decoder_filename: + Filename to save the exported ONNX model. + opset_version: + The opset version to use. + """ + y = torch.zeros(10, decoder_model.context_size, dtype=torch.int64) + need_pad = False # Always False, so we can use torch.jit.trace() here + # Note(fangjun): torch.jit.trace() is more efficient than torch.jit.script() + # in this case + torch.onnx.export( + decoder_model, + (y, need_pad), + decoder_filename, + verbose=False, + opset_version=opset_version, + input_names=["y", "need_pad"], + output_names=["decoder_out"], + dynamic_axes={ + "y": {0: "N"}, + "decoder_out": {0: "N"}, + }, + ) + logging.info(f"Saved to {decoder_filename}") + + +def export_joiner_model_onnx( + joiner_model: nn.Module, + joiner_filename: str, + opset_version: int = 11, +) -> None: + """Export the joiner model to ONNX format. + The exported model has two inputs: + + - encoder_out: a tensor of shape (N, encoder_out_dim) + - decoder_out: a tensor of shape (N, decoder_out_dim) + + and has one output: + + - joiner_out: a tensor of shape (N, vocab_size) + + Note: The argument project_input is fixed to True. A user should not + project the encoder_out/decoder_out by himself/herself. The exported joiner + will do that for the user. 
+ """ + encoder_out_dim = joiner_model.encoder_proj.weight.shape[1] + decoder_out_dim = joiner_model.decoder_proj.weight.shape[1] + encoder_out = torch.rand(1, encoder_out_dim, dtype=torch.float32) + decoder_out = torch.rand(1, decoder_out_dim, dtype=torch.float32) + + project_input = True + # Note: It uses torch.jit.trace() internally + torch.onnx.export( + joiner_model, + (encoder_out, decoder_out, project_input), + joiner_filename, + verbose=False, + opset_version=opset_version, + input_names=["encoder_out", "decoder_out", "project_input"], + output_names=["logit"], + dynamic_axes={ + "encoder_out": {0: "N"}, + "decoder_out": {0: "N"}, + "logit": {0: "N"}, + }, + ) + logging.info(f"Saved to {joiner_filename}") + + +@torch.no_grad() def main(): args = get_parser().parse_args() args.exp_dir = Path(args.exp_dir) @@ -165,7 +539,7 @@ def main(): logging.info(params) logging.info("About to create model") - model = get_transducer_model(params) + model = get_transducer_model(params, enable_giga=False) model.to(device) @@ -185,7 +559,9 @@ def main(): ) logging.info(f"averaging {filenames}") model.to(device) - model.load_state_dict(average_checkpoints(filenames, device=device)) + model.load_state_dict( + average_checkpoints(filenames, device=device), strict=False + ) elif params.avg == 1: load_checkpoint(f"{params.exp_dir}/epoch-{params.epoch}.pt", model) else: @@ -196,14 +572,39 @@ def main(): filenames.append(f"{params.exp_dir}/epoch-{i}.pt") logging.info(f"averaging {filenames}") model.to(device) - model.load_state_dict(average_checkpoints(filenames, device=device)) - - model.eval() + model.load_state_dict( + average_checkpoints(filenames, device=device), strict=False + ) model.to("cpu") model.eval() + convert_scaled_to_non_scaled(model, inplace=True) - if params.jit: + if params.onnx is True: + opset_version = 11 + logging.info("Exporting to onnx format") + encoder_filename = params.exp_dir / "encoder.onnx" + export_encoder_model_onnx( + model.encoder, + encoder_filename, + opset_version=opset_version, + ) + + decoder_filename = params.exp_dir / "decoder.onnx" + export_decoder_model_onnx( + model.decoder, + decoder_filename, + opset_version=opset_version, + ) + + joiner_filename = params.exp_dir / "joiner.onnx" + export_joiner_model_onnx( + model.joiner, + joiner_filename, + opset_version=opset_version, + ) + elif params.jit is True: + logging.info("Using torch.jit.script()") # We won't use the forward() method of the model in C++, so just ignore # it here. 
# Otherwise, one of its arguments is a ragged tensor and is not @@ -214,8 +615,29 @@ def main(): filename = params.exp_dir / "cpu_jit.pt" model.save(str(filename)) logging.info(f"Saved to {filename}") + + # Also export encoder/decoder/joiner separately + encoder_filename = params.exp_dir / "encoder_jit_script.pt" + export_encoder_model_jit_trace(model.encoder, encoder_filename) + + decoder_filename = params.exp_dir / "decoder_jit_script.pt" + export_decoder_model_jit_trace(model.decoder, decoder_filename) + + joiner_filename = params.exp_dir / "joiner_jit_script.pt" + export_joiner_model_jit_trace(model.joiner, joiner_filename) + + elif params.jit_trace is True: + logging.info("Using torch.jit.trace()") + encoder_filename = params.exp_dir / "encoder_jit_trace.pt" + export_encoder_model_jit_trace(model.encoder, encoder_filename) + + decoder_filename = params.exp_dir / "decoder_jit_trace.pt" + export_decoder_model_jit_trace(model.decoder, decoder_filename) + + joiner_filename = params.exp_dir / "joiner_jit_trace.pt" + export_joiner_model_jit_trace(model.joiner, joiner_filename) else: - logging.info("Not using torch.jit.script") + logging.info("Not using torchscript") # Save it using a format so that it can be loaded # by :func:`load_checkpoint` filename = params.exp_dir / "pretrained.pt" diff --git a/egs/librispeech/ASR/pruned_transducer_stateless3/jit_pretrained.py b/egs/librispeech/ASR/pruned_transducer_stateless3/jit_pretrained.py new file mode 100755 index 000000000..162f8c7db --- /dev/null +++ b/egs/librispeech/ASR/pruned_transducer_stateless3/jit_pretrained.py @@ -0,0 +1,338 @@ +#!/usr/bin/env python3 +# Copyright 2022 Xiaomi Corp. (authors: Fangjun Kuang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +This script loads torchscript models, either exported by `torch.jit.trace()` +or by `torch.jit.script()`, and uses them to decode waves. 
+You can use the following command to get the exported models: + +./pruned_transducer_stateless3/export.py \ + --exp-dir ./pruned_transducer_stateless3/exp \ + --bpe-model data/lang_bpe_500/bpe.model \ + --epoch 20 \ + --avg 10 \ + --jit-trace 1 + +or + +./pruned_transducer_stateless3/export.py \ + --exp-dir ./pruned_transducer_stateless3/exp \ + --bpe-model data/lang_bpe_500/bpe.model \ + --epoch 20 \ + --avg 10 \ + --jit 1 + +Usage of this script: + +./pruned_transducer_stateless3/jit_pretrained.py \ + --encoder-model-filename ./pruned_transducer_stateless3/exp/encoder_jit_trace.pt \ + --decoder-model-filename ./pruned_transducer_stateless3/exp/decoder_jit_trace.pt \ + --joiner-model-filename ./pruned_transducer_stateless3/exp/joiner_jit_trace.pt \ + --bpe-model ./data/lang_bpe_500/bpe.model \ + /path/to/foo.wav \ + /path/to/bar.wav + +or + +./pruned_transducer_stateless3/jit_pretrained.py \ + --encoder-model-filename ./pruned_transducer_stateless3/exp/encoder_jit_script.pt \ + --decoder-model-filename ./pruned_transducer_stateless3/exp/decoder_jit_script.pt \ + --joiner-model-filename ./pruned_transducer_stateless3/exp/joiner_jit_script.pt \ + --bpe-model ./data/lang_bpe_500/bpe.model \ + /path/to/foo.wav \ + /path/to/bar.wav +""" + +import argparse +import logging +import math +from typing import List + +import kaldifeat +import sentencepiece as spm +import torch +import torchaudio +from torch.nn.utils.rnn import pad_sequence + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--encoder-model-filename", + type=str, + required=True, + help="Path to the encoder torchscript model. ", + ) + + parser.add_argument( + "--decoder-model-filename", + type=str, + required=True, + help="Path to the decoder torchscript model. ", + ) + + parser.add_argument( + "--joiner-model-filename", + type=str, + required=True, + help="Path to the joiner torchscript model. ", + ) + + parser.add_argument( + "--bpe-model", + type=str, + help="""Path to bpe.model.""", + ) + + parser.add_argument( + "sound_files", + type=str, + nargs="+", + help="The input sound file(s) to transcribe. " + "Supported formats are those supported by torchaudio.load(). " + "For example, wav and flac are supported. " + "The sample rate has to be 16kHz.", + ) + + parser.add_argument( + "--sample-rate", + type=int, + default=16000, + help="The sample rate of the input sound file", + ) + + parser.add_argument( + "--context-size", + type=int, + default=2, + help="Context size of the decoder model", + ) + + return parser + + +def read_sound_files( + filenames: List[str], expected_sample_rate: float +) -> List[torch.Tensor]: + """Read a list of sound files into a list 1-D float32 torch tensors. + Args: + filenames: + A list of sound filenames. + expected_sample_rate: + The expected sample rate of the sound files. + Returns: + Return a list of 1-D float32 torch tensors. + """ + ans = [] + for f in filenames: + wave, sample_rate = torchaudio.load(f) + assert sample_rate == expected_sample_rate, ( + f"expected sample rate: {expected_sample_rate}. " + f"Given: {sample_rate}" + ) + # We use only the first channel + ans.append(wave[0]) + return ans + + +def greedy_search( + decoder: torch.jit.ScriptModule, + joiner: torch.jit.ScriptModule, + encoder_out: torch.Tensor, + encoder_out_lens: torch.Tensor, + context_size: int, +) -> List[List[int]]: + """Greedy search in batch mode. It hardcodes --max-sym-per-frame=1. + Args: + decoder: + The decoder model. 
+ joiner: + The joiner model. + encoder_out: + A 3-D tensor of shape (N, T, C) + encoder_out_lens: + A 1-D tensor of shape (N,). + context_size: + The context size of the decoder model. + Returns: + Return the decoded results for each utterance. + """ + assert encoder_out.ndim == 3 + assert encoder_out.size(0) >= 1, encoder_out.size(0) + + packed_encoder_out = torch.nn.utils.rnn.pack_padded_sequence( + input=encoder_out, + lengths=encoder_out_lens.cpu(), + batch_first=True, + enforce_sorted=False, + ) + + device = encoder_out.device + blank_id = 0 # hard-code to 0 + + batch_size_list = packed_encoder_out.batch_sizes.tolist() + N = encoder_out.size(0) + + assert torch.all(encoder_out_lens > 0), encoder_out_lens + assert N == batch_size_list[0], (N, batch_size_list) + + hyps = [[blank_id] * context_size for _ in range(N)] + + decoder_input = torch.tensor( + hyps, + device=device, + dtype=torch.int64, + ) # (N, context_size) + + decoder_out = decoder( + decoder_input, + need_pad=torch.tensor([False]), + ).squeeze(1) + + offset = 0 + for batch_size in batch_size_list: + start = offset + end = offset + batch_size + current_encoder_out = packed_encoder_out.data[start:end] + current_encoder_out = current_encoder_out + # current_encoder_out's shape: (batch_size, encoder_out_dim) + offset = end + + decoder_out = decoder_out[:batch_size] + + logits = joiner( + current_encoder_out, + decoder_out, + ) + # logits'shape (batch_size, vocab_size) + + assert logits.ndim == 2, logits.shape + y = logits.argmax(dim=1).tolist() + emitted = False + for i, v in enumerate(y): + if v != blank_id: + hyps[i].append(v) + emitted = True + if emitted: + # update decoder output + decoder_input = [h[-context_size:] for h in hyps[:batch_size]] + decoder_input = torch.tensor( + decoder_input, + device=device, + dtype=torch.int64, + ) + decoder_out = decoder( + decoder_input, + need_pad=torch.tensor([False]), + ) + decoder_out = decoder_out.squeeze(1) + + sorted_ans = [h[context_size:] for h in hyps] + ans = [] + unsorted_indices = packed_encoder_out.unsorted_indices.tolist() + for i in range(N): + ans.append(sorted_ans[unsorted_indices[i]]) + + return ans + + +@torch.no_grad() +def main(): + parser = get_parser() + args = parser.parse_args() + logging.info(vars(args)) + + device = torch.device("cpu") + if torch.cuda.is_available(): + device = torch.device("cuda", 0) + + logging.info(f"device: {device}") + + encoder = torch.jit.load(args.encoder_model_filename) + decoder = torch.jit.load(args.decoder_model_filename) + joiner = torch.jit.load(args.joiner_model_filename) + + encoder.eval() + decoder.eval() + joiner.eval() + + encoder.to(device) + decoder.to(device) + joiner.to(device) + + sp = spm.SentencePieceProcessor() + sp.load(args.bpe_model) + + logging.info("Constructing Fbank computer") + opts = kaldifeat.FbankOptions() + opts.device = device + opts.frame_opts.dither = 0 + opts.frame_opts.snip_edges = False + opts.frame_opts.samp_freq = args.sample_rate + opts.mel_opts.num_bins = 80 + + fbank = kaldifeat.Fbank(opts) + + logging.info(f"Reading sound files: {args.sound_files}") + waves = read_sound_files( + filenames=args.sound_files, + expected_sample_rate=args.sample_rate, + ) + waves = [w.to(device) for w in waves] + + logging.info("Decoding started") + features = fbank(waves) + feature_lengths = [f.size(0) for f in features] + + features = pad_sequence( + features, + batch_first=True, + padding_value=math.log(1e-10), + ) + + feature_lengths = torch.tensor(feature_lengths, device=device) + + encoder_out, 
encoder_out_lens = encoder( + x=features, + x_lens=feature_lengths, + ) + + hyps = greedy_search( + decoder=decoder, + joiner=joiner, + encoder_out=encoder_out, + encoder_out_lens=encoder_out_lens, + context_size=args.context_size, + ) + s = "\n" + for filename, hyp in zip(args.sound_files, hyps): + words = sp.decode(hyp) + s += f"{filename}:\n{words}\n\n" + logging.info(s) + + logging.info("Decoding Done") + + +if __name__ == "__main__": + formatter = ( + "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s" + ) + + logging.basicConfig(format=formatter, level=logging.INFO) + main() diff --git a/egs/librispeech/ASR/pruned_transducer_stateless3/onnx_check.py b/egs/librispeech/ASR/pruned_transducer_stateless3/onnx_check.py new file mode 100755 index 000000000..3da31b7ce --- /dev/null +++ b/egs/librispeech/ASR/pruned_transducer_stateless3/onnx_check.py @@ -0,0 +1,199 @@ +#!/usr/bin/env python3 +# +# Copyright 2022 Xiaomi Corporation (Author: Fangjun Kuang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +This script checks that exported onnx models produce the same output +with the given torchscript model for the same input. +""" + +import argparse +import logging + +import onnxruntime as ort +import torch + +ort.set_default_logger_severity(3) + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--jit-filename", + required=True, + type=str, + help="Path to the torchscript model", + ) + + parser.add_argument( + "--onnx-encoder-filename", + required=True, + type=str, + help="Path to the onnx encoder model", + ) + + parser.add_argument( + "--onnx-decoder-filename", + required=True, + type=str, + help="Path to the onnx decoder model", + ) + + parser.add_argument( + "--onnx-joiner-filename", + required=True, + type=str, + help="Path to the onnx joiner model", + ) + + return parser + + +def test_encoder( + model: torch.jit.ScriptModule, + encoder_session: ort.InferenceSession, +): + encoder_inputs = encoder_session.get_inputs() + assert encoder_inputs[0].name == "x" + assert encoder_inputs[1].name == "x_lens" + assert encoder_inputs[0].shape == ["N", "T", 80] + assert encoder_inputs[1].shape == ["N"] + + for N in [1, 5]: + for T in [12, 25]: + print("N, T", N, T) + x = torch.rand(N, T, 80, dtype=torch.float32) + x_lens = torch.randint(low=10, high=T + 1, size=(N,)) + x_lens[0] = T + + encoder_inputs = { + "x": x.numpy(), + "x_lens": x_lens.numpy(), + } + encoder_out, encoder_out_lens = encoder_session.run( + ["encoder_out", "encoder_out_lens"], + encoder_inputs, + ) + + torch_encoder_out, torch_encoder_out_lens = model.encoder(x, x_lens) + + encoder_out = torch.from_numpy(encoder_out) + assert torch.allclose(encoder_out, torch_encoder_out, atol=1e-05), ( + (encoder_out - torch_encoder_out).abs().max() + ) + + +def test_decoder( + model: torch.jit.ScriptModule, + decoder_session: ort.InferenceSession, +): + 
decoder_inputs = decoder_session.get_inputs() + assert decoder_inputs[0].name == "y" + assert decoder_inputs[0].shape == ["N", 2] + for N in [1, 5, 10]: + y = torch.randint(low=1, high=500, size=(10, 2)) + + decoder_inputs = {"y": y.numpy()} + decoder_out = decoder_session.run( + ["decoder_out"], + decoder_inputs, + )[0] + decoder_out = torch.from_numpy(decoder_out) + + torch_decoder_out = model.decoder(y, need_pad=False) + assert torch.allclose(decoder_out, torch_decoder_out, atol=1e-5), ( + (decoder_out - torch_decoder_out).abs().max() + ) + + +def test_joiner( + model: torch.jit.ScriptModule, + joiner_session: ort.InferenceSession, +): + joiner_inputs = joiner_session.get_inputs() + assert joiner_inputs[0].name == "encoder_out" + assert joiner_inputs[0].shape == ["N", 512] + + assert joiner_inputs[1].name == "decoder_out" + assert joiner_inputs[1].shape == ["N", 512] + + for N in [1, 5, 10]: + encoder_out = torch.rand(N, 512) + decoder_out = torch.rand(N, 512) + + joiner_inputs = { + "encoder_out": encoder_out.numpy(), + "decoder_out": decoder_out.numpy(), + } + joiner_out = joiner_session.run(["logit"], joiner_inputs)[0] + joiner_out = torch.from_numpy(joiner_out) + + torch_joiner_out = model.joiner( + encoder_out, + decoder_out, + project_input=True, + ) + assert torch.allclose(joiner_out, torch_joiner_out, atol=1e-5), ( + (joiner_out - torch_joiner_out).abs().max() + ) + + +@torch.no_grad() +def main(): + args = get_parser().parse_args() + logging.info(vars(args)) + + model = torch.jit.load(args.jit_filename) + + options = ort.SessionOptions() + options.inter_op_num_threads = 1 + options.intra_op_num_threads = 1 + + logging.info("Test encoder") + encoder_session = ort.InferenceSession( + args.onnx_encoder_filename, + sess_options=options, + ) + test_encoder(model, encoder_session) + + logging.info("Test decoder") + decoder_session = ort.InferenceSession( + args.onnx_decoder_filename, + sess_options=options, + ) + test_decoder(model, decoder_session) + + logging.info("Test joiner") + joiner_session = ort.InferenceSession( + args.onnx_joiner_filename, + sess_options=options, + ) + test_joiner(model, joiner_session) + logging.info("Finished checking ONNX models") + + +if __name__ == "__main__": + torch.manual_seed(20220727) + formatter = ( + "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s" + ) + + logging.basicConfig(format=formatter, level=logging.INFO) + main() diff --git a/egs/librispeech/ASR/pruned_transducer_stateless3/onnx_pretrained.py b/egs/librispeech/ASR/pruned_transducer_stateless3/onnx_pretrained.py new file mode 100755 index 000000000..ebfae9d5f --- /dev/null +++ b/egs/librispeech/ASR/pruned_transducer_stateless3/onnx_pretrained.py @@ -0,0 +1,337 @@ +#!/usr/bin/env python3 +# Copyright 2022 Xiaomi Corp. (authors: Fangjun Kuang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +This script loads ONNX models and uses them to decode waves. 
+You can use the following command to get the exported models: + +./pruned_transducer_stateless3/export.py \ + --exp-dir ./pruned_transducer_stateless3/exp \ + --bpe-model data/lang_bpe_500/bpe.model \ + --epoch 20 \ + --avg 10 \ + --onnx 1 + +Usage of this script: + +./pruned_transducer_stateless3/jit_trace_pretrained.py \ + --encoder-model-filename ./pruned_transducer_stateless3/exp/encoder.onnx \ + --decoder-model-filename ./pruned_transducer_stateless3/exp/decoder.onnx \ + --joiner-model-filename ./pruned_transducer_stateless3/exp/joiner.onnx \ + --bpe-model ./data/lang_bpe_500/bpe.model \ + /path/to/foo.wav \ + /path/to/bar.wav +""" + +import argparse +import logging +import math +from typing import List + +import kaldifeat +import numpy as np +import onnxruntime as ort +import sentencepiece as spm +import torch +import torchaudio +from torch.nn.utils.rnn import pad_sequence + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--encoder-model-filename", + type=str, + required=True, + help="Path to the encoder torchscript model. ", + ) + + parser.add_argument( + "--decoder-model-filename", + type=str, + required=True, + help="Path to the decoder torchscript model. ", + ) + + parser.add_argument( + "--joiner-model-filename", + type=str, + required=True, + help="Path to the joiner torchscript model. ", + ) + + parser.add_argument( + "--bpe-model", + type=str, + help="""Path to bpe.model.""", + ) + + parser.add_argument( + "sound_files", + type=str, + nargs="+", + help="The input sound file(s) to transcribe. " + "Supported formats are those supported by torchaudio.load(). " + "For example, wav and flac are supported. " + "The sample rate has to be 16kHz.", + ) + + parser.add_argument( + "--sample-rate", + type=int, + default=16000, + help="The sample rate of the input sound file", + ) + + parser.add_argument( + "--context-size", + type=int, + default=2, + help="Context size of the decoder model", + ) + + return parser + + +def read_sound_files( + filenames: List[str], expected_sample_rate: float +) -> List[torch.Tensor]: + """Read a list of sound files into a list 1-D float32 torch tensors. + Args: + filenames: + A list of sound filenames. + expected_sample_rate: + The expected sample rate of the sound files. + Returns: + Return a list of 1-D float32 torch tensors. + """ + ans = [] + for f in filenames: + wave, sample_rate = torchaudio.load(f) + assert sample_rate == expected_sample_rate, ( + f"expected sample rate: {expected_sample_rate}. " + f"Given: {sample_rate}" + ) + # We use only the first channel + ans.append(wave[0]) + return ans + + +def greedy_search( + decoder: ort.InferenceSession, + joiner: ort.InferenceSession, + encoder_out: np.ndarray, + encoder_out_lens: np.ndarray, + context_size: int, +) -> List[List[int]]: + """Greedy search in batch mode. It hardcodes --max-sym-per-frame=1. + Args: + decoder: + The decoder model. + joiner: + The joiner model. + encoder_out: + A 3-D tensor of shape (N, T, C) + encoder_out_lens: + A 1-D tensor of shape (N,). + context_size: + The context size of the decoder model. + Returns: + Return the decoded results for each utterance. 
+ """ + encoder_out = torch.from_numpy(encoder_out) + encoder_out_lens = torch.from_numpy(encoder_out_lens) + assert encoder_out.ndim == 3 + assert encoder_out.size(0) >= 1, encoder_out.size(0) + + packed_encoder_out = torch.nn.utils.rnn.pack_padded_sequence( + input=encoder_out, + lengths=encoder_out_lens.cpu(), + batch_first=True, + enforce_sorted=False, + ) + + blank_id = 0 # hard-code to 0 + + batch_size_list = packed_encoder_out.batch_sizes.tolist() + N = encoder_out.size(0) + + assert torch.all(encoder_out_lens > 0), encoder_out_lens + assert N == batch_size_list[0], (N, batch_size_list) + + hyps = [[blank_id] * context_size for _ in range(N)] + + decoder_input_nodes = decoder.get_inputs() + decoder_output_nodes = decoder.get_outputs() + + joiner_input_nodes = joiner.get_inputs() + joiner_output_nodes = joiner.get_outputs() + + decoder_input = torch.tensor( + hyps, + dtype=torch.int64, + ) # (N, context_size) + + decoder_out = decoder.run( + [decoder_output_nodes[0].name], + { + decoder_input_nodes[0].name: decoder_input.numpy(), + }, + )[0].squeeze(1) + + offset = 0 + for batch_size in batch_size_list: + start = offset + end = offset + batch_size + current_encoder_out = packed_encoder_out.data[start:end] + current_encoder_out = current_encoder_out + # current_encoder_out's shape: (batch_size, encoder_out_dim) + offset = end + + decoder_out = decoder_out[:batch_size] + + logits = joiner.run( + [joiner_output_nodes[0].name], + { + joiner_input_nodes[0].name: current_encoder_out.numpy(), + joiner_input_nodes[1].name: decoder_out, + }, + )[0] + logits = torch.from_numpy(logits) + # logits'shape (batch_size, vocab_size) + + assert logits.ndim == 2, logits.shape + y = logits.argmax(dim=1).tolist() + emitted = False + for i, v in enumerate(y): + if v != blank_id: + hyps[i].append(v) + emitted = True + if emitted: + # update decoder output + decoder_input = [h[-context_size:] for h in hyps[:batch_size]] + decoder_input = torch.tensor( + decoder_input, + dtype=torch.int64, + ) + decoder_out = decoder.run( + [decoder_output_nodes[0].name], + { + decoder_input_nodes[0].name: decoder_input.numpy(), + }, + )[0].squeeze(1) + + sorted_ans = [h[context_size:] for h in hyps] + ans = [] + unsorted_indices = packed_encoder_out.unsorted_indices.tolist() + for i in range(N): + ans.append(sorted_ans[unsorted_indices[i]]) + + return ans + + +@torch.no_grad() +def main(): + parser = get_parser() + args = parser.parse_args() + logging.info(vars(args)) + + session_opts = ort.SessionOptions() + session_opts.inter_op_num_threads = 1 + session_opts.intra_op_num_threads = 1 + + encoder = ort.InferenceSession( + args.encoder_model_filename, + sess_options=session_opts, + ) + + decoder = ort.InferenceSession( + args.decoder_model_filename, + sess_options=session_opts, + ) + + joiner = ort.InferenceSession( + args.joiner_model_filename, + sess_options=session_opts, + ) + + sp = spm.SentencePieceProcessor() + sp.load(args.bpe_model) + + logging.info("Constructing Fbank computer") + opts = kaldifeat.FbankOptions() + opts.device = "cpu" + opts.frame_opts.dither = 0 + opts.frame_opts.snip_edges = False + opts.frame_opts.samp_freq = args.sample_rate + opts.mel_opts.num_bins = 80 + + fbank = kaldifeat.Fbank(opts) + + logging.info(f"Reading sound files: {args.sound_files}") + waves = read_sound_files( + filenames=args.sound_files, + expected_sample_rate=args.sample_rate, + ) + + logging.info("Decoding started") + features = fbank(waves) + feature_lengths = [f.size(0) for f in features] + + features = pad_sequence( + 
features, + batch_first=True, + padding_value=math.log(1e-10), + ) + + feature_lengths = torch.tensor(feature_lengths, dtype=torch.int64) + + encoder_input_nodes = encoder.get_inputs() + encoder_out_nodes = encoder.get_outputs() + encoder_out, encoder_out_lens = encoder.run( + [encoder_out_nodes[0].name, encoder_out_nodes[1].name], + { + encoder_input_nodes[0].name: features.numpy(), + encoder_input_nodes[1].name: feature_lengths.numpy(), + }, + ) + + hyps = greedy_search( + decoder=decoder, + joiner=joiner, + encoder_out=encoder_out, + encoder_out_lens=encoder_out_lens, + context_size=args.context_size, + ) + s = "\n" + for filename, hyp in zip(args.sound_files, hyps): + words = sp.decode(hyp) + s += f"{filename}:\n{words}\n\n" + logging.info(s) + + logging.info("Decoding Done") + + +if __name__ == "__main__": + formatter = ( + "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s" + ) + + logging.basicConfig(format=formatter, level=logging.INFO) + main() diff --git a/egs/librispeech/ASR/pruned_transducer_stateless3/pretrained.py b/egs/librispeech/ASR/pruned_transducer_stateless3/pretrained.py index 8b0389bc9..c15d65ded 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless3/pretrained.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless3/pretrained.py @@ -15,7 +15,16 @@ # See the License for the specific language governing permissions and # limitations under the License. """ -Usage: +This script loads a checkpoint and uses it to decode waves. +You can generate the checkpoint with the following command: + +./pruned_transducer_stateless3/export.py \ + --exp-dir ./pruned_transducer_stateless3/exp \ + --bpe-model data/lang_bpe_500/bpe.model \ + --epoch 20 \ + --avg 10 + +Usage of this script: (1) greedy search ./pruned_transducer_stateless3/pretrained.py \ diff --git a/egs/librispeech/ASR/pruned_transducer_stateless3/scaling_converter.py b/egs/librispeech/ASR/pruned_transducer_stateless3/scaling_converter.py new file mode 100644 index 000000000..c810e36e6 --- /dev/null +++ b/egs/librispeech/ASR/pruned_transducer_stateless3/scaling_converter.py @@ -0,0 +1,189 @@ +# Copyright 2022 Xiaomi Corp. (authors: Fangjun Kuang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +This file provides functions to convert `ScaledLinear`, `ScaledConv1d`, +and `ScaledConv2d` to their non-scaled counterparts: `nn.Linear`, `nn.Conv1d`, +and `nn.Conv2d`. + +The scaled version are required only in the training time. It simplifies our +life by converting them their non-scaled version during inference time. +""" + +import copy +import re + +import torch +import torch.nn as nn +from scaling import ScaledConv1d, ScaledConv2d, ScaledLinear + + +def _get_weight(self: torch.nn.Linear): + return self.weight + + +def _get_bias(self: torch.nn.Linear): + return self.bias + + +def scaled_linear_to_linear(scaled_linear: ScaledLinear) -> nn.Linear: + """Convert an instance of ScaledLinear to nn.Linear. 
+ + Args: + scaled_linear: + The layer to be converted. + Returns: + Return a linear layer. It satisfies: + + scaled_linear(x) == linear(x) + + for any given input tensor `x`. + """ + assert isinstance(scaled_linear, ScaledLinear), type(scaled_linear) + + # if not hasattr(torch.nn.Linear, "get_weight"): + # torch.nn.Linear.get_weight = _get_weight + # torch.nn.Linear.get_bias = _get_bias + + weight = scaled_linear.get_weight() + bias = scaled_linear.get_bias() + has_bias = bias is not None + + linear = torch.nn.Linear( + in_features=scaled_linear.in_features, + out_features=scaled_linear.out_features, + bias=True, # otherwise, it throws errors when converting to PNNX format. + device=weight.device, + ) + linear.weight.data.copy_(weight) + + if has_bias: + linear.bias.data.copy_(bias) + else: + linear.bias.data.zero_() + + return linear + + +def scaled_conv1d_to_conv1d(scaled_conv1d: ScaledConv1d) -> nn.Conv1d: + """Convert an instance of ScaledConv1d to nn.Conv1d. + + Args: + scaled_conv1d: + The layer to be converted. + Returns: + Return an instance of nn.Conv1d that has the same `forward()` behavior + of the given `scaled_conv1d`. + """ + assert isinstance(scaled_conv1d, ScaledConv1d), type(scaled_conv1d) + + weight = scaled_conv1d.get_weight() + bias = scaled_conv1d.get_bias() + has_bias = bias is not None + + conv1d = nn.Conv1d( + in_channels=scaled_conv1d.in_channels, + out_channels=scaled_conv1d.out_channels, + kernel_size=scaled_conv1d.kernel_size, + stride=scaled_conv1d.stride, + padding=scaled_conv1d.padding, + dilation=scaled_conv1d.dilation, + groups=scaled_conv1d.groups, + bias=scaled_conv1d.bias is not None, + padding_mode=scaled_conv1d.padding_mode, + ) + + conv1d.weight.data.copy_(weight) + if has_bias: + conv1d.bias.data.copy_(bias) + + return conv1d + + +def scaled_conv2d_to_conv2d(scaled_conv2d: ScaledConv2d) -> nn.Conv2d: + """Convert an instance of ScaledConv2d to nn.Conv2d. + + Args: + scaled_conv2d: + The layer to be converted. + Returns: + Return an instance of nn.Conv2d that has the same `forward()` behavior + of the given `scaled_conv2d`. + """ + assert isinstance(scaled_conv2d, ScaledConv2d), type(scaled_conv2d) + + weight = scaled_conv2d.get_weight() + bias = scaled_conv2d.get_bias() + has_bias = bias is not None + + conv2d = nn.Conv2d( + in_channels=scaled_conv2d.in_channels, + out_channels=scaled_conv2d.out_channels, + kernel_size=scaled_conv2d.kernel_size, + stride=scaled_conv2d.stride, + padding=scaled_conv2d.padding, + dilation=scaled_conv2d.dilation, + groups=scaled_conv2d.groups, + bias=scaled_conv2d.bias is not None, + padding_mode=scaled_conv2d.padding_mode, + ) + + conv2d.weight.data.copy_(weight) + if has_bias: + conv2d.bias.data.copy_(bias) + + return conv2d + + +def convert_scaled_to_non_scaled(model: nn.Module, inplace: bool = False): + """Convert `ScaledLinear`, `ScaledConv1d`, and `ScaledConv2d` + in the given modle to their unscaled version `nn.Linear`, `nn.Conv1d`, + and `nn.Conv2d`. + + Args: + model: + The model to be converted. + inplace: + If True, the input model is modified inplace. + If False, the input model is copied and we modify the copied version. + Return: + Return a model without scaled layers. 
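    Example (a minimal sketch mirroring get_model() in
    test_scaling_converter.py below; in practice export.py first loads an
    averaged checkpoint into the model before converting):

        from scaling_converter import convert_scaled_to_non_scaled
        from train import get_params, get_transducer_model

        params = get_params()
        params.vocab_size = 500
        params.blank_id = 0
        params.context_size = 2
        params.unk_id = 2
        params.dynamic_chunk_training = False
        params.short_chunk_size = 25
        params.num_left_chunks = 4
        params.causal_convolution = False

        model = get_transducer_model(params, enable_giga=False)
        model.eval()
        converted = convert_scaled_to_non_scaled(model, inplace=False)
        # `converted` produces the same outputs as `model` but uses plain
        # nn.Linear/nn.Conv1d/nn.Conv2d layers (except the excluded
        # self-attention projections), so it can be scripted, traced,
        # or exported to ONNX.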
+ """ + if not inplace: + model = copy.deepcopy(model) + + excluded_patterns = r"self_attn\.(in|out)_proj" + p = re.compile(excluded_patterns) + + d = {} + for name, m in model.named_modules(): + if isinstance(m, ScaledLinear): + if p.search(name) is not None: + continue + d[name] = scaled_linear_to_linear(m) + elif isinstance(m, ScaledConv1d): + d[name] = scaled_conv1d_to_conv1d(m) + elif isinstance(m, ScaledConv2d): + d[name] = scaled_conv2d_to_conv2d(m) + + for k, v in d.items(): + if "." in k: + parent, child = k.rsplit(".", maxsplit=1) + setattr(model.get_submodule(parent), child, v) + else: + setattr(model, k, v) + + return model diff --git a/egs/librispeech/ASR/pruned_transducer_stateless3/test_scaling_converter.py b/egs/librispeech/ASR/pruned_transducer_stateless3/test_scaling_converter.py new file mode 100644 index 000000000..34a9c27f7 --- /dev/null +++ b/egs/librispeech/ASR/pruned_transducer_stateless3/test_scaling_converter.py @@ -0,0 +1,201 @@ +#!/usr/bin/env python3 +# Copyright 2022 Xiaomi Corp. (authors: Fangjun Kuang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +To run this file, do: + + cd icefall/egs/librispeech/ASR + python ./pruned_transducer_stateless3/test_scaling_converter.py +""" + +import copy + +import torch +from scaling import ScaledConv1d, ScaledConv2d, ScaledLinear +from scaling_converter import ( + convert_scaled_to_non_scaled, + scaled_conv1d_to_conv1d, + scaled_conv2d_to_conv2d, + scaled_linear_to_linear, +) +from train import get_params, get_transducer_model + + +def get_model(): + params = get_params() + params.vocab_size = 500 + params.blank_id = 0 + params.context_size = 2 + params.unk_id = 2 + + params.dynamic_chunk_training = False + params.short_chunk_size = 25 + params.num_left_chunks = 4 + params.causal_convolution = False + + model = get_transducer_model(params, enable_giga=False) + return model + + +def test_scaled_linear_to_linear(): + N = 5 + in_features = 10 + out_features = 20 + for bias in [True, False]: + scaled_linear = ScaledLinear( + in_features=in_features, + out_features=out_features, + bias=bias, + ) + linear = scaled_linear_to_linear(scaled_linear) + x = torch.rand(N, in_features) + + y1 = scaled_linear(x) + y2 = linear(x) + assert torch.allclose(y1, y2) + + jit_scaled_linear = torch.jit.script(scaled_linear) + jit_linear = torch.jit.script(linear) + + y3 = jit_scaled_linear(x) + y4 = jit_linear(x) + + assert torch.allclose(y3, y4) + assert torch.allclose(y1, y4) + + +def test_scaled_conv1d_to_conv1d(): + in_channels = 3 + for bias in [True, False]: + scaled_conv1d = ScaledConv1d( + in_channels, + 6, + kernel_size=1, + stride=1, + padding=0, + bias=bias, + ) + + conv1d = scaled_conv1d_to_conv1d(scaled_conv1d) + + x = torch.rand(20, in_channels, 10) + y1 = scaled_conv1d(x) + y2 = conv1d(x) + assert torch.allclose(y1, y2) + + jit_scaled_conv1d = torch.jit.script(scaled_conv1d) + jit_conv1d = torch.jit.script(conv1d) + + y3 = jit_scaled_conv1d(x) + y4 = 
jit_conv1d(x) + + assert torch.allclose(y3, y4) + assert torch.allclose(y1, y4) + + +def test_scaled_conv2d_to_conv2d(): + in_channels = 1 + for bias in [True, False]: + scaled_conv2d = ScaledConv2d( + in_channels=in_channels, + out_channels=3, + kernel_size=3, + padding=1, + bias=bias, + ) + + conv2d = scaled_conv2d_to_conv2d(scaled_conv2d) + + x = torch.rand(20, in_channels, 10, 20) + y1 = scaled_conv2d(x) + y2 = conv2d(x) + assert torch.allclose(y1, y2) + + jit_scaled_conv2d = torch.jit.script(scaled_conv2d) + jit_conv2d = torch.jit.script(conv2d) + + y3 = jit_scaled_conv2d(x) + y4 = jit_conv2d(x) + + assert torch.allclose(y3, y4) + assert torch.allclose(y1, y4) + + +def test_convert_scaled_to_non_scaled(): + for inplace in [False, True]: + model = get_model() + model.eval() + + orig_model = copy.deepcopy(model) + + converted_model = convert_scaled_to_non_scaled(model, inplace=inplace) + + model = orig_model + + # test encoder + N = 2 + T = 100 + vocab_size = model.decoder.vocab_size + + x = torch.randn(N, T, 80, dtype=torch.float32) + x_lens = torch.full((N,), x.size(1)) + + e1, e1_lens = model.encoder(x, x_lens) + e2, e2_lens = converted_model.encoder(x, x_lens) + + assert torch.all(torch.eq(e1_lens, e2_lens)) + assert torch.allclose(e1, e2), (e1 - e2).abs().max() + + # test decoder + U = 50 + y = torch.randint(low=1, high=vocab_size - 1, size=(N, U)) + + d1 = model.decoder(y) + d2 = model.decoder(y) + + assert torch.allclose(d1, d2) + + # test simple projection + lm1 = model.simple_lm_proj(d1) + am1 = model.simple_am_proj(e1) + + lm2 = converted_model.simple_lm_proj(d2) + am2 = converted_model.simple_am_proj(e2) + + assert torch.allclose(lm1, lm2) + assert torch.allclose(am1, am2) + + # test joiner + e = torch.rand(2, 3, 4, 512) + d = torch.rand(2, 3, 4, 512) + + j1 = model.joiner(e, d) + j2 = converted_model.joiner(e, d) + assert torch.allclose(j1, j2) + + +@torch.no_grad() +def main(): + test_scaled_linear_to_linear() + test_scaled_conv1d_to_conv1d() + test_scaled_conv2d_to_conv2d() + test_convert_scaled_to_non_scaled() + + +if __name__ == "__main__": + torch.manual_seed(20220730) + main() diff --git a/egs/librispeech/ASR/pruned_transducer_stateless3/train.py b/egs/librispeech/ASR/pruned_transducer_stateless3/train.py index 371bf21d9..ff9b8d808 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless3/train.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless3/train.py @@ -436,13 +436,22 @@ def get_joiner_model(params: AttributeDict) -> nn.Module: return joiner -def get_transducer_model(params: AttributeDict) -> nn.Module: +def get_transducer_model( + params: AttributeDict, + enable_giga: bool = True, +) -> nn.Module: encoder = get_encoder_model(params) decoder = get_decoder_model(params) joiner = get_joiner_model(params) - decoder_giga = get_decoder_model(params) - joiner_giga = get_joiner_model(params) + if enable_giga: + logging.info("Use giga") + decoder_giga = get_decoder_model(params) + joiner_giga = get_joiner_model(params) + else: + logging.info("Disable giga") + decoder_giga = None + joiner_giga = None model = Transducer( encoder=encoder, diff --git a/requirements-ci.txt b/requirements-ci.txt index fc17b123a..48769d61a 100644 --- a/requirements-ci.txt +++ b/requirements-ci.txt @@ -20,3 +20,6 @@ sentencepiece==0.1.96 tensorboard==2.8.0 typeguard==2.13.3 multi_quantization + +onnx +onnxruntime diff --git a/requirements.txt b/requirements.txt index 90b1dac69..25b5529f0 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,3 +4,5 @@ sentencepiece>=0.1.96 
tensorboard typeguard multi_quantization +onnx +onnxruntime From 6af5a82d8f9c9c38e3c87eaaab706b292c15bd9b Mon Sep 17 00:00:00 2001 From: Fangjun Kuang Date: Wed, 3 Aug 2022 15:34:55 +0800 Subject: [PATCH 25/38] Convert ScaledEmbedding to nn.Embedding for inference. (#517) * Convert ScaledEmbedding to nn.Embedding for inference. * Fix CI style issues. --- .../pruned_transducer_stateless2/scaling.py | 5 +- .../scaling_converter.py | 50 +++++++++++++------ .../test_scaling_converter.py | 19 ++++++- icefall/decode.py | 9 ++-- 4 files changed, 59 insertions(+), 24 deletions(-) diff --git a/egs/librispeech/ASR/pruned_transducer_stateless2/scaling.py b/egs/librispeech/ASR/pruned_transducer_stateless2/scaling.py index 2b44dc649..566b3622f 100644 --- a/egs/librispeech/ASR/pruned_transducer_stateless2/scaling.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless2/scaling.py @@ -495,9 +495,6 @@ class ScaledEmbedding(nn.Module): embedding_dim (int): the size of each embedding vector padding_idx (int, optional): If given, pads the output with the embedding vector at :attr:`padding_idx` (initialized to zeros) whenever it encounters the index. - max_norm (float, optional): If given, each embedding vector with norm larger than :attr:`max_norm` - is renormalized to have norm :attr:`max_norm`. - norm_type (float, optional): The p of the p-norm to compute for the :attr:`max_norm` option. Default ``2``. scale_grad_by_freq (boolean, optional): If given, this will scale gradients by the inverse of frequency of the words in the mini-batch. Default ``False``. sparse (bool, optional): If ``True``, gradient w.r.t. :attr:`weight` matrix will be a sparse tensor. @@ -506,7 +503,7 @@ class ScaledEmbedding(nn.Module): initial_speed (float, optional): This affects how fast the parameter will learn near the start of training; you can set it to a value less than one if you suspect that a module is contributing to instability near - the start of training. Nnote: regardless of the use of this option, + the start of training. Note: regardless of the use of this option, it's best to use schedulers like Noam that have a warm-up period. Alternatively you can set it to more than 1 if you want it to initially train faster. Must be greater than 0. diff --git a/egs/librispeech/ASR/pruned_transducer_stateless3/scaling_converter.py b/egs/librispeech/ASR/pruned_transducer_stateless3/scaling_converter.py index c810e36e6..79b178421 100644 --- a/egs/librispeech/ASR/pruned_transducer_stateless3/scaling_converter.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless3/scaling_converter.py @@ -16,11 +16,11 @@ """ This file provides functions to convert `ScaledLinear`, `ScaledConv1d`, -and `ScaledConv2d` to their non-scaled counterparts: `nn.Linear`, `nn.Conv1d`, -and `nn.Conv2d`. +`ScaledConv2d`, and `ScaledEmbedding` to their non-scaled counterparts: +`nn.Linear`, `nn.Conv1d`, `nn.Conv2d`, and `nn.Embedding`. The scaled version are required only in the training time. It simplifies our -life by converting them their non-scaled version during inference time. +life by converting them to their non-scaled version during inference. 
""" import copy @@ -28,15 +28,7 @@ import re import torch import torch.nn as nn -from scaling import ScaledConv1d, ScaledConv2d, ScaledLinear - - -def _get_weight(self: torch.nn.Linear): - return self.weight - - -def _get_bias(self: torch.nn.Linear): - return self.bias +from scaling import ScaledConv1d, ScaledConv2d, ScaledEmbedding, ScaledLinear def scaled_linear_to_linear(scaled_linear: ScaledLinear) -> nn.Linear: @@ -54,10 +46,6 @@ def scaled_linear_to_linear(scaled_linear: ScaledLinear) -> nn.Linear: """ assert isinstance(scaled_linear, ScaledLinear), type(scaled_linear) - # if not hasattr(torch.nn.Linear, "get_weight"): - # torch.nn.Linear.get_weight = _get_weight - # torch.nn.Linear.get_bias = _get_bias - weight = scaled_linear.get_weight() bias = scaled_linear.get_bias() has_bias = bias is not None @@ -148,6 +136,34 @@ def scaled_conv2d_to_conv2d(scaled_conv2d: ScaledConv2d) -> nn.Conv2d: return conv2d +def scaled_embedding_to_embedding( + scaled_embedding: ScaledEmbedding, +) -> nn.Embedding: + """Convert an instance of ScaledEmbedding to nn.Embedding. + + Args: + scaled_embedding: + The layer to be converted. + Returns: + Return an instance of nn.Embedding that has the same `forward()` behavior + of the given `scaled_embedding`. + """ + assert isinstance(scaled_embedding, ScaledEmbedding), type(scaled_embedding) + embedding = nn.Embedding( + num_embeddings=scaled_embedding.num_embeddings, + embedding_dim=scaled_embedding.embedding_dim, + padding_idx=scaled_embedding.padding_idx, + scale_grad_by_freq=scaled_embedding.scale_grad_by_freq, + sparse=scaled_embedding.sparse, + ) + weight = scaled_embedding.weight + scale = scaled_embedding.scale + + embedding.weight.data.copy_(weight * scale.exp()) + + return embedding + + def convert_scaled_to_non_scaled(model: nn.Module, inplace: bool = False): """Convert `ScaledLinear`, `ScaledConv1d`, and `ScaledConv2d` in the given modle to their unscaled version `nn.Linear`, `nn.Conv1d`, @@ -178,6 +194,8 @@ def convert_scaled_to_non_scaled(model: nn.Module, inplace: bool = False): d[name] = scaled_conv1d_to_conv1d(m) elif isinstance(m, ScaledConv2d): d[name] = scaled_conv2d_to_conv2d(m) + elif isinstance(m, ScaledEmbedding): + d[name] = scaled_embedding_to_embedding(m) for k, v in d.items(): if "." 
in k: diff --git a/egs/librispeech/ASR/pruned_transducer_stateless3/test_scaling_converter.py b/egs/librispeech/ASR/pruned_transducer_stateless3/test_scaling_converter.py index 34a9c27f7..a9feea83c 100644 --- a/egs/librispeech/ASR/pruned_transducer_stateless3/test_scaling_converter.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless3/test_scaling_converter.py @@ -25,11 +25,12 @@ To run this file, do: import copy import torch -from scaling import ScaledConv1d, ScaledConv2d, ScaledLinear +from scaling import ScaledConv1d, ScaledConv2d, ScaledEmbedding, ScaledLinear from scaling_converter import ( convert_scaled_to_non_scaled, scaled_conv1d_to_conv1d, scaled_conv2d_to_conv2d, + scaled_embedding_to_embedding, scaled_linear_to_linear, ) from train import get_params, get_transducer_model @@ -135,6 +136,21 @@ def test_scaled_conv2d_to_conv2d(): assert torch.allclose(y1, y4) +def test_scaled_embedding_to_embedding(): + scaled_embedding = ScaledEmbedding( + num_embeddings=500, + embedding_dim=10, + padding_idx=0, + ) + embedding = scaled_embedding_to_embedding(scaled_embedding) + + for s in [10, 100, 300, 500, 800, 1000]: + x = torch.randint(low=0, high=500, size=(s,)) + scaled_y = scaled_embedding(x) + y = embedding(x) + assert torch.equal(scaled_y, y) + + def test_convert_scaled_to_non_scaled(): for inplace in [False, True]: model = get_model() @@ -193,6 +209,7 @@ def main(): test_scaled_linear_to_linear() test_scaled_conv1d_to_conv1d() test_scaled_conv2d_to_conv2d() + test_scaled_embedding_to_embedding() test_convert_scaled_to_non_scaled() diff --git a/icefall/decode.py b/icefall/decode.py index 3b64481c7..f04ee368c 100644 --- a/icefall/decode.py +++ b/icefall/decode.py @@ -334,10 +334,13 @@ class Nbest(object): if hasattr(lattice, "aux_labels"): # delete token IDs as it is not needed del word_fsa.aux_labels - word_fsa_with_epsilon_loops = k2.linear_fsa_with_self_loops(word_fsa) + word_fsa_with_epsilon_loops = k2.linear_fsa_with_self_loops( + word_fsa + ) else: - word_fsa_with_epsilon_loops = k2.linear_fst_with_self_loops(word_fsa) - + word_fsa_with_epsilon_loops = k2.linear_fst_with_self_loops( + word_fsa + ) path_to_utt_map = self.shape.row_ids(1) From 36eacaccb2f88e724045159a0f15bb20b82e46c4 Mon Sep 17 00:00:00 2001 From: Weiji Zhuang Date: Wed, 3 Aug 2022 19:19:40 +0800 Subject: [PATCH 26/38] Fix preparing char based lang and add multiprocessing for wenetspeech text segmentation (#513) * add multiprocessing for wenetspeech text segmentation * Fix preparing char based lang for wenetspeech * fix style Co-authored-by: WeijiZhuang --- egs/wenetspeech/ASR/local/text2segments.py | 49 ++++++++++++++++------ egs/wenetspeech/ASR/prepare.sh | 32 +++++++------- 2 files changed, 54 insertions(+), 27 deletions(-) diff --git a/egs/wenetspeech/ASR/local/text2segments.py b/egs/wenetspeech/ASR/local/text2segments.py index 3df727c67..df5b3c119 100644 --- a/egs/wenetspeech/ASR/local/text2segments.py +++ b/egs/wenetspeech/ASR/local/text2segments.py @@ -2,6 +2,7 @@ # -*- coding: utf-8 -*- # Copyright 2021 Xiaomi Corp. (authors: Mingshuang Luo) +# 2022 Xiaomi Corp. (authors: Weiji Zhuang) # # See ../../../../LICENSE for clarification regarding multiple authors # @@ -29,10 +30,18 @@ with word segmenting: import argparse +from multiprocessing import Pool import jieba +import paddle from tqdm import tqdm +# In PaddlePaddle 2.x, dynamic graph mode is turned on by default, +# and 'data()' is only supported in static graph mode. 
So if you +# want to use this api, should call 'paddle.enable_static()' before +# this api to enter static graph mode. +paddle.enable_static() +paddle.disable_signal_handler() jieba.enable_paddle() @@ -41,14 +50,23 @@ def get_parser(): description="Chinese Word Segmentation for text", formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) + parser.add_argument( + "--num-process", + "-n", + default=20, + type=int, + help="the number of processes", + ) parser.add_argument( "--input-file", + "-i", default="data/lang_char/text", type=str, help="the input text file for WenetSpeech", ) parser.add_argument( "--output-file", + "-o", default="data/lang_char/text_words_segmentation", type=str, help="the text implemented with words segmenting for WenetSpeech", @@ -57,26 +75,33 @@ def get_parser(): return parser +def cut(lines): + if lines is not None: + cut_lines = jieba.cut(lines, use_paddle=True) + return [i for i in cut_lines] + else: + return None + + def main(): parser = get_parser() args = parser.parse_args() + num_process = args.num_process input_file = args.input_file output_file = args.output_file + # parallel mode does not support use_paddle + # jieba.enable_parallel(num_process) - f = open(input_file, "r", encoding="utf-8") - lines = f.readlines() - new_lines = [] - for i in tqdm(range(len(lines))): - x = lines[i].rstrip() - seg_list = jieba.cut(x, use_paddle=True) - new_line = " ".join(seg_list) - new_lines.append(new_line) + with open(input_file, "r", encoding="utf-8") as fr: + lines = fr.readlines() - f_new = open(output_file, "w", encoding="utf-8") - for line in new_lines: - f_new.write(line) - f_new.write("\n") + with Pool(processes=num_process) as p: + new_lines = list(tqdm(p.imap(cut, lines), total=len(lines))) + + with open(output_file, "w", encoding="utf-8") as fw: + for line in new_lines: + fw.write(" ".join(line) + "\n") if __name__ == "__main__": diff --git a/egs/wenetspeech/ASR/prepare.sh b/egs/wenetspeech/ASR/prepare.sh index 6573a94ad..755fbb2d7 100755 --- a/egs/wenetspeech/ASR/prepare.sh +++ b/egs/wenetspeech/ASR/prepare.sh @@ -28,6 +28,7 @@ num_splits=1000 # - speech dl_dir=$PWD/download +lang_char_dir=data/lang_char . shared/parse_options.sh || exit 1 @@ -186,24 +187,27 @@ fi if [ $stage -le 15 ] && [ $stop_stage -ge 15 ]; then log "Stage 15: Prepare char based lang" - lang_char_dir=data/lang_char mkdir -p $lang_char_dir - # Prepare text. - # Note: in Linux, you can install jq with the following command: - # 1. wget -O jq https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64 - # 2. chmod +x ./jq - # 3. cp jq /usr/bin - if [ ! -f $lang_char_dir/text ]; then - gunzip -c data/manifests/supervisions_L.jsonl.gz \ - | jq 'text' | sed 's/"//g' \ + if ! which jq; then + echo "This script is intended to be used with jq but you have not installed jq + Note: in Linux, you can install jq with the following command: + 1. wget -O jq https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64 + 2. chmod +x ./jq + 3. cp jq /usr/bin" && exit 1 + fi + if [ ! -f $lang_char_dir/text ] || [ ! -s $lang_char_dir/text ]; then + log "Prepare text." + gunzip -c data/manifests/wenetspeech_supervisions_L.jsonl.gz \ + | jq '.text' | sed 's/"//g' \ | ./local/text2token.py -t "char" > $lang_char_dir/text fi # The implementation of chinese word segmentation for text, # and it will take about 15 minutes. if [ ! 
-f $lang_char_dir/text_words_segmentation ]; then - python ./local/text2segments.py \ + python3 ./local/text2segments.py \ + --num-process $nj \ --input-file $lang_char_dir/text \ --output-file $lang_char_dir/text_words_segmentation fi @@ -212,7 +216,7 @@ if [ $stage -le 15 ] && [ $stop_stage -ge 15 ]; then | sort -u | sed '/^$/d' | uniq > $lang_char_dir/words_no_ids.txt if [ ! -f $lang_char_dir/words.txt ]; then - python ./local/prepare_words.py \ + python3 ./local/prepare_words.py \ --input-file $lang_char_dir/words_no_ids.txt \ --output-file $lang_char_dir/words.txt fi @@ -221,7 +225,7 @@ fi if [ $stage -le 16 ] && [ $stop_stage -ge 16 ]; then log "Stage 16: Prepare char based L_disambig.pt" if [ ! -f data/lang_char/L_disambig.pt ]; then - python ./local/prepare_char.py \ + python3 ./local/prepare_char.py \ --lang-dir data/lang_char fi fi @@ -232,9 +236,8 @@ if [ $stage -le 17 ] && [ $stop_stage -ge 17 ]; then # It will take about 20 minutes. # We assume you have install kaldilm, if not, please install # it using: pip install kaldilm - lang_char_dir=data/lang_char if [ ! -f $lang_char_dir/3-gram.unpruned.arpa ]; then - python ./shared/make_kn_lm.py \ + python3 ./shared/make_kn_lm.py \ -ngram-order 3 \ -text $lang_char_dir/text_words_segmentation \ -lm $lang_char_dir/3-gram.unpruned.arpa @@ -253,6 +256,5 @@ fi if [ $stage -le 18 ] && [ $stop_stage -ge 18 ]; then log "Stage 18: Compile LG" - lang_char_dir=data/lang_char python ./local/compile_lg.py --lang-dir $lang_char_dir fi From e538232485db8999ee2a5308685cc61e3c4f79b0 Mon Sep 17 00:00:00 2001 From: Mingshuang Luo <37799481+luomingshuang@users.noreply.github.com> Date: Thu, 4 Aug 2022 12:29:39 +0800 Subject: [PATCH 27/38] change for pruned rnnt5 train.py (#519) --- egs/wenetspeech/ASR/pruned_transducer_stateless5/train.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/egs/wenetspeech/ASR/pruned_transducer_stateless5/train.py b/egs/wenetspeech/ASR/pruned_transducer_stateless5/train.py index 7d09acc39..5a5925d55 100755 --- a/egs/wenetspeech/ASR/pruned_transducer_stateless5/train.py +++ b/egs/wenetspeech/ASR/pruned_transducer_stateless5/train.py @@ -1144,8 +1144,6 @@ def display_and_save_batch( y = graph_compiler.texts_to_ids(texts) if type(y) == list: y = k2.RaggedTensor(y) - else: - y = y num_tokens = sum(len(i) for i in y) logging.info(f"num tokens: {num_tokens}") From a4dd273776934506c8600a24307c3b02dea1d6e8 Mon Sep 17 00:00:00 2001 From: Zengwei Yao Date: Thu, 4 Aug 2022 19:57:12 +0800 Subject: [PATCH 28/38] fix about tensorboard (#516) * fix metricstracker * fix style --- egs/librispeech/ASR/conformer_ctc/train.py | 11 +++++++++++ egs/librispeech/ASR/conformer_ctc2/train.py | 9 +++++++++ egs/librispeech/ASR/streaming_conformer_ctc/train.py | 11 +++++++++++ egs/librispeech/ASR/tdnn_lstm_ctc/train.py | 11 +++++++++++ egs/librispeech/ASR/transducer/train.py | 9 +++++++++ egs/librispeech/ASR/transducer_lstm/train.py | 9 +++++++++ egs/librispeech/ASR/transducer_stateless/train.py | 9 +++++++++ egs/librispeech/ASR/transducer_stateless2/train.py | 9 +++++++++ .../ASR/transducer_stateless_multi_datasets/train.py | 9 +++++++++ icefall/utils.py | 7 ++++--- 10 files changed, 91 insertions(+), 3 deletions(-) diff --git a/egs/librispeech/ASR/conformer_ctc/train.py b/egs/librispeech/ASR/conformer_ctc/train.py index fc8fc8863..6419f6816 100755 --- a/egs/librispeech/ASR/conformer_ctc/train.py +++ b/egs/librispeech/ASR/conformer_ctc/train.py @@ -447,6 +447,17 @@ def compute_loss( info["loss"] = loss.detach().cpu().item() + # `utt_duration` and 
`utt_pad_proportion` would be normalized by `utterances` # noqa + info["utterances"] = feature.size(0) + # averaged input duration in frames over utterances + info["utt_duration"] = supervisions["num_frames"].sum().item() + # averaged padding proportion over utterances + info["utt_pad_proportion"] = ( + ((feature.size(1) - supervisions["num_frames"]) / feature.size(1)) + .sum() + .item() + ) + return loss, info diff --git a/egs/librispeech/ASR/conformer_ctc2/train.py b/egs/librispeech/ASR/conformer_ctc2/train.py index d7baa229f..9d9c2af1f 100755 --- a/egs/librispeech/ASR/conformer_ctc2/train.py +++ b/egs/librispeech/ASR/conformer_ctc2/train.py @@ -605,6 +605,15 @@ def compute_loss( # Note: We use reduction=sum while computing the loss. info["loss"] = loss.detach().cpu().item() + # `utt_duration` and `utt_pad_proportion` would be normalized by `utterances` # noqa + info["utterances"] = feature.size(0) + # averaged input duration in frames over utterances + info["utt_duration"] = feature_lens.sum().item() + # averaged padding proportion over utterances + info["utt_pad_proportion"] = ( + ((feature.size(1) - feature_lens) / feature.size(1)).sum().item() + ) + return loss, info diff --git a/egs/librispeech/ASR/streaming_conformer_ctc/train.py b/egs/librispeech/ASR/streaming_conformer_ctc/train.py index 9beb185a2..e41b7ea78 100755 --- a/egs/librispeech/ASR/streaming_conformer_ctc/train.py +++ b/egs/librispeech/ASR/streaming_conformer_ctc/train.py @@ -430,6 +430,17 @@ def compute_loss( info["loss"] = loss.detach().cpu().item() + # `utt_duration` and `utt_pad_proportion` would be normalized by `utterances` # noqa + info["utterances"] = feature.size(0) + # averaged input duration in frames over utterances + info["utt_duration"] = supervisions["num_frames"].sum().item() + # averaged padding proportion over utterances + info["utt_pad_proportion"] = ( + ((feature.size(1) - supervisions["num_frames"]) / feature.size(1)) + .sum() + .item() + ) + return loss, info diff --git a/egs/librispeech/ASR/tdnn_lstm_ctc/train.py b/egs/librispeech/ASR/tdnn_lstm_ctc/train.py index 827e3ae1f..6b37d5c23 100755 --- a/egs/librispeech/ASR/tdnn_lstm_ctc/train.py +++ b/egs/librispeech/ASR/tdnn_lstm_ctc/train.py @@ -349,6 +349,17 @@ def compute_loss( info["frames"] = supervision_segments[:, 2].sum().item() info["loss"] = loss.detach().cpu().item() + # `utt_duration` and `utt_pad_proportion` would be normalized by `utterances` # noqa + info["utterances"] = feature.size(0) + # averaged input duration in frames over utterances + info["utt_duration"] = supervisions["num_frames"].sum().item() + # averaged padding proportion over utterances + info["utt_pad_proportion"] = ( + ((feature.size(2) - supervisions["num_frames"]) / feature.size(2)) + .sum() + .item() + ) + return loss, info diff --git a/egs/librispeech/ASR/transducer/train.py b/egs/librispeech/ASR/transducer/train.py index 11c72ae4f..1dd65eddb 100755 --- a/egs/librispeech/ASR/transducer/train.py +++ b/egs/librispeech/ASR/transducer/train.py @@ -403,6 +403,15 @@ def compute_loss( # Note: We use reduction=sum while computing the loss. 
info["loss"] = loss.detach().cpu().item() + # `utt_duration` and `utt_pad_proportion` would be normalized by `utterances` # noqa + info["utterances"] = feature.size(0) + # averaged input duration in frames over utterances + info["utt_duration"] = feature_lens.sum().item() + # averaged padding proportion over utterances + info["utt_pad_proportion"] = ( + ((feature.size(1) - feature_lens) / feature.size(1)).sum().item() + ) + return loss, info diff --git a/egs/librispeech/ASR/transducer_lstm/train.py b/egs/librispeech/ASR/transducer_lstm/train.py index 17ba6143c..cdb801e79 100755 --- a/egs/librispeech/ASR/transducer_lstm/train.py +++ b/egs/librispeech/ASR/transducer_lstm/train.py @@ -407,6 +407,15 @@ def compute_loss( # Note: We use reduction=sum while computing the loss. info["loss"] = loss.detach().cpu().item() + # `utt_duration` and `utt_pad_proportion` would be normalized by `utterances` # noqa + info["utterances"] = feature.size(0) + # averaged input duration in frames over utterances + info["utt_duration"] = feature_lens.sum().item() + # averaged padding proportion over utterances + info["utt_pad_proportion"] = ( + ((feature.size(1) - feature_lens) / feature.size(1)).sum().item() + ) + return loss, info diff --git a/egs/librispeech/ASR/transducer_stateless/train.py b/egs/librispeech/ASR/transducer_stateless/train.py index 837a9de2d..ae93f3348 100755 --- a/egs/librispeech/ASR/transducer_stateless/train.py +++ b/egs/librispeech/ASR/transducer_stateless/train.py @@ -429,6 +429,15 @@ def compute_loss( # Note: We use reduction=sum while computing the loss. info["loss"] = loss.detach().cpu().item() + # `utt_duration` and `utt_pad_proportion` would be normalized by `utterances` # noqa + info["utterances"] = feature.size(0) + # averaged input duration in frames over utterances + info["utt_duration"] = feature_lens.sum().item() + # averaged padding proportion over utterances + info["utt_pad_proportion"] = ( + ((feature.size(1) - feature_lens) / feature.size(1)).sum().item() + ) + return loss, info diff --git a/egs/librispeech/ASR/transducer_stateless2/train.py b/egs/librispeech/ASR/transducer_stateless2/train.py index fe075b073..ea15c9040 100755 --- a/egs/librispeech/ASR/transducer_stateless2/train.py +++ b/egs/librispeech/ASR/transducer_stateless2/train.py @@ -417,6 +417,15 @@ def compute_loss( # Note: We use reduction=sum while computing the loss. info["loss"] = loss.detach().cpu().item() + # `utt_duration` and `utt_pad_proportion` would be normalized by `utterances` # noqa + info["utterances"] = feature.size(0) + # averaged input duration in frames over utterances + info["utt_duration"] = feature_lens.sum().item() + # averaged padding proportion over utterances + info["utt_pad_proportion"] = ( + ((feature.size(1) - feature_lens) / feature.size(1)).sum().item() + ) + return loss, info diff --git a/egs/librispeech/ASR/transducer_stateless_multi_datasets/train.py b/egs/librispeech/ASR/transducer_stateless_multi_datasets/train.py index 32ce1032c..27912738c 100755 --- a/egs/librispeech/ASR/transducer_stateless_multi_datasets/train.py +++ b/egs/librispeech/ASR/transducer_stateless_multi_datasets/train.py @@ -476,6 +476,15 @@ def compute_loss( # Note: We use reduction=sum while computing the loss. 
info["loss"] = loss.detach().cpu().item() + # `utt_duration` and `utt_pad_proportion` would be normalized by `utterances` # noqa + info["utterances"] = feature.size(0) + # averaged input duration in frames over utterances + info["utt_duration"] = feature_lens.sum().item() + # averaged padding proportion over utterances + info["utt_pad_proportion"] = ( + ((feature.size(1) - feature_lens) / feature.size(1)).sum().item() + ) + return loss, info diff --git a/icefall/utils.py b/icefall/utils.py index 417ca1766..f40f769f8 100644 --- a/icefall/utils.py +++ b/icefall/utils.py @@ -544,9 +544,10 @@ class MetricsTracker(collections.defaultdict): else: raise ValueError(f"Unexpected key: {k}") frames = "%.2f" % self["frames"] - ans_frames += "over " + str(frames) + " frames; " - utterances = "%.2f" % self["utterances"] - ans_utterances += "over " + str(utterances) + " utterances." + ans_frames += "over " + str(frames) + " frames. " + if ans_utterances != "": + utterances = "%.2f" % self["utterances"] + ans_utterances += "over " + str(utterances) + " utterances." return ans_frames + ans_utterances From 7157f62af3b7712eda2186f7e3d253df7cde65b5 Mon Sep 17 00:00:00 2001 From: Yunusemre Date: Thu, 4 Aug 2022 18:03:41 +0300 Subject: [PATCH 29/38] Merging onnx models (#518) * add export function of onnx-all-in-one to export.py * add onnx_check script for all-in-one onnx model * minor fix * remove unused arguments * add onnx-all-in-one test * fix style * fix style * fix requirements * fix input/output names * fix installing onnx_graphsurgeon * fix instaliing onnx_graphsurgeon * revert to previous requirements.txt * fix minor --- ...pruned-transducer-stateless3-2022-05-13.sh | 4 + .../pruned_transducer_stateless3/export.py | 33 ++ .../onnx_check_all_in_one.py | 284 ++++++++++++++++++ requirements-ci.txt | 1 + requirements.txt | 2 + 5 files changed, 324 insertions(+) create mode 100755 egs/librispeech/ASR/pruned_transducer_stateless3/onnx_check_all_in_one.py diff --git a/.github/scripts/run-librispeech-pruned-transducer-stateless3-2022-05-13.sh b/.github/scripts/run-librispeech-pruned-transducer-stateless3-2022-05-13.sh index 2deab04b9..bdc8a3838 100755 --- a/.github/scripts/run-librispeech-pruned-transducer-stateless3-2022-05-13.sh +++ b/.github/scripts/run-librispeech-pruned-transducer-stateless3-2022-05-13.sh @@ -60,6 +60,10 @@ log "Decode with ONNX models" --onnx-decoder-filename $repo/exp/decoder.onnx \ --onnx-joiner-filename $repo/exp/joiner.onnx +./pruned_transducer_stateless3/onnx_check_all_in_one.py \ + --jit-filename $repo/exp/cpu_jit.pt \ + --onnx-all-in-one-filename $repo/exp/all_in_one.onnx + ./pruned_transducer_stateless3/onnx_pretrained.py \ --bpe-model $repo/data/lang_bpe_500/bpe.model \ --encoder-model-filename $repo/exp/encoder.onnx \ diff --git a/egs/librispeech/ASR/pruned_transducer_stateless3/export.py b/egs/librispeech/ASR/pruned_transducer_stateless3/export.py index 1485c6d6a..2bb518bcd 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless3/export.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless3/export.py @@ -115,6 +115,7 @@ import argparse import logging from pathlib import Path +import onnx import sentencepiece as spm import torch import torch.nn as nn @@ -512,6 +513,30 @@ def export_joiner_model_onnx( logging.info(f"Saved to {joiner_filename}") +def export_all_in_one_onnx( + encoder_filename: str, + decoder_filename: str, + joiner_filename: str, + all_in_one_filename: str, +): + encoder_onnx = onnx.load(encoder_filename) + decoder_onnx = onnx.load(decoder_filename) + 
joiner_onnx = onnx.load(joiner_filename) + + encoder_onnx = onnx.compose.add_prefix(encoder_onnx, prefix="encoder/") + decoder_onnx = onnx.compose.add_prefix(decoder_onnx, prefix="decoder/") + joiner_onnx = onnx.compose.add_prefix(joiner_onnx, prefix="joiner/") + + combined_model = onnx.compose.merge_models( + encoder_onnx, decoder_onnx, io_map={} + ) + combined_model = onnx.compose.merge_models( + combined_model, joiner_onnx, io_map={} + ) + onnx.save(combined_model, all_in_one_filename) + logging.info(f"Saved to {all_in_one_filename}") + + @torch.no_grad() def main(): args = get_parser().parse_args() @@ -603,6 +628,14 @@ def main(): joiner_filename, opset_version=opset_version, ) + + all_in_one_filename = params.exp_dir / "all_in_one.onnx" + export_all_in_one_onnx( + encoder_filename, + decoder_filename, + joiner_filename, + all_in_one_filename, + ) elif params.jit is True: logging.info("Using torch.jit.script()") # We won't use the forward() method of the model in C++, so just ignore diff --git a/egs/librispeech/ASR/pruned_transducer_stateless3/onnx_check_all_in_one.py b/egs/librispeech/ASR/pruned_transducer_stateless3/onnx_check_all_in_one.py new file mode 100755 index 000000000..b4cf8c94a --- /dev/null +++ b/egs/librispeech/ASR/pruned_transducer_stateless3/onnx_check_all_in_one.py @@ -0,0 +1,284 @@ +#!/usr/bin/env python3 +# +# Copyright 2022 Xiaomi Corporation (Author: Yunus Emre Ozkose) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +This script checks that exported onnx models produce the same output +with the given torchscript model for the same input. 
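[Editor's note — illustrative sketch, not part of the patch.] `export_all_in_one_onnx` above namespaces the three graphs with `onnx.compose.add_prefix` (so `x` becomes `encoder/x`, `y` becomes `decoder/y`, and so on) and then concatenates them with `onnx.compose.merge_models` using an empty `io_map`, i.e. without wiring any outputs to inputs; the check script below relies on exactly those prefixed names when it carves the sub-models back out. A quick way to see the effect, with a hypothetical path to the exported file:

    import onnx

    # Path is hypothetical; export.py above writes <exp_dir>/all_in_one.onnx.
    m = onnx.load("exp/all_in_one.onnx")

    # Inputs/outputs of the three sub-models should stay disjoint thanks to the
    # prefixes, e.g. encoder/x, encoder/x_lens, decoder/y, joiner/encoder_out.
    print([i.name for i in m.graph.input])
    print([o.name for o in m.graph.output])
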
+""" + +import argparse +import logging +import os + +import onnx +import onnx_graphsurgeon as gs +import onnxruntime +import onnxruntime as ort +import torch + +ort.set_default_logger_severity(3) + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--jit-filename", + required=True, + type=str, + help="Path to the torchscript model", + ) + + parser.add_argument( + "--onnx-all-in-one-filename", + required=True, + type=str, + help="Path to the onnx all in one model", + ) + + return parser + + +def test_encoder( + model: torch.jit.ScriptModule, + encoder_session: ort.InferenceSession, +): + encoder_inputs = encoder_session.get_inputs() + assert encoder_inputs[0].shape == ["N", "T", 80] + assert encoder_inputs[1].shape == ["N"] + encoder_input_names = [i.name for i in encoder_inputs] + encoder_output_names = [i.name for i in encoder_session.get_outputs()] + + for N in [1, 5]: + for T in [12, 25]: + print("N, T", N, T) + x = torch.rand(N, T, 80, dtype=torch.float32) + x_lens = torch.randint(low=10, high=T + 1, size=(N,)) + x_lens[0] = T + + encoder_inputs = { + encoder_input_names[0]: x.numpy(), + encoder_input_names[1]: x_lens.numpy(), + } + encoder_out, encoder_out_lens = encoder_session.run( + [encoder_output_names[1], encoder_output_names[0]], + encoder_inputs, + ) + + torch_encoder_out, torch_encoder_out_lens = model.encoder(x, x_lens) + + encoder_out = torch.from_numpy(encoder_out) + assert torch.allclose(encoder_out, torch_encoder_out, atol=1e-05), ( + (encoder_out - torch_encoder_out).abs().max() + ) + + +def test_decoder( + model: torch.jit.ScriptModule, + decoder_session: ort.InferenceSession, +): + decoder_inputs = decoder_session.get_inputs() + assert decoder_inputs[0].shape == ["N", 2] + decoder_input_names = [i.name for i in decoder_inputs] + decoder_output_names = [i.name for i in decoder_session.get_outputs()] + + for N in [1, 5, 10]: + y = torch.randint(low=1, high=500, size=(10, 2)) + + decoder_inputs = {decoder_input_names[0]: y.numpy()} + decoder_out = decoder_session.run( + [decoder_output_names[0]], + decoder_inputs, + )[0] + decoder_out = torch.from_numpy(decoder_out) + + torch_decoder_out = model.decoder(y, need_pad=False) + assert torch.allclose(decoder_out, torch_decoder_out, atol=1e-5), ( + (decoder_out - torch_decoder_out).abs().max() + ) + + +def test_joiner( + model: torch.jit.ScriptModule, + joiner_session: ort.InferenceSession, +): + joiner_inputs = joiner_session.get_inputs() + assert joiner_inputs[0].shape == ["N", 512] + assert joiner_inputs[1].shape == ["N", 512] + joiner_input_names = [i.name for i in joiner_inputs] + joiner_output_names = [i.name for i in joiner_session.get_outputs()] + + for N in [1, 5, 10]: + encoder_out = torch.rand(N, 512) + decoder_out = torch.rand(N, 512) + + joiner_inputs = { + joiner_input_names[0]: encoder_out.numpy(), + joiner_input_names[1]: decoder_out.numpy(), + } + joiner_out = joiner_session.run( + [joiner_output_names[0]], joiner_inputs + )[0] + joiner_out = torch.from_numpy(joiner_out) + + torch_joiner_out = model.joiner( + encoder_out, + decoder_out, + project_input=True, + ) + assert torch.allclose(joiner_out, torch_joiner_out, atol=1e-5), ( + (joiner_out - torch_joiner_out).abs().max() + ) + + +def extract_sub_model( + onnx_graph: onnx.ModelProto, + input_op_names: list, + output_op_names: list, + non_verbose=False, +): + onnx_graph = onnx.shape_inference.infer_shapes(onnx_graph) + graph = gs.import_onnx(onnx_graph) + 
graph.cleanup().toposort() + + # Extraction of input OP and output OP + graph_node_inputs = [ + graph_nodes + for graph_nodes in graph.nodes + for graph_nodes_input in graph_nodes.inputs + if graph_nodes_input.name in input_op_names + ] + graph_node_outputs = [ + graph_nodes + for graph_nodes in graph.nodes + for graph_nodes_output in graph_nodes.outputs + if graph_nodes_output.name in output_op_names + ] + + # Init graph INPUT/OUTPUT + graph.inputs.clear() + graph.outputs.clear() + + # Update graph INPUT/OUTPUT + graph.inputs = [ + graph_node_input + for graph_node in graph_node_inputs + for graph_node_input in graph_node.inputs + if graph_node_input.shape + ] + graph.outputs = [ + graph_node_output + for graph_node in graph_node_outputs + for graph_node_output in graph_node.outputs + ] + + # Cleanup + graph.cleanup().toposort() + + # Shape Estimation + extracted_graph = None + try: + extracted_graph = onnx.shape_inference.infer_shapes( + gs.export_onnx(graph) + ) + except Exception: + extracted_graph = gs.export_onnx(graph) + if not non_verbose: + print( + "WARNING: " + + "The input shape of the next OP does not match the output shape. " + + "Be sure to open the .onnx file to verify the certainty of the geometry." + ) + return extracted_graph + + +def extract_encoder(onnx_model: onnx.ModelProto): + encoder_ = extract_sub_model( + onnx_model, + ["encoder/x", "encoder/x_lens"], + ["encoder/encoder_out", "encoder/encoder_out_lens"], + False, + ) + onnx.save(encoder_, "tmp_encoder.onnx") + onnx.checker.check_model(encoder_) + sess = onnxruntime.InferenceSession("tmp_encoder.onnx") + os.remove("tmp_encoder.onnx") + return sess + + +def extract_decoder(onnx_model: onnx.ModelProto): + decoder_ = extract_sub_model( + onnx_model, ["decoder/y"], ["decoder/decoder_out"], False + ) + onnx.save(decoder_, "tmp_decoder.onnx") + onnx.checker.check_model(decoder_) + sess = onnxruntime.InferenceSession("tmp_decoder.onnx") + os.remove("tmp_decoder.onnx") + return sess + + +def extract_joiner(onnx_model: onnx.ModelProto): + joiner_ = extract_sub_model( + onnx_model, + ["joiner/encoder_out", "joiner/decoder_out"], + ["joiner/logit"], + False, + ) + onnx.save(joiner_, "tmp_joiner.onnx") + onnx.checker.check_model(joiner_) + sess = onnxruntime.InferenceSession("tmp_joiner.onnx") + os.remove("tmp_joiner.onnx") + return sess + + +@torch.no_grad() +def main(): + args = get_parser().parse_args() + logging.info(vars(args)) + + model = torch.jit.load(args.jit_filename) + onnx_model = onnx.load(args.onnx_all_in_one_filename) + + options = ort.SessionOptions() + options.inter_op_num_threads = 1 + options.intra_op_num_threads = 1 + + logging.info("Test encoder") + encoder_session = extract_encoder(onnx_model) + test_encoder(model, encoder_session) + + logging.info("Test decoder") + decoder_session = extract_decoder(onnx_model) + test_decoder(model, decoder_session) + + logging.info("Test joiner") + joiner_session = extract_joiner(onnx_model) + test_joiner(model, joiner_session) + logging.info("Finished checking ONNX models") + + +if __name__ == "__main__": + torch.manual_seed(20220727) + formatter = ( + "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s" + ) + + logging.basicConfig(format=formatter, level=logging.INFO) + main() diff --git a/requirements-ci.txt b/requirements-ci.txt index 48769d61a..385c8737e 100644 --- a/requirements-ci.txt +++ b/requirements-ci.txt @@ -23,3 +23,4 @@ multi_quantization onnx onnxruntime +onnx_graphsurgeon -i https://pypi.ngc.nvidia.com diff --git a/requirements.txt 
b/requirements.txt index 25b5529f0..2e72d2eb6 100644 --- a/requirements.txt +++ b/requirements.txt @@ -6,3 +6,5 @@ typeguard multi_quantization onnx onnxruntime +--extra-index-url https://pypi.ngc.nvidia.com +onnx_graphsurgeon From 1f7832b93c461f2c14880d7ab9189c70ce11b2d3 Mon Sep 17 00:00:00 2001 From: Fangjun Kuang Date: Sat, 6 Aug 2022 10:00:08 +0800 Subject: [PATCH 30/38] Fix loading sampler state dict. (#421) * Fix loading sampler state dict. * skip scan_pessimistic_batches_for_oom if params.start_batch > 0 --- .../ASR/pruned_transducer_stateless/train.py | 26 ++++++------------- .../ASR/pruned_transducer_stateless2/train.py | 13 +--------- .../ASR/pruned_transducer_stateless3/train.py | 17 ++++++------ .../ASR/pruned_transducer_stateless4/train.py | 13 +--------- .../ASR/pruned_transducer_stateless5/train.py | 13 +--------- .../ASR/pruned_transducer_stateless6/train.py | 13 +--------- 6 files changed, 21 insertions(+), 74 deletions(-) diff --git a/egs/librispeech/ASR/pruned_transducer_stateless/train.py b/egs/librispeech/ASR/pruned_transducer_stateless/train.py index b625ed3ff..33b23038c 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless/train.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless/train.py @@ -457,9 +457,6 @@ def load_checkpoint_if_available( if "cur_epoch" in saved_params: params["start_epoch"] = saved_params["cur_epoch"] - if "cur_batch_idx" in saved_params: - params["cur_batch_idx"] = saved_params["cur_batch_idx"] - return saved_params @@ -674,13 +671,7 @@ def train_one_epoch( global_step=params.batch_idx_train, ) - cur_batch_idx = params.get("cur_batch_idx", 0) - for batch_idx, batch in enumerate(train_dl): - if batch_idx < cur_batch_idx: - continue - cur_batch_idx = batch_idx - params.batch_idx_train += 1 batch_size = len(batch["supervisions"]["text"]) @@ -728,7 +719,6 @@ def train_one_epoch( params.batch_idx_train > 0 and params.batch_idx_train % params.save_every_n == 0 ): - params.cur_batch_idx = batch_idx save_checkpoint_with_global_batch_idx( out_dir=params.exp_dir, global_batch_idx=params.batch_idx_train, @@ -738,7 +728,6 @@ def train_one_epoch( sampler=train_dl.sampler, rank=rank, ) - del params.cur_batch_idx remove_checkpoints( out_dir=params.exp_dir, topk=params.keep_last_k, @@ -893,13 +882,14 @@ def run(rank, world_size, args): valid_cuts += librispeech.dev_other_cuts() valid_dl = librispeech.valid_dataloaders(valid_cuts) - scan_pessimistic_batches_for_oom( - model=model, - train_dl=train_dl, - optimizer=optimizer, - sp=sp, - params=params, - ) + if params.start_batch <= 0: + scan_pessimistic_batches_for_oom( + model=model, + train_dl=train_dl, + optimizer=optimizer, + sp=sp, + params=params, + ) for epoch in range(params.start_epoch, params.num_epochs): fix_random_seed(params.seed + epoch) diff --git a/egs/librispeech/ASR/pruned_transducer_stateless2/train.py b/egs/librispeech/ASR/pruned_transducer_stateless2/train.py index 46d2cb86d..4d290e39f 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless2/train.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless2/train.py @@ -503,9 +503,6 @@ def load_checkpoint_if_available( if "cur_epoch" in saved_params: params["start_epoch"] = saved_params["cur_epoch"] - if "cur_batch_idx" in saved_params: - params["cur_batch_idx"] = saved_params["cur_batch_idx"] - return saved_params @@ -724,13 +721,7 @@ def train_one_epoch( tot_loss = MetricsTracker() - cur_batch_idx = params.get("cur_batch_idx", 0) - for batch_idx, batch in enumerate(train_dl): - if batch_idx < cur_batch_idx: - continue - 
cur_batch_idx = batch_idx - params.batch_idx_train += 1 batch_size = len(batch["supervisions"]["text"]) @@ -765,7 +756,6 @@ def train_one_epoch( params.batch_idx_train > 0 and params.batch_idx_train % params.save_every_n == 0 ): - params.cur_batch_idx = batch_idx save_checkpoint_with_global_batch_idx( out_dir=params.exp_dir, global_batch_idx=params.batch_idx_train, @@ -777,7 +767,6 @@ def train_one_epoch( scaler=scaler, rank=rank, ) - del params.cur_batch_idx remove_checkpoints( out_dir=params.exp_dir, topk=params.keep_last_k, @@ -944,7 +933,7 @@ def run(rank, world_size, args): valid_cuts += librispeech.dev_other_cuts() valid_dl = librispeech.valid_dataloaders(valid_cuts) - if not params.print_diagnostics: + if params.start_batch <= 0 and not params.print_diagnostics: scan_pessimistic_batches_for_oom( model=model, train_dl=train_dl, diff --git a/egs/librispeech/ASR/pruned_transducer_stateless3/train.py b/egs/librispeech/ASR/pruned_transducer_stateless3/train.py index ff9b8d808..914b9b5eb 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless3/train.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless3/train.py @@ -1058,14 +1058,15 @@ def run(rank, world_size, args): # It's time consuming to include `giga_train_dl` here # for dl in [train_dl, giga_train_dl]: for dl in [train_dl]: - scan_pessimistic_batches_for_oom( - model=model, - train_dl=dl, - optimizer=optimizer, - sp=sp, - params=params, - warmup=0.0 if params.start_epoch == 0 else 1.0, - ) + if params.start_batch <= 0: + scan_pessimistic_batches_for_oom( + model=model, + train_dl=dl, + optimizer=optimizer, + sp=sp, + params=params, + warmup=0.0 if params.start_epoch == 0 else 1.0, + ) scaler = GradScaler(enabled=params.use_fp16) if checkpoints and "grad_scaler" in checkpoints: diff --git a/egs/librispeech/ASR/pruned_transducer_stateless4/train.py b/egs/librispeech/ASR/pruned_transducer_stateless4/train.py index 893a6a749..325b01323 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless4/train.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless4/train.py @@ -525,9 +525,6 @@ def load_checkpoint_if_available( if "cur_epoch" in saved_params: params["start_epoch"] = saved_params["cur_epoch"] - if "cur_batch_idx" in saved_params: - params["cur_batch_idx"] = saved_params["cur_batch_idx"] - return saved_params @@ -757,13 +754,7 @@ def train_one_epoch( tot_loss = MetricsTracker() - cur_batch_idx = params.get("cur_batch_idx", 0) - for batch_idx, batch in enumerate(train_dl): - if batch_idx < cur_batch_idx: - continue - cur_batch_idx = batch_idx - params.batch_idx_train += 1 batch_size = len(batch["supervisions"]["text"]) @@ -805,7 +796,6 @@ def train_one_epoch( params.batch_idx_train > 0 and params.batch_idx_train % params.save_every_n == 0 ): - params.cur_batch_idx = batch_idx save_checkpoint_with_global_batch_idx( out_dir=params.exp_dir, global_batch_idx=params.batch_idx_train, @@ -818,7 +808,6 @@ def train_one_epoch( scaler=scaler, rank=rank, ) - del params.cur_batch_idx remove_checkpoints( out_dir=params.exp_dir, topk=params.keep_last_k, @@ -993,7 +982,7 @@ def run(rank, world_size, args): valid_cuts += librispeech.dev_other_cuts() valid_dl = librispeech.valid_dataloaders(valid_cuts) - if not params.print_diagnostics: + if params.start_batch <= 0 and not params.print_diagnostics: scan_pessimistic_batches_for_oom( model=model, train_dl=train_dl, diff --git a/egs/librispeech/ASR/pruned_transducer_stateless5/train.py b/egs/librispeech/ASR/pruned_transducer_stateless5/train.py index 44abdcd49..3bfe22155 100755 --- 
a/egs/librispeech/ASR/pruned_transducer_stateless5/train.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless5/train.py @@ -550,9 +550,6 @@ def load_checkpoint_if_available( if "cur_epoch" in saved_params: params["start_epoch"] = saved_params["cur_epoch"] - if "cur_batch_idx" in saved_params: - params["cur_batch_idx"] = saved_params["cur_batch_idx"] - return saved_params @@ -782,13 +779,7 @@ def train_one_epoch( tot_loss = MetricsTracker() - cur_batch_idx = params.get("cur_batch_idx", 0) - for batch_idx, batch in enumerate(train_dl): - if batch_idx < cur_batch_idx: - continue - cur_batch_idx = batch_idx - params.batch_idx_train += 1 batch_size = len(batch["supervisions"]["text"]) @@ -834,7 +825,6 @@ def train_one_epoch( params.batch_idx_train > 0 and params.batch_idx_train % params.save_every_n == 0 ): - params.cur_batch_idx = batch_idx save_checkpoint_with_global_batch_idx( out_dir=params.exp_dir, global_batch_idx=params.batch_idx_train, @@ -847,7 +837,6 @@ def train_one_epoch( scaler=scaler, rank=rank, ) - del params.cur_batch_idx remove_checkpoints( out_dir=params.exp_dir, topk=params.keep_last_k, @@ -1025,7 +1014,7 @@ def run(rank, world_size, args): valid_cuts += librispeech.dev_other_cuts() valid_dl = librispeech.valid_dataloaders(valid_cuts) - if not params.print_diagnostics: + if params.start_batch <= 0 and not params.print_diagnostics: scan_pessimistic_batches_for_oom( model=model, train_dl=train_dl, diff --git a/egs/librispeech/ASR/pruned_transducer_stateless6/train.py b/egs/librispeech/ASR/pruned_transducer_stateless6/train.py index 596f8f7d9..a4595211c 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless6/train.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless6/train.py @@ -507,9 +507,6 @@ def load_checkpoint_if_available( if "cur_epoch" in saved_params: params["start_epoch"] = saved_params["cur_epoch"] - if "cur_batch_idx" in saved_params: - params["cur_batch_idx"] = saved_params["cur_batch_idx"] - return saved_params @@ -763,13 +760,7 @@ def train_one_epoch( tot_loss = MetricsTracker() - cur_batch_idx = params.get("cur_batch_idx", 0) - for batch_idx, batch in enumerate(train_dl): - if batch_idx < cur_batch_idx: - continue - cur_batch_idx = batch_idx - params.batch_idx_train += 1 batch_size = len(batch["supervisions"]["text"]) @@ -811,7 +802,6 @@ def train_one_epoch( params.batch_idx_train > 0 and params.batch_idx_train % params.save_every_n == 0 ): - params.cur_batch_idx = batch_idx save_checkpoint_with_global_batch_idx( out_dir=params.exp_dir, global_batch_idx=params.batch_idx_train, @@ -824,7 +814,6 @@ def train_one_epoch( scaler=scaler, rank=rank, ) - del params.cur_batch_idx remove_checkpoints( out_dir=params.exp_dir, topk=params.keep_last_k, @@ -999,7 +988,7 @@ def run(rank, world_size, args): valid_cuts += librispeech.dev_other_cuts() valid_dl = librispeech.valid_dataloaders(valid_cuts) - if not params.print_diagnostics: + if params.start_batch <= 0 and not params.print_diagnostics: scan_pessimistic_batches_for_oom( model=model, train_dl=train_dl, From f24b76e64bb59e157e3904d0330a132a308f18c0 Mon Sep 17 00:00:00 2001 From: FNLPprojects Date: Sat, 6 Aug 2022 18:33:43 +0800 Subject: [PATCH 31/38] fix torchaudio version (#524) * fix torchaudio version * fix torchaudio version --- docker/Ubuntu18.04-pytorch1.7.1-cuda11.0-cudnn8/Dockerfile | 1 + 1 file changed, 1 insertion(+) diff --git a/docker/Ubuntu18.04-pytorch1.7.1-cuda11.0-cudnn8/Dockerfile b/docker/Ubuntu18.04-pytorch1.7.1-cuda11.0-cudnn8/Dockerfile index a9caf07ed..746c2c4f3 100644 --- 
a/docker/Ubuntu18.04-pytorch1.7.1-cuda11.0-cudnn8/Dockerfile +++ b/docker/Ubuntu18.04-pytorch1.7.1-cuda11.0-cudnn8/Dockerfile @@ -79,6 +79,7 @@ RUN git clone https://github.com/k2-fsa/k2.git /opt/k2 && \ cd - # install lhotse +RUN pip install torchaudio==0.7.2 RUN pip install git+https://github.com/lhotse-speech/lhotse #RUN pip install lhotse From 5149788cb2e0730d1537b9711dcfc5c4b11a0f4b Mon Sep 17 00:00:00 2001 From: Fangjun Kuang Date: Tue, 9 Aug 2022 10:53:31 +0800 Subject: [PATCH 32/38] Fix computing averaged loss in the aishell recipe. (#523) * Fix computing averaged loss in the aishell recipe. * Set find_unused_parameters optionally. --- .../ASR/pruned_transducer_stateless3/train.py | 23 +++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/egs/aishell/ASR/pruned_transducer_stateless3/train.py b/egs/aishell/ASR/pruned_transducer_stateless3/train.py index 0e5291b21..feaef5cf6 100755 --- a/egs/aishell/ASR/pruned_transducer_stateless3/train.py +++ b/egs/aishell/ASR/pruned_transducer_stateless3/train.py @@ -22,8 +22,12 @@ Usage: ./prepare.sh + +# If you use a non-zero value for --datatang-prob, you also need to run ./prepare_aidatatang_200zh.sh +If you use --datatang-prob=0, then you don't need to run the above script. + export CUDA_VISIBLE_DEVICES="0,1,2,3" @@ -62,7 +66,6 @@ import optim import torch import torch.multiprocessing as mp import torch.nn as nn - from aidatatang_200zh import AIDatatang200zh from aishell import AIShell from asr_datamodule import AsrDataModule @@ -344,7 +347,7 @@ def get_parser(): parser.add_argument( "--datatang-prob", type=float, - default=0.2, + default=0.0, help="""The probability to select a batch from the aidatatang_200zh dataset. If it is set to 0, you don't need to download the data @@ -945,7 +948,10 @@ def train_one_epoch( tb_writer, "train/valid_", params.batch_idx_train ) - loss_value = tot_loss["loss"] / tot_loss["frames"] + if datatang_train_dl is not None: + loss_value = tot_loss["loss"] / tot_loss["frames"] + else: + loss_value = aishell_tot_loss["loss"] / aishell_tot_loss["frames"] params.train_loss = loss_value if params.train_loss < params.best_train_loss: params.best_train_epoch = params.cur_epoch @@ -1032,7 +1038,16 @@ def run(rank, world_size, args): model.to(device) if world_size > 1: logging.info("Using DDP") - model = DDP(model, device_ids=[rank], find_unused_parameters=True) + if params.datatang_prob > 0: + find_unused_parameters = True + else: + find_unused_parameters = False + + model = DDP( + model, + device_ids=[rank], + find_unused_parameters=find_unused_parameters, + ) optimizer = Eve(model.parameters(), lr=params.initial_lr) From 5c17255eecb5b776b342cb336f9958bb8b4d20a0 Mon Sep 17 00:00:00 2001 From: Wei Kang Date: Fri, 12 Aug 2022 07:12:50 +0800 Subject: [PATCH 33/38] Sort results to make it more convenient to compare decoding results (#522) * Sort result to make it more convenient to compare decoding results * Add cut_id to recognition results * add cut_id to results for all recipes * Fix torch.jit.script * Fix comments * Minor fixes * Fix torch.jit.tracing for Pytorch version before v1.9.0 --- .../pruned_transducer_stateless2/decode.py | 8 +++- egs/aishell/ASR/conformer_ctc/decode.py | 8 +++- egs/aishell/ASR/conformer_mmi/decode.py | 8 +++- .../pruned_transducer_stateless3/decode.py | 8 +++- egs/aishell/ASR/tdnn_lstm_ctc/decode.py | 8 +++- .../ASR/transducer_stateless/decode.py | 10 +++-- .../transducer_stateless_modified-2/decode.py | 8 +++- .../transducer_stateless_modified/decode.py | 8 +++- 
.../pruned_transducer_stateless5/decode.py | 8 +++- .../pruned_transducer_stateless5/decode.py | 8 +++- .../pruned_transducer_stateless2/decode.py | 8 +++- egs/gigaspeech/ASR/conformer_ctc/decode.py | 8 +++- .../pruned_transducer_stateless2/decode.py | 8 +++- egs/librispeech/ASR/conformer_ctc/decode.py | 8 +++- egs/librispeech/ASR/conformer_ctc2/decode.py | 11 ++++-- egs/librispeech/ASR/conformer_mmi/decode.py | 8 +++- .../decode.py | 8 +++- .../stream.py | 6 +++ .../streaming_decode.py | 5 ++- .../decode.py | 8 +++- .../streaming_decode.py | 5 ++- .../pruned_stateless_emformer_rnnt2/decode.py | 8 +++- .../ASR/pruned_transducer_stateless/decode.py | 10 ++++- .../decode_stream.py | 6 +++ .../streaming_decode.py | 3 ++ .../pruned_transducer_stateless2/conformer.py | 22 +++++------ .../pruned_transducer_stateless2/decode.py | 8 +++- .../pruned_transducer_stateless2/decoder.py | 11 ++++-- .../pruned_transducer_stateless2/joiner.py | 4 +- .../pruned_transducer_stateless2/scaling.py | 8 ++-- .../streaming_decode.py | 3 ++ .../decode-giga.py | 7 +++- .../pruned_transducer_stateless3/decode.py | 8 +++- .../scaling_converter.py | 26 ++++++++++++- .../streaming_decode.py | 3 ++ .../pruned_transducer_stateless4/decode.py | 8 +++- .../streaming_decode.py | 3 ++ .../pruned_transducer_stateless5/decode.py | 8 +++- .../streaming_decode.py | 3 ++ .../pruned_transducer_stateless6/decode.py | 8 +++- egs/librispeech/ASR/tdnn_lstm_ctc/decode.py | 8 +++- egs/librispeech/ASR/transducer/decode.py | 8 +++- egs/librispeech/ASR/transducer_lstm/decode.py | 8 +++- .../ASR/transducer_stateless/decode.py | 8 +++- .../ASR/transducer_stateless2/decode.py | 8 +++- .../decode.py | 8 +++- .../pruned_transducer_stateless2/decode.py | 8 +++- .../pruned_transducer_stateless5/decode.py | 16 +++++--- .../ASR/pruned_transducer_stateless/decode.py | 8 +++- .../ASR/transducer_stateless/decode.py | 8 +++- egs/timit/ASR/tdnn_ligru_ctc/decode.py | 8 +++- egs/timit/ASR/tdnn_lstm_ctc/decode.py | 8 +++- .../pruned_transducer_stateless2/decode.py | 8 +++- .../pruned_transducer_stateless5/decode.py | 8 +++- .../decode_stream.py | 6 +++ .../streaming_decode.py | 3 ++ egs/yesno/ASR/tdnn/decode.py | 8 +++- egs/yesno/ASR/transducer/decode.py | 8 +++- icefall/__init__.py | 1 + icefall/utils.py | 37 +++++++++++++------ 60 files changed, 379 insertions(+), 126 deletions(-) diff --git a/egs/aidatatang_200zh/ASR/pruned_transducer_stateless2/decode.py b/egs/aidatatang_200zh/ASR/pruned_transducer_stateless2/decode.py index b78c600c3..a185567da 100755 --- a/egs/aidatatang_200zh/ASR/pruned_transducer_stateless2/decode.py +++ b/egs/aidatatang_200zh/ASR/pruned_transducer_stateless2/decode.py @@ -367,6 +367,7 @@ def decode_dataset( for batch_idx, batch in enumerate(dl): texts = batch["supervisions"]["text"] texts = [list(str(text).replace(" ", "")) for text in texts] + cut_ids = [cut.id for cut in batch["supervisions"]["cut"]] hyps_dict = decode_one_batch( params=params, @@ -379,8 +380,8 @@ def decode_dataset( for name, hyps in hyps_dict.items(): this_batch = [] assert len(hyps) == len(texts) - for hyp_words, ref_text in zip(hyps, texts): - this_batch.append((ref_text, hyp_words)) + for cut_id, hyp_words, ref_text in zip(cut_ids, hyps, texts): + this_batch.append((cut_id, ref_text, hyp_words)) results[name].extend(this_batch) @@ -405,6 +406,7 @@ def save_results( recog_path = ( params.res_dir / f"recogs-{test_set_name}-{key}-{params.suffix}.txt" ) + results = sorted(results) store_transcripts(filename=recog_path, texts=results) logging.info(f"The 
transcripts are stored in {recog_path}") @@ -528,6 +530,8 @@ def main(): from lhotse import CutSet from lhotse.dataset.webdataset import export_to_webdataset + # we need cut ids to display recognition results. + args.return_cuts = True aidatatang_200zh = Aidatatang_200zhAsrDataModule(args) dev = "dev" diff --git a/egs/aishell/ASR/conformer_ctc/decode.py b/egs/aishell/ASR/conformer_ctc/decode.py index c38c4c65f..d860d3fdb 100755 --- a/egs/aishell/ASR/conformer_ctc/decode.py +++ b/egs/aishell/ASR/conformer_ctc/decode.py @@ -374,6 +374,7 @@ def decode_dataset( results = defaultdict(list) for batch_idx, batch in enumerate(dl): texts = batch["supervisions"]["text"] + cut_ids = [cut.id for cut in batch["supervisions"]["cut"]] hyps_dict = decode_one_batch( params=params, @@ -389,9 +390,9 @@ def decode_dataset( for lm_scale, hyps in hyps_dict.items(): this_batch = [] assert len(hyps) == len(texts) - for hyp_words, ref_text in zip(hyps, texts): + for cut_id, hyp_words, ref_text in zip(cut_ids, hyps, texts): ref_words = ref_text.split() - this_batch.append((ref_words, hyp_words)) + this_batch.append((cut_id, ref_words, hyp_words)) results[lm_scale].extend(this_batch) @@ -419,6 +420,7 @@ def save_results( test_set_wers = dict() for key, results in results_dict.items(): recog_path = params.exp_dir / f"recogs-{test_set_name}-{key}.txt" + results = sorted(results) store_transcripts(filename=recog_path, texts=results) if enable_log: logging.info(f"The transcripts are stored in {recog_path}") @@ -537,6 +539,8 @@ def main(): num_param = sum([p.numel() for p in model.parameters()]) logging.info(f"Number of model parameters: {num_param}") + # we need cut ids to display recognition results. + args.return_cuts = True aishell = AishellAsrDataModule(args) test_cuts = aishell.test_cuts() test_dl = aishell.test_dataloaders(test_cuts) diff --git a/egs/aishell/ASR/conformer_mmi/decode.py b/egs/aishell/ASR/conformer_mmi/decode.py index 35a7d98fc..f33ddd48b 100755 --- a/egs/aishell/ASR/conformer_mmi/decode.py +++ b/egs/aishell/ASR/conformer_mmi/decode.py @@ -386,6 +386,7 @@ def decode_dataset( results = defaultdict(list) for batch_idx, batch in enumerate(dl): texts = batch["supervisions"]["text"] + cut_ids = [cut.id for cut in batch["supervisions"]["cut"]] hyps_dict = decode_one_batch( params=params, @@ -401,9 +402,9 @@ def decode_dataset( for lm_scale, hyps in hyps_dict.items(): this_batch = [] assert len(hyps) == len(texts) - for hyp_words, ref_text in zip(hyps, texts): + for cut_id, hyp_words, ref_text in zip(cut_ids, hyps, texts): ref_words = ref_text.split() - this_batch.append((ref_words, hyp_words)) + this_batch.append((cut_id, ref_words, hyp_words)) results[lm_scale].extend(this_batch) @@ -431,6 +432,7 @@ def save_results( test_set_wers = dict() for key, results in results_dict.items(): recog_path = params.exp_dir / f"recogs-{test_set_name}-{key}.txt" + results = sorted(results) store_transcripts(filename=recog_path, texts=results) if enable_log: logging.info(f"The transcripts are stored in {recog_path}") @@ -556,6 +558,8 @@ def main(): num_param = sum([p.numel() for p in model.parameters()]) logging.info(f"Number of model parameters: {num_param}") + # we need cut ids to display recognition results. 
+ args.return_cuts = True aishell = AishellAsrDataModule(args) test_cuts = aishell.test_cuts() test_dl = aishell.test_dataloaders(test_cuts) diff --git a/egs/aishell/ASR/pruned_transducer_stateless3/decode.py b/egs/aishell/ASR/pruned_transducer_stateless3/decode.py index 6aea306c8..a6a01c2c6 100755 --- a/egs/aishell/ASR/pruned_transducer_stateless3/decode.py +++ b/egs/aishell/ASR/pruned_transducer_stateless3/decode.py @@ -377,6 +377,7 @@ def decode_dataset( results = defaultdict(list) for batch_idx, batch in enumerate(dl): texts = batch["supervisions"]["text"] + cut_ids = [cut.id for cut in batch["supervisions"]["cut"]] hyps_dict = decode_one_batch( params=params, @@ -389,9 +390,9 @@ def decode_dataset( for name, hyps in hyps_dict.items(): this_batch = [] assert len(hyps) == len(texts) - for hyp_words, ref_text in zip(hyps, texts): + for cut_id, hyp_words, ref_text in zip(cut_ids, hyps, texts): ref_words = ref_text.split() - this_batch.append((ref_words, hyp_words)) + this_batch.append((cut_id, ref_words, hyp_words)) results[name].extend(this_batch) @@ -416,6 +417,7 @@ def save_results( recog_path = ( params.res_dir / f"recogs-{test_set_name}-{key}-{params.suffix}.txt" ) + results = sorted(results) store_transcripts(filename=recog_path, texts=results) logging.info(f"The transcripts are stored in {recog_path}") @@ -606,6 +608,8 @@ def main(): num_param = sum([p.numel() for p in model.parameters()]) logging.info(f"Number of model parameters: {num_param}") + # we need cut ids to display recognition results. + args.return_cuts = True asr_datamodule = AsrDataModule(args) aishell = AIShell(manifest_dir=args.manifest_dir) test_cuts = aishell.test_cuts() diff --git a/egs/aishell/ASR/tdnn_lstm_ctc/decode.py b/egs/aishell/ASR/tdnn_lstm_ctc/decode.py index f3c8e8f44..58a999c22 100755 --- a/egs/aishell/ASR/tdnn_lstm_ctc/decode.py +++ b/egs/aishell/ASR/tdnn_lstm_ctc/decode.py @@ -241,6 +241,7 @@ def decode_dataset( results = defaultdict(list) for batch_idx, batch in enumerate(dl): texts = batch["supervisions"]["text"] + cut_ids = [cut.id for cut in batch["supervisions"]["cut"]] hyps_dict = decode_one_batch( params=params, @@ -253,9 +254,9 @@ def decode_dataset( for lm_scale, hyps in hyps_dict.items(): this_batch = [] assert len(hyps) == len(texts) - for hyp_words, ref_text in zip(hyps, texts): + for cut_id, hyp_words, ref_text in zip(cut_ids, hyps, texts): ref_words = ref_text.split() - this_batch.append((ref_words, hyp_words)) + this_batch.append((cut_id, ref_words, hyp_words)) results[lm_scale].extend(this_batch) @@ -278,6 +279,7 @@ def save_results( test_set_wers = dict() for key, results in results_dict.items(): recog_path = params.exp_dir / f"recogs-{test_set_name}-{key}.txt" + results = sorted(results) store_transcripts(filename=recog_path, texts=results) logging.info(f"The transcripts are stored in {recog_path}") @@ -365,6 +367,8 @@ def main(): model.to(device) model.eval() + # we need cut ids to display recognition results. 
+ args.return_cuts = True aishell = AishellAsrDataModule(args) test_cuts = aishell.test_cuts() test_dl = aishell.test_dataloaders(test_cuts) diff --git a/egs/aishell/ASR/transducer_stateless/decode.py b/egs/aishell/ASR/transducer_stateless/decode.py index a7b030fa5..7a96b6f73 100755 --- a/egs/aishell/ASR/transducer_stateless/decode.py +++ b/egs/aishell/ASR/transducer_stateless/decode.py @@ -38,8 +38,8 @@ from icefall.utils import ( AttributeDict, setup_logger, store_transcripts, - write_error_stats, str2bool, + write_error_stats, ) @@ -296,6 +296,7 @@ def decode_dataset( results = defaultdict(list) for batch_idx, batch in enumerate(dl): texts = batch["supervisions"]["text"] + cut_ids = [cut.id for cut in batch["supervisions"]["cut"]] hyps_dict = decode_one_batch( params=params, @@ -307,9 +308,9 @@ def decode_dataset( for name, hyps in hyps_dict.items(): this_batch = [] assert len(hyps) == len(texts) - for hyp_words, ref_text in zip(hyps, texts): + for cut_id, hyp_words, ref_text in zip(cut_ids, hyps, texts): ref_words = ref_text.split() - this_batch.append((ref_words, hyp_words)) + this_batch.append((cut_id, ref_words, hyp_words)) results[name].extend(this_batch) @@ -334,6 +335,7 @@ def save_results( recog_path = ( params.res_dir / f"recogs-{test_set_name}-{key}-{params.suffix}.txt" ) + results = sorted(results) store_transcripts(filename=recog_path, texts=results) # The following prints out WERs, per-word error statistics and aligned @@ -438,6 +440,8 @@ def main(): num_param = sum([p.numel() for p in model.parameters()]) logging.info(f"Number of model parameters: {num_param}") + # we need cut ids to display recognition results. + args.return_cuts = True aishell = AishellAsrDataModule(args) test_cuts = aishell.test_cuts() test_dl = aishell.test_dataloaders(test_cuts) diff --git a/egs/aishell/ASR/transducer_stateless_modified-2/decode.py b/egs/aishell/ASR/transducer_stateless_modified-2/decode.py index 47265f846..7eb273da0 100755 --- a/egs/aishell/ASR/transducer_stateless_modified-2/decode.py +++ b/egs/aishell/ASR/transducer_stateless_modified-2/decode.py @@ -341,6 +341,7 @@ def decode_dataset( results = defaultdict(list) for batch_idx, batch in enumerate(dl): texts = batch["supervisions"]["text"] + cut_ids = [cut.id for cut in batch["supervisions"]["cut"]] hyps_dict = decode_one_batch( params=params, @@ -353,9 +354,9 @@ def decode_dataset( for name, hyps in hyps_dict.items(): this_batch = [] assert len(hyps) == len(texts) - for hyp_words, ref_text in zip(hyps, texts): + for cut_id, hyp_words, ref_text in zip(cut_ids, hyps, texts): ref_words = ref_text.split() - this_batch.append((ref_words, hyp_words)) + this_batch.append((cut_id, ref_words, hyp_words)) results[name].extend(this_batch) @@ -380,6 +381,7 @@ def save_results( recog_path = ( params.res_dir / f"recogs-{test_set_name}-{key}-{params.suffix}.txt" ) + results = sorted(results) store_transcripts(filename=recog_path, texts=results) logging.info(f"The transcripts are stored in {recog_path}") @@ -496,6 +498,8 @@ def main(): num_param = sum([p.numel() for p in model.parameters()]) logging.info(f"Number of model parameters: {num_param}") + # we need cut ids to display recognition results. 
+ args.return_cuts = True asr_datamodule = AsrDataModule(args) aishell = AIShell(manifest_dir=args.manifest_dir) test_cuts = aishell.test_cuts() diff --git a/egs/aishell/ASR/transducer_stateless_modified/decode.py b/egs/aishell/ASR/transducer_stateless_modified/decode.py index 4773ebc7d..1fe39fed2 100755 --- a/egs/aishell/ASR/transducer_stateless_modified/decode.py +++ b/egs/aishell/ASR/transducer_stateless_modified/decode.py @@ -345,6 +345,7 @@ def decode_dataset( results = defaultdict(list) for batch_idx, batch in enumerate(dl): texts = batch["supervisions"]["text"] + cut_ids = [cut.id for cut in batch["supervisions"]["cut"]] hyps_dict = decode_one_batch( params=params, @@ -357,9 +358,9 @@ def decode_dataset( for name, hyps in hyps_dict.items(): this_batch = [] assert len(hyps) == len(texts) - for hyp_words, ref_text in zip(hyps, texts): + for cut_id, hyp_words, ref_text in zip(cut_ids, hyps, texts): ref_words = ref_text.split() - this_batch.append((ref_words, hyp_words)) + this_batch.append((cut_id, ref_words, hyp_words)) results[name].extend(this_batch) @@ -384,6 +385,7 @@ def save_results( recog_path = ( params.res_dir / f"recogs-{test_set_name}-{key}-{params.suffix}.txt" ) + results = sorted(results) store_transcripts(filename=recog_path, texts=results) logging.info(f"The transcripts are stored in {recog_path}") @@ -498,6 +500,8 @@ def main(): num_param = sum([p.numel() for p in model.parameters()]) logging.info(f"Number of model parameters: {num_param}") + # we need cut ids to display recognition results. + args.return_cuts = True aishell = AishellAsrDataModule(args) test_cuts = aishell.test_cuts() test_dl = aishell.test_dataloaders(test_cuts) diff --git a/egs/aishell2/ASR/pruned_transducer_stateless5/decode.py b/egs/aishell2/ASR/pruned_transducer_stateless5/decode.py index f03bd34d3..7d6f6f6d5 100755 --- a/egs/aishell2/ASR/pruned_transducer_stateless5/decode.py +++ b/egs/aishell2/ASR/pruned_transducer_stateless5/decode.py @@ -514,6 +514,7 @@ def decode_dataset( results = defaultdict(list) for batch_idx, batch in enumerate(dl): texts = batch["supervisions"]["text"] + cut_ids = [cut.id for cut in batch["supervisions"]["cut"]] hyps_dict = decode_one_batch( params=params, @@ -527,8 +528,8 @@ def decode_dataset( for name, hyps in hyps_dict.items(): this_batch = [] assert len(hyps) == len(texts) - for hyp_words, ref_text in zip(hyps, texts): - this_batch.append((ref_text, hyp_words)) + for cut_id, hyp_words, ref_text in zip(cut_ids, hyps, texts): + this_batch.append((cut_id, ref_text, hyp_words)) results[name].extend(this_batch) @@ -553,6 +554,7 @@ def save_results( recog_path = ( params.res_dir / f"recogs-{test_set_name}-{key}-{params.suffix}.txt" ) + results = sorted(results) store_transcripts(filename=recog_path, texts=results) logging.info(f"The transcripts are stored in {recog_path}") @@ -756,6 +758,8 @@ def main(): num_param = sum([p.numel() for p in model.parameters()]) logging.info(f"Number of model parameters: {num_param}") + # we need cut ids to display recognition results. 
+ args.return_cuts = True aishell2 = AiShell2AsrDataModule(args) valid_cuts = aishell2.valid_cuts() diff --git a/egs/aishell4/ASR/pruned_transducer_stateless5/decode.py b/egs/aishell4/ASR/pruned_transducer_stateless5/decode.py index d329410e1..739dfc4a1 100755 --- a/egs/aishell4/ASR/pruned_transducer_stateless5/decode.py +++ b/egs/aishell4/ASR/pruned_transducer_stateless5/decode.py @@ -378,6 +378,7 @@ def decode_dataset( for batch_idx, batch in enumerate(dl): texts = batch["supervisions"]["text"] texts = [list(str(text).replace(" ", "")) for text in texts] + cut_ids = [cut.id for cut in batch["supervisions"]["cut"]] hyps_dict = decode_one_batch( params=params, @@ -390,8 +391,8 @@ def decode_dataset( for name, hyps in hyps_dict.items(): this_batch = [] assert len(hyps) == len(texts) - for hyp_words, ref_text in zip(hyps, texts): - this_batch.append((ref_text, hyp_words)) + for cut_id, hyp_words, ref_text in zip(cut_ids, hyps, texts): + this_batch.append((cut_id, ref_text, hyp_words)) results[name].extend(this_batch) @@ -416,6 +417,7 @@ def save_results( recog_path = ( params.res_dir / f"recogs-{test_set_name}-{key}-{params.suffix}.txt" ) + results = sorted(results) store_transcripts(filename=recog_path, texts=results) logging.info(f"The transcripts are stored in {recog_path}") @@ -607,6 +609,8 @@ def main(): c.supervisions[0].text = text_normalize(text) return c + # we need cut ids to display recognition results. + args.return_cuts = True aishell4 = Aishell4AsrDataModule(args) test_cuts = aishell4.test_cuts() test_cuts = test_cuts.map(text_normalize_for_cut) diff --git a/egs/alimeeting/ASR/pruned_transducer_stateless2/decode.py b/egs/alimeeting/ASR/pruned_transducer_stateless2/decode.py index cb455838e..65fc74728 100755 --- a/egs/alimeeting/ASR/pruned_transducer_stateless2/decode.py +++ b/egs/alimeeting/ASR/pruned_transducer_stateless2/decode.py @@ -367,6 +367,7 @@ def decode_dataset( for batch_idx, batch in enumerate(dl): texts = batch["supervisions"]["text"] texts = [list(str(text).replace(" ", "")) for text in texts] + cut_ids = [cut.id for cut in batch["supervisions"]["cut"]] hyps_dict = decode_one_batch( params=params, @@ -379,8 +380,8 @@ def decode_dataset( for name, hyps in hyps_dict.items(): this_batch = [] assert len(hyps) == len(texts) - for hyp_words, ref_text in zip(hyps, texts): - this_batch.append((ref_text, hyp_words)) + for cut_id, hyp_words, ref_text in zip(cut_ids, hyps, texts): + this_batch.append((cut_id, ref_text, hyp_words)) results[name].extend(this_batch) @@ -405,6 +406,7 @@ def save_results( recog_path = ( params.res_dir / f"recogs-{test_set_name}-{key}-{params.suffix}.txt" ) + results = sorted(results) store_transcripts(filename=recog_path, texts=results) logging.info(f"The transcripts are stored in {recog_path}") @@ -535,6 +537,8 @@ def main(): from lhotse import CutSet from lhotse.dataset.webdataset import export_to_webdataset + # we need cut ids to display recognition results. 
+ args.return_cuts = True alimeeting = AlimeetingAsrDataModule(args) dev = "eval" diff --git a/egs/gigaspeech/ASR/conformer_ctc/decode.py b/egs/gigaspeech/ASR/conformer_ctc/decode.py index 6ab9852b4..f4b438aad 100755 --- a/egs/gigaspeech/ASR/conformer_ctc/decode.py +++ b/egs/gigaspeech/ASR/conformer_ctc/decode.py @@ -451,6 +451,7 @@ def decode_dataset( results = defaultdict(list) for batch_idx, batch in enumerate(dl): texts = batch["supervisions"]["text"] + cut_ids = [cut.id for cut in batch["supervisions"]["cut"]] hyps_dict = decode_one_batch( params=params, @@ -469,9 +470,9 @@ def decode_dataset( for lm_scale, hyps in hyps_dict.items(): this_batch = [] assert len(hyps) == len(texts) - for hyp_words, ref_text in zip(hyps, texts): + for cut_id, hyp_words, ref_text in zip(cut_ids, hyps, texts): ref_words = ref_text.split() - this_batch.append((ref_words, hyp_words)) + this_batch.append((cut_id, ref_words, hyp_words)) results[lm_scale].extend(this_batch) else: @@ -512,6 +513,7 @@ def save_results( for key, results in results_dict.items(): recog_path = params.exp_dir / f"recogs-{test_set_name}-{key}.txt" results = post_processing(results) + results = sorted(results) store_transcripts(filename=recog_path, texts=results) if enable_log: logging.info(f"The transcripts are stored in {recog_path}") @@ -676,6 +678,8 @@ def main(): num_param = sum([p.numel() for p in model.parameters()]) logging.info(f"Number of model parameters: {num_param}") + # we need cut ids to display recognition results. + args.return_cuts = True gigaspeech = GigaSpeechAsrDataModule(args) dev_cuts = gigaspeech.dev_cuts() diff --git a/egs/gigaspeech/ASR/pruned_transducer_stateless2/decode.py b/egs/gigaspeech/ASR/pruned_transducer_stateless2/decode.py index ce5116336..d7ecc3fdc 100755 --- a/egs/gigaspeech/ASR/pruned_transducer_stateless2/decode.py +++ b/egs/gigaspeech/ASR/pruned_transducer_stateless2/decode.py @@ -374,6 +374,7 @@ def decode_dataset( results = defaultdict(list) for batch_idx, batch in enumerate(dl): texts = batch["supervisions"]["text"] + cut_ids = [cut.id for cut in batch["supervisions"]["cut"]] hyps_dict = decode_one_batch( params=params, @@ -386,9 +387,9 @@ def decode_dataset( for name, hyps in hyps_dict.items(): this_batch = [] assert len(hyps) == len(texts) - for hyp_words, ref_text in zip(hyps, texts): + for cut_id, hyp_words, ref_text in zip(cut_ids, hyps, texts): ref_words = ref_text.split() - this_batch.append((ref_words, hyp_words)) + this_batch.append((cut_id, ref_words, hyp_words)) results[name].extend(this_batch) @@ -414,6 +415,7 @@ def save_results( params.res_dir / f"recogs-{test_set_name}-{key}-{params.suffix}.txt" ) results = post_processing(results) + results = sorted(results) store_transcripts(filename=recog_path, texts=results) logging.info(f"The transcripts are stored in {recog_path}") @@ -544,6 +546,8 @@ def main(): num_param = sum([p.numel() for p in model.parameters()]) logging.info(f"Number of model parameters: {num_param}") + # we need cut ids to display recognition results. 
+ args.return_cuts = True gigaspeech = GigaSpeechAsrDataModule(args) dev_cuts = gigaspeech.dev_cuts() diff --git a/egs/librispeech/ASR/conformer_ctc/decode.py b/egs/librispeech/ASR/conformer_ctc/decode.py index 0e8247b8d..7a8fb2130 100755 --- a/egs/librispeech/ASR/conformer_ctc/decode.py +++ b/egs/librispeech/ASR/conformer_ctc/decode.py @@ -525,6 +525,7 @@ def decode_dataset( results = defaultdict(list) for batch_idx, batch in enumerate(dl): texts = batch["supervisions"]["text"] + cut_ids = [cut.id for cut in batch["supervisions"]["cut"]] hyps_dict = decode_one_batch( params=params, @@ -544,9 +545,9 @@ def decode_dataset( for lm_scale, hyps in hyps_dict.items(): this_batch = [] assert len(hyps) == len(texts) - for hyp_words, ref_text in zip(hyps, texts): + for cut_id, hyp_words, ref_text in zip(cut_ids, hyps, texts): ref_words = ref_text.split() - this_batch.append((ref_words, hyp_words)) + this_batch.append((cut_id, ref_words, hyp_words)) results[lm_scale].extend(this_batch) else: @@ -586,6 +587,7 @@ def save_results( test_set_wers = dict() for key, results in results_dict.items(): recog_path = params.exp_dir / f"recogs-{test_set_name}-{key}.txt" + results = sorted(results) store_transcripts(filename=recog_path, texts=results) if enable_log: logging.info(f"The transcripts are stored in {recog_path}") @@ -779,6 +781,8 @@ def main(): ) rnn_lm_model.eval() + # we need cut ids to display recognition results. + args.return_cuts = True librispeech = LibriSpeechAsrDataModule(args) test_clean_cuts = librispeech.test_clean_cuts() diff --git a/egs/librispeech/ASR/conformer_ctc2/decode.py b/egs/librispeech/ASR/conformer_ctc2/decode.py index 8a4cad1ad..6b9da12a9 100755 --- a/egs/librispeech/ASR/conformer_ctc2/decode.py +++ b/egs/librispeech/ASR/conformer_ctc2/decode.py @@ -31,14 +31,13 @@ import torch.nn as nn from asr_datamodule import LibriSpeechAsrDataModule from conformer import Conformer +from icefall.bpe_graph_compiler import BpeCtcTrainingGraphCompiler from icefall.checkpoint import ( average_checkpoints, average_checkpoints_with_averaged_model, find_checkpoints, load_checkpoint, ) - -from icefall.bpe_graph_compiler import BpeCtcTrainingGraphCompiler from icefall.decode import ( get_lattice, nbest_decoding, @@ -633,6 +632,7 @@ def decode_dataset( results = defaultdict(list) for batch_idx, batch in enumerate(dl): texts = batch["supervisions"]["text"] + cut_ids = [cut.id for cut in batch["supervisions"]["cut"]] hyps_dict = decode_one_batch( params=params, @@ -652,9 +652,9 @@ def decode_dataset( for lm_scale, hyps in hyps_dict.items(): this_batch = [] assert len(hyps) == len(texts) - for hyp_words, ref_text in zip(hyps, texts): + for cut_id, hyp_words, ref_text in zip(cut_ids, hyps, texts): ref_words = ref_text.split() - this_batch.append((ref_words, hyp_words)) + this_batch.append((cut_id, ref_words, hyp_words)) results[lm_scale].extend(this_batch) else: @@ -694,6 +694,7 @@ def save_results( test_set_wers = dict() for key, results in results_dict.items(): recog_path = params.exp_dir / f"recogs-{test_set_name}-{key}.txt" + results = sorted(results) store_transcripts(filename=recog_path, texts=results) if enable_log: logging.info(f"The transcripts are stored in {recog_path}") @@ -956,6 +957,8 @@ def main(): ) rnn_lm_model.eval() + # we need cut ids to display recognition results. 
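Every decode.py hunk in this patch repeats the same three-step bookkeeping: fetch the cut ids from the batch (which is why each main() now sets args.return_cuts = True), key every (ref, hyp) pair by its cut id, and sort the results before store_transcripts() so the recogs-*.txt files come out in a stable, comparable order. The self-contained sketch below shows only that bookkeeping on toy data; Cut, fake_decode_one_batch and the printing at the end are stand-ins invented for the example, not the icefall API.

from collections import defaultdict, namedtuple
from typing import Dict, List, Tuple

Cut = namedtuple("Cut", ["id"])


def fake_decode_one_batch(batch: dict) -> Dict[str, List[List[str]]]:
    # Stand-in for decode_one_batch(): one hypothesis list per decoding method.
    return {
        "greedy_search": [text.split() for text in batch["supervisions"]["text"]]
    }


def decode_batches(
    batches: List[dict],
) -> Dict[str, List[Tuple[str, List[str], List[str]]]]:
    results = defaultdict(list)
    for batch in batches:
        texts = batch["supervisions"]["text"]
        # The line this patch adds everywhere; it only works when the data
        # module attaches cuts to the batch, hence `args.return_cuts = True`.
        cut_ids = [cut.id for cut in batch["supervisions"]["cut"]]
        hyps_dict = fake_decode_one_batch(batch)
        for name, hyps in hyps_dict.items():
            assert len(hyps) == len(texts)
            for cut_id, hyp_words, ref_text in zip(cut_ids, hyps, texts):
                results[name].append((cut_id, ref_text.split(), hyp_words))
    return results


batches = [
    {"supervisions": {"text": ["HELLO WORLD"], "cut": [Cut("cut-2")]}},
    {"supervisions": {"text": ["FOO BAR"], "cut": [Cut("cut-1")]}},
]
for name, res in decode_batches(batches).items():
    # `results = sorted(results)` in the hunks above orders tuples by cut id,
    # so transcripts for different decoding methods line up entry by entry.
    for cut_id, ref, hyp in sorted(res):
        print(f"{name}\t{cut_id}\tref={ref}\thyp={hyp}")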
+ args.return_cuts = True librispeech = LibriSpeechAsrDataModule(args) test_clean_cuts = librispeech.test_clean_cuts() diff --git a/egs/librispeech/ASR/conformer_mmi/decode.py b/egs/librispeech/ASR/conformer_mmi/decode.py index a77168b62..23372034a 100755 --- a/egs/librispeech/ASR/conformer_mmi/decode.py +++ b/egs/librispeech/ASR/conformer_mmi/decode.py @@ -449,6 +449,7 @@ def decode_dataset( results = defaultdict(list) for batch_idx, batch in enumerate(dl): texts = batch["supervisions"]["text"] + cut_ids = [cut.id for cut in batch["supervisions"]["cut"]] hyps_dict = decode_one_batch( params=params, @@ -466,9 +467,9 @@ def decode_dataset( for lm_scale, hyps in hyps_dict.items(): this_batch = [] assert len(hyps) == len(texts) - for hyp_words, ref_text in zip(hyps, texts): + for cut_id, hyp_words, ref_text in zip(cut_ids, hyps, texts): ref_words = ref_text.split() - this_batch.append((ref_words, hyp_words)) + this_batch.append((cut_id, ref_words, hyp_words)) results[lm_scale].extend(this_batch) @@ -496,6 +497,7 @@ def save_results( test_set_wers = dict() for key, results in results_dict.items(): recog_path = params.exp_dir / f"recogs-{test_set_name}-{key}.txt" + results = sorted(results) store_transcripts(filename=recog_path, texts=results) if enable_log: logging.info(f"The transcripts are stored in {recog_path}") @@ -661,6 +663,8 @@ def main(): num_param = sum([p.numel() for p in model.parameters()]) logging.info(f"Number of model parameters: {num_param}") + # we need cut ids to display recognition results. + args.return_cuts = True librispeech = LibriSpeechAsrDataModule(args) # CAUTION: `test_sets` is for displaying only. # If you want to skip test-clean, you have to skip diff --git a/egs/librispeech/ASR/conv_emformer_transducer_stateless/decode.py b/egs/librispeech/ASR/conv_emformer_transducer_stateless/decode.py index 287fb94df..a03fe2684 100755 --- a/egs/librispeech/ASR/conv_emformer_transducer_stateless/decode.py +++ b/egs/librispeech/ASR/conv_emformer_transducer_stateless/decode.py @@ -403,6 +403,7 @@ def decode_dataset( results = defaultdict(list) for batch_idx, batch in enumerate(dl): texts = batch["supervisions"]["text"] + cut_ids = [cut.id for cut in batch["supervisions"]["cut"]] hyps_dict = decode_one_batch( params=params, @@ -415,9 +416,9 @@ def decode_dataset( for name, hyps in hyps_dict.items(): this_batch = [] assert len(hyps) == len(texts) - for hyp_words, ref_text in zip(hyps, texts): + for cut_id, hyp_words, ref_text in zip(cut_ids, hyps, texts): ref_words = ref_text.split() - this_batch.append((ref_words, hyp_words)) + this_batch.append((cut_id, ref_words, hyp_words)) results[name].extend(this_batch) @@ -442,6 +443,7 @@ def save_results( recog_path = ( params.res_dir / f"recogs-{test_set_name}-{key}-{params.suffix}.txt" ) + results = sorted(results) store_transcripts(filename=recog_path, texts=results) logging.info(f"The transcripts are stored in {recog_path}") @@ -624,6 +626,8 @@ def main(): num_param = sum([p.numel() for p in model.parameters()]) logging.info(f"Number of model parameters: {num_param}") + # we need cut ids to display recognition results. 
+ args.return_cuts = True librispeech = LibriSpeechAsrDataModule(args) test_clean_cuts = librispeech.test_clean_cuts() diff --git a/egs/librispeech/ASR/conv_emformer_transducer_stateless/stream.py b/egs/librispeech/ASR/conv_emformer_transducer_stateless/stream.py index 69ee7ee9a..9494e1fc1 100644 --- a/egs/librispeech/ASR/conv_emformer_transducer_stateless/stream.py +++ b/egs/librispeech/ASR/conv_emformer_transducer_stateless/stream.py @@ -29,6 +29,7 @@ class Stream(object): def __init__( self, params: AttributeDict, + cut_id: str, decoding_graph: Optional[k2.Fsa] = None, device: torch.device = torch.device("cpu"), LOG_EPS: float = math.log(1e-10), @@ -44,6 +45,7 @@ class Stream(object): The device to run this stream. """ self.LOG_EPS = LOG_EPS + self.cut_id = cut_id # Containing attention caches and convolution caches self.states: Optional[ @@ -138,6 +140,10 @@ class Stream(object): """Return True if all feature frames are processed.""" return self._done + @property + def id(self) -> str: + return self.cut_id + def decoding_result(self) -> List[int]: """Obtain current decoding result.""" if self.decoding_method == "greedy_search": diff --git a/egs/librispeech/ASR/conv_emformer_transducer_stateless/streaming_decode.py b/egs/librispeech/ASR/conv_emformer_transducer_stateless/streaming_decode.py index 0a6bbfa8b..61dbe8658 100755 --- a/egs/librispeech/ASR/conv_emformer_transducer_stateless/streaming_decode.py +++ b/egs/librispeech/ASR/conv_emformer_transducer_stateless/streaming_decode.py @@ -74,7 +74,6 @@ from pathlib import Path from typing import Dict, List, Optional, Tuple import k2 -from lhotse import CutSet import numpy as np import sentencepiece as spm import torch @@ -83,6 +82,7 @@ from asr_datamodule import LibriSpeechAsrDataModule from beam_search import Hypothesis, HypothesisList, get_hyps_shape from emformer import LOG_EPSILON, stack_states, unstack_states from kaldifeat import Fbank, FbankOptions +from lhotse import CutSet from stream import Stream from torch.nn.utils.rnn import pad_sequence from train import add_model_arguments, get_params, get_transducer_model @@ -678,6 +678,7 @@ def decode_dataset( # Each utterance has a Stream. 
stream = Stream( params=params, + cut_id=cut.id, decoding_graph=decoding_graph, device=device, LOG_EPS=LOG_EPSILON, @@ -711,6 +712,7 @@ def decode_dataset( for i in sorted(finished_streams, reverse=True): decode_results.append( ( + streams[i].id, streams[i].ground_truth.split(), sp.decode(streams[i].decoding_result()).split(), ) @@ -731,6 +733,7 @@ def decode_dataset( for i in sorted(finished_streams, reverse=True): decode_results.append( ( + streams[i].id, streams[i].ground_truth.split(), sp.decode(streams[i].decoding_result()).split(), ) diff --git a/egs/librispeech/ASR/conv_emformer_transducer_stateless2/decode.py b/egs/librispeech/ASR/conv_emformer_transducer_stateless2/decode.py index 402ec4293..d204a9d75 100755 --- a/egs/librispeech/ASR/conv_emformer_transducer_stateless2/decode.py +++ b/egs/librispeech/ASR/conv_emformer_transducer_stateless2/decode.py @@ -403,6 +403,7 @@ def decode_dataset( results = defaultdict(list) for batch_idx, batch in enumerate(dl): texts = batch["supervisions"]["text"] + cut_ids = [cut.id for cut in batch["supervisions"]["cut"]] hyps_dict = decode_one_batch( params=params, @@ -415,9 +416,9 @@ def decode_dataset( for name, hyps in hyps_dict.items(): this_batch = [] assert len(hyps) == len(texts) - for hyp_words, ref_text in zip(hyps, texts): + for cut_id, hyp_words, ref_text in zip(cut_ids, hyps, texts): ref_words = ref_text.split() - this_batch.append((ref_words, hyp_words)) + this_batch.append((cut_id, ref_words, hyp_words)) results[name].extend(this_batch) @@ -442,6 +443,7 @@ def save_results( recog_path = ( params.res_dir / f"recogs-{test_set_name}-{key}-{params.suffix}.txt" ) + results = sorted(results) store_transcripts(filename=recog_path, texts=results) logging.info(f"The transcripts are stored in {recog_path}") @@ -624,6 +626,8 @@ def main(): num_param = sum([p.numel() for p in model.parameters()]) logging.info(f"Number of model parameters: {num_param}") + # we need cut ids to display recognition results. + args.return_cuts = True librispeech = LibriSpeechAsrDataModule(args) test_clean_cuts = librispeech.test_clean_cuts() diff --git a/egs/librispeech/ASR/conv_emformer_transducer_stateless2/streaming_decode.py b/egs/librispeech/ASR/conv_emformer_transducer_stateless2/streaming_decode.py index 0f687898f..71150392d 100755 --- a/egs/librispeech/ASR/conv_emformer_transducer_stateless2/streaming_decode.py +++ b/egs/librispeech/ASR/conv_emformer_transducer_stateless2/streaming_decode.py @@ -74,7 +74,6 @@ from pathlib import Path from typing import Dict, List, Optional, Tuple import k2 -from lhotse import CutSet import numpy as np import sentencepiece as spm import torch @@ -83,6 +82,7 @@ from asr_datamodule import LibriSpeechAsrDataModule from beam_search import Hypothesis, HypothesisList, get_hyps_shape from emformer import LOG_EPSILON, stack_states, unstack_states from kaldifeat import Fbank, FbankOptions +from lhotse import CutSet from stream import Stream from torch.nn.utils.rnn import pad_sequence from train import add_model_arguments, get_params, get_transducer_model @@ -678,6 +678,7 @@ def decode_dataset( # Each utterance has a Stream. 
stream = Stream( params=params, + cut_id=cut.id, decoding_graph=decoding_graph, device=device, LOG_EPS=LOG_EPSILON, @@ -711,6 +712,7 @@ def decode_dataset( for i in sorted(finished_streams, reverse=True): decode_results.append( ( + streams[i].id, streams[i].ground_truth.split(), sp.decode(streams[i].decoding_result()).split(), ) @@ -731,6 +733,7 @@ def decode_dataset( for i in sorted(finished_streams, reverse=True): decode_results.append( ( + streams[i].id, streams[i].ground_truth.split(), sp.decode(streams[i].decoding_result()).split(), ) diff --git a/egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/decode.py b/egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/decode.py index e9989579b..282ce3737 100755 --- a/egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/decode.py +++ b/egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/decode.py @@ -391,6 +391,7 @@ def decode_dataset( results = defaultdict(list) for batch_idx, batch in enumerate(dl): texts = batch["supervisions"]["text"] + cut_ids = [cut.id for cut in batch["supervisions"]["cut"]] hyps_dict = decode_one_batch( params=params, @@ -403,9 +404,9 @@ def decode_dataset( for name, hyps in hyps_dict.items(): this_batch = [] assert len(hyps) == len(texts) - for hyp_words, ref_text in zip(hyps, texts): + for cut_id, hyp_words, ref_text in zip(cut_ids, hyps, texts): ref_words = ref_text.split() - this_batch.append((ref_words, hyp_words)) + this_batch.append((cut_id, ref_words, hyp_words)) results[name].extend(this_batch) @@ -430,6 +431,7 @@ def save_results( recog_path = ( params.res_dir / f"recogs-{test_set_name}-{key}-{params.suffix}.txt" ) + results = sorted(results) store_transcripts(filename=recog_path, texts=results) logging.info(f"The transcripts are stored in {recog_path}") @@ -612,6 +614,8 @@ def main(): num_param = sum([p.numel() for p in model.parameters()]) logging.info(f"Number of model parameters: {num_param}") + # we need cut ids to display recognition results. + args.return_cuts = True librispeech = LibriSpeechAsrDataModule(args) test_clean_cuts = librispeech.test_clean_cuts() diff --git a/egs/librispeech/ASR/pruned_transducer_stateless/decode.py b/egs/librispeech/ASR/pruned_transducer_stateless/decode.py index b7558089c..ab6cf336c 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless/decode.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless/decode.py @@ -551,6 +551,7 @@ def decode_dataset( results = defaultdict(list) for batch_idx, batch in enumerate(dl): texts = batch["supervisions"]["text"] + cut_ids = [cut.id for cut in batch["supervisions"]["cut"]] hyps_dict = decode_one_batch( params=params, @@ -564,9 +565,9 @@ def decode_dataset( for name, hyps in hyps_dict.items(): this_batch = [] assert len(hyps) == len(texts) - for hyp_words, ref_text in zip(hyps, texts): + for cut_id, hyp_words, ref_text in zip(cut_ids, hyps, texts): ref_words = ref_text.split() - this_batch.append((ref_words, hyp_words)) + this_batch.append((cut_id, ref_words, hyp_words)) results[name].extend(this_batch) @@ -591,6 +592,7 @@ def save_results( recog_path = ( params.res_dir / f"recogs-{test_set_name}-{key}-{params.suffix}.txt" ) + results = sorted(results) store_transcripts(filename=recog_path, texts=results) logging.info(f"The transcripts are stored in {recog_path}") @@ -631,6 +633,8 @@ def main(): LibriSpeechAsrDataModule.add_arguments(parser) args = parser.parse_args() args.exp_dir = Path(args.exp_dir) + # we need cut ids to display recognition results. 
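In the streaming decoders the id cannot come from a batch, so it rides along on the stream object itself: Stream/DecodeStream now take a cut_id argument and expose it through an id property, and every finished stream contributes an (id, ref, hyp) triple. Below is a stripped-down sketch of just that bookkeeping; ToyStream and collect_finished are invented names, with no model or feature handling.

from typing import List, Tuple


class ToyStream:
    def __init__(self, cut_id: str, ground_truth: str) -> None:
        self.cut_id = cut_id
        self.ground_truth = ground_truth
        self.hyp: List[str] = []

    @property
    def id(self) -> str:
        # Mirrors the `id` property added to Stream/DecodeStream above.
        return self.cut_id


def collect_finished(
    streams: List[ToyStream], finished: List[int]
) -> List[Tuple[str, List[str], List[str]]]:
    decode_results = []
    # Delete in reverse index order so earlier deletions do not shift the
    # indices still pending, which is presumably why the loops above iterate
    # over sorted(finished_streams, reverse=True).
    for i in sorted(finished, reverse=True):
        decode_results.append(
            (streams[i].id, streams[i].ground_truth.split(), streams[i].hyp)
        )
        del streams[i]
    return decode_results


streams = [ToyStream("cut-a", "HELLO WORLD"), ToyStream("cut-b", "FOO BAR")]
streams[0].hyp = ["HELLO", "WORLD"]
streams[1].hyp = ["FOO", "BAZ"]
print(collect_finished(streams, finished=[0, 1]))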
+ args.return_cuts = True params = get_params() params.update(vars(args)) @@ -754,6 +758,8 @@ def main(): num_param = sum([p.numel() for p in model.parameters()]) logging.info(f"Number of model parameters: {num_param}") + # we need cut ids to display recognition results. + args.return_cuts = True librispeech = LibriSpeechAsrDataModule(args) test_clean_cuts = librispeech.test_clean_cuts() diff --git a/egs/librispeech/ASR/pruned_transducer_stateless/decode_stream.py b/egs/librispeech/ASR/pruned_transducer_stateless/decode_stream.py index 6c0e9ba19..386248554 100644 --- a/egs/librispeech/ASR/pruned_transducer_stateless/decode_stream.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless/decode_stream.py @@ -28,6 +28,7 @@ class DecodeStream(object): def __init__( self, params: AttributeDict, + cut_id: str, initial_states: List[torch.Tensor], decoding_graph: Optional[k2.Fsa] = None, device: torch.device = torch.device("cpu"), @@ -48,6 +49,7 @@ class DecodeStream(object): assert device == decoding_graph.device self.params = params + self.cut_id = cut_id self.LOG_EPS = math.log(1e-10) self.states = initial_states @@ -102,6 +104,10 @@ class DecodeStream(object): """Return True if all the features are processed.""" return self._done + @property + def id(self) -> str: + return self.cut_id + def set_features( self, features: torch.Tensor, diff --git a/egs/librispeech/ASR/pruned_transducer_stateless/streaming_decode.py b/egs/librispeech/ASR/pruned_transducer_stateless/streaming_decode.py index e455627f3..d2cae4f9f 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless/streaming_decode.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless/streaming_decode.py @@ -356,6 +356,7 @@ def decode_dataset( # each utterance has a DecodeStream. decode_stream = DecodeStream( params=params, + cut_id=cut.id, initial_states=initial_states, decoding_graph=decoding_graph, device=device, @@ -385,6 +386,7 @@ def decode_dataset( for i in sorted(finished_streams, reverse=True): decode_results.append( ( + decode_streams[i].id, decode_streams[i].ground_truth.split(), sp.decode(decode_streams[i].decoding_result()).split(), ) @@ -402,6 +404,7 @@ def decode_dataset( for i in sorted(finished_streams, reverse=True): decode_results.append( ( + decode_streams[i].id, decode_streams[i].ground_truth.split(), sp.decode(decode_streams[i].decoding_result()).split(), ) diff --git a/egs/librispeech/ASR/pruned_transducer_stateless2/conformer.py b/egs/librispeech/ASR/pruned_transducer_stateless2/conformer.py index e95360d1d..9a0405c57 100644 --- a/egs/librispeech/ASR/pruned_transducer_stateless2/conformer.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless2/conformer.py @@ -32,7 +32,7 @@ from scaling import ( ) from torch import Tensor, nn -from icefall.utils import make_pad_mask, subsequent_chunk_mask +from icefall.utils import is_jit_tracing, make_pad_mask, subsequent_chunk_mask class Conformer(EncoderInterface): @@ -155,7 +155,7 @@ class Conformer(EncoderInterface): # Note: rounding_mode in torch.div() is available only in torch >= 1.8.0 lengths = (((x_lens - 1) >> 1) - 1) >> 1 - if not torch.jit.is_tracing(): + if not is_jit_tracing(): assert x.size(0) == lengths.max().item() src_key_padding_mask = make_pad_mask(lengths) @@ -788,7 +788,7 @@ class RelPositionalEncoding(torch.nn.Module): ) -> None: """Construct an PositionalEncoding object.""" super(RelPositionalEncoding, self).__init__() - if torch.jit.is_tracing(): + if is_jit_tracing(): # 10k frames correspond to ~100k ms, e.g., 100 seconds, i.e., # It assumes that the 
maximum input won't have more than # 10k frames. @@ -1015,12 +1015,12 @@ class RelPositionMultiheadAttention(nn.Module): (batch_size, num_heads, time1, n) = x.shape time2 = time1 + left_context - if not torch.jit.is_tracing(): + if not is_jit_tracing(): assert ( n == left_context + 2 * time1 - 1 ), f"{n} == {left_context} + 2 * {time1} - 1" - if torch.jit.is_tracing(): + if is_jit_tracing(): rows = torch.arange(start=time1 - 1, end=-1, step=-1) cols = torch.arange(time2) rows = rows.repeat(batch_size * num_heads).unsqueeze(-1) @@ -1111,12 +1111,12 @@ class RelPositionMultiheadAttention(nn.Module): """ tgt_len, bsz, embed_dim = query.size() - if not torch.jit.is_tracing(): + if not is_jit_tracing(): assert embed_dim == embed_dim_to_check assert key.size(0) == value.size(0) and key.size(1) == value.size(1) head_dim = embed_dim // num_heads - if not torch.jit.is_tracing(): + if not is_jit_tracing(): assert ( head_dim * num_heads == embed_dim ), "embed_dim must be divisible by num_heads" @@ -1232,7 +1232,7 @@ class RelPositionMultiheadAttention(nn.Module): src_len = k.size(0) - if key_padding_mask is not None and not torch.jit.is_tracing(): + if key_padding_mask is not None and not is_jit_tracing(): assert key_padding_mask.size(0) == bsz, "{} == {}".format( key_padding_mask.size(0), bsz ) @@ -1243,7 +1243,7 @@ class RelPositionMultiheadAttention(nn.Module): q = q.transpose(0, 1) # (batch, time1, head, d_k) pos_emb_bsz = pos_emb.size(0) - if not torch.jit.is_tracing(): + if not is_jit_tracing(): assert pos_emb_bsz in (1, bsz) # actually it is 1 p = self.linear_pos(pos_emb).view(pos_emb_bsz, -1, num_heads, head_dim) @@ -1280,7 +1280,7 @@ class RelPositionMultiheadAttention(nn.Module): bsz * num_heads, tgt_len, -1 ) - if not torch.jit.is_tracing(): + if not is_jit_tracing(): assert list(attn_output_weights.size()) == [ bsz * num_heads, tgt_len, @@ -1345,7 +1345,7 @@ class RelPositionMultiheadAttention(nn.Module): attn_output = torch.bmm(attn_output_weights, v) - if not torch.jit.is_tracing(): + if not is_jit_tracing(): assert list(attn_output.size()) == [ bsz * num_heads, tgt_len, diff --git a/egs/librispeech/ASR/pruned_transducer_stateless2/decode.py b/egs/librispeech/ASR/pruned_transducer_stateless2/decode.py index 60a948a99..34fd31e7e 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless2/decode.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless2/decode.py @@ -574,6 +574,7 @@ def decode_dataset( results = defaultdict(list) for batch_idx, batch in enumerate(dl): texts = batch["supervisions"]["text"] + cut_ids = [cut.id for cut in batch["supervisions"]["cut"]] hyps_dict = decode_one_batch( params=params, @@ -587,9 +588,9 @@ def decode_dataset( for name, hyps in hyps_dict.items(): this_batch = [] assert len(hyps) == len(texts) - for hyp_words, ref_text in zip(hyps, texts): + for cut_id, hyp_words, ref_text in zip(cut_ids, hyps, texts): ref_words = ref_text.split() - this_batch.append((ref_words, hyp_words)) + this_batch.append((cut_id, ref_words, hyp_words)) results[name].extend(this_batch) @@ -614,6 +615,7 @@ def save_results( recog_path = ( params.res_dir / f"recogs-{test_set_name}-{key}-{params.suffix}.txt" ) + results = sorted(results) store_transcripts(filename=recog_path, texts=results) logging.info(f"The transcripts are stored in {recog_path}") @@ -777,6 +779,8 @@ def main(): num_param = sum([p.numel() for p in model.parameters()]) logging.info(f"Number of model parameters: {num_param}") + # we need cut ids to display recognition results. 
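The conformer.py, decoder.py, joiner.py and scaling.py hunks in this patch all swap torch.jit.is_tracing() for an is_jit_tracing() helper from icefall.utils, keeping the version handling in one place. The helper's real body is not shown in this diff; the guard below is only a guess at the idea, written so that torch builds without torch.jit.is_tracing simply keep the shape assertions enabled. The forward_stub is likewise a toy illustrating the decoder.py annotation change (Union[bool, torch.Tensor] -> bool) that torch.jit.script requires; it is not the real decoder.

import torch
import torch.nn.functional as F


def is_jit_tracing() -> bool:
    # Fall back to False when the running torch has no torch.jit.is_tracing,
    # which leaves the `if not is_jit_tracing(): assert ...` checks active.
    if hasattr(torch.jit, "is_tracing"):
        return bool(torch.jit.is_tracing())
    return False


def forward_stub(y: torch.Tensor, need_pad: bool = True) -> torch.Tensor:
    # Toy stand-in: only the `need_pad: bool` annotation matters here, since
    # torch.jit.script does not accept Union annotations.
    if need_pad:
        y = F.pad(y, (1, 0))
    return y


print(is_jit_tracing())                        # False outside torch.jit.trace()
print(forward_stub(torch.zeros(2, 3)).shape)   # torch.Size([2, 4])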
+ args.return_cuts = True librispeech = LibriSpeechAsrDataModule(args) test_clean_cuts = librispeech.test_clean_cuts() diff --git a/egs/librispeech/ASR/pruned_transducer_stateless2/decoder.py b/egs/librispeech/ASR/pruned_transducer_stateless2/decoder.py index bd0df5d49..e01167285 100644 --- a/egs/librispeech/ASR/pruned_transducer_stateless2/decoder.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless2/decoder.py @@ -14,13 +14,13 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Union - import torch import torch.nn as nn import torch.nn.functional as F from scaling import ScaledConv1d, ScaledEmbedding +from icefall.utils import is_jit_tracing + class Decoder(nn.Module): """This class modifies the stateless decoder from the following paper: @@ -80,7 +80,10 @@ class Decoder(nn.Module): self.conv = nn.Identity() def forward( - self, y: torch.Tensor, need_pad: Union[bool, torch.Tensor] = True + self, + y: torch.Tensor, + need_pad: bool = True # Annotation should be Union[bool, torch.Tensor] + # but, torch.jit.script does not support Union. ) -> torch.Tensor: """ Args: @@ -108,7 +111,7 @@ class Decoder(nn.Module): else: # During inference time, there is no need to do extra padding # as we only need one output - if not torch.jit.is_tracing(): + if not is_jit_tracing(): assert embedding_out.size(-1) == self.context_size embedding_out = self.conv(embedding_out) embedding_out = embedding_out.permute(0, 2, 1) diff --git a/egs/librispeech/ASR/pruned_transducer_stateless2/joiner.py b/egs/librispeech/ASR/pruned_transducer_stateless2/joiner.py index b2d6ed0f2..6a9d08033 100644 --- a/egs/librispeech/ASR/pruned_transducer_stateless2/joiner.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless2/joiner.py @@ -18,6 +18,8 @@ import torch import torch.nn as nn from scaling import ScaledLinear +from icefall.utils import is_jit_tracing + class Joiner(nn.Module): def __init__( @@ -52,7 +54,7 @@ class Joiner(nn.Module): Returns: Return a tensor of shape (N, T, s_range, C). """ - if not torch.jit.is_tracing(): + if not is_jit_tracing(): assert encoder_out.ndim == decoder_out.ndim assert encoder_out.ndim in (2, 4) assert encoder_out.shape == decoder_out.shape diff --git a/egs/librispeech/ASR/pruned_transducer_stateless2/scaling.py b/egs/librispeech/ASR/pruned_transducer_stateless2/scaling.py index 566b3622f..e93d6e4a3 100644 --- a/egs/librispeech/ASR/pruned_transducer_stateless2/scaling.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless2/scaling.py @@ -23,6 +23,8 @@ import torch import torch.nn as nn from torch import Tensor +from icefall.utils import is_jit_tracing + def _ntuple(n): def parse(x): @@ -152,7 +154,7 @@ class BasicNorm(torch.nn.Module): self.register_buffer("eps", torch.tensor(eps).log().detach()) def forward(self, x: Tensor) -> Tensor: - if not torch.jit.is_tracing(): + if not is_jit_tracing(): assert x.shape[self.channel_dim] == self.num_channels scales = ( torch.mean(x ** 2, dim=self.channel_dim, keepdim=True) @@ -424,7 +426,7 @@ class ActivationBalancer(torch.nn.Module): self.max_abs = max_abs def forward(self, x: Tensor) -> Tensor: - if torch.jit.is_scripting() or torch.jit.is_tracing(): + if torch.jit.is_scripting() or is_jit_tracing(): return x else: return ActivationBalancerFunction.apply( @@ -473,7 +475,7 @@ class DoubleSwish(torch.nn.Module): """Return double-swish activation function which is an approximation to Swish(Swish(x)), that we approximate closely with x * sigmoid(x-1). 
""" - if torch.jit.is_scripting() or torch.jit.is_tracing(): + if torch.jit.is_scripting() or is_jit_tracing(): return x * torch.sigmoid(x - 1.0) else: return DoubleSwishFunction.apply(x) diff --git a/egs/librispeech/ASR/pruned_transducer_stateless2/streaming_decode.py b/egs/librispeech/ASR/pruned_transducer_stateless2/streaming_decode.py index 79963c968..d76a03946 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless2/streaming_decode.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless2/streaming_decode.py @@ -358,6 +358,7 @@ def decode_dataset( # each utterance has a DecodeStream. decode_stream = DecodeStream( params=params, + cut_id=cut.id, initial_states=initial_states, decoding_graph=decoding_graph, device=device, @@ -388,6 +389,7 @@ def decode_dataset( for i in sorted(finished_streams, reverse=True): decode_results.append( ( + decode_streams[i].id, decode_streams[i].ground_truth.split(), sp.decode(decode_streams[i].decoding_result()).split(), ) @@ -405,6 +407,7 @@ def decode_dataset( for i in sorted(finished_streams, reverse=True): decode_results.append( ( + decode_streams[i].id, decode_streams[i].ground_truth.split(), sp.decode(decode_streams[i].decoding_result()).split(), ) diff --git a/egs/librispeech/ASR/pruned_transducer_stateless3/decode-giga.py b/egs/librispeech/ASR/pruned_transducer_stateless3/decode-giga.py index 8d6e33e9d..5784a78ba 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless3/decode-giga.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless3/decode-giga.py @@ -422,6 +422,7 @@ def decode_dataset( results = defaultdict(list) for batch_idx, batch in enumerate(dl): texts = batch["supervisions"]["text"] + cut_ids = [cut.id for cut in batch["supervisions"]["cut"]] hyps_dict = decode_one_batch( params=params, @@ -434,9 +435,9 @@ def decode_dataset( for name, hyps in hyps_dict.items(): this_batch = [] assert len(hyps) == len(texts) - for hyp_words, ref_text in zip(hyps, texts): + for cut_id, hyp_words, ref_text in zip(cut_ids, hyps, texts): ref_words = ref_text.split() - this_batch.append((ref_words, hyp_words)) + this_batch.append((cut_id, ref_words, hyp_words)) results[name].extend(this_batch) @@ -610,6 +611,8 @@ def main(): num_param = sum([p.numel() for p in model.parameters()]) logging.info(f"Number of model parameters: {num_param}") + # we need cut ids to display recognition results. 
+ args.return_cuts = True asr_datamodule = AsrDataModule(args) gigaspeech = GigaSpeech(manifest_dir=args.manifest_dir) diff --git a/egs/librispeech/ASR/pruned_transducer_stateless3/decode.py b/egs/librispeech/ASR/pruned_transducer_stateless3/decode.py index d2605c072..72d6f656c 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless3/decode.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless3/decode.py @@ -745,6 +745,7 @@ def decode_dataset( results = defaultdict(list) for batch_idx, batch in enumerate(dl): texts = batch["supervisions"]["text"] + cut_ids = [cut.id for cut in batch["supervisions"]["cut"]] hyps_dict = decode_one_batch( params=params, @@ -760,9 +761,9 @@ def decode_dataset( for name, hyps in hyps_dict.items(): this_batch = [] assert len(hyps) == len(texts) - for hyp_words, ref_text in zip(hyps, texts): + for cut_id, hyp_words, ref_text in zip(cut_ids, hyps, texts): ref_words = ref_text.split() - this_batch.append((ref_words, hyp_words)) + this_batch.append((cut_id, ref_words, hyp_words)) results[name].extend(this_batch) @@ -787,6 +788,7 @@ def save_results( recog_path = ( params.res_dir / f"recogs-{test_set_name}-{key}-{params.suffix}.txt" ) + results = sorted(results) store_transcripts(filename=recog_path, texts=results) logging.info(f"The transcripts are stored in {recog_path}") @@ -1067,6 +1069,8 @@ def main(): num_param = sum([p.numel() for p in model.parameters()]) logging.info(f"Number of model parameters: {num_param}") + # we need cut ids to display recognition results. + args.return_cuts = True asr_datamodule = AsrDataModule(args) librispeech = LibriSpeech(manifest_dir=args.manifest_dir) diff --git a/egs/librispeech/ASR/pruned_transducer_stateless3/scaling_converter.py b/egs/librispeech/ASR/pruned_transducer_stateless3/scaling_converter.py index 79b178421..992b71dd1 100644 --- a/egs/librispeech/ASR/pruned_transducer_stateless3/scaling_converter.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless3/scaling_converter.py @@ -25,6 +25,7 @@ life by converting them to their non-scaled version during inference. import copy import re +from typing import List import torch import torch.nn as nn @@ -54,7 +55,10 @@ def scaled_linear_to_linear(scaled_linear: ScaledLinear) -> nn.Linear: in_features=scaled_linear.in_features, out_features=scaled_linear.out_features, bias=True, # otherwise, it throws errors when converting to PNNX format. - device=weight.device, + # device=weight.device, # Pytorch version before v1.9.0 does not has + # this argument. 
Comment out for now, we will + # see if it will raise error for versions + # after v1.9.0 ) linear.weight.data.copy_(weight) @@ -164,6 +168,24 @@ def scaled_embedding_to_embedding( return embedding +# Copied from https://pytorch.org/docs/1.9.0/_modules/torch/nn/modules/module.html#Module.get_submodule +# get_submodule was added to nn.Module at v1.9.0 +def get_submodule(model, target): + if target == "": + return model + atoms: List[str] = target.split(".") + mod: torch.nn.Module = model + for item in atoms: + if not hasattr(mod, item): + raise AttributeError( + mod._get_name() + " has no " "attribute `" + item + "`" + ) + mod = getattr(mod, item) + if not isinstance(mod, torch.nn.Module): + raise AttributeError("`" + item + "` is not " "an nn.Module") + return mod + + def convert_scaled_to_non_scaled(model: nn.Module, inplace: bool = False): """Convert `ScaledLinear`, `ScaledConv1d`, and `ScaledConv2d` in the given modle to their unscaled version `nn.Linear`, `nn.Conv1d`, @@ -200,7 +222,7 @@ def convert_scaled_to_non_scaled(model: nn.Module, inplace: bool = False): for k, v in d.items(): if "." in k: parent, child = k.rsplit(".", maxsplit=1) - setattr(model.get_submodule(parent), child, v) + setattr(get_submodule(model, parent), child, v) else: setattr(model, k, v) diff --git a/egs/librispeech/ASR/pruned_transducer_stateless3/streaming_decode.py b/egs/librispeech/ASR/pruned_transducer_stateless3/streaming_decode.py index 1976d19a6..10bb44e00 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless3/streaming_decode.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless3/streaming_decode.py @@ -359,6 +359,7 @@ def decode_dataset( # each utterance has a DecodeStream. decode_stream = DecodeStream( params=params, + cut_id=cut.id, initial_states=initial_states, decoding_graph=decoding_graph, device=device, @@ -389,6 +390,7 @@ def decode_dataset( for i in sorted(finished_streams, reverse=True): decode_results.append( ( + decode_streams[i].id, decode_streams[i].ground_truth.split(), sp.decode(decode_streams[i].decoding_result()).split(), ) @@ -406,6 +408,7 @@ def decode_dataset( for i in sorted(finished_streams, reverse=True): decode_results.append( ( + decode_streams[i].id, decode_streams[i].ground_truth.split(), sp.decode(decode_streams[i].decoding_result()).split(), ) diff --git a/egs/librispeech/ASR/pruned_transducer_stateless4/decode.py b/egs/librispeech/ASR/pruned_transducer_stateless4/decode.py index d8ae8e026..8431492e6 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless4/decode.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless4/decode.py @@ -578,6 +578,7 @@ def decode_dataset( results = defaultdict(list) for batch_idx, batch in enumerate(dl): texts = batch["supervisions"]["text"] + cut_ids = [cut.id for cut in batch["supervisions"]["cut"]] hyps_dict = decode_one_batch( params=params, @@ -591,9 +592,9 @@ def decode_dataset( for name, hyps in hyps_dict.items(): this_batch = [] assert len(hyps) == len(texts) - for hyp_words, ref_text in zip(hyps, texts): + for cut_id, hyp_words, ref_text in zip(cut_ids, hyps, texts): ref_words = ref_text.split() - this_batch.append((ref_words, hyp_words)) + this_batch.append((cut_id, ref_words, hyp_words)) results[name].extend(this_batch) @@ -618,6 +619,7 @@ def save_results( recog_path = ( params.res_dir / f"recogs-{test_set_name}-{key}-{params.suffix}.txt" ) + results = sorted(results) store_transcripts(filename=recog_path, texts=results) logging.info(f"The transcripts are stored in {recog_path}") @@ -831,6 +833,8 @@ def main(): 
num_param = sum([p.numel() for p in model.parameters()]) logging.info(f"Number of model parameters: {num_param}") + # we need cut ids to display recognition results. + args.return_cuts = True librispeech = LibriSpeechAsrDataModule(args) test_clean_cuts = librispeech.test_clean_cuts() diff --git a/egs/librispeech/ASR/pruned_transducer_stateless4/streaming_decode.py b/egs/librispeech/ASR/pruned_transducer_stateless4/streaming_decode.py index de89d41c2..7af9ea9b8 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless4/streaming_decode.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless4/streaming_decode.py @@ -371,6 +371,7 @@ def decode_dataset( # each utterance has a DecodeStream. decode_stream = DecodeStream( params=params, + cut_id=cut.id, initial_states=initial_states, decoding_graph=decoding_graph, device=device, @@ -401,6 +402,7 @@ def decode_dataset( for i in sorted(finished_streams, reverse=True): decode_results.append( ( + decode_streams[i].id, decode_streams[i].ground_truth.split(), sp.decode(decode_streams[i].decoding_result()).split(), ) @@ -418,6 +420,7 @@ def decode_dataset( for i in sorted(finished_streams, reverse=True): decode_results.append( ( + decode_streams[i].id, decode_streams[i].ground_truth.split(), sp.decode(decode_streams[i].decoding_result()).split(), ) diff --git a/egs/librispeech/ASR/pruned_transducer_stateless5/decode.py b/egs/librispeech/ASR/pruned_transducer_stateless5/decode.py index 2d0965023..32bbd16f7 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless5/decode.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless5/decode.py @@ -564,6 +564,7 @@ def decode_dataset( results = defaultdict(list) for batch_idx, batch in enumerate(dl): texts = batch["supervisions"]["text"] + cut_ids = [cut.id for cut in batch["supervisions"]["cut"]] hyps_dict = decode_one_batch( params=params, @@ -577,9 +578,9 @@ def decode_dataset( for name, hyps in hyps_dict.items(): this_batch = [] assert len(hyps) == len(texts) - for hyp_words, ref_text in zip(hyps, texts): + for cut_id, hyp_words, ref_text in zip(cut_ids, hyps, texts): ref_words = ref_text.split() - this_batch.append((ref_words, hyp_words)) + this_batch.append((cut_id, ref_words, hyp_words)) results[name].extend(this_batch) @@ -604,6 +605,7 @@ def save_results( recog_path = ( params.res_dir / f"recogs-{test_set_name}-{key}-{params.suffix}.txt" ) + results = sorted(results) store_transcripts(filename=recog_path, texts=results) logging.info(f"The transcripts are stored in {recog_path}") @@ -817,6 +819,8 @@ def main(): num_param = sum([p.numel() for p in model.parameters()]) logging.info(f"Number of model parameters: {num_param}") + # we need cut ids to display recognition results. + args.return_cuts = True librispeech = LibriSpeechAsrDataModule(args) test_clean_cuts = librispeech.test_clean_cuts() diff --git a/egs/librispeech/ASR/pruned_transducer_stateless5/streaming_decode.py b/egs/librispeech/ASR/pruned_transducer_stateless5/streaming_decode.py index d47d57d1b..6fee9483e 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless5/streaming_decode.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless5/streaming_decode.py @@ -371,6 +371,7 @@ def decode_dataset( # each utterance has a DecodeStream. 
decode_stream = DecodeStream( params=params, + cut_id=cut.id, initial_states=initial_states, decoding_graph=decoding_graph, device=device, @@ -401,6 +402,7 @@ def decode_dataset( for i in sorted(finished_streams, reverse=True): decode_results.append( ( + decode_streams[i].id, decode_streams[i].ground_truth.split(), sp.decode(decode_streams[i].decoding_result()).split(), ) @@ -418,6 +420,7 @@ def decode_dataset( for i in sorted(finished_streams, reverse=True): decode_results.append( ( + decode_streams[i].id, decode_streams[i].ground_truth.split(), sp.decode(decode_streams[i].decoding_result()).split(), ) diff --git a/egs/librispeech/ASR/pruned_transducer_stateless6/decode.py b/egs/librispeech/ASR/pruned_transducer_stateless6/decode.py index 701cad73c..2f69ba401 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless6/decode.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless6/decode.py @@ -387,6 +387,7 @@ def decode_dataset( results = defaultdict(list) for batch_idx, batch in enumerate(dl): texts = batch["supervisions"]["text"] + cut_ids = [cut.id for cut in batch["supervisions"]["cut"]] hyps_dict = decode_one_batch( params=params, @@ -399,9 +400,9 @@ def decode_dataset( for name, hyps in hyps_dict.items(): this_batch = [] assert len(hyps) == len(texts) - for hyp_words, ref_text in zip(hyps, texts): + for cut_id, hyp_words, ref_text in zip(cut_ids, hyps, texts): ref_words = ref_text.split() - this_batch.append((ref_words, hyp_words)) + this_batch.append((cut_id, ref_words, hyp_words)) results[name].extend(this_batch) @@ -426,6 +427,7 @@ def save_results( recog_path = ( params.res_dir / f"recogs-{test_set_name}-{key}-{params.suffix}.txt" ) + results = sorted(results) store_transcripts(filename=recog_path, texts=results) logging.info(f"The transcripts are stored in {recog_path}") @@ -608,6 +610,8 @@ def main(): num_param = sum([p.numel() for p in model.parameters()]) logging.info(f"Number of model parameters: {num_param}") + # we need cut ids to display recognition results. + args.return_cuts = True librispeech = LibriSpeechAsrDataModule(args) test_clean_cuts = librispeech.test_clean_cuts() diff --git a/egs/librispeech/ASR/tdnn_lstm_ctc/decode.py b/egs/librispeech/ASR/tdnn_lstm_ctc/decode.py index 9c964c2aa..f1aacb5e7 100755 --- a/egs/librispeech/ASR/tdnn_lstm_ctc/decode.py +++ b/egs/librispeech/ASR/tdnn_lstm_ctc/decode.py @@ -311,6 +311,7 @@ def decode_dataset( results = defaultdict(list) for batch_idx, batch in enumerate(dl): texts = batch["supervisions"]["text"] + cut_ids = [cut.id for cut in batch["supervisions"]["cut"]] hyps_dict = decode_one_batch( params=params, @@ -324,9 +325,9 @@ def decode_dataset( for lm_scale, hyps in hyps_dict.items(): this_batch = [] assert len(hyps) == len(texts) - for hyp_words, ref_text in zip(hyps, texts): + for cut_id, hyp_words, ref_text in zip(cut_ids, hyps, texts): ref_words = ref_text.split() - this_batch.append((ref_words, hyp_words)) + this_batch.append((cut_id, ref_words, hyp_words)) results[lm_scale].extend(this_batch) @@ -349,6 +350,7 @@ def save_results( test_set_wers = dict() for key, results in results_dict.items(): recog_path = params.exp_dir / f"recogs-{test_set_name}-{key}.txt" + results = sorted(results) store_transcripts(filename=recog_path, texts=results) logging.info(f"The transcripts are stored in {recog_path}") @@ -473,6 +475,8 @@ def main(): model.to(device) model.eval() + # we need cut ids to display recognition results. 
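The scaling_converter.py hunk above vendors get_submodule (copied, per its comment, from the PyTorch 1.9.0 docs) because nn.Module.get_submodule only appeared in torch 1.9.0, and convert_scaled_to_non_scaled() then uses it to swap modules located by a dotted path. The example below exercises that same lookup-plus-setattr pattern on a toy model; ToyModel and the replacement layer are invented for illustration.

from typing import List

import torch.nn as nn


def get_submodule(model: nn.Module, target: str) -> nn.Module:
    # Same traversal as the vendored helper in the diff: walk the dotted path
    # one attribute at a time and insist every hop is an nn.Module.
    if target == "":
        return model
    atoms: List[str] = target.split(".")
    mod: nn.Module = model
    for item in atoms:
        if not hasattr(mod, item):
            raise AttributeError(f"{mod._get_name()} has no attribute `{item}`")
        mod = getattr(mod, item)
        if not isinstance(mod, nn.Module):
            raise AttributeError(f"`{item}` is not an nn.Module")
    return mod


class ToyModel(nn.Module):
    def __init__(self) -> None:
        super().__init__()
        self.encoder = nn.Sequential(nn.Linear(4, 4), nn.ReLU())


model = ToyModel()
# Replace the module at "encoder.0", the same way convert_scaled_to_non_scaled()
# replaces Scaled* layers with their plain counterparts:
parent, child = "encoder.0".rsplit(".", maxsplit=1)
setattr(get_submodule(model, parent), child, nn.Linear(4, 8))
print(model.encoder[0])  # Linear(in_features=4, out_features=8, bias=True)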
+ args.return_cuts = True librispeech = LibriSpeechAsrDataModule(args) test_clean_cuts = librispeech.test_clean_cuts() diff --git a/egs/librispeech/ASR/transducer/decode.py b/egs/librispeech/ASR/transducer/decode.py index 990513ed9..83e924256 100755 --- a/egs/librispeech/ASR/transducer/decode.py +++ b/egs/librispeech/ASR/transducer/decode.py @@ -295,6 +295,7 @@ def decode_dataset( results = defaultdict(list) for batch_idx, batch in enumerate(dl): texts = batch["supervisions"]["text"] + cut_ids = [cut.id for cut in batch["supervisions"]["cut"]] hyps_dict = decode_one_batch( params=params, @@ -306,9 +307,9 @@ def decode_dataset( for name, hyps in hyps_dict.items(): this_batch = [] assert len(hyps) == len(texts) - for hyp_words, ref_text in zip(hyps, texts): + for cut_id, hyp_words, ref_text in zip(cut_ids, hyps, texts): ref_words = ref_text.split() - this_batch.append((ref_words, hyp_words)) + this_batch.append((cut_id, ref_words, hyp_words)) results[name].extend(this_batch) @@ -333,6 +334,7 @@ def save_results( recog_path = ( params.res_dir / f"recogs-{test_set_name}-{key}-{params.suffix}.txt" ) + results = sorted(results) store_transcripts(filename=recog_path, texts=results) logging.info(f"The transcripts are stored in {recog_path}") @@ -424,6 +426,8 @@ def main(): num_param = sum([p.numel() for p in model.parameters()]) logging.info(f"Number of model parameters: {num_param}") + # we need cut ids to display recognition results. + args.return_cuts = True librispeech = LibriSpeechAsrDataModule(args) test_clean_cuts = librispeech.test_clean_cuts() diff --git a/egs/librispeech/ASR/transducer_lstm/decode.py b/egs/librispeech/ASR/transducer_lstm/decode.py index 18ae5234c..43debe643 100755 --- a/egs/librispeech/ASR/transducer_lstm/decode.py +++ b/egs/librispeech/ASR/transducer_lstm/decode.py @@ -292,6 +292,7 @@ def decode_dataset( results = defaultdict(list) for batch_idx, batch in enumerate(dl): texts = batch["supervisions"]["text"] + cut_ids = [cut.id for cut in batch["supervisions"]["cut"]] hyps_dict = decode_one_batch( params=params, @@ -303,9 +304,9 @@ def decode_dataset( for name, hyps in hyps_dict.items(): this_batch = [] assert len(hyps) == len(texts) - for hyp_words, ref_text in zip(hyps, texts): + for cut_id, hyp_words, ref_text in zip(cut_ids, hyps, texts): ref_words = ref_text.split() - this_batch.append((ref_words, hyp_words)) + this_batch.append((cut_id, ref_words, hyp_words)) results[name].extend(this_batch) @@ -330,6 +331,7 @@ def save_results( recog_path = ( params.res_dir / f"recogs-{test_set_name}-{key}-{params.suffix}.txt" ) + results = sorted(results) store_transcripts(filename=recog_path, texts=results) logging.info(f"The transcripts are stored in {recog_path}") @@ -422,6 +424,8 @@ def main(): num_param = sum([p.numel() for p in model.parameters()]) logging.info(f"Number of model parameters: {num_param}") + # we need cut ids to display recognition results. 
+ args.return_cuts = True librispeech = LibriSpeechAsrDataModule(args) test_clean_cuts = librispeech.test_clean_cuts() diff --git a/egs/librispeech/ASR/transducer_stateless/decode.py b/egs/librispeech/ASR/transducer_stateless/decode.py index 5ea17b173..19a685090 100755 --- a/egs/librispeech/ASR/transducer_stateless/decode.py +++ b/egs/librispeech/ASR/transducer_stateless/decode.py @@ -350,6 +350,7 @@ def decode_dataset( results = defaultdict(list) for batch_idx, batch in enumerate(dl): texts = batch["supervisions"]["text"] + cut_ids = [cut.id for cut in batch["supervisions"]["cut"]] hyps_dict = decode_one_batch( params=params, @@ -362,9 +363,9 @@ def decode_dataset( for name, hyps in hyps_dict.items(): this_batch = [] assert len(hyps) == len(texts) - for hyp_words, ref_text in zip(hyps, texts): + for cut_id, hyp_words, ref_text in zip(cut_ids, hyps, texts): ref_words = ref_text.split() - this_batch.append((ref_words, hyp_words)) + this_batch.append((cut_id, ref_words, hyp_words)) results[name].extend(this_batch) @@ -389,6 +390,7 @@ def save_results( recog_path = ( params.res_dir / f"recogs-{test_set_name}-{key}-{params.suffix}.txt" ) + results = sorted(results) store_transcripts(filename=recog_path, texts=results) logging.info(f"The transcripts are stored in {recog_path}") @@ -500,6 +502,8 @@ def main(): num_param = sum([p.numel() for p in model.parameters()]) logging.info(f"Number of model parameters: {num_param}") + # we need cut ids to display recognition results. + args.return_cuts = True librispeech = LibriSpeechAsrDataModule(args) test_clean_cuts = librispeech.test_clean_cuts() diff --git a/egs/librispeech/ASR/transducer_stateless2/decode.py b/egs/librispeech/ASR/transducer_stateless2/decode.py index 4cf1e559c..f48ce82f4 100755 --- a/egs/librispeech/ASR/transducer_stateless2/decode.py +++ b/egs/librispeech/ASR/transducer_stateless2/decode.py @@ -350,6 +350,7 @@ def decode_dataset( results = defaultdict(list) for batch_idx, batch in enumerate(dl): texts = batch["supervisions"]["text"] + cut_ids = [cut.id for cut in batch["supervisions"]["cut"]] hyps_dict = decode_one_batch( params=params, @@ -362,9 +363,9 @@ def decode_dataset( for name, hyps in hyps_dict.items(): this_batch = [] assert len(hyps) == len(texts) - for hyp_words, ref_text in zip(hyps, texts): + for cut_id, hyp_words, ref_text in zip(cut_ids, hyps, texts): ref_words = ref_text.split() - this_batch.append((ref_words, hyp_words)) + this_batch.append((cut_id, ref_words, hyp_words)) results[name].extend(this_batch) @@ -389,6 +390,7 @@ def save_results( recog_path = ( params.res_dir / f"recogs-{test_set_name}-{key}-{params.suffix}.txt" ) + results = sorted(results) store_transcripts(filename=recog_path, texts=results) logging.info(f"The transcripts are stored in {recog_path}") @@ -500,6 +502,8 @@ def main(): num_param = sum([p.numel() for p in model.parameters()]) logging.info(f"Number of model parameters: {num_param}") + # we need cut ids to display recognition results. 
+ args.return_cuts = True librispeech = LibriSpeechAsrDataModule(args) test_clean_cuts = librispeech.test_clean_cuts() diff --git a/egs/librispeech/ASR/transducer_stateless_multi_datasets/decode.py b/egs/librispeech/ASR/transducer_stateless_multi_datasets/decode.py index 955366970..2bb6df5d6 100755 --- a/egs/librispeech/ASR/transducer_stateless_multi_datasets/decode.py +++ b/egs/librispeech/ASR/transducer_stateless_multi_datasets/decode.py @@ -351,6 +351,7 @@ def decode_dataset( results = defaultdict(list) for batch_idx, batch in enumerate(dl): texts = batch["supervisions"]["text"] + cut_ids = [cut.id for cut in batch["supervisions"]["cut"]] hyps_dict = decode_one_batch( params=params, @@ -363,9 +364,9 @@ def decode_dataset( for name, hyps in hyps_dict.items(): this_batch = [] assert len(hyps) == len(texts) - for hyp_words, ref_text in zip(hyps, texts): + for cut_id, hyp_words, ref_text in zip(cut_ids, hyps, texts): ref_words = ref_text.split() - this_batch.append((ref_words, hyp_words)) + this_batch.append((cut_id, ref_words, hyp_words)) results[name].extend(this_batch) @@ -390,6 +391,7 @@ def save_results( recog_path = ( params.res_dir / f"recogs-{test_set_name}-{key}-{params.suffix}.txt" ) + results = sorted(results) store_transcripts(filename=recog_path, texts=results) logging.info(f"The transcripts are stored in {recog_path}") @@ -503,6 +505,8 @@ def main(): num_param = sum([p.numel() for p in model.parameters()]) logging.info(f"Number of model parameters: {num_param}") + # we need cut ids to display recognition results. + args.return_cuts = True asr_datamodule = AsrDataModule(args) librispeech = LibriSpeech(manifest_dir=args.manifest_dir) diff --git a/egs/spgispeech/ASR/pruned_transducer_stateless2/decode.py b/egs/spgispeech/ASR/pruned_transducer_stateless2/decode.py index ae49d166b..c13b980c6 100755 --- a/egs/spgispeech/ASR/pruned_transducer_stateless2/decode.py +++ b/egs/spgispeech/ASR/pruned_transducer_stateless2/decode.py @@ -365,6 +365,7 @@ def decode_dataset( results = defaultdict(list) for batch_idx, batch in enumerate(dl): texts = batch["supervisions"]["text"] + cut_ids = [cut.id for cut in batch["supervisions"]["cut"]] hyps_dict = decode_one_batch( params=params, @@ -377,9 +378,9 @@ def decode_dataset( for name, hyps in hyps_dict.items(): this_batch = [] assert len(hyps) == len(texts) - for hyp_words, ref_text in zip(hyps, texts): + for cut_id, hyp_words, ref_text in zip(cut_ids, hyps, texts): ref_words = ref_text.split() - this_batch.append((ref_words, hyp_words)) + this_batch.append((cut_id, ref_words, hyp_words)) results[name].extend(this_batch) @@ -405,6 +406,7 @@ def save_results( recog_path = ( params.res_dir / f"recogs-{test_set_name}-{key}-{params.suffix}.txt" ) + results = sorted(results) store_transcripts(filename=recog_path, texts=results) logging.info(f"The transcripts are stored in {recog_path}") @@ -561,6 +563,8 @@ def main(): num_param = sum([p.numel() for p in model.parameters()]) logging.info(f"Number of model parameters: {num_param}") + # we need cut ids to display recognition results. 
+ args.return_cuts = True spgispeech = SPGISpeechAsrDataModule(args) dev_cuts = spgispeech.dev_cuts() diff --git a/egs/tal_csasr/ASR/pruned_transducer_stateless5/decode.py b/egs/tal_csasr/ASR/pruned_transducer_stateless5/decode.py index 305729a99..45f702163 100755 --- a/egs/tal_csasr/ASR/pruned_transducer_stateless5/decode.py +++ b/egs/tal_csasr/ASR/pruned_transducer_stateless5/decode.py @@ -453,6 +453,7 @@ def decode_dataset( zh_char = "[\u4e00-\u9fa5]+" # Chinese chars for batch_idx, batch in enumerate(dl): texts = batch["supervisions"]["text"] + cut_ids = [cut.id for cut in batch["supervisions"]["cut"]] zh_texts = [] en_texts = [] for i in range(len(texts)): @@ -487,14 +488,14 @@ def decode_dataset( # print(hyps_texts) hyps, zh_hyps, en_hyps = hyps_texts assert len(hyps) == len(texts) - for hyp_words, ref_text in zip(hyps, texts): - this_batch.append((ref_text, hyp_words)) + for cut_id, hyp_words, ref_text in zip(cut_ids, hyps, texts): + this_batch.append((cut_id, ref_text, hyp_words)) - for hyp_words, ref_text in zip(zh_hyps, zh_texts): - this_batch_zh.append((ref_text, hyp_words)) + for cut_id, hyp_words, ref_text in zip(cut_ids, zh_hyps, zh_texts): + this_batch_zh.append((cut_id, ref_text, hyp_words)) - for hyp_words, ref_text in zip(en_hyps, en_texts): - this_batch_en.append((ref_text, hyp_words)) + for cut_id, hyp_words, ref_text in zip(cut_ids, en_hyps, en_texts): + this_batch_en.append((cut_id, ref_text, hyp_words)) results[name].extend(this_batch) zh_results[name + "_zh"].extend(this_batch_zh) @@ -521,6 +522,7 @@ def save_results( recog_path = ( params.res_dir / f"recogs-{test_set_name}-{key}-{params.suffix}.txt" ) + results = sorted(results) store_transcripts(filename=recog_path, texts=results) logging.info(f"The transcripts are stored in {recog_path}") @@ -710,6 +712,8 @@ def main(): c.supervisions[0].text = text_normalize(text) return c + # we need cut ids to display recognition results. + args.return_cuts = True tal_csasr = TAL_CSASRAsrDataModule(args) dev_cuts = tal_csasr.valid_cuts() diff --git a/egs/tedlium3/ASR/pruned_transducer_stateless/decode.py b/egs/tedlium3/ASR/pruned_transducer_stateless/decode.py index 4d9d3c3cf..bb352baa7 100755 --- a/egs/tedlium3/ASR/pruned_transducer_stateless/decode.py +++ b/egs/tedlium3/ASR/pruned_transducer_stateless/decode.py @@ -350,6 +350,7 @@ def decode_dataset( results = defaultdict(list) for batch_idx, batch in enumerate(dl): texts = batch["supervisions"]["text"] + cut_ids = [cut.id for cut in batch["supervisions"]["cut"]] hyps_dict = decode_one_batch( params=params, @@ -362,9 +363,9 @@ def decode_dataset( for name, hyps in hyps_dict.items(): this_batch = [] assert len(hyps) == len(texts) - for hyp_words, ref_text in zip(hyps, texts): + for cut_id, hyp_words, ref_text in zip(cut_ids, hyps, texts): ref_words = ref_text.split() - this_batch.append((ref_words, hyp_words)) + this_batch.append((cut_id, ref_words, hyp_words)) results[name].extend(this_batch) @@ -389,6 +390,7 @@ def save_results( recog_path = ( params.res_dir / f"recogs-{test_set_name}-{key}-{params.suffix}.txt" ) + results = sorted(results) store_transcripts(filename=recog_path, texts=results) logging.info(f"The transcripts are stored in {recog_path}") @@ -498,6 +500,8 @@ def main(): num_param = sum([p.numel() for p in model.parameters()]) logging.info(f"Number of model parameters: {num_param}") + # we need cut ids to display recognition results. 
+ args.return_cuts = True tedlium = TedLiumAsrDataModule(args) dev_cuts = tedlium.dev_cuts() test_cuts = tedlium.test_cuts() diff --git a/egs/tedlium3/ASR/transducer_stateless/decode.py b/egs/tedlium3/ASR/transducer_stateless/decode.py index 3185e7581..c1aa2c366 100755 --- a/egs/tedlium3/ASR/transducer_stateless/decode.py +++ b/egs/tedlium3/ASR/transducer_stateless/decode.py @@ -325,6 +325,7 @@ def decode_dataset( results = defaultdict(list) for batch_idx, batch in enumerate(dl): texts = batch["supervisions"]["text"] + cut_ids = [cut.id for cut in batch["supervisions"]["cut"]] hyps_dict = decode_one_batch( params=params, @@ -336,9 +337,9 @@ def decode_dataset( for name, hyps in hyps_dict.items(): this_batch = [] assert len(hyps) == len(texts) - for hyp_words, ref_text in zip(hyps, texts): + for cut_id, hyp_words, ref_text in zip(cut_ids, hyps, texts): ref_words = ref_text.split() - this_batch.append((ref_words, hyp_words)) + this_batch.append((cut_id, ref_words, hyp_words)) results[name].extend(this_batch) @@ -363,6 +364,7 @@ def save_results( recog_path = ( params.res_dir / f"recogs-{test_set_name}-{key}-{params.suffix}.txt" ) + results = sorted(results) store_transcripts(filename=recog_path, texts=results) logging.info(f"The transcripts are stored in {recog_path}") @@ -462,6 +464,8 @@ def main(): num_param = sum([p.numel() for p in model.parameters()]) logging.info(f"Number of model parameters: {num_param}") + # we need cut ids to display recognition results. + args.return_cuts = True tedlium = TedLiumAsrDataModule(args) dev_cuts = tedlium.dev_cuts() test_cuts = tedlium.test_cuts() diff --git a/egs/timit/ASR/tdnn_ligru_ctc/decode.py b/egs/timit/ASR/tdnn_ligru_ctc/decode.py index b141e58fa..84d9f7f1b 100644 --- a/egs/timit/ASR/tdnn_ligru_ctc/decode.py +++ b/egs/timit/ASR/tdnn_ligru_ctc/decode.py @@ -311,6 +311,7 @@ def decode_dataset( results = defaultdict(list) for batch_idx, batch in enumerate(dl): texts = batch["supervisions"]["text"] + cut_ids = [cut.id for cut in batch["supervisions"]["cut"]] hyps_dict = decode_one_batch( params=params, @@ -324,9 +325,9 @@ def decode_dataset( for lm_scale, hyps in hyps_dict.items(): this_batch = [] assert len(hyps) == len(texts) - for hyp_words, ref_text in zip(hyps, texts): + for cut_id, hyp_words, ref_text in zip(cut_ids, hyps, texts): ref_words = ref_text.split() - this_batch.append((ref_words, hyp_words)) + this_batch.append((cut_id, ref_words, hyp_words)) results[lm_scale].extend(this_batch) @@ -349,6 +350,7 @@ def save_results( test_set_wers = dict() for key, results in results_dict.items(): recog_path = params.exp_dir / f"recogs-{test_set_name}-{key}.txt" + results = sorted(results) store_transcripts(filename=recog_path, texts=results) logging.info(f"The transcripts are stored in {recog_path}") @@ -468,6 +470,8 @@ def main(): model.to(device) model.eval() + # we need cut ids to display recognition results. 
+ args.return_cuts = True timit = TimitAsrDataModule(args) test_set = "TEST" test_dl = timit.test_dataloaders() diff --git a/egs/timit/ASR/tdnn_lstm_ctc/decode.py b/egs/timit/ASR/tdnn_lstm_ctc/decode.py index e9ca96615..7672a2e1d 100644 --- a/egs/timit/ASR/tdnn_lstm_ctc/decode.py +++ b/egs/timit/ASR/tdnn_lstm_ctc/decode.py @@ -310,6 +310,7 @@ def decode_dataset( results = defaultdict(list) for batch_idx, batch in enumerate(dl): texts = batch["supervisions"]["text"] + cut_ids = [cut.id for cut in batch["supervisions"]["cut"]] hyps_dict = decode_one_batch( params=params, @@ -323,9 +324,9 @@ def decode_dataset( for lm_scale, hyps in hyps_dict.items(): this_batch = [] assert len(hyps) == len(texts) - for hyp_words, ref_text in zip(hyps, texts): + for cut_id, hyp_words, ref_text in zip(cut_ids, hyps, texts): ref_words = ref_text.split() - this_batch.append((ref_words, hyp_words)) + this_batch.append((cut_id, ref_words, hyp_words)) results[lm_scale].extend(this_batch) @@ -348,6 +349,7 @@ def save_results( test_set_wers = dict() for key, results in results_dict.items(): recog_path = params.exp_dir / f"recogs-{test_set_name}-{key}.txt" + results = sorted(results) store_transcripts(filename=recog_path, texts=results) logging.info(f"The transcripts are stored in {recog_path}") @@ -467,6 +469,8 @@ def main(): model.to(device) model.eval() + # we need cut ids to display recognition results. + args.return_cuts = True timit = TimitAsrDataModule(args) test_set = "TEST" test_dl = timit.test_dataloaders() diff --git a/egs/wenetspeech/ASR/pruned_transducer_stateless2/decode.py b/egs/wenetspeech/ASR/pruned_transducer_stateless2/decode.py index 7c06cdb3d..bbd8680b2 100755 --- a/egs/wenetspeech/ASR/pruned_transducer_stateless2/decode.py +++ b/egs/wenetspeech/ASR/pruned_transducer_stateless2/decode.py @@ -491,6 +491,7 @@ def decode_dataset( for batch_idx, batch in enumerate(dl): texts = batch["supervisions"]["text"] texts = [list(str(text)) for text in texts] + cut_ids = [cut.id for cut in batch["supervisions"]["cut"]] hyps_dict = decode_one_batch( params=params, @@ -504,8 +505,8 @@ def decode_dataset( for name, hyps in hyps_dict.items(): this_batch = [] assert len(hyps) == len(texts) - for hyp_words, ref_text in zip(hyps, texts): - this_batch.append((ref_text, hyp_words)) + for cut_id, hyp_words, ref_text in zip(cut_ids, hyps, texts): + this_batch.append((cut_id, ref_text, hyp_words)) results[name].extend(this_batch) @@ -530,6 +531,7 @@ def save_results( recog_path = ( params.res_dir / f"recogs-{test_set_name}-{key}-{params.suffix}.txt" ) + results = sorted(results) store_transcripts(filename=recog_path, texts=results) logging.info(f"The transcripts are stored in {recog_path}") @@ -678,6 +680,8 @@ def main(): from lhotse import CutSet from lhotse.dataset.webdataset import export_to_webdataset + # we need cut ids to display recognition results. 
+ args.return_cuts = True wenetspeech = WenetSpeechAsrDataModule(args) dev = "dev" diff --git a/egs/wenetspeech/ASR/pruned_transducer_stateless5/decode.py b/egs/wenetspeech/ASR/pruned_transducer_stateless5/decode.py index ca997456f..c36df2458 100755 --- a/egs/wenetspeech/ASR/pruned_transducer_stateless5/decode.py +++ b/egs/wenetspeech/ASR/pruned_transducer_stateless5/decode.py @@ -461,6 +461,7 @@ def decode_dataset( for batch_idx, batch in enumerate(dl): texts = batch["supervisions"]["text"] texts = [list(str(text)) for text in texts] + cut_ids = [cut.id for cut in batch["supervisions"]["cut"]] hyps_dict = decode_one_batch( params=params, @@ -473,8 +474,8 @@ def decode_dataset( for name, hyps in hyps_dict.items(): this_batch = [] assert len(hyps) == len(texts) - for hyp_words, ref_text in zip(hyps, texts): - this_batch.append((ref_text, hyp_words)) + for cut_id, hyp_words, ref_text in zip(cut_ids, hyps, texts): + this_batch.append((cut_id, ref_text, hyp_words)) results[name].extend(this_batch) @@ -499,6 +500,7 @@ def save_results( recog_path = ( params.res_dir / f"recogs-{test_set_name}-{key}-{params.suffix}.txt" ) + results = sorted(results) store_transcripts(filename=recog_path, texts=results) logging.info(f"The transcripts are stored in {recog_path}") @@ -682,6 +684,8 @@ def main(): from lhotse import CutSet from lhotse.dataset.webdataset import export_to_webdataset + # we need cut ids to display recognition results. + args.return_cuts = True wenetspeech = WenetSpeechAsrDataModule(args) dev = "dev" diff --git a/egs/wenetspeech/ASR/pruned_transducer_stateless5/decode_stream.py b/egs/wenetspeech/ASR/pruned_transducer_stateless5/decode_stream.py index 6c0e9ba19..386248554 100644 --- a/egs/wenetspeech/ASR/pruned_transducer_stateless5/decode_stream.py +++ b/egs/wenetspeech/ASR/pruned_transducer_stateless5/decode_stream.py @@ -28,6 +28,7 @@ class DecodeStream(object): def __init__( self, params: AttributeDict, + cut_id: str, initial_states: List[torch.Tensor], decoding_graph: Optional[k2.Fsa] = None, device: torch.device = torch.device("cpu"), @@ -48,6 +49,7 @@ class DecodeStream(object): assert device == decoding_graph.device self.params = params + self.cut_id = cut_id self.LOG_EPS = math.log(1e-10) self.states = initial_states @@ -102,6 +104,10 @@ class DecodeStream(object): """Return True if all the features are processed.""" return self._done + @property + def id(self) -> str: + return self.cut_id + def set_features( self, features: torch.Tensor, diff --git a/egs/wenetspeech/ASR/pruned_transducer_stateless5/streaming_decode.py b/egs/wenetspeech/ASR/pruned_transducer_stateless5/streaming_decode.py index 2a383ca46..ff96c6487 100644 --- a/egs/wenetspeech/ASR/pruned_transducer_stateless5/streaming_decode.py +++ b/egs/wenetspeech/ASR/pruned_transducer_stateless5/streaming_decode.py @@ -396,6 +396,7 @@ def decode_dataset( # each utterance has a DecodeStream. 
decode_stream = DecodeStream( params=params, + cut_id=cut.id, initial_states=initial_states, decoding_graph=decoding_graph, device=device, @@ -423,6 +424,7 @@ def decode_dataset( hyp = decode_streams[i].decoding_result() decode_results.append( ( + decode_streams[i].id, list(decode_streams[i].ground_truth), [lexicon.token_table[idx] for idx in hyp], ) @@ -441,6 +443,7 @@ def decode_dataset( hyp = decode_streams[i].decoding_result() decode_results.append( ( + decode_streams[i].id, list(decode_streams[i].ground_truth), [lexicon.token_table[idx] for idx in hyp], ) diff --git a/egs/yesno/ASR/tdnn/decode.py b/egs/yesno/ASR/tdnn/decode.py index a6a57a2fc..79adcb14e 100755 --- a/egs/yesno/ASR/tdnn/decode.py +++ b/egs/yesno/ASR/tdnn/decode.py @@ -178,6 +178,7 @@ def decode_dataset( results = [] for batch_idx, batch in enumerate(dl): texts = batch["supervisions"]["text"] + cut_ids = [cut.id for cut in batch["supervisions"]["cut"]] hyps = decode_one_batch( params=params, @@ -189,9 +190,9 @@ def decode_dataset( this_batch = [] assert len(hyps) == len(texts) - for hyp_words, ref_text in zip(hyps, texts): + for cut_id, hyp_words, ref_text in zip(cut_ids, hyps, texts): ref_words = ref_text.split() - this_batch.append((ref_words, hyp_words)) + this_batch.append((cut_id, ref_words, hyp_words)) results.extend(this_batch) @@ -237,6 +238,7 @@ def save_results( Return None. """ recog_path = exp_dir / f"recogs-{test_set_name}.txt" + results = sorted(results) store_transcripts(filename=recog_path, texts=results) logging.info(f"The transcripts are stored in {recog_path}") @@ -303,6 +305,8 @@ def main(): model.to(device) model.eval() + # we need cut ids to display recognition results. + args.return_cuts = True yes_no = YesNoAsrDataModule(args) test_dl = yes_no.test_dataloaders() results = decode_dataset( diff --git a/egs/yesno/ASR/transducer/decode.py b/egs/yesno/ASR/transducer/decode.py index abb34da4c..6714180db 100755 --- a/egs/yesno/ASR/transducer/decode.py +++ b/egs/yesno/ASR/transducer/decode.py @@ -165,6 +165,7 @@ def decode_dataset( results = [] for batch_idx, batch in enumerate(dl): texts = batch["supervisions"]["text"] + cut_ids = [cut.id for cut in batch["supervisions"]["cut"]] hyps = decode_one_batch( params=params, @@ -174,9 +175,9 @@ def decode_dataset( this_batch = [] assert len(hyps) == len(texts) - for hyp_words, ref_text in zip(hyps, texts): + for cut_id, hyp_words, ref_text in zip(cut_ids, hyps, texts): ref_words = ref_text.split() - this_batch.append((ref_words, hyp_words)) + this_batch.append((cut_id, ref_words, hyp_words)) results.extend(this_batch) @@ -222,6 +223,7 @@ def save_results( Return None. """ recog_path = exp_dir / f"recogs-{test_set_name}.txt" + results = sorted(results) store_transcripts(filename=recog_path, texts=results) logging.info(f"The transcripts are stored in {recog_path}") @@ -291,6 +293,8 @@ def main(): model.eval() model.device = device + # we need cut ids to display recognition results. 
+ args.return_cuts = True yes_no = YesNoAsrDataModule(args) test_dl = yes_no.test_dataloaders() results = decode_dataset( diff --git a/icefall/__init__.py b/icefall/__init__.py index 52d551c6a..0399c8459 100644 --- a/icefall/__init__.py +++ b/icefall/__init__.py @@ -49,6 +49,7 @@ from .utils import ( get_alignments, get_executor, get_texts, + is_jit_tracing, l1_norm, l2_norm, linf_norm, diff --git a/icefall/utils.py b/icefall/utils.py index f40f769f8..2b089c8d0 100644 --- a/icefall/utils.py +++ b/icefall/utils.py @@ -42,6 +42,18 @@ from icefall.checkpoint import average_checkpoints Pathlike = Union[str, Path] +# Pytorch issue: https://github.com/pytorch/pytorch/issues/47379 +# Fixed: https://github.com/pytorch/pytorch/pull/49853 +# The fix was included in v1.9.0 +# https://github.com/pytorch/pytorch/releases/tag/v1.9.0 +def is_jit_tracing(): + if torch.jit.is_scripting(): + return False + elif torch.jit.is_tracing(): + return True + return False + + @contextmanager def get_executor(): # We'll either return a process pool or a distributed worker pool. @@ -321,7 +333,7 @@ def load_alignments(filename: str) -> Tuple[int, Dict[str, List[int]]]: def store_transcripts( - filename: Pathlike, texts: Iterable[Tuple[str, str]] + filename: Pathlike, texts: Iterable[Tuple[str, str, str]] ) -> None: """Save predicted results and reference transcripts to a file. @@ -329,15 +341,15 @@ def store_transcripts( filename: File to save the results to. texts: - An iterable of tuples. The first element is the reference transcript - while the second element is the predicted result. + An iterable of tuples. The first element is the cur_id, the second is + the reference transcript and the third element is the predicted result. Returns: Return None. """ with open(filename, "w") as f: - for ref, hyp in texts: - print(f"ref={ref}", file=f) - print(f"hyp={hyp}", file=f) + for cut_id, ref, hyp in texts: + print(f"{cut_id}:\tref={ref}", file=f) + print(f"{cut_id}:\thyp={hyp}", file=f) def write_error_stats( @@ -372,8 +384,8 @@ def write_error_stats( The reference word `SIR` is missing in the predicted results (a deletion error). results: - An iterable of tuples. The first element is the reference transcript - while the second element is the predicted result. + An iterable of tuples. The first element is the cur_id, the second is + the reference transcript and the third element is the predicted result. enable_log: If True, also print detailed WER to the console. Otherwise, it is written only to the given file. 
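For reference, a minimal standalone sketch (not importing icefall) of the result-tuple convention introduced above: decoding scripts now collect `(cut_id, ref_words, hyp_words)` triples, sort them, and `store_transcripts` prefixes every ref/hyp line with the cut id. The cut ids and texts below are invented for illustration.

```python
from pathlib import Path
from typing import Iterable, List, Tuple


def store_transcripts_sketch(
    filename: Path, texts: Iterable[Tuple[str, List[str], List[str]]]
) -> None:
    # Mirrors the updated icefall.utils.store_transcripts: every line is
    # keyed by the cut id, so each hypothesis can be traced back to its
    # original utterance.
    with open(filename, "w") as f:
        for cut_id, ref, hyp in texts:
            print(f"{cut_id}:\tref={ref}", file=f)
            print(f"{cut_id}:\thyp={hyp}", file=f)


# Hypothetical decoding results in the new (cut_id, ref, hyp) format.
results = [
    ("1089-134686-0001", ["HELLO", "WORLD"], ["HELLO", "WORD"]),
    ("1089-134686-0000", ["GOOD", "MORNING"], ["GOOD", "MORNING"]),
]
results = sorted(results)  # sorted by cut id before saving, as in the recipes
store_transcripts_sketch(Path("recogs-test-clean-greedy_search.txt"), results)
```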
@@ -389,7 +401,7 @@ def write_error_stats( words: Dict[str, List[int]] = defaultdict(lambda: [0, 0, 0, 0, 0]) num_corr = 0 ERR = "*" - for ref, hyp in results: + for cut_id, ref, hyp in results: ali = kaldialign.align(ref, hyp, ERR) for ref_word, hyp_word in ali: if ref_word == ERR: @@ -405,7 +417,7 @@ def write_error_stats( else: words[ref_word][0] += 1 num_corr += 1 - ref_len = sum([len(r) for r, _ in results]) + ref_len = sum([len(r) for _, r, _ in results]) sub_errs = sum(subs.values()) ins_errs = sum(ins.values()) del_errs = sum(dels.values()) @@ -434,7 +446,7 @@ def write_error_stats( print("", file=f) print("PER-UTT DETAILS: corr or (ref->hyp) ", file=f) - for ref, hyp in results: + for cut_id, ref, hyp in results: ali = kaldialign.align(ref, hyp, ERR) combine_successive_errors = True if combine_successive_errors: @@ -461,7 +473,8 @@ def write_error_stats( ] print( - " ".join( + f"{cut_id}:\t" + + " ".join( ( ref_word if ref_word == hyp_word From 951b03f6d74ba11b9e79143685fc42fc6e0a7805 Mon Sep 17 00:00:00 2001 From: yangsuxia <34536059+yangsuxia@users.noreply.github.com> Date: Sat, 13 Aug 2022 11:09:54 +0800 Subject: [PATCH 34/38] Add function display_and_save_batch in wenetspeech/pruned_transducer_stateless2/train.py (#528) * Add function display_and_save_batch in egs/wenetspeech/ASR/pruned_transducer_stateless2/train.py * Modify function: display_and_save_batch * Delete empty line in pruned_transducer_stateless2/train.py * Modify code format --- .../ASR/pruned_transducer_stateless2/train.py | 70 ++++++++++++++----- 1 file changed, 52 insertions(+), 18 deletions(-) diff --git a/egs/wenetspeech/ASR/pruned_transducer_stateless2/train.py b/egs/wenetspeech/ASR/pruned_transducer_stateless2/train.py index faf25eda1..5208dbefe 100644 --- a/egs/wenetspeech/ASR/pruned_transducer_stateless2/train.py +++ b/egs/wenetspeech/ASR/pruned_transducer_stateless2/train.py @@ -701,25 +701,29 @@ def train_one_epoch( params.batch_idx_train += 1 batch_size = len(batch["supervisions"]["text"]) - with torch.cuda.amp.autocast(enabled=params.use_fp16): - loss, loss_info = compute_loss( - params=params, - model=model, - graph_compiler=graph_compiler, - batch=batch, - is_training=True, - warmup=(params.batch_idx_train / params.model_warm_step), - ) - # summary stats - tot_loss = (tot_loss * (1 - 1 / params.reset_interval)) + loss_info + try: + with torch.cuda.amp.autocast(enabled=params.use_fp16): + loss, loss_info = compute_loss( + params=params, + model=model, + graph_compiler=graph_compiler, + batch=batch, + is_training=True, + warmup=(params.batch_idx_train / params.model_warm_step), + ) + # summary stats + tot_loss = (tot_loss * (1 - 1 / params.reset_interval)) + loss_info - # NOTE: We use reduction==sum and loss is computed over utterances - # in the batch and there is no normalization to it so far. - scaler.scale(loss).backward() - scheduler.step_batch(params.batch_idx_train) - scaler.step(optimizer) - scaler.update() - optimizer.zero_grad() + # NOTE: We use reduction==sum and loss is computed over utterances + # in the batch and there is no normalization to it so far. 
+ scaler.scale(loss).backward() + scheduler.step_batch(params.batch_idx_train) + scaler.step(optimizer) + scaler.update() + optimizer.zero_grad() + except: # noqa + display_and_save_batch(batch, params=params) + raise if params.print_diagnostics and batch_idx == 5: return @@ -958,6 +962,35 @@ def run(rank, world_size, args): cleanup_dist() +def display_and_save_batch( + batch: dict, + params: AttributeDict, +) -> None: + """Display the batch statistics and save the batch into disk. + + Args: + batch: + A batch of data. See `lhotse.dataset.K2SpeechRecognitionDataset()` + for the content in it. + params: + Parameters for training. See :func:`get_params`. + """ + from lhotse.utils import uuid4 + + filename = f"{params.exp_dir}/batch-{uuid4()}.pt" + logging.info(f"Saving batch to {filename}") + torch.save(batch, filename) + + features = batch["inputs"] + + logging.info(f"features shape: {features.shape}") + + texts = batch["supervisions"]["text"] + num_tokens = sum(len(i) for i in texts) + + logging.info(f"num tokens: {num_tokens}") + + def scan_pessimistic_batches_for_oom( model: nn.Module, train_dl: torch.utils.data.DataLoader, @@ -998,6 +1031,7 @@ def scan_pessimistic_batches_for_oom( f"Failing criterion: {criterion} " f"(={crit_values[criterion]}) ..." ) + display_and_save_batch(batch, params=params) raise From 669401869da1e42c2375226199ca20384ce22ba7 Mon Sep 17 00:00:00 2001 From: Fangjun Kuang Date: Wed, 17 Aug 2022 12:22:43 +0800 Subject: [PATCH 35/38] Filter non-finite losses (#525) * Filter non-finite losses * Fixes after review --- .../ASR/pruned_transducer_stateless2/model.py | 10 ++++-- .../ASR/pruned_transducer_stateless5/train.py | 32 +++++++++++++++++++ 2 files changed, 40 insertions(+), 2 deletions(-) diff --git a/egs/librispeech/ASR/pruned_transducer_stateless2/model.py b/egs/librispeech/ASR/pruned_transducer_stateless2/model.py index 2434fd41d..452102d21 100644 --- a/egs/librispeech/ASR/pruned_transducer_stateless2/model.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless2/model.py @@ -78,6 +78,7 @@ class Transducer(nn.Module): am_scale: float = 0.0, lm_scale: float = 0.0, warmup: float = 1.0, + reduction: str = "sum", ) -> torch.Tensor: """ Args: @@ -101,6 +102,10 @@ class Transducer(nn.Module): warmup: A value warmup >= 0 that determines which modules are active, values warmup > 1 "are fully warmed up" and all modules will be active. + reduction: + "sum" to sum the losses over all utterances in the batch. + "none" to return the loss in a 1-D tensor for each utterance + in the batch. Returns: Return the transducer loss. 
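As a usage note, the file written by `display_and_save_batch` is just a `torch.save`d batch dict, so a problematic batch can be reloaded later for offline inspection. A small sketch, assuming a file name copied from the training log (the uuid part here is made up):

```python
import torch

# Path printed by display_and_save_batch; adjust to the name in your log.
batch = torch.load("pruned_transducer_stateless2/exp/batch-4c4f3e2a.pt")

features = batch["inputs"]            # (N, T, C) fbank features
supervisions = batch["supervisions"]  # dict containing at least "text"

print("features shape:", features.shape)
print("num utterances:", len(supervisions["text"]))
print("longest text:", max(supervisions["text"], key=len))
```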
@@ -110,6 +115,7 @@ class Transducer(nn.Module): lm_scale * lm_probs + am_scale * am_probs + (1-lm_scale-am_scale) * combined_probs """ + assert reduction in ("sum", "none"), reduction assert x.ndim == 3, x.shape assert x_lens.ndim == 1, x_lens.shape assert y.num_axes == 2, y.num_axes @@ -155,7 +161,7 @@ class Transducer(nn.Module): lm_only_scale=lm_scale, am_only_scale=am_scale, boundary=boundary, - reduction="sum", + reduction=reduction, return_grad=True, ) @@ -188,7 +194,7 @@ class Transducer(nn.Module): ranges=ranges, termination_symbol=blank_id, boundary=boundary, - reduction="sum", + reduction=reduction, ) return (simple_loss, pruned_loss) diff --git a/egs/librispeech/ASR/pruned_transducer_stateless5/train.py b/egs/librispeech/ASR/pruned_transducer_stateless5/train.py index 3bfe22155..b7ef288c6 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless5/train.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless5/train.py @@ -655,7 +655,35 @@ def compute_loss( am_scale=params.am_scale, lm_scale=params.lm_scale, warmup=warmup, + reduction="none", ) + simple_loss_is_finite = torch.isfinite(simple_loss) + pruned_loss_is_finite = torch.isfinite(pruned_loss) + is_finite = simple_loss_is_finite & pruned_loss_is_finite + if not torch.all(is_finite): + logging.info( + "Not all losses are finite!\n" + f"simple_loss: {simple_loss}\n" + f"pruned_loss: {pruned_loss}" + ) + display_and_save_batch(batch, params=params, sp=sp) + simple_loss = simple_loss[simple_loss_is_finite] + pruned_loss = pruned_loss[pruned_loss_is_finite] + + # If the batch contains more than 10 utterance AND + # if either all simple_loss or pruned_loss is inf or nan, + # we stop the training process by raising an exception + if feature.size(0) >= 10: + if torch.all(~simple_loss_is_finite) or torch.all( + ~pruned_loss_is_finite + ): + raise ValueError( + "There are too many utterances in this batch " + "leading to inf or nan losses." + ) + + simple_loss = simple_loss.sum() + pruned_loss = pruned_loss.sum() # after the main warmup step, we keep pruned_loss_scale small # for the same amount of time (model_warm_step), to avoid # overwhelming the simple_loss and causing it to diverge, @@ -675,6 +703,10 @@ def compute_loss( info = MetricsTracker() with warnings.catch_warnings(): warnings.simplefilter("ignore") + # info["frames"] is an approximate number for two reasons: + # (1) The acutal subsampling factor is ((lens - 1) // 2 - 1) // 2 + # (2) If some utterances in the batch lead to inf/nan loss, they + # are filtered out. 
info["frames"] = ( (feature_lens // params.subsampling_factor).sum().item() ) From c74cec59e9f6d00e3a5838b4f8d4ace7e2303ad4 Mon Sep 17 00:00:00 2001 From: marcoyang1998 <45973641+marcoyang1998@users.noreply.github.com> Date: Wed, 17 Aug 2022 17:18:15 +0800 Subject: [PATCH 36/38] propagate changes from #525 to other librispeech recipes (#531) * propaga changes from #525 to other librispeech recipes * refactor display_and_save_batch to utils * fixed typo * reformat code style --- .../ASR/pruned_transducer_stateless/model.py | 10 ++- .../ASR/pruned_transducer_stateless/train.py | 34 +++++++++ .../ASR/pruned_transducer_stateless2/train.py | 72 ++++++++++--------- .../ASR/pruned_transducer_stateless3/model.py | 10 ++- .../ASR/pruned_transducer_stateless3/train.py | 40 ++++++++++- .../ASR/pruned_transducer_stateless4/train.py | 40 ++++++++++- .../ASR/pruned_transducer_stateless5/train.py | 42 +++-------- .../ASR/pruned_transducer_stateless6/model.py | 10 ++- .../ASR/pruned_transducer_stateless6/train.py | 39 +++++++++- icefall/utils.py | 32 +++++++++ 10 files changed, 253 insertions(+), 76 deletions(-) diff --git a/egs/librispeech/ASR/pruned_transducer_stateless/model.py b/egs/librispeech/ASR/pruned_transducer_stateless/model.py index 2f019bcdb..e2c9eb789 100644 --- a/egs/librispeech/ASR/pruned_transducer_stateless/model.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless/model.py @@ -66,6 +66,7 @@ class Transducer(nn.Module): prune_range: int = 5, am_scale: float = 0.0, lm_scale: float = 0.0, + reduction: str = "sum", ) -> torch.Tensor: """ Args: @@ -86,6 +87,10 @@ class Transducer(nn.Module): lm_scale: The scale to smooth the loss with lm (output of predictor network) part + reduction: + "sum" to sum the losses over all utterances in the batch. + "none" to return the loss in a 1-D tensor for each utterance + in the batch. Returns: Return the transducer loss. 
@@ -95,6 +100,7 @@ class Transducer(nn.Module): lm_scale * lm_probs + am_scale * am_probs + (1-lm_scale-am_scale) * combined_probs """ + assert reduction in ("sum", "none"), reduction assert x.ndim == 3, x.shape assert x_lens.ndim == 1, x_lens.shape assert y.num_axes == 2, y.num_axes @@ -136,7 +142,7 @@ class Transducer(nn.Module): lm_only_scale=lm_scale, am_only_scale=am_scale, boundary=boundary, - reduction="sum", + reduction=reduction, return_grad=True, ) @@ -163,7 +169,7 @@ class Transducer(nn.Module): ranges=ranges, termination_symbol=blank_id, boundary=boundary, - reduction="sum", + reduction=reduction, ) return (simple_loss, pruned_loss) diff --git a/egs/librispeech/ASR/pruned_transducer_stateless/train.py b/egs/librispeech/ASR/pruned_transducer_stateless/train.py index 33b23038c..c2e0f1f98 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless/train.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless/train.py @@ -78,6 +78,7 @@ from icefall.env import get_env_info from icefall.utils import ( AttributeDict, MetricsTracker, + display_and_save_batch, measure_gradient_norms, measure_weight_norms, optim_step_and_measure_param_change, @@ -544,7 +545,36 @@ def compute_loss( prune_range=params.prune_range, am_scale=params.am_scale, lm_scale=params.lm_scale, + reduction="none", ) + simple_loss_is_finite = torch.isfinite(simple_loss) + pruned_loss_is_finite = torch.isfinite(pruned_loss) + is_finite = simple_loss_is_finite & pruned_loss_is_finite + if not torch.all(is_finite): + logging.info( + "Not all losses are finite!\n" + f"simple_loss: {simple_loss}\n" + f"pruned_loss: {pruned_loss}" + ) + display_and_save_batch(batch, params=params, sp=sp) + simple_loss = simple_loss[simple_loss_is_finite] + pruned_loss = pruned_loss[pruned_loss_is_finite] + + # If the batch contains more than 10 utterances AND + # if either all simple_loss or pruned_loss is inf or nan, + # we stop the training process by raising an exception + if feature.size(0) >= 10: + if torch.all(~simple_loss_is_finite) or torch.all( + ~pruned_loss_is_finite + ): + raise ValueError( + "There are too many utterances in this batch " + "leading to inf or nan losses." + ) + + simple_loss = simple_loss.sum() + pruned_loss = pruned_loss.sum() + loss = params.simple_loss_scale * simple_loss + pruned_loss assert loss.requires_grad == is_training @@ -552,6 +582,10 @@ def compute_loss( info = MetricsTracker() with warnings.catch_warnings(): warnings.simplefilter("ignore") + # info["frames"] is an approximate number for two reasons: + # (1) The acutal subsampling factor is ((lens - 1) // 2 - 1) // 2 + # (2) If some utterances in the batch lead to inf/nan loss, they + # are filtered out. 
info["frames"] = ( (feature_lens // params.subsampling_factor).sum().item() ) diff --git a/egs/librispeech/ASR/pruned_transducer_stateless2/train.py b/egs/librispeech/ASR/pruned_transducer_stateless2/train.py index 4d290e39f..c801bd2bd 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless2/train.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless2/train.py @@ -88,7 +88,13 @@ from icefall.checkpoint import save_checkpoint as save_checkpoint_impl from icefall.checkpoint import save_checkpoint_with_global_batch_idx from icefall.dist import cleanup_dist, setup_dist from icefall.env import get_env_info -from icefall.utils import AttributeDict, MetricsTracker, setup_logger, str2bool +from icefall.utils import ( + AttributeDict, + MetricsTracker, + display_and_save_batch, + setup_logger, + str2bool, +) LRSchedulerType = Union[ torch.optim.lr_scheduler._LRScheduler, optim.LRScheduler @@ -600,7 +606,35 @@ def compute_loss( am_scale=params.am_scale, lm_scale=params.lm_scale, warmup=warmup, + reduction="none", ) + simple_loss_is_finite = torch.isfinite(simple_loss) + pruned_loss_is_finite = torch.isfinite(pruned_loss) + is_finite = simple_loss_is_finite & pruned_loss_is_finite + if not torch.all(is_finite): + logging.info( + "Not all losses are finite!\n" + f"simple_loss: {simple_loss}\n" + f"pruned_loss: {pruned_loss}" + ) + display_and_save_batch(batch, params=params, sp=sp) + simple_loss = simple_loss[simple_loss_is_finite] + pruned_loss = pruned_loss[pruned_loss_is_finite] + + # If the batch contains more than 10 utterances AND + # if either all simple_loss or pruned_loss is inf or nan, + # we stop the training process by raising an exception + if feature.size(0) >= 10: + if torch.all(~simple_loss_is_finite) or torch.all( + ~pruned_loss_is_finite + ): + raise ValueError( + "There are too many utterances in this batch " + "leading to inf or nan losses." + ) + + simple_loss = simple_loss.sum() + pruned_loss = pruned_loss.sum() # after the main warmup step, we keep pruned_loss_scale small # for the same amount of time (model_warm_step), to avoid # overwhelming the simple_loss and causing it to diverge, @@ -620,6 +654,10 @@ def compute_loss( info = MetricsTracker() with warnings.catch_warnings(): warnings.simplefilter("ignore") + # info["frames"] is an approximate number for two reasons: + # (1) The acutal subsampling factor is ((lens - 1) // 2 - 1) // 2 + # (2) If some utterances in the batch lead to inf/nan loss, they + # are filtered out. info["frames"] = ( (feature_lens // params.subsampling_factor).sum().item() ) @@ -993,38 +1031,6 @@ def run(rank, world_size, args): cleanup_dist() -def display_and_save_batch( - batch: dict, - params: AttributeDict, - sp: spm.SentencePieceProcessor, -) -> None: - """Display the batch statistics and save the batch into disk. - - Args: - batch: - A batch of data. See `lhotse.dataset.K2SpeechRecognitionDataset()` - for the content in it. - params: - Parameters for training. See :func:`get_params`. - sp: - The BPE model. 
- """ - from lhotse.utils import uuid4 - - filename = f"{params.exp_dir}/batch-{uuid4()}.pt" - logging.info(f"Saving batch to {filename}") - torch.save(batch, filename) - - supervisions = batch["supervisions"] - features = batch["inputs"] - - logging.info(f"features shape: {features.shape}") - - y = sp.encode(supervisions["text"], out_type=int) - num_tokens = sum(len(i) for i in y) - logging.info(f"num tokens: {num_tokens}") - - def scan_pessimistic_batches_for_oom( model: nn.Module, train_dl: torch.utils.data.DataLoader, diff --git a/egs/librispeech/ASR/pruned_transducer_stateless3/model.py b/egs/librispeech/ASR/pruned_transducer_stateless3/model.py index 5894361fc..ece340534 100644 --- a/egs/librispeech/ASR/pruned_transducer_stateless3/model.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless3/model.py @@ -105,6 +105,7 @@ class Transducer(nn.Module): am_scale: float = 0.0, lm_scale: float = 0.0, warmup: float = 1.0, + reduction: str = "sum", ) -> torch.Tensor: """ Args: @@ -131,6 +132,10 @@ class Transducer(nn.Module): warmup: A value warmup >= 0 that determines which modules are active, values warmup > 1 "are fully warmed up" and all modules will be active. + reduction: + "sum" to sum the losses over all utterances in the batch. + "none" to return the loss in a 1-D tensor for each utterance + in the batch. Returns: Return the transducer loss. @@ -140,6 +145,7 @@ class Transducer(nn.Module): lm_scale * lm_probs + am_scale * am_probs + (1-lm_scale-am_scale) * combined_probs """ + assert reduction in ("sum", "none"), reduction assert x.ndim == 3, x.shape assert x_lens.ndim == 1, x_lens.shape assert y.num_axes == 2, y.num_axes @@ -196,7 +202,7 @@ class Transducer(nn.Module): lm_only_scale=lm_scale, am_only_scale=am_scale, boundary=boundary, - reduction="sum", + reduction=reduction, return_grad=True, ) @@ -229,7 +235,7 @@ class Transducer(nn.Module): ranges=ranges, termination_symbol=blank_id, boundary=boundary, - reduction="sum", + reduction=reduction, ) return (simple_loss, pruned_loss) diff --git a/egs/librispeech/ASR/pruned_transducer_stateless3/train.py b/egs/librispeech/ASR/pruned_transducer_stateless3/train.py index 914b9b5eb..be12e69ce 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless3/train.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless3/train.py @@ -84,7 +84,13 @@ from icefall.checkpoint import save_checkpoint as save_checkpoint_impl from icefall.checkpoint import save_checkpoint_with_global_batch_idx from icefall.dist import cleanup_dist, setup_dist from icefall.env import get_env_info -from icefall.utils import AttributeDict, MetricsTracker, setup_logger, str2bool +from icefall.utils import ( + AttributeDict, + MetricsTracker, + display_and_save_batch, + setup_logger, + str2bool, +) LRSchedulerType = Union[ torch.optim.lr_scheduler._LRScheduler, optim.LRScheduler @@ -637,7 +643,35 @@ def compute_loss( am_scale=params.am_scale, lm_scale=params.lm_scale, warmup=warmup, + reduction="none", ) + simple_loss_is_finite = torch.isfinite(simple_loss) + pruned_loss_is_finite = torch.isfinite(pruned_loss) + is_finite = simple_loss_is_finite & pruned_loss_is_finite + if not torch.all(is_finite): + logging.info( + "Not all losses are finite!\n" + f"simple_loss: {simple_loss}\n" + f"pruned_loss: {pruned_loss}" + ) + display_and_save_batch(batch, params=params, sp=sp) + simple_loss = simple_loss[simple_loss_is_finite] + pruned_loss = pruned_loss[pruned_loss_is_finite] + + # If the batch contains more than 10 utterances AND + # if either all simple_loss or pruned_loss 
is inf or nan, + # we stop the training process by raising an exception + if feature.size(0) >= 10: + if torch.all(~simple_loss_is_finite) or torch.all( + ~pruned_loss_is_finite + ): + raise ValueError( + "There are too many utterances in this batch " + "leading to inf or nan losses." + ) + + simple_loss = simple_loss.sum() + pruned_loss = pruned_loss.sum() # after the main warmup step, we keep pruned_loss_scale small # for the same amount of time (model_warm_step), to avoid # overwhelming the simple_loss and causing it to diverge, @@ -657,6 +691,10 @@ def compute_loss( info = MetricsTracker() with warnings.catch_warnings(): warnings.simplefilter("ignore") + # info["frames"] is an approximate number for two reasons: + # (1) The acutal subsampling factor is ((lens - 1) // 2 - 1) // 2 + # (2) If some utterances in the batch lead to inf/nan loss, they + # are filtered out. info["frames"] = ( (feature_lens // params.subsampling_factor).sum().item() ) diff --git a/egs/librispeech/ASR/pruned_transducer_stateless4/train.py b/egs/librispeech/ASR/pruned_transducer_stateless4/train.py index 325b01323..2ba28acd4 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless4/train.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless4/train.py @@ -93,7 +93,13 @@ from icefall.checkpoint import ( ) from icefall.dist import cleanup_dist, setup_dist from icefall.env import get_env_info -from icefall.utils import AttributeDict, MetricsTracker, setup_logger, str2bool +from icefall.utils import ( + AttributeDict, + MetricsTracker, + display_and_save_batch, + setup_logger, + str2bool, +) LRSchedulerType = Union[ torch.optim.lr_scheduler._LRScheduler, optim.LRScheduler @@ -630,7 +636,35 @@ def compute_loss( am_scale=params.am_scale, lm_scale=params.lm_scale, warmup=warmup, + reduction="none", ) + simple_loss_is_finite = torch.isfinite(simple_loss) + pruned_loss_is_finite = torch.isfinite(pruned_loss) + is_finite = simple_loss_is_finite & pruned_loss_is_finite + if not torch.all(is_finite): + logging.info( + "Not all losses are finite!\n" + f"simple_loss: {simple_loss}\n" + f"pruned_loss: {pruned_loss}" + ) + display_and_save_batch(batch, params=params, sp=sp) + simple_loss = simple_loss[simple_loss_is_finite] + pruned_loss = pruned_loss[pruned_loss_is_finite] + + # If the batch contains more than 10 utterances AND + # if either all simple_loss or pruned_loss is inf or nan, + # we stop the training process by raising an exception + if feature.size(0) >= 10: + if torch.all(~simple_loss_is_finite) or torch.all( + ~pruned_loss_is_finite + ): + raise ValueError( + "There are too many utterances in this batch " + "leading to inf or nan losses." + ) + + simple_loss = simple_loss.sum() + pruned_loss = pruned_loss.sum() # after the main warmup step, we keep pruned_loss_scale small # for the same amount of time (model_warm_step), to avoid # overwhelming the simple_loss and causing it to diverge, @@ -650,6 +684,10 @@ def compute_loss( info = MetricsTracker() with warnings.catch_warnings(): warnings.simplefilter("ignore") + # info["frames"] is an approximate number for two reasons: + # (1) The acutal subsampling factor is ((lens - 1) // 2 - 1) // 2 + # (2) If some utterances in the batch lead to inf/nan loss, they + # are filtered out. 
info["frames"] = ( (feature_lens // params.subsampling_factor).sum().item() ) diff --git a/egs/librispeech/ASR/pruned_transducer_stateless5/train.py b/egs/librispeech/ASR/pruned_transducer_stateless5/train.py index b7ef288c6..cee7d2bff 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless5/train.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless5/train.py @@ -81,7 +81,13 @@ from icefall.checkpoint import ( ) from icefall.dist import cleanup_dist, setup_dist from icefall.env import get_env_info -from icefall.utils import AttributeDict, MetricsTracker, setup_logger, str2bool +from icefall.utils import ( + AttributeDict, + MetricsTracker, + display_and_save_batch, + setup_logger, + str2bool, +) LRSchedulerType = Union[ torch.optim.lr_scheduler._LRScheduler, optim.LRScheduler @@ -670,7 +676,7 @@ def compute_loss( simple_loss = simple_loss[simple_loss_is_finite] pruned_loss = pruned_loss[pruned_loss_is_finite] - # If the batch contains more than 10 utterance AND + # If the batch contains more than 10 utterances AND # if either all simple_loss or pruned_loss is inf or nan, # we stop the training process by raising an exception if feature.size(0) >= 10: @@ -1108,38 +1114,6 @@ def run(rank, world_size, args): cleanup_dist() -def display_and_save_batch( - batch: dict, - params: AttributeDict, - sp: spm.SentencePieceProcessor, -) -> None: - """Display the batch statistics and save the batch into disk. - - Args: - batch: - A batch of data. See `lhotse.dataset.K2SpeechRecognitionDataset()` - for the content in it. - params: - Parameters for training. See :func:`get_params`. - sp: - The BPE model. - """ - from lhotse.utils import uuid4 - - filename = f"{params.exp_dir}/batch-{uuid4()}.pt" - logging.info(f"Saving batch to {filename}") - torch.save(batch, filename) - - supervisions = batch["supervisions"] - features = batch["inputs"] - - logging.info(f"features shape: {features.shape}") - - y = sp.encode(supervisions["text"], out_type=int) - num_tokens = sum(len(i) for i in y) - logging.info(f"num tokens: {num_tokens}") - - def scan_pessimistic_batches_for_oom( model: Union[nn.Module, DDP], train_dl: torch.utils.data.DataLoader, diff --git a/egs/librispeech/ASR/pruned_transducer_stateless6/model.py b/egs/librispeech/ASR/pruned_transducer_stateless6/model.py index 1ed5636c8..9de0769d9 100644 --- a/egs/librispeech/ASR/pruned_transducer_stateless6/model.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless6/model.py @@ -89,6 +89,7 @@ class Transducer(nn.Module): am_scale: float = 0.0, lm_scale: float = 0.0, warmup: float = 1.0, + reduction: str = "sum", codebook_indexes: torch.Tensor = None, ) -> torch.Tensor: """ @@ -113,6 +114,10 @@ class Transducer(nn.Module): warmup: A value warmup >= 0 that determines which modules are active, values warmup > 1 "are fully warmed up" and all modules will be active. + reduction: + "sum" to sum the losses over all utterances in the batch. + "none" to return the loss in a 1-D tensor for each utterance + in the batch. codebook_indexes: codebook_indexes extracted from a teacher model. 
Returns: @@ -124,6 +129,7 @@ class Transducer(nn.Module): lm_scale * lm_probs + am_scale * am_probs + (1-lm_scale-am_scale) * combined_probs """ + assert reduction in ("sum", "none"), reduction assert x.ndim == 3, x.shape assert x_lens.ndim == 1, x_lens.shape assert y.num_axes == 2, y.num_axes @@ -184,7 +190,7 @@ class Transducer(nn.Module): lm_only_scale=lm_scale, am_only_scale=am_scale, boundary=boundary, - reduction="sum", + reduction=reduction, return_grad=True, ) @@ -217,7 +223,7 @@ class Transducer(nn.Module): ranges=ranges, termination_symbol=blank_id, boundary=boundary, - reduction="sum", + reduction=reduction, ) return (simple_loss, pruned_loss, codebook_loss) diff --git a/egs/librispeech/ASR/pruned_transducer_stateless6/train.py b/egs/librispeech/ASR/pruned_transducer_stateless6/train.py index a4595211c..294fd4c52 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless6/train.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless6/train.py @@ -93,7 +93,13 @@ from icefall.checkpoint import ( ) from icefall.dist import cleanup_dist, setup_dist from icefall.env import get_env_info -from icefall.utils import AttributeDict, MetricsTracker, setup_logger, str2bool +from icefall.utils import ( + AttributeDict, + MetricsTracker, + display_and_save_batch, + setup_logger, + str2bool, +) LRSchedulerType = Union[ torch.optim.lr_scheduler._LRScheduler, optim.LRScheduler @@ -631,8 +637,35 @@ def compute_loss( am_scale=params.am_scale, lm_scale=params.lm_scale, warmup=warmup, + reduction="none", codebook_indexes=codebook_indexes, ) + simple_loss_is_finite = torch.isfinite(simple_loss) + pruned_loss_is_finite = torch.isfinite(pruned_loss) + is_finite = simple_loss_is_finite & pruned_loss_is_finite + if not torch.all(is_finite): + logging.info( + "Not all losses are finite!\n" + f"simple_loss: {simple_loss}\n" + f"pruned_loss: {pruned_loss}" + ) + display_and_save_batch(batch, params=params, sp=sp) + simple_loss = simple_loss[simple_loss_is_finite] + pruned_loss = pruned_loss[pruned_loss_is_finite] + # If the batch contains more than 10 utterances AND + # if either all simple_loss or pruned_loss is inf or nan, + # we stop the training process by raising an exception + if feature.size(0) >= 10: + if torch.all(~simple_loss_is_finite) or torch.all( + ~pruned_loss_is_finite + ): + raise ValueError( + "There are too many utterances in this batch " + "leading to inf or nan losses." + ) + + simple_loss = simple_loss.sum() + pruned_loss = pruned_loss.sum() # after the main warmup step, we keep pruned_loss_scale small # for the same amount of time (model_warm_step), to avoid # overwhelming the simple_loss and causing it to diverge, @@ -654,6 +687,10 @@ def compute_loss( with warnings.catch_warnings(): warnings.simplefilter("ignore") + # info["frames"] is an approximate number for two reasons: + # (1) The acutal subsampling factor is ((lens - 1) // 2 - 1) // 2 + # (2) If some utterances in the batch lead to inf/nan loss, they + # are filtered out. info["frames"] = ( (feature_lens // params.subsampling_factor).sum().item() ) diff --git a/icefall/utils.py b/icefall/utils.py index 2b089c8d0..ad079222e 100644 --- a/icefall/utils.py +++ b/icefall/utils.py @@ -944,3 +944,35 @@ def tokenize_by_bpe_model( txt_with_bpe = "/".join(tokens) return txt_with_bpe + + +def display_and_save_batch( + batch: dict, + params: AttributeDict, + sp: spm.SentencePieceProcessor, +) -> None: + """Display the batch statistics and save the batch into disk. + + Args: + batch: + A batch of data. 
See `lhotse.dataset.K2SpeechRecognitionDataset()` + for the content in it. + params: + Parameters for training. See :func:`get_params`. + sp: + The BPE model. + """ + from lhotse.utils import uuid4 + + filename = f"{params.exp_dir}/batch-{uuid4()}.pt" + logging.info(f"Saving batch to {filename}") + torch.save(batch, filename) + + supervisions = batch["supervisions"] + features = batch["inputs"] + + logging.info(f"features shape: {features.shape}") + + y = sp.encode(supervisions["text"], out_type=int) + num_tokens = sum(len(i) for i in y) + logging.info(f"num tokens: {num_tokens}") From 31686ac829296f442fc82b3083717ad92cc28984 Mon Sep 17 00:00:00 2001 From: Lucky Wong Date: Thu, 18 Aug 2022 10:45:06 +0800 Subject: [PATCH 37/38] Fix not enough values to unpack error . (#533) --- egs/aishell/ASR/conformer_ctc/decode.py | 4 +++- egs/aishell/ASR/conformer_mmi/decode.py | 4 +++- egs/aishell/ASR/pruned_transducer_stateless3/decode.py | 4 +++- egs/aishell/ASR/tdnn_lstm_ctc/decode.py | 4 +++- egs/aishell/ASR/transducer_stateless/decode.py | 4 +++- egs/aishell/ASR/transducer_stateless_modified-2/decode.py | 4 +++- egs/aishell/ASR/transducer_stateless_modified/decode.py | 4 +++- egs/spgispeech/ASR/pruned_transducer_stateless2/decode.py | 4 +++- 8 files changed, 24 insertions(+), 8 deletions(-) diff --git a/egs/aishell/ASR/conformer_ctc/decode.py b/egs/aishell/ASR/conformer_ctc/decode.py index d860d3fdb..a3e7f98e3 100755 --- a/egs/aishell/ASR/conformer_ctc/decode.py +++ b/egs/aishell/ASR/conformer_ctc/decode.py @@ -431,7 +431,9 @@ def save_results( # we compute CER for aishell dataset. results_char = [] for res in results: - results_char.append((list("".join(res[0])), list("".join(res[1])))) + results_char.append( + (res[0], list("".join(res[1])), list("".join(res[2]))) + ) with open(errs_filename, "w") as f: wer = write_error_stats( f, f"{test_set_name}-{key}", results_char, enable_log=enable_log diff --git a/egs/aishell/ASR/conformer_mmi/decode.py b/egs/aishell/ASR/conformer_mmi/decode.py index f33ddd48b..ac68b61e7 100755 --- a/egs/aishell/ASR/conformer_mmi/decode.py +++ b/egs/aishell/ASR/conformer_mmi/decode.py @@ -443,7 +443,9 @@ def save_results( # we compute CER for aishell dataset. results_char = [] for res in results: - results_char.append((list("".join(res[0])), list("".join(res[1])))) + results_char.append( + (res[0], list("".join(res[1])), list("".join(res[2]))) + ) with open(errs_filename, "w") as f: wer = write_error_stats( f, f"{test_set_name}-{key}", results_char, enable_log=enable_log diff --git a/egs/aishell/ASR/pruned_transducer_stateless3/decode.py b/egs/aishell/ASR/pruned_transducer_stateless3/decode.py index a6a01c2c6..3268c8bb2 100755 --- a/egs/aishell/ASR/pruned_transducer_stateless3/decode.py +++ b/egs/aishell/ASR/pruned_transducer_stateless3/decode.py @@ -429,7 +429,9 @@ def save_results( # we compute CER for aishell dataset. results_char = [] for res in results: - results_char.append((list("".join(res[0])), list("".join(res[1])))) + results_char.append( + (res[0], list("".join(res[1])), list("".join(res[2]))) + ) with open(errs_filename, "w") as f: wer = write_error_stats( f, f"{test_set_name}-{key}", results_char, enable_log=True diff --git a/egs/aishell/ASR/tdnn_lstm_ctc/decode.py b/egs/aishell/ASR/tdnn_lstm_ctc/decode.py index 58a999c22..45c1c4ec1 100755 --- a/egs/aishell/ASR/tdnn_lstm_ctc/decode.py +++ b/egs/aishell/ASR/tdnn_lstm_ctc/decode.py @@ -289,7 +289,9 @@ def save_results( # We compute CER for aishell dataset. 
results_char = [] for res in results: - results_char.append((list("".join(res[0])), list("".join(res[1])))) + results_char.append( + (res[0], list("".join(res[1])), list("".join(res[2]))) + ) with open(errs_filename, "w") as f: wer = write_error_stats(f, f"{test_set_name}-{key}", results_char) test_set_wers[key] = wer diff --git a/egs/aishell/ASR/transducer_stateless/decode.py b/egs/aishell/ASR/transducer_stateless/decode.py index 7a96b6f73..d78821b95 100755 --- a/egs/aishell/ASR/transducer_stateless/decode.py +++ b/egs/aishell/ASR/transducer_stateless/decode.py @@ -346,7 +346,9 @@ def save_results( # we compute CER for aishell dataset. results_char = [] for res in results: - results_char.append((list("".join(res[0])), list("".join(res[1])))) + results_char.append( + (res[0], list("".join(res[1])), list("".join(res[2]))) + ) with open(errs_filename, "w") as f: wer = write_error_stats( f, f"{test_set_name}-{key}", results_char, enable_log=True diff --git a/egs/aishell/ASR/transducer_stateless_modified-2/decode.py b/egs/aishell/ASR/transducer_stateless_modified-2/decode.py index 7eb273da0..a9dca995f 100755 --- a/egs/aishell/ASR/transducer_stateless_modified-2/decode.py +++ b/egs/aishell/ASR/transducer_stateless_modified-2/decode.py @@ -393,7 +393,9 @@ def save_results( # we compute CER for aishell dataset. results_char = [] for res in results: - results_char.append((list("".join(res[0])), list("".join(res[1])))) + results_char.append( + (res[0], list("".join(res[1])), list("".join(res[2]))) + ) with open(errs_filename, "w") as f: wer = write_error_stats( f, f"{test_set_name}-{key}", results_char, enable_log=True diff --git a/egs/aishell/ASR/transducer_stateless_modified/decode.py b/egs/aishell/ASR/transducer_stateless_modified/decode.py index 1fe39fed2..9e827e1d1 100755 --- a/egs/aishell/ASR/transducer_stateless_modified/decode.py +++ b/egs/aishell/ASR/transducer_stateless_modified/decode.py @@ -397,7 +397,9 @@ def save_results( # we compute CER for aishell dataset. results_char = [] for res in results: - results_char.append((list("".join(res[0])), list("".join(res[1])))) + results_char.append( + (res[0], list("".join(res[1])), list("".join(res[2]))) + ) with open(errs_filename, "w") as f: wer = write_error_stats( f, f"{test_set_name}-{key}", results_char, enable_log=True diff --git a/egs/spgispeech/ASR/pruned_transducer_stateless2/decode.py b/egs/spgispeech/ASR/pruned_transducer_stateless2/decode.py index c13b980c6..722cd8dbd 100755 --- a/egs/spgispeech/ASR/pruned_transducer_stateless2/decode.py +++ b/egs/spgispeech/ASR/pruned_transducer_stateless2/decode.py @@ -424,7 +424,9 @@ def save_results( # we also compute CER for spgispeech dataset. 
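The unpacking fix above keeps the cut id as the first element while converting word-level results into character-level ones for CER scoring. A toy illustration of that transformation (the ids and strings are invented):

```python
# Word-level results in the (cut_id, ref_words, hyp_words) format.
results = [
    ("BAC009S0764W0121", ["甚至", "出现", "交易"], ["甚至", "出现", "交易"]),
    ("BAC009S0764W0122", ["幅度", "很大"], ["幅度", "很多"]),
]

# Same conversion as in the decode.py fixes: join the words and split them
# into characters, keeping the cut id in position 0.
results_char = []
for res in results:
    results_char.append(
        (res[0], list("".join(res[1])), list("".join(res[2])))
    )

print(results_char[1])
# ('BAC009S0764W0122', ['幅', '度', '很', '大'], ['幅', '度', '很', '多'])
```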
results_char = [] for res in results: - results_char.append((list("".join(res[0])), list("".join(res[1])))) + results_char.append( + (res[0], list("".join(res[1])), list("".join(res[2]))) + ) cers_filename = ( params.res_dir / f"cers-{test_set_name}-{key}-{params.suffix}.txt" ) From f2f5baf6871ad3acdd749fd511b6ab2e9670b10e Mon Sep 17 00:00:00 2001 From: Zengwei Yao Date: Fri, 19 Aug 2022 14:38:45 +0800 Subject: [PATCH 38/38] Use ScaledLSTM as streaming encoder (#479) * add ScaledLSTM * add RNNEncoderLayer and RNNEncoder classes in lstm.py * add RNN and Conv2dSubsampling classes in lstm.py * hardcode bidirectional=False * link from pruned_transducer_stateless2 * link scaling.py pruned_transducer_stateless2 * copy from pruned_transducer_stateless2 * modify decode.py pretrained.py test_model.py train.py * copy streaming decoding files from pruned_transducer_stateless2 * modify streaming decoding files * simplified code in ScaledLSTM * flat weights after scaling * pruned2 -> pruned4 * link __init__.py * fix style * remove add_model_arguments * modify .flake8 * fix style * fix scale value in scaling.py * add random combiner for training deeper model * add using proj_size * add scaling converter for ScaledLSTM * support jit trace * add using averaged model in export.py * modify test_model.py, test if the model can be successfully exported by jit.trace * modify pretrained.py * support streaming decoding * fix model.py * Add cut_id to recognition results * Add cut_id to recognition results * do not pad in Conv subsampling module; add tail padding during decoding. * update RESULTS.md * minor fix * fix doc * update README.md * minor change, filter infinite loss * remove the condition of raise error * modify type hint for the return value in model.py * minor change * modify RESULTS.md Co-authored-by: pkufool --- .flake8 | 3 +- egs/librispeech/ASR/README.md | 1 + egs/librispeech/ASR/RESULTS.md | 86 ++ .../ASR/lstm_transducer_stateless/__init__.py | 1 + .../asr_datamodule.py | 1 + .../lstm_transducer_stateless/beam_search.py | 1 + .../ASR/lstm_transducer_stateless/decode.py | 818 ++++++++++++ .../ASR/lstm_transducer_stateless/decoder.py | 1 + .../encoder_interface.py | 1 + .../ASR/lstm_transducer_stateless/export.py | 388 ++++++ .../jit_pretrained.py | 322 +++++ .../ASR/lstm_transducer_stateless/joiner.py | 1 + .../ASR/lstm_transducer_stateless/lstm.py | 842 ++++++++++++ .../ASR/lstm_transducer_stateless/model.py | 202 +++ .../ASR/lstm_transducer_stateless/optim.py | 1 + .../lstm_transducer_stateless/pretrained.py | 352 +++++ .../ASR/lstm_transducer_stateless/scaling.py | 1 + .../scaling_converter.py | 1 + .../ASR/lstm_transducer_stateless/stream.py | 148 +++ .../streaming_decode.py | 968 ++++++++++++++ .../lstm_transducer_stateless/test_model.py | 92 ++ .../test_scaling_converter.py | 257 ++++ .../ASR/lstm_transducer_stateless/train.py | 1132 +++++++++++++++++ .../ASR/pruned_transducer_stateless/model.py | 4 +- .../ASR/pruned_transducer_stateless/train.py | 18 +- .../ASR/pruned_transducer_stateless2/model.py | 4 +- .../pruned_transducer_stateless2/scaling.py | 169 ++- .../ASR/pruned_transducer_stateless2/train.py | 18 +- .../pruned_transducer_stateless3/export.py | 4 +- .../ASR/pruned_transducer_stateless3/model.py | 4 +- .../scaling_converter.py | 42 +- .../ASR/pruned_transducer_stateless3/train.py | 18 +- .../ASR/pruned_transducer_stateless4/train.py | 18 +- .../ASR/pruned_transducer_stateless5/train.py | 15 +- .../ASR/pruned_transducer_stateless6/model.py | 7 +- 
.../ASR/pruned_transducer_stateless6/train.py | 15 +- 36 files changed, 5888 insertions(+), 68 deletions(-) create mode 120000 egs/librispeech/ASR/lstm_transducer_stateless/__init__.py create mode 120000 egs/librispeech/ASR/lstm_transducer_stateless/asr_datamodule.py create mode 120000 egs/librispeech/ASR/lstm_transducer_stateless/beam_search.py create mode 100755 egs/librispeech/ASR/lstm_transducer_stateless/decode.py create mode 120000 egs/librispeech/ASR/lstm_transducer_stateless/decoder.py create mode 120000 egs/librispeech/ASR/lstm_transducer_stateless/encoder_interface.py create mode 100755 egs/librispeech/ASR/lstm_transducer_stateless/export.py create mode 100755 egs/librispeech/ASR/lstm_transducer_stateless/jit_pretrained.py create mode 120000 egs/librispeech/ASR/lstm_transducer_stateless/joiner.py create mode 100644 egs/librispeech/ASR/lstm_transducer_stateless/lstm.py create mode 100644 egs/librispeech/ASR/lstm_transducer_stateless/model.py create mode 120000 egs/librispeech/ASR/lstm_transducer_stateless/optim.py create mode 100755 egs/librispeech/ASR/lstm_transducer_stateless/pretrained.py create mode 120000 egs/librispeech/ASR/lstm_transducer_stateless/scaling.py create mode 120000 egs/librispeech/ASR/lstm_transducer_stateless/scaling_converter.py create mode 100644 egs/librispeech/ASR/lstm_transducer_stateless/stream.py create mode 100755 egs/librispeech/ASR/lstm_transducer_stateless/streaming_decode.py create mode 100755 egs/librispeech/ASR/lstm_transducer_stateless/test_model.py create mode 100644 egs/librispeech/ASR/lstm_transducer_stateless/test_scaling_converter.py create mode 100755 egs/librispeech/ASR/lstm_transducer_stateless/train.py diff --git a/.flake8 b/.flake8 index cbf0d8484..67c6c164d 100644 --- a/.flake8 +++ b/.flake8 @@ -9,7 +9,8 @@ per-file-ignores = egs/*/ASR/pruned_transducer_stateless*/*.py: E501, egs/*/ASR/*/optim.py: E501, egs/*/ASR/*/scaling.py: E501, - egs/librispeech/ASR/conv_emformer_transducer_stateless*/*.py: E501, E203, + egs/librispeech/ASR/lstm_transducer_stateless/*.py: E501, E203 + egs/librispeech/ASR/conv_emformer_transducer_stateless*/*.py: E501, E203 egs/librispeech/ASR/conformer_ctc2/*py: E501, egs/librispeech/ASR/RESULTS.md: E999, diff --git a/egs/librispeech/ASR/README.md b/egs/librispeech/ASR/README.md index cbdee53e6..f590bc837 100644 --- a/egs/librispeech/ASR/README.md +++ b/egs/librispeech/ASR/README.md @@ -25,6 +25,7 @@ The following table lists the differences among them. | `pruned_stateless_emformer_rnnt2` | Emformer(from torchaudio) | Embedding + Conv1d | Using Emformer from torchaudio for streaming ASR| | `conv_emformer_transducer_stateless` | ConvEmformer | Embedding + Conv1d | Using ConvEmformer for streaming ASR + mechanisms in reworked model | | `conv_emformer_transducer_stateless2` | ConvEmformer | Embedding + Conv1d | Using ConvEmformer with simplified memory for streaming ASR + mechanisms in reworked model | +| `lstm_transducer_stateless` | LSTM | Embedding + Conv1d | Using LSTM with mechanisms in reworked model | The decoder in `transducer_stateless` is modified from the paper [Rnn-Transducer with Stateless Prediction Network](https://ieeexplore.ieee.org/document/9054419/). 
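For readers new to the "Embedding + Conv1d" decoder listed in the table, the following is a rough sketch of the stateless prediction network idea from the cited paper. It is a simplified illustration, not the exact icefall `Decoder` implementation:

```python
import torch
import torch.nn as nn


class StatelessDecoderSketch(nn.Module):
    """Predicts from only the last `context_size` tokens, so it keeps no
    recurrent state, unlike an LSTM prediction network."""

    def __init__(self, vocab_size: int, embed_dim: int, context_size: int = 2):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embed_dim)
        # A causal Conv1d over the token axis mixes the embeddings of the
        # previous `context_size` tokens.
        self.conv = nn.Conv1d(embed_dim, embed_dim, kernel_size=context_size)
        self.context_size = context_size

    def forward(self, y: torch.Tensor) -> torch.Tensor:
        # y: (N, U) token ids
        emb = self.embedding(y).permute(0, 2, 1)                  # (N, C, U)
        emb = nn.functional.pad(emb, (self.context_size - 1, 0))  # left pad
        out = self.conv(emb).permute(0, 2, 1)                     # (N, U, C)
        return torch.relu(out)


decoder = StatelessDecoderSketch(vocab_size=500, embed_dim=512)
tokens = torch.randint(0, 500, (4, 10))  # a fake batch of BPE token ids
print(decoder(tokens).shape)             # torch.Size([4, 10, 512])
```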
diff --git a/egs/librispeech/ASR/RESULTS.md b/egs/librispeech/ASR/RESULTS.md index 496445fbf..1c6a350f0 100644 --- a/egs/librispeech/ASR/RESULTS.md +++ b/egs/librispeech/ASR/RESULTS.md @@ -1,5 +1,91 @@ ## Results +#### LibriSpeech BPE training results (Pruned Stateless LSTM RNN-T) + +[lstm_transducer_stateless](./lstm_transducer_stateless) + +It implements LSTM model with mechanisms in reworked model for streaming ASR. + +See for more details. + +#### training on full librispeech + +This model contains 12 encoder layers (LSTM module + Feedforward module). The number of model parameters is 84689496. + +The WERs are: + +| | test-clean | test-other | comment | decoding mode | +|-------------------------------------|------------|------------|----------------------|----------------------| +| greedy search (max sym per frame 1) | 3.81 | 9.73 | --epoch 35 --avg 15 | simulated streaming | +| greedy search (max sym per frame 1) | 3.78 | 9.79 | --epoch 35 --avg 15 | streaming | +| fast beam search | 3.74 | 9.59 | --epoch 35 --avg 15 | simulated streaming | +| fast beam search | 3.73 | 9.61 | --epoch 35 --avg 15 | streaming | +| modified beam search | 3.64 | 9.55 | --epoch 35 --avg 15 | simulated streaming | +| modified beam search | 3.65 | 9.51 | --epoch 35 --avg 15 | streaming | + +Note: `simulated streaming` indicates feeding full utterance during decoding, while `streaming` indicates feeding certain number of frames at each time. + +The training command is: + +```bash +./lstm_transducer_stateless/train.py \ + --world-size 4 \ + --num-epochs 35 \ + --start-epoch 1 \ + --exp-dir lstm_transducer_stateless/exp \ + --full-libri 1 \ + --max-duration 500 \ + --master-port 12321 \ + --num-encoder-layers 12 \ + --rnn-hidden-size 1024 +``` + +The tensorboard log can be found at + + +The simulated streaming decoding command using greedy search, fast beam search, and modified beam search is: +```bash +for decoding_method in greedy_search fast_beam_search modified_beam_search; do + ./lstm_transducer_stateless/decode.py \ + --epoch 35 \ + --avg 15 \ + --exp-dir lstm_transducer_stateless/exp \ + --max-duration 600 \ + --num-encoder-layers 12 \ + --rnn-hidden-size 1024 \ + --decoding-method $decoding_method \ + --use-averaged-model True \ + --beam 4 \ + --max-contexts 4 \ + --max-states 8 \ + --beam-size 4 +done +``` + +The streaming decoding command using greedy search, fast beam search, and modified beam search is: +```bash +for decoding_method in greedy_search fast_beam_search modified_beam_search; do + ./lstm_transducer_stateless/streaming_decode.py \ + --epoch 35 \ + --avg 15 \ + --exp-dir lstm_transducer_stateless/exp \ + --max-duration 600 \ + --num-encoder-layers 12 \ + --rnn-hidden-size 1024 \ + --decoding-method $decoding_method \ + --use-averaged-model True \ + --beam 4 \ + --max-contexts 4 \ + --max-states 8 \ + --beam-size 4 +done +``` + +Pretrained models, training logs, decoding logs, and decoding results +are available at + + + #### LibriSpeech BPE training results (Pruned Stateless Conv-Emformer RNN-T 2) [conv_emformer_transducer_stateless2](./conv_emformer_transducer_stateless2) diff --git a/egs/librispeech/ASR/lstm_transducer_stateless/__init__.py b/egs/librispeech/ASR/lstm_transducer_stateless/__init__.py new file mode 120000 index 000000000..b24e5e357 --- /dev/null +++ b/egs/librispeech/ASR/lstm_transducer_stateless/__init__.py @@ -0,0 +1 @@ +../pruned_transducer_stateless2/__init__.py \ No newline at end of file diff --git a/egs/librispeech/ASR/lstm_transducer_stateless/asr_datamodule.py 
b/egs/librispeech/ASR/lstm_transducer_stateless/asr_datamodule.py new file mode 120000 index 000000000..a074d6085 --- /dev/null +++ b/egs/librispeech/ASR/lstm_transducer_stateless/asr_datamodule.py @@ -0,0 +1 @@ +../pruned_transducer_stateless2/asr_datamodule.py \ No newline at end of file diff --git a/egs/librispeech/ASR/lstm_transducer_stateless/beam_search.py b/egs/librispeech/ASR/lstm_transducer_stateless/beam_search.py new file mode 120000 index 000000000..8554e44cc --- /dev/null +++ b/egs/librispeech/ASR/lstm_transducer_stateless/beam_search.py @@ -0,0 +1 @@ +../pruned_transducer_stateless2/beam_search.py \ No newline at end of file diff --git a/egs/librispeech/ASR/lstm_transducer_stateless/decode.py b/egs/librispeech/ASR/lstm_transducer_stateless/decode.py new file mode 100755 index 000000000..bfc158e0a --- /dev/null +++ b/egs/librispeech/ASR/lstm_transducer_stateless/decode.py @@ -0,0 +1,818 @@ +#!/usr/bin/env python3 +# +# Copyright 2021-2022 Xiaomi Corporation (Author: Fangjun Kuang, +# Zengwei Yao) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Usage: +(1) greedy search +./lstm_transducer_stateless/decode.py \ + --epoch 35 \ + --avg 15 \ + --exp-dir ./lstm_transducer_stateless/exp \ + --max-duration 600 \ + --decoding-method greedy_search + +(2) beam search (not recommended) +./lstm_transducer_stateless/decode.py \ + --epoch 35 \ + --avg 15 \ + --exp-dir ./lstm_transducer_stateless/exp \ + --max-duration 600 \ + --decoding-method beam_search \ + --beam-size 4 + +(3) modified beam search +./lstm_transducer_stateless/decode.py \ + --epoch 35 \ + --avg 15 \ + --exp-dir ./lstm_transducer_stateless/exp \ + --max-duration 600 \ + --decoding-method modified_beam_search \ + --beam-size 4 + +(4) fast beam search (one best) +./lstm_transducer_stateless/decode.py \ + --epoch 35 \ + --avg 15 \ + --exp-dir ./lstm_transducer_stateless/exp \ + --max-duration 600 \ + --decoding-method fast_beam_search \ + --beam 20.0 \ + --max-contexts 8 \ + --max-states 64 + +(5) fast beam search (nbest) +./lstm_transducer_stateless/decode.py \ + --epoch 30 \ + --avg 15 \ + --exp-dir ./pruned_transducer_stateless3/exp \ + --max-duration 600 \ + --decoding-method fast_beam_search_nbest \ + --beam 20.0 \ + --max-contexts 8 \ + --max-states 64 \ + --num-paths 200 \ + --nbest-scale 0.5 + +(6) fast beam search (nbest oracle WER) +./lstm_transducer_stateless/decode.py \ + --epoch 35 \ + --avg 15 \ + --exp-dir ./lstm_transducer_stateless/exp \ + --max-duration 600 \ + --decoding-method fast_beam_search_nbest_oracle \ + --beam 20.0 \ + --max-contexts 8 \ + --max-states 64 \ + --num-paths 200 \ + --nbest-scale 0.5 + +(7) fast beam search (with LG) +./lstm_transducer_stateless/decode.py \ + --epoch 35 \ + --avg 15 \ + --exp-dir ./lstm_transducer_stateless/exp \ + --max-duration 600 \ + --decoding-method fast_beam_search_nbest_LG \ + --beam 20.0 \ + --max-contexts 8 \ + --max-states 64 +""" + + +import argparse +import logging +import math 
+from collections import defaultdict +from pathlib import Path +from typing import Dict, List, Optional, Tuple + +import k2 +import sentencepiece as spm +import torch +import torch.nn as nn +from asr_datamodule import LibriSpeechAsrDataModule +from beam_search import ( + beam_search, + fast_beam_search_nbest, + fast_beam_search_nbest_LG, + fast_beam_search_nbest_oracle, + fast_beam_search_one_best, + greedy_search, + greedy_search_batch, + modified_beam_search, +) +from train import add_model_arguments, get_params, get_transducer_model + +from icefall.checkpoint import ( + average_checkpoints, + average_checkpoints_with_averaged_model, + find_checkpoints, + load_checkpoint, +) +from icefall.lexicon import Lexicon +from icefall.utils import ( + AttributeDict, + setup_logger, + store_transcripts, + str2bool, + write_error_stats, +) + +LOG_EPS = math.log(1e-10) + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--epoch", + type=int, + default=30, + help="""It specifies the checkpoint to use for decoding. + Note: Epoch counts from 1. + You can specify --avg to use more checkpoints for model averaging.""", + ) + + parser.add_argument( + "--iter", + type=int, + default=0, + help="""If positive, --epoch is ignored and it + will use the checkpoint exp_dir/checkpoint-iter.pt. + You can specify --avg to use more checkpoints for model averaging. + """, + ) + + parser.add_argument( + "--avg", + type=int, + default=15, + help="Number of checkpoints to average. Automatically select " + "consecutive checkpoints before the checkpoint specified by " + "'--epoch' and '--iter'", + ) + + parser.add_argument( + "--use-averaged-model", + type=str2bool, + default=True, + help="Whether to load averaged model. Currently it only supports " + "using --epoch. If True, it would decode with the averaged model " + "over the epoch range from `epoch-avg` (excluded) to `epoch`." + "Actually only the models with epoch number of `epoch-avg` and " + "`epoch` are loaded for averaging. ", + ) + + parser.add_argument( + "--exp-dir", + type=str, + default="lstm_transducer_stateless/exp", + help="The experiment dir", + ) + + parser.add_argument( + "--bpe-model", + type=str, + default="data/lang_bpe_500/bpe.model", + help="Path to the BPE model", + ) + + parser.add_argument( + "--lang-dir", + type=Path, + default="data/lang_bpe_500", + help="The lang dir containing word table and LG graph", + ) + + parser.add_argument( + "--decoding-method", + type=str, + default="greedy_search", + help="""Possible values are: + - greedy_search + - beam_search + - modified_beam_search + - fast_beam_search + - fast_beam_search_nbest + - fast_beam_search_nbest_oracle + - fast_beam_search_nbest_LG + If you use fast_beam_search_nbest_LG, you have to specify + `--lang-dir`, which should contain `LG.pt`. + """, + ) + + parser.add_argument( + "--beam-size", + type=int, + default=4, + help="""An integer indicating how many candidates we will keep for each + frame. Used only when --decoding-method is beam_search or + modified_beam_search.""", + ) + + parser.add_argument( + "--beam", + type=float, + default=20.0, + help="""A floating point value to calculate the cutoff score during beam + search (i.e., `cutoff = max-score - beam`), which is the same as the + `beam` in Kaldi. 
+ Used only when --decoding-method is fast_beam_search, + fast_beam_search_nbest, fast_beam_search_nbest_LG, + and fast_beam_search_nbest_oracle + """, + ) + + parser.add_argument( + "--ngram-lm-scale", + type=float, + default=0.01, + help=""" + Used only when --decoding_method is fast_beam_search_nbest_LG. + It specifies the scale for n-gram LM scores. + """, + ) + + parser.add_argument( + "--max-contexts", + type=int, + default=8, + help="""Used only when --decoding-method is + fast_beam_search, fast_beam_search_nbest, fast_beam_search_nbest_LG, + and fast_beam_search_nbest_oracle""", + ) + + parser.add_argument( + "--max-states", + type=int, + default=64, + help="""Used only when --decoding-method is + fast_beam_search, fast_beam_search_nbest, fast_beam_search_nbest_LG, + and fast_beam_search_nbest_oracle""", + ) + + parser.add_argument( + "--context-size", + type=int, + default=2, + help="The context size in the decoder. 1 means bigram; " + "2 means tri-gram", + ) + + parser.add_argument( + "--max-sym-per-frame", + type=int, + default=1, + help="""Maximum number of symbols per frame. + Used only when --decoding_method is greedy_search""", + ) + + parser.add_argument( + "--num-paths", + type=int, + default=200, + help="""Number of paths for nbest decoding. + Used only when the decoding method is fast_beam_search_nbest, + fast_beam_search_nbest_LG, and fast_beam_search_nbest_oracle""", + ) + + parser.add_argument( + "--nbest-scale", + type=float, + default=0.5, + help="""Scale applied to lattice scores when computing nbest paths. + Used only when the decoding method is fast_beam_search_nbest, + fast_beam_search_nbest_LG, and fast_beam_search_nbest_oracle""", + ) + + add_model_arguments(parser) + + return parser + + +def decode_one_batch( + params: AttributeDict, + model: nn.Module, + sp: spm.SentencePieceProcessor, + batch: dict, + word_table: Optional[k2.SymbolTable] = None, + decoding_graph: Optional[k2.Fsa] = None, +) -> Dict[str, List[List[str]]]: + """Decode one batch and return the result in a dict. The dict has the + following format: + + - key: It indicates the setting used for decoding. For example, + if greedy_search is used, it would be "greedy_search" + If beam search with a beam size of 7 is used, it would be + "beam_7" + - value: It contains the decoding result. `len(value)` equals to + batch size. `value[i]` is the decoding result for the i-th + utterance in the given batch. + Args: + params: + It's the return value of :func:`get_params`. + model: + The neural model. + sp: + The BPE model. + batch: + It is the return value from iterating + `lhotse.dataset.K2SpeechRecognitionDataset`. See its documentation + for the format of the `batch`. + word_table: + The word symbol table. + decoding_graph: + The decoding graph. Can be either a `k2.trivial_graph` or LG, Used + only when --decoding_method is fast_beam_search, fast_beam_search_nbest, + fast_beam_search_nbest_oracle, and fast_beam_search_nbest_LG. + Returns: + Return the decoding result. See above description for the format of + the returned dict. 
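+        For example (an illustrative sketch, not real output), with greedy
+        search and a batch of two utterances the returned dict might look like
+        {"greedy_search": [["HELLO", "WORLD"], ["GOOD", "MORNING"]]}.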
+ """ + device = next(model.parameters()).device + feature = batch["inputs"] + assert feature.ndim == 3 + + feature = feature.to(device) + # at entry, feature is (N, T, C) + + supervisions = batch["supervisions"] + feature_lens = supervisions["num_frames"].to(device) + + # tail padding here to alleviate the tail deletion problem + num_tail_padded_frames = 35 + feature = torch.nn.functional.pad( + feature, + (0, 0, 0, num_tail_padded_frames), + mode="constant", + value=LOG_EPS, + ) + feature_lens += num_tail_padded_frames + + encoder_out, encoder_out_lens, _ = model.encoder( + x=feature, x_lens=feature_lens + ) + + hyps = [] + + if params.decoding_method == "fast_beam_search": + hyp_tokens = fast_beam_search_one_best( + model=model, + decoding_graph=decoding_graph, + encoder_out=encoder_out, + encoder_out_lens=encoder_out_lens, + beam=params.beam, + max_contexts=params.max_contexts, + max_states=params.max_states, + ) + for hyp in sp.decode(hyp_tokens): + hyps.append(hyp.split()) + elif params.decoding_method == "fast_beam_search_nbest_LG": + hyp_tokens = fast_beam_search_nbest_LG( + model=model, + decoding_graph=decoding_graph, + encoder_out=encoder_out, + encoder_out_lens=encoder_out_lens, + beam=params.beam, + max_contexts=params.max_contexts, + max_states=params.max_states, + num_paths=params.num_paths, + nbest_scale=params.nbest_scale, + ) + for hyp in hyp_tokens: + hyps.append([word_table[i] for i in hyp]) + elif params.decoding_method == "fast_beam_search_nbest": + hyp_tokens = fast_beam_search_nbest( + model=model, + decoding_graph=decoding_graph, + encoder_out=encoder_out, + encoder_out_lens=encoder_out_lens, + beam=params.beam, + max_contexts=params.max_contexts, + max_states=params.max_states, + num_paths=params.num_paths, + nbest_scale=params.nbest_scale, + ) + for hyp in sp.decode(hyp_tokens): + hyps.append(hyp.split()) + elif params.decoding_method == "fast_beam_search_nbest_oracle": + hyp_tokens = fast_beam_search_nbest_oracle( + model=model, + decoding_graph=decoding_graph, + encoder_out=encoder_out, + encoder_out_lens=encoder_out_lens, + beam=params.beam, + max_contexts=params.max_contexts, + max_states=params.max_states, + num_paths=params.num_paths, + ref_texts=sp.encode(supervisions["text"]), + nbest_scale=params.nbest_scale, + ) + for hyp in sp.decode(hyp_tokens): + hyps.append(hyp.split()) + elif ( + params.decoding_method == "greedy_search" + and params.max_sym_per_frame == 1 + ): + hyp_tokens = greedy_search_batch( + model=model, + encoder_out=encoder_out, + encoder_out_lens=encoder_out_lens, + ) + for hyp in sp.decode(hyp_tokens): + hyps.append(hyp.split()) + elif params.decoding_method == "modified_beam_search": + hyp_tokens = modified_beam_search( + model=model, + encoder_out=encoder_out, + encoder_out_lens=encoder_out_lens, + beam=params.beam_size, + ) + for hyp in sp.decode(hyp_tokens): + hyps.append(hyp.split()) + else: + batch_size = encoder_out.size(0) + + for i in range(batch_size): + # fmt: off + encoder_out_i = encoder_out[i:i + 1, :encoder_out_lens[i]] + # fmt: on + if params.decoding_method == "greedy_search": + hyp = greedy_search( + model=model, + encoder_out=encoder_out_i, + max_sym_per_frame=params.max_sym_per_frame, + ) + elif params.decoding_method == "beam_search": + hyp = beam_search( + model=model, + encoder_out=encoder_out_i, + beam=params.beam_size, + ) + else: + raise ValueError( + f"Unsupported decoding method: {params.decoding_method}" + ) + hyps.append(sp.decode(hyp).split()) + + if params.decoding_method == "greedy_search": + return 
{"greedy_search": hyps} + elif "fast_beam_search" in params.decoding_method: + key = f"beam_{params.beam}_" + key += f"max_contexts_{params.max_contexts}_" + key += f"max_states_{params.max_states}" + if "nbest" in params.decoding_method: + key += f"_num_paths_{params.num_paths}_" + key += f"nbest_scale_{params.nbest_scale}" + if "LG" in params.decoding_method: + key += f"_ngram_lm_scale_{params.ngram_lm_scale}" + + return {key: hyps} + else: + return {f"beam_size_{params.beam_size}": hyps} + + +def decode_dataset( + dl: torch.utils.data.DataLoader, + params: AttributeDict, + model: nn.Module, + sp: spm.SentencePieceProcessor, + word_table: Optional[k2.SymbolTable] = None, + decoding_graph: Optional[k2.Fsa] = None, +) -> Dict[str, List[Tuple[List[str], List[str]]]]: + """Decode dataset. + + Args: + dl: + PyTorch's dataloader containing the dataset to decode. + params: + It is returned by :func:`get_params`. + model: + The neural model. + sp: + The BPE model. + word_table: + The word symbol table. + decoding_graph: + The decoding graph. Can be either a `k2.trivial_graph` or LG, Used + only when --decoding_method is fast_beam_search, fast_beam_search_nbest, + fast_beam_search_nbest_oracle, and fast_beam_search_nbest_LG. + Returns: + Return a dict, whose key may be "greedy_search" if greedy search + is used, or it may be "beam_7" if beam size of 7 is used. + Its value is a list of tuples. Each tuple contains two elements: + The first is the reference transcript, and the second is the + predicted result. + """ + num_cuts = 0 + + try: + num_batches = len(dl) + except TypeError: + num_batches = "?" + + if params.decoding_method == "greedy_search": + log_interval = 50 + else: + log_interval = 20 + + results = defaultdict(list) + for batch_idx, batch in enumerate(dl): + texts = batch["supervisions"]["text"] + cut_ids = [cut.id for cut in batch["supervisions"]["cut"]] + + hyps_dict = decode_one_batch( + params=params, + model=model, + sp=sp, + decoding_graph=decoding_graph, + word_table=word_table, + batch=batch, + ) + + for name, hyps in hyps_dict.items(): + this_batch = [] + assert len(hyps) == len(texts) + for cut_id, hyp_words, ref_text in zip(cut_ids, hyps, texts): + ref_words = ref_text.split() + this_batch.append((cut_id, ref_words, hyp_words)) + + results[name].extend(this_batch) + + num_cuts += len(texts) + + if batch_idx % log_interval == 0: + batch_str = f"{batch_idx}/{num_batches}" + + logging.info( + f"batch {batch_str}, cuts processed until now is {num_cuts}" + ) + return results + + +def save_results( + params: AttributeDict, + test_set_name: str, + results_dict: Dict[str, List[Tuple[List[int], List[int]]]], +): + test_set_wers = dict() + for key, results in results_dict.items(): + recog_path = ( + params.res_dir / f"recogs-{test_set_name}-{key}-{params.suffix}.txt" + ) + results = sorted(results) + store_transcripts(filename=recog_path, texts=results) + logging.info(f"The transcripts are stored in {recog_path}") + + # The following prints out WERs, per-word error statistics and aligned + # ref/hyp pairs. 
+ errs_filename = ( + params.res_dir / f"errs-{test_set_name}-{key}-{params.suffix}.txt" + ) + with open(errs_filename, "w") as f: + wer = write_error_stats( + f, f"{test_set_name}-{key}", results, enable_log=True + ) + test_set_wers[key] = wer + + logging.info("Wrote detailed error stats to {}".format(errs_filename)) + + test_set_wers = sorted(test_set_wers.items(), key=lambda x: x[1]) + errs_info = ( + params.res_dir + / f"wer-summary-{test_set_name}-{key}-{params.suffix}.txt" + ) + with open(errs_info, "w") as f: + print("settings\tWER", file=f) + for key, val in test_set_wers: + print("{}\t{}".format(key, val), file=f) + + s = "\nFor {}, WER of different settings are:\n".format(test_set_name) + note = "\tbest for {}".format(test_set_name) + for key, val in test_set_wers: + s += "{}\t{}{}\n".format(key, val, note) + note = "" + logging.info(s) + + +@torch.no_grad() +def main(): + parser = get_parser() + LibriSpeechAsrDataModule.add_arguments(parser) + args = parser.parse_args() + args.exp_dir = Path(args.exp_dir) + + params = get_params() + params.update(vars(args)) + + assert params.decoding_method in ( + "greedy_search", + "beam_search", + "fast_beam_search", + "fast_beam_search_nbest", + "fast_beam_search_nbest_LG", + "fast_beam_search_nbest_oracle", + "modified_beam_search", + ) + params.res_dir = params.exp_dir / params.decoding_method + + if params.iter > 0: + params.suffix = f"iter-{params.iter}-avg-{params.avg}" + else: + params.suffix = f"epoch-{params.epoch}-avg-{params.avg}" + + if "fast_beam_search" in params.decoding_method: + params.suffix += f"-beam-{params.beam}" + params.suffix += f"-max-contexts-{params.max_contexts}" + params.suffix += f"-max-states-{params.max_states}" + if "nbest" in params.decoding_method: + params.suffix += f"-nbest-scale-{params.nbest_scale}" + params.suffix += f"-num-paths-{params.num_paths}" + if "LG" in params.decoding_method: + params.suffix += f"-ngram-lm-scale-{params.ngram_lm_scale}" + elif "beam_search" in params.decoding_method: + params.suffix += ( + f"-{params.decoding_method}-beam-size-{params.beam_size}" + ) + else: + params.suffix += f"-context-{params.context_size}" + params.suffix += f"-max-sym-per-frame-{params.max_sym_per_frame}" + + if params.use_averaged_model: + params.suffix += "-use-averaged-model" + + setup_logger(f"{params.res_dir}/log-decode-{params.suffix}") + logging.info("Decoding started") + + device = torch.device("cpu") + if torch.cuda.is_available(): + device = torch.device("cuda", 0) + + logging.info(f"Device: {device}") + + sp = spm.SentencePieceProcessor() + sp.load(params.bpe_model) + + # and are defined in local/train_bpe_model.py + params.blank_id = sp.piece_to_id("") + params.unk_id = sp.piece_to_id("") + params.vocab_size = sp.get_piece_size() + + logging.info(params) + + logging.info("About to create model") + model = get_transducer_model(params) + + if not params.use_averaged_model: + if params.iter > 0: + filenames = find_checkpoints( + params.exp_dir, iteration=-params.iter + )[: params.avg] + if len(filenames) == 0: + raise ValueError( + f"No checkpoints found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + elif len(filenames) < params.avg: + raise ValueError( + f"Not enough checkpoints ({len(filenames)}) found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + logging.info(f"averaging {filenames}") + model.to(device) + model.load_state_dict(average_checkpoints(filenames, device=device)) + elif params.avg == 1: + load_checkpoint(f"{params.exp_dir}/epoch-{params.epoch}.pt", model) + 
else: + start = params.epoch - params.avg + 1 + filenames = [] + for i in range(start, params.epoch + 1): + if i >= 1: + filenames.append(f"{params.exp_dir}/epoch-{i}.pt") + logging.info(f"averaging {filenames}") + model.to(device) + model.load_state_dict(average_checkpoints(filenames, device=device)) + else: + if params.iter > 0: + filenames = find_checkpoints( + params.exp_dir, iteration=-params.iter + )[: params.avg + 1] + if len(filenames) == 0: + raise ValueError( + f"No checkpoints found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + elif len(filenames) < params.avg + 1: + raise ValueError( + f"Not enough checkpoints ({len(filenames)}) found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + filename_start = filenames[-1] + filename_end = filenames[0] + logging.info( + "Calculating the averaged model over iteration checkpoints" + f" from {filename_start} (excluded) to {filename_end}" + ) + model.to(device) + model.load_state_dict( + average_checkpoints_with_averaged_model( + filename_start=filename_start, + filename_end=filename_end, + device=device, + ) + ) + else: + assert params.avg > 0, params.avg + start = params.epoch - params.avg + assert start >= 1, start + filename_start = f"{params.exp_dir}/epoch-{start}.pt" + filename_end = f"{params.exp_dir}/epoch-{params.epoch}.pt" + logging.info( + f"Calculating the averaged model over epoch range from " + f"{start} (excluded) to {params.epoch}" + ) + model.to(device) + model.load_state_dict( + average_checkpoints_with_averaged_model( + filename_start=filename_start, + filename_end=filename_end, + device=device, + ) + ) + + model.to(device) + model.eval() + + if "fast_beam_search" in params.decoding_method: + if params.decoding_method == "fast_beam_search_nbest_LG": + lexicon = Lexicon(params.lang_dir) + word_table = lexicon.word_table + lg_filename = params.lang_dir / "LG.pt" + logging.info(f"Loading {lg_filename}") + decoding_graph = k2.Fsa.from_dict( + torch.load(lg_filename, map_location=device) + ) + decoding_graph.scores *= params.ngram_lm_scale + else: + word_table = None + decoding_graph = k2.trivial_graph( + params.vocab_size - 1, device=device + ) + else: + decoding_graph = None + word_table = None + + num_param = sum([p.numel() for p in model.parameters()]) + logging.info(f"Number of model parameters: {num_param}") + + # we need cut ids to display recognition results. 
+ args.return_cuts = True + librispeech = LibriSpeechAsrDataModule(args) + + test_clean_cuts = librispeech.test_clean_cuts() + test_other_cuts = librispeech.test_other_cuts() + + test_clean_dl = librispeech.test_dataloaders(test_clean_cuts) + test_other_dl = librispeech.test_dataloaders(test_other_cuts) + + test_sets = ["test-clean", "test-other"] + test_dl = [test_clean_dl, test_other_dl] + + for test_set, test_dl in zip(test_sets, test_dl): + results_dict = decode_dataset( + dl=test_dl, + params=params, + model=model, + sp=sp, + word_table=word_table, + decoding_graph=decoding_graph, + ) + + save_results( + params=params, + test_set_name=test_set, + results_dict=results_dict, + ) + + logging.info("Done!") + + +if __name__ == "__main__": + main() diff --git a/egs/librispeech/ASR/lstm_transducer_stateless/decoder.py b/egs/librispeech/ASR/lstm_transducer_stateless/decoder.py new file mode 120000 index 000000000..0793c5709 --- /dev/null +++ b/egs/librispeech/ASR/lstm_transducer_stateless/decoder.py @@ -0,0 +1 @@ +../pruned_transducer_stateless2/decoder.py \ No newline at end of file diff --git a/egs/librispeech/ASR/lstm_transducer_stateless/encoder_interface.py b/egs/librispeech/ASR/lstm_transducer_stateless/encoder_interface.py new file mode 120000 index 000000000..aa5d0217a --- /dev/null +++ b/egs/librispeech/ASR/lstm_transducer_stateless/encoder_interface.py @@ -0,0 +1 @@ +../transducer_stateless/encoder_interface.py \ No newline at end of file diff --git a/egs/librispeech/ASR/lstm_transducer_stateless/export.py b/egs/librispeech/ASR/lstm_transducer_stateless/export.py new file mode 100755 index 000000000..13dac6009 --- /dev/null +++ b/egs/librispeech/ASR/lstm_transducer_stateless/export.py @@ -0,0 +1,388 @@ +#!/usr/bin/env python3 +# +# Copyright 2021-2022 Xiaomi Corporation (Author: Fangjun Kuang, Zengwei Yao) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script converts several saved checkpoints +# to a single one using model averaging. +""" + +Usage: + +(1) Export to torchscript model using torch.jit.trace() + +./lstm_transducer_stateless/export.py \ + --exp-dir ./lstm_transducer_stateless/exp \ + --bpe-model data/lang_bpe_500/bpe.model \ + --epoch 35 \ + --avg 10 \ + --jit-trace 1 + +It will generate 3 files: `encoder_jit_trace.pt`, +`decoder_jit_trace.pt`, and `joiner_jit_trace.pt`. + +(2) Export `model.state_dict()` + +./lstm_transducer_stateless/export.py \ + --exp-dir ./lstm_transducer_stateless/exp \ + --bpe-model data/lang_bpe_500/bpe.model \ + --epoch 35 \ + --avg 10 + +It will generate a file `pretrained.pt` in the given `exp_dir`. You can later +load it by `icefall.checkpoint.load_checkpoint()`. 
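+
+For example (an illustrative sketch; it assumes `model` has been built with
+`get_transducer_model()` from `lstm_transducer_stateless/train.py`):
+
+    from icefall.checkpoint import load_checkpoint
+    load_checkpoint("lstm_transducer_stateless/exp/pretrained.pt", model)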
+ +To use the generated file with `lstm_transducer_stateless/decode.py`, +you can do: + + cd /path/to/exp_dir + ln -s pretrained.pt epoch-9999.pt + + cd /path/to/egs/librispeech/ASR + ./lstm_transducer_stateless/decode.py \ + --exp-dir ./lstm_transducer_stateless/exp \ + --epoch 9999 \ + --avg 1 \ + --max-duration 600 \ + --decoding-method greedy_search \ + --bpe-model data/lang_bpe_500/bpe.model + +Check ./pretrained.py for its usage. + +Note: If you don't want to train a model from scratch, we have +provided one for you. You can get it at + +https://huggingface.co/Zengwei/icefall-asr-librispeech-lstm-transducer-stateless-2022-08-18 + +with the following commands: + + sudo apt-get install git-lfs + git lfs install + git clone https://huggingface.co/Zengwei/icefall-asr-librispeech-lstm-transducer-stateless-2022-08-18 + # You will find the pre-trained model in icefall-asr-librispeech-lstm-transducer-stateless-2022-08-18/exp +""" + +import argparse +import logging +from pathlib import Path + +import sentencepiece as spm +import torch +import torch.nn as nn +from scaling_converter import convert_scaled_to_non_scaled +from train import add_model_arguments, get_params, get_transducer_model + +from icefall.checkpoint import ( + average_checkpoints, + average_checkpoints_with_averaged_model, + find_checkpoints, + load_checkpoint, +) +from icefall.utils import str2bool + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--epoch", + type=int, + default=28, + help="""It specifies the checkpoint to use for averaging. + Note: Epoch counts from 0. + You can specify --avg to use more checkpoints for model averaging.""", + ) + + parser.add_argument( + "--iter", + type=int, + default=0, + help="""If positive, --epoch is ignored and it + will use the checkpoint exp_dir/checkpoint-iter.pt. + You can specify --avg to use more checkpoints for model averaging. + """, + ) + + parser.add_argument( + "--avg", + type=int, + default=15, + help="Number of checkpoints to average. Automatically select " + "consecutive checkpoints before the checkpoint specified by " + "'--epoch' and '--iter'", + ) + + parser.add_argument( + "--use-averaged-model", + type=str2bool, + default=True, + help="Whether to load averaged model. Currently it only supports " + "using --epoch. If True, it would decode with the averaged model " + "over the epoch range from `epoch-avg` (excluded) to `epoch`." + "Actually only the models with epoch number of `epoch-avg` and " + "`epoch` are loaded for averaging. ", + ) + + parser.add_argument( + "--exp-dir", + type=str, + default="pruned_transducer_stateless3/exp", + help="""It specifies the directory where all training related + files, e.g., checkpoints, log, etc, are saved + """, + ) + + parser.add_argument( + "--bpe-model", + type=str, + default="data/lang_bpe_500/bpe.model", + help="Path to the BPE model", + ) + + parser.add_argument( + "--jit-trace", + type=str2bool, + default=False, + help="""True to save a model after applying torch.jit.trace. + It will generate 3 files: + - encoder_jit_trace.pt + - decoder_jit_trace.pt + - joiner_jit_trace.pt + + Check ./jit_pretrained.py for how to use them. + """, + ) + + parser.add_argument( + "--context-size", + type=int, + default=2, + help="The context size in the decoder. 
1 means bigram; " + "2 means tri-gram", + ) + + add_model_arguments(parser) + + return parser + + +def export_encoder_model_jit_trace( + encoder_model: nn.Module, + encoder_filename: str, +) -> None: + """Export the given encoder model with torch.jit.trace() + + Note: The warmup argument is fixed to 1. + + Args: + encoder_model: + The input encoder model + encoder_filename: + The filename to save the exported model. + """ + x = torch.zeros(1, 100, 80, dtype=torch.float32) + x_lens = torch.tensor([100], dtype=torch.int64) + states = encoder_model.get_init_states() + + traced_model = torch.jit.trace(encoder_model, (x, x_lens, states)) + traced_model.save(encoder_filename) + logging.info(f"Saved to {encoder_filename}") + + +def export_decoder_model_jit_trace( + decoder_model: nn.Module, + decoder_filename: str, +) -> None: + """Export the given decoder model with torch.jit.trace() + + Note: The argument need_pad is fixed to False. + + Args: + decoder_model: + The input decoder model + decoder_filename: + The filename to save the exported model. + """ + y = torch.zeros(10, decoder_model.context_size, dtype=torch.int64) + need_pad = torch.tensor([False]) + + traced_model = torch.jit.trace(decoder_model, (y, need_pad)) + traced_model.save(decoder_filename) + logging.info(f"Saved to {decoder_filename}") + + +def export_joiner_model_jit_trace( + joiner_model: nn.Module, + joiner_filename: str, +) -> None: + """Export the given joiner model with torch.jit.trace() + + Note: The argument project_input is fixed to True. A user should not + project the encoder_out/decoder_out by himself/herself. The exported joiner + will do that for the user. + + Args: + joiner_model: + The input joiner model + joiner_filename: + The filename to save the exported model. + + """ + encoder_out_dim = joiner_model.encoder_proj.weight.shape[1] + decoder_out_dim = joiner_model.decoder_proj.weight.shape[1] + encoder_out = torch.rand(1, encoder_out_dim, dtype=torch.float32) + decoder_out = torch.rand(1, decoder_out_dim, dtype=torch.float32) + + traced_model = torch.jit.trace(joiner_model, (encoder_out, decoder_out)) + traced_model.save(joiner_filename) + logging.info(f"Saved to {joiner_filename}") + + +@torch.no_grad() +def main(): + args = get_parser().parse_args() + args.exp_dir = Path(args.exp_dir) + + params = get_params() + params.update(vars(args)) + + device = torch.device("cpu") + if torch.cuda.is_available(): + device = torch.device("cuda", 0) + + logging.info(f"device: {device}") + + sp = spm.SentencePieceProcessor() + sp.load(params.bpe_model) + + # is defined in local/train_bpe_model.py + params.blank_id = sp.piece_to_id("") + params.vocab_size = sp.get_piece_size() + + logging.info(params) + + logging.info("About to create model") + model = get_transducer_model(params) + + if not params.use_averaged_model: + if params.iter > 0: + filenames = find_checkpoints( + params.exp_dir, iteration=-params.iter + )[: params.avg] + if len(filenames) == 0: + raise ValueError( + f"No checkpoints found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + elif len(filenames) < params.avg: + raise ValueError( + f"Not enough checkpoints ({len(filenames)}) found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + logging.info(f"averaging {filenames}") + model.to(device) + model.load_state_dict(average_checkpoints(filenames, device=device)) + elif params.avg == 1: + load_checkpoint(f"{params.exp_dir}/epoch-{params.epoch}.pt", model) + else: + start = params.epoch - params.avg + 1 + filenames = [] + for i in range(start, 
params.epoch + 1): + if i >= 1: + filenames.append(f"{params.exp_dir}/epoch-{i}.pt") + logging.info(f"averaging {filenames}") + model.to(device) + model.load_state_dict(average_checkpoints(filenames, device=device)) + else: + if params.iter > 0: + filenames = find_checkpoints( + params.exp_dir, iteration=-params.iter + )[: params.avg + 1] + if len(filenames) == 0: + raise ValueError( + f"No checkpoints found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + elif len(filenames) < params.avg + 1: + raise ValueError( + f"Not enough checkpoints ({len(filenames)}) found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + filename_start = filenames[-1] + filename_end = filenames[0] + logging.info( + "Calculating the averaged model over iteration checkpoints" + f" from {filename_start} (excluded) to {filename_end}" + ) + model.to(device) + model.load_state_dict( + average_checkpoints_with_averaged_model( + filename_start=filename_start, + filename_end=filename_end, + device=device, + ) + ) + else: + assert params.avg > 0, params.avg + start = params.epoch - params.avg + assert start >= 1, start + filename_start = f"{params.exp_dir}/epoch-{start}.pt" + filename_end = f"{params.exp_dir}/epoch-{params.epoch}.pt" + logging.info( + f"Calculating the averaged model over epoch range from " + f"{start} (excluded) to {params.epoch}" + ) + model.to(device) + model.load_state_dict( + average_checkpoints_with_averaged_model( + filename_start=filename_start, + filename_end=filename_end, + device=device, + ) + ) + + model.to("cpu") + model.eval() + + if params.jit_trace is True: + convert_scaled_to_non_scaled(model, inplace=True) + logging.info("Using torch.jit.trace()") + encoder_filename = params.exp_dir / "encoder_jit_trace.pt" + export_encoder_model_jit_trace(model.encoder, encoder_filename) + + decoder_filename = params.exp_dir / "decoder_jit_trace.pt" + export_decoder_model_jit_trace(model.decoder, decoder_filename) + + joiner_filename = params.exp_dir / "joiner_jit_trace.pt" + export_joiner_model_jit_trace(model.joiner, joiner_filename) + else: + logging.info("Not using torchscript") + # Save it using a format so that it can be loaded + # by :func:`load_checkpoint` + filename = params.exp_dir / "pretrained.pt" + torch.save({"model": model.state_dict()}, str(filename)) + logging.info(f"Saved to {filename}") + + +if __name__ == "__main__": + formatter = ( + "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s" + ) + + logging.basicConfig(format=formatter, level=logging.INFO) + main() diff --git a/egs/librispeech/ASR/lstm_transducer_stateless/jit_pretrained.py b/egs/librispeech/ASR/lstm_transducer_stateless/jit_pretrained.py new file mode 100755 index 000000000..594c33e4f --- /dev/null +++ b/egs/librispeech/ASR/lstm_transducer_stateless/jit_pretrained.py @@ -0,0 +1,322 @@ +#!/usr/bin/env python3 +# Copyright 2022 Xiaomi Corp. (authors: Fangjun Kuang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +This script loads torchscript models, either exported by `torch.jit.trace()` +or by `torch.jit.script()`, and uses them to decode waves. +You can use the following command to get the exported models: + +./lstm_transducer_stateless/export.py \ + --exp-dir ./lstm_transducer_stateless/exp \ + --bpe-model data/lang_bpe_500/bpe.model \ + --epoch 20 \ + --avg 10 \ + --jit-trace 1 + +Usage of this script: + +./lstm_transducer_stateless/jit_pretrained.py \ + --encoder-model-filename ./lstm_transducer_stateless/exp/encoder_jit_trace.pt \ + --decoder-model-filename ./lstm_transducer_stateless/exp/decoder_jit_trace.pt \ + --joiner-model-filename ./lstm_transducer_stateless/exp/joiner_jit_trace.pt \ + --bpe-model ./data/lang_bpe_500/bpe.model \ + /path/to/foo.wav \ + /path/to/bar.wav +""" + +import argparse +import logging +import math +from typing import List + +import kaldifeat +import sentencepiece as spm +import torch +import torchaudio +from torch.nn.utils.rnn import pad_sequence + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--encoder-model-filename", + type=str, + required=True, + help="Path to the encoder torchscript model. ", + ) + + parser.add_argument( + "--decoder-model-filename", + type=str, + required=True, + help="Path to the decoder torchscript model. ", + ) + + parser.add_argument( + "--joiner-model-filename", + type=str, + required=True, + help="Path to the joiner torchscript model. ", + ) + + parser.add_argument( + "--bpe-model", + type=str, + help="""Path to bpe.model.""", + ) + + parser.add_argument( + "sound_files", + type=str, + nargs="+", + help="The input sound file(s) to transcribe. " + "Supported formats are those supported by torchaudio.load(). " + "For example, wav and flac are supported. " + "The sample rate has to be 16kHz.", + ) + + parser.add_argument( + "--sample-rate", + type=int, + default=16000, + help="The sample rate of the input sound file", + ) + + parser.add_argument( + "--context-size", + type=int, + default=2, + help="Context size of the decoder model", + ) + + return parser + + +def read_sound_files( + filenames: List[str], expected_sample_rate: float +) -> List[torch.Tensor]: + """Read a list of sound files into a list 1-D float32 torch tensors. + Args: + filenames: + A list of sound filenames. + expected_sample_rate: + The expected sample rate of the sound files. + Returns: + Return a list of 1-D float32 torch tensors. + """ + ans = [] + for f in filenames: + wave, sample_rate = torchaudio.load(f) + assert sample_rate == expected_sample_rate, ( + f"expected sample rate: {expected_sample_rate}. " + f"Given: {sample_rate}" + ) + # We use only the first channel + ans.append(wave[0]) + return ans + + +def greedy_search( + decoder: torch.jit.ScriptModule, + joiner: torch.jit.ScriptModule, + encoder_out: torch.Tensor, + encoder_out_lens: torch.Tensor, + context_size: int, +) -> List[List[int]]: + """Greedy search in batch mode. It hardcodes --max-sym-per-frame=1. + Args: + decoder: + The decoder model. + joiner: + The joiner model. + encoder_out: + A 3-D tensor of shape (N, T, C) + encoder_out_lens: + A 1-D tensor of shape (N,). + context_size: + The context size of the decoder model. + Returns: + Return the decoded results for each utterance. 
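+        For example (purely illustrative values), a batch of two waves could
+        yield token-id lists such as [[25, 8, 63], [7, 2]], which are later
+        converted to words with sp.decode().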
+ """ + assert encoder_out.ndim == 3 + assert encoder_out.size(0) >= 1, encoder_out.size(0) + + packed_encoder_out = torch.nn.utils.rnn.pack_padded_sequence( + input=encoder_out, + lengths=encoder_out_lens.cpu(), + batch_first=True, + enforce_sorted=False, + ) + + device = encoder_out.device + blank_id = 0 # hard-code to 0 + + batch_size_list = packed_encoder_out.batch_sizes.tolist() + N = encoder_out.size(0) + + assert torch.all(encoder_out_lens > 0), encoder_out_lens + assert N == batch_size_list[0], (N, batch_size_list) + + hyps = [[blank_id] * context_size for _ in range(N)] + + decoder_input = torch.tensor( + hyps, + device=device, + dtype=torch.int64, + ) # (N, context_size) + + decoder_out = decoder( + decoder_input, + need_pad=torch.tensor([False]), + ).squeeze(1) + + offset = 0 + for batch_size in batch_size_list: + start = offset + end = offset + batch_size + current_encoder_out = packed_encoder_out.data[start:end] + current_encoder_out = current_encoder_out + # current_encoder_out's shape: (batch_size, encoder_out_dim) + offset = end + + decoder_out = decoder_out[:batch_size] + + logits = joiner( + current_encoder_out, + decoder_out, + ) + # logits'shape (batch_size, vocab_size) + + assert logits.ndim == 2, logits.shape + y = logits.argmax(dim=1).tolist() + emitted = False + for i, v in enumerate(y): + if v != blank_id: + hyps[i].append(v) + emitted = True + if emitted: + # update decoder output + decoder_input = [h[-context_size:] for h in hyps[:batch_size]] + decoder_input = torch.tensor( + decoder_input, + device=device, + dtype=torch.int64, + ) + decoder_out = decoder( + decoder_input, + need_pad=torch.tensor([False]), + ) + decoder_out = decoder_out.squeeze(1) + + sorted_ans = [h[context_size:] for h in hyps] + ans = [] + unsorted_indices = packed_encoder_out.unsorted_indices.tolist() + for i in range(N): + ans.append(sorted_ans[unsorted_indices[i]]) + + return ans + + +@torch.no_grad() +def main(): + parser = get_parser() + args = parser.parse_args() + logging.info(vars(args)) + + device = torch.device("cpu") + if torch.cuda.is_available(): + device = torch.device("cuda", 0) + + logging.info(f"device: {device}") + + encoder = torch.jit.load(args.encoder_model_filename) + decoder = torch.jit.load(args.decoder_model_filename) + joiner = torch.jit.load(args.joiner_model_filename) + + encoder.eval() + decoder.eval() + joiner.eval() + + encoder.to(device) + decoder.to(device) + joiner.to(device) + + sp = spm.SentencePieceProcessor() + sp.load(args.bpe_model) + + logging.info("Constructing Fbank computer") + opts = kaldifeat.FbankOptions() + opts.device = device + opts.frame_opts.dither = 0 + opts.frame_opts.snip_edges = False + opts.frame_opts.samp_freq = args.sample_rate + opts.mel_opts.num_bins = 80 + + fbank = kaldifeat.Fbank(opts) + + logging.info(f"Reading sound files: {args.sound_files}") + waves = read_sound_files( + filenames=args.sound_files, + expected_sample_rate=args.sample_rate, + ) + waves = [w.to(device) for w in waves] + + logging.info("Decoding started") + features = fbank(waves) + feature_lengths = [f.size(0) for f in features] + + features = pad_sequence( + features, + batch_first=True, + padding_value=math.log(1e-10), + ) + + feature_lengths = torch.tensor(feature_lengths, device=device) + + states = encoder.get_init_states(batch_size=features.size(0), device=device) + + encoder_out, encoder_out_lens, _ = encoder( + x=features, + x_lens=feature_lengths, + states=states, + ) + + hyps = greedy_search( + decoder=decoder, + joiner=joiner, + 
encoder_out=encoder_out, + encoder_out_lens=encoder_out_lens, + context_size=args.context_size, + ) + s = "\n" + for filename, hyp in zip(args.sound_files, hyps): + words = sp.decode(hyp) + s += f"{filename}:\n{words}\n\n" + logging.info(s) + + logging.info("Decoding Done") + + +if __name__ == "__main__": + formatter = ( + "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s" + ) + + logging.basicConfig(format=formatter, level=logging.INFO) + main() diff --git a/egs/librispeech/ASR/lstm_transducer_stateless/joiner.py b/egs/librispeech/ASR/lstm_transducer_stateless/joiner.py new file mode 120000 index 000000000..815fd4bb6 --- /dev/null +++ b/egs/librispeech/ASR/lstm_transducer_stateless/joiner.py @@ -0,0 +1 @@ +../pruned_transducer_stateless2/joiner.py \ No newline at end of file diff --git a/egs/librispeech/ASR/lstm_transducer_stateless/lstm.py b/egs/librispeech/ASR/lstm_transducer_stateless/lstm.py new file mode 100644 index 000000000..6ce966b13 --- /dev/null +++ b/egs/librispeech/ASR/lstm_transducer_stateless/lstm.py @@ -0,0 +1,842 @@ +# Copyright 2022 Xiaomi Corp. (authors: Zengwei Yao) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy +import math +from typing import List, Optional, Tuple + +import torch +from encoder_interface import EncoderInterface +from scaling import ( + ActivationBalancer, + BasicNorm, + DoubleSwish, + ScaledConv2d, + ScaledLinear, + ScaledLSTM, +) +from torch import nn + +LOG_EPSILON = math.log(1e-10) + + +def unstack_states( + states: Tuple[torch.Tensor, torch.Tensor] +) -> List[Tuple[torch.Tensor, torch.Tensor]]: + """ + Unstack the lstm states corresponding to a batch of utterances into a list + of states, where the i-th entry is the state from the i-th utterance. + + Args: + states: + A tuple of 2 elements. + ``states[0]`` is the lstm hidden states, of a batch of utterance. + ``states[1]`` is the lstm cell states, of a batch of utterances. + + Returns: + A list of states. + ``states[i]`` is a tuple of 2 elememts of i-th utterance. + ``states[i][0]`` is the lstm hidden states of i-th utterance. + ``states[i][1]`` is the lstm cell states of i-th utterance. + """ + hidden_states, cell_states = states + + list_hidden_states = hidden_states.unbind(dim=1) + list_cell_states = cell_states.unbind(dim=1) + + ans = [ + (h.unsqueeze(1), c.unsqueeze(1)) + for (h, c) in zip(list_hidden_states, list_cell_states) + ] + return ans + + +def stack_states( + states_list: List[Tuple[torch.Tensor, torch.Tensor]] +) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Stack list of lstm states corresponding to separate utterances into a single + lstm state so that it can be used as an input for lstm when those utterances + are formed into a batch. + + Args: + state_list: + Each element in state_list corresponds to the lstm state for a single + utterance. + ``states[i]`` is a tuple of 2 elememts of i-th utterance. + ``states[i][0]`` is the lstm hidden states of i-th utterance. 
+ ``states[i][1]`` is the lstm cell states of i-th utterance. + + + Returns: + A new state corresponding to a batch of utterances. + It is a tuple of 2 elements. + ``states[0]`` is the lstm hidden states, of a batch of utterance. + ``states[1]`` is the lstm cell states, of a batch of utterances. + """ + hidden_states = torch.cat([s[0] for s in states_list], dim=1) + cell_states = torch.cat([s[1] for s in states_list], dim=1) + ans = (hidden_states, cell_states) + return ans + + +class RNN(EncoderInterface): + """ + Args: + num_features (int): + Number of input features. + subsampling_factor (int): + Subsampling factor of encoder (convolution layers before lstm layers) (default=4). # noqa + d_model (int): + Output dimension (default=512). + dim_feedforward (int): + Feedforward dimension (default=2048). + rnn_hidden_size (int): + Hidden dimension for lstm layers (default=1024). + num_encoder_layers (int): + Number of encoder layers (default=12). + dropout (float): + Dropout rate (default=0.1). + layer_dropout (float): + Dropout value for model-level warmup (default=0.075). + aux_layer_period (int): + Period of auxiliary layers used for random combiner during training. + If set to 0, will not use the random combiner (Default). + You can set a positive integer to use the random combiner, e.g., 3. + """ + + def __init__( + self, + num_features: int, + subsampling_factor: int = 4, + d_model: int = 512, + dim_feedforward: int = 2048, + rnn_hidden_size: int = 1024, + num_encoder_layers: int = 12, + dropout: float = 0.1, + layer_dropout: float = 0.075, + aux_layer_period: int = 0, + ) -> None: + super(RNN, self).__init__() + + self.num_features = num_features + self.subsampling_factor = subsampling_factor + if subsampling_factor != 4: + raise NotImplementedError("Support only 'subsampling_factor=4'.") + + # self.encoder_embed converts the input of shape (N, T, num_features) + # to the shape (N, T//subsampling_factor, d_model). + # That is, it does two things simultaneously: + # (1) subsampling: T -> T//subsampling_factor + # (2) embedding: num_features -> d_model + self.encoder_embed = Conv2dSubsampling(num_features, d_model) + + self.num_encoder_layers = num_encoder_layers + self.d_model = d_model + self.rnn_hidden_size = rnn_hidden_size + + encoder_layer = RNNEncoderLayer( + d_model=d_model, + dim_feedforward=dim_feedforward, + rnn_hidden_size=rnn_hidden_size, + dropout=dropout, + layer_dropout=layer_dropout, + ) + self.encoder = RNNEncoder( + encoder_layer, + num_encoder_layers, + aux_layers=list( + range( + num_encoder_layers // 3, + num_encoder_layers - 1, + aux_layer_period, + ) + ) + if aux_layer_period > 0 + else None, + ) + + def forward( + self, + x: torch.Tensor, + x_lens: torch.Tensor, + states: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, + warmup: float = 1.0, + ) -> Tuple[torch.Tensor, torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]: + """ + Args: + x: + The input tensor. Its shape is (N, T, C), where N is the batch size, + T is the sequence length, C is the feature dimension. + x_lens: + A tensor of shape (N,), containing the number of frames in `x` + before padding. + states: + A tuple of 2 tensors (optional). It is for streaming inference. + states[0] is the hidden states of all layers, + with shape of (num_layers, N, d_model); + states[1] is the cell states of all layers, + with shape of (num_layers, N, rnn_hidden_size). + warmup: + A floating point value that gradually increases from 0 throughout + training; when it is >= 1.0 we are "fully warmed up". 
It is used + to turn modules on sequentially. + + Returns: + A tuple of 3 tensors: + - embeddings: its shape is (N, T', d_model), where T' is the output + sequence lengths. + - lengths: a tensor of shape (batch_size,) containing the number of + frames in `embeddings` before padding. + - updated states, whose shape is the same as the input states. + """ + x = self.encoder_embed(x) + x = x.permute(1, 0, 2) # (N, T, C) -> (T, N, C) + + # lengths = ((x_lens - 3) // 2 - 1) // 2 # issue an warning + # + # Note: rounding_mode in torch.div() is available only in torch >= 1.8.0 + lengths = (((x_lens - 3) >> 1) - 1) >> 1 + if not torch.jit.is_tracing(): + assert x.size(0) == lengths.max().item() + + if states is None: + x = self.encoder(x, warmup=warmup)[0] + # torch.jit.trace requires returned types to be the same as annotated # noqa + new_states = (torch.empty(0), torch.empty(0)) + else: + assert not self.training + assert len(states) == 2 + if not torch.jit.is_tracing(): + # for hidden state + assert states[0].shape == ( + self.num_encoder_layers, + x.size(1), + self.d_model, + ) + # for cell state + assert states[1].shape == ( + self.num_encoder_layers, + x.size(1), + self.rnn_hidden_size, + ) + x, new_states = self.encoder(x, states) + + x = x.permute(1, 0, 2) # (T, N, C) -> (N, T, C) + return x, lengths, new_states + + @torch.jit.export + def get_init_states( + self, batch_size: int = 1, device: torch.device = torch.device("cpu") + ) -> Tuple[torch.Tensor, torch.Tensor]: + """Get model initial states.""" + # for rnn hidden states + hidden_states = torch.zeros( + (self.num_encoder_layers, batch_size, self.d_model), device=device + ) + cell_states = torch.zeros( + (self.num_encoder_layers, batch_size, self.rnn_hidden_size), + device=device, + ) + return (hidden_states, cell_states) + + +class RNNEncoderLayer(nn.Module): + """ + RNNEncoderLayer is made up of lstm and feedforward networks. + + Args: + d_model: + The number of expected features in the input (required). + dim_feedforward: + The dimension of feedforward network model (default=2048). + rnn_hidden_size: + The hidden dimension of rnn layer. + dropout: + The dropout value (default=0.1). + layer_dropout: + The dropout value for model-level warmup (default=0.075). + """ + + def __init__( + self, + d_model: int, + dim_feedforward: int, + rnn_hidden_size: int, + dropout: float = 0.1, + layer_dropout: float = 0.075, + ) -> None: + super(RNNEncoderLayer, self).__init__() + self.layer_dropout = layer_dropout + self.d_model = d_model + self.rnn_hidden_size = rnn_hidden_size + + assert rnn_hidden_size >= d_model, (rnn_hidden_size, d_model) + self.lstm = ScaledLSTM( + input_size=d_model, + hidden_size=rnn_hidden_size, + proj_size=d_model if rnn_hidden_size > d_model else 0, + num_layers=1, + dropout=0.0, + ) + self.feed_forward = nn.Sequential( + ScaledLinear(d_model, dim_feedforward), + ActivationBalancer(channel_dim=-1), + DoubleSwish(), + nn.Dropout(dropout), + ScaledLinear(dim_feedforward, d_model, initial_scale=0.25), + ) + self.norm_final = BasicNorm(d_model) + + # try to ensure the output is close to zero-mean (or at least, zero-median). # noqa + self.balancer = ActivationBalancer( + channel_dim=-1, min_positive=0.45, max_positive=0.55, max_abs=6.0 + ) + self.dropout = nn.Dropout(dropout) + + def forward( + self, + src: torch.Tensor, + states: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, + warmup: float = 1.0, + ) -> Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]: + """ + Pass the input through the encoder layer. 
+ + Args: + src: + The sequence to the encoder layer (required). + Its shape is (S, N, E), where S is the sequence length, + N is the batch size, and E is the feature number. + states: + A tuple of 2 tensors (optional). It is for streaming inference. + states[0] is the hidden states of all layers, + with shape of (1, N, d_model); + states[1] is the cell states of all layers, + with shape of (1, N, rnn_hidden_size). + warmup: + It controls selective bypass of of layers; if < 1.0, we will + bypass layers more frequently. + """ + src_orig = src + + warmup_scale = min(0.1 + warmup, 1.0) + # alpha = 1.0 means fully use this encoder layer, 0.0 would mean + # completely bypass it. + if self.training: + alpha = ( + warmup_scale + if torch.rand(()).item() <= (1.0 - self.layer_dropout) + else 0.1 + ) + else: + alpha = 1.0 + + # lstm module + if states is None: + src_lstm = self.lstm(src)[0] + # torch.jit.trace requires returned types be the same as annotated + new_states = (torch.empty(0), torch.empty(0)) + else: + assert not self.training + assert len(states) == 2 + if not torch.jit.is_tracing(): + # for hidden state + assert states[0].shape == (1, src.size(1), self.d_model) + # for cell state + assert states[1].shape == (1, src.size(1), self.rnn_hidden_size) + src_lstm, new_states = self.lstm(src, states) + src = src + self.dropout(src_lstm) + + # feed forward module + src = src + self.dropout(self.feed_forward(src)) + + src = self.norm_final(self.balancer(src)) + + if alpha != 1.0: + src = alpha * src + (1 - alpha) * src_orig + + return src, new_states + + +class RNNEncoder(nn.Module): + """ + RNNEncoder is a stack of N encoder layers. + + Args: + encoder_layer: + An instance of the RNNEncoderLayer() class (required). + num_layers: + The number of sub-encoder-layers in the encoder (required). + """ + + def __init__( + self, + encoder_layer: nn.Module, + num_layers: int, + aux_layers: Optional[List[int]] = None, + ) -> None: + super(RNNEncoder, self).__init__() + self.layers = nn.ModuleList( + [copy.deepcopy(encoder_layer) for i in range(num_layers)] + ) + self.num_layers = num_layers + self.d_model = encoder_layer.d_model + self.rnn_hidden_size = encoder_layer.rnn_hidden_size + + self.aux_layers: List[int] = [] + self.combiner: Optional[nn.Module] = None + if aux_layers is not None: + assert len(set(aux_layers)) == len(aux_layers) + assert num_layers - 1 not in aux_layers + self.aux_layers = aux_layers + [num_layers - 1] + self.combiner = RandomCombine( + num_inputs=len(self.aux_layers), + final_weight=0.5, + pure_prob=0.333, + stddev=2.0, + ) + + def forward( + self, + src: torch.Tensor, + states: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, + warmup: float = 1.0, + ) -> Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]: + """ + Pass the input through the encoder layer in turn. + + Args: + src: + The sequence to the encoder layer (required). + Its shape is (S, N, E), where S is the sequence length, + N is the batch size, and E is the feature number. + states: + A tuple of 2 tensors (optional). It is for streaming inference. + states[0] is the hidden states of all layers, + with shape of (num_layers, N, d_model); + states[1] is the cell states of all layers, + with shape of (num_layers, N, rnn_hidden_size). + warmup: + It controls selective bypass of of layers; if < 1.0, we will + bypass layers more frequently. 
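+        A typical streaming loop (an illustrative note) keeps the returned
+        states and passes them back in as ``states`` for the next chunk of
+        frames; for non-streaming use, ``states`` is simply left as ``None``.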
+ """ + if states is not None: + assert not self.training + assert len(states) == 2 + if not torch.jit.is_tracing(): + # for hidden state + assert states[0].shape == ( + self.num_layers, + src.size(1), + self.d_model, + ) + # for cell state + assert states[1].shape == ( + self.num_layers, + src.size(1), + self.rnn_hidden_size, + ) + + output = src + + outputs = [] + + new_hidden_states = [] + new_cell_states = [] + + for i, mod in enumerate(self.layers): + if states is None: + output = mod(output, warmup=warmup)[0] + else: + layer_state = ( + states[0][i : i + 1, :, :], # h: (1, N, d_model) + states[1][i : i + 1, :, :], # c: (1, N, rnn_hidden_size) + ) + output, (h, c) = mod(output, layer_state) + new_hidden_states.append(h) + new_cell_states.append(c) + + if self.combiner is not None and i in self.aux_layers: + outputs.append(output) + + if self.combiner is not None: + output = self.combiner(outputs) + + if states is None: + new_states = (torch.empty(0), torch.empty(0)) + else: + new_states = ( + torch.cat(new_hidden_states, dim=0), + torch.cat(new_cell_states, dim=0), + ) + + return output, new_states + + +class Conv2dSubsampling(nn.Module): + """Convolutional 2D subsampling (to 1/4 length). + + Convert an input of shape (N, T, idim) to an output + with shape (N, T', odim), where + T' = ((T-3)//2-1)//2, which approximates T' == T//4 + + It is based on + https://github.com/espnet/espnet/blob/master/espnet/nets/pytorch_backend/transformer/subsampling.py # noqa + """ + + def __init__( + self, + in_channels: int, + out_channels: int, + layer1_channels: int = 8, + layer2_channels: int = 32, + layer3_channels: int = 128, + ) -> None: + """ + Args: + in_channels: + Number of channels in. The input shape is (N, T, in_channels). + Caution: It requires: T >= 9, in_channels >= 9. + out_channels + Output dim. The output shape is (N, ((T-3)//2-1)//2, out_channels) + layer1_channels: + Number of channels in layer1 + layer1_channels: + Number of channels in layer2 + """ + assert in_channels >= 9 + super().__init__() + + self.conv = nn.Sequential( + ScaledConv2d( + in_channels=1, + out_channels=layer1_channels, + kernel_size=3, + padding=0, + ), + ActivationBalancer(channel_dim=1), + DoubleSwish(), + ScaledConv2d( + in_channels=layer1_channels, + out_channels=layer2_channels, + kernel_size=3, + stride=2, + ), + ActivationBalancer(channel_dim=1), + DoubleSwish(), + ScaledConv2d( + in_channels=layer2_channels, + out_channels=layer3_channels, + kernel_size=3, + stride=2, + ), + ActivationBalancer(channel_dim=1), + DoubleSwish(), + ) + self.out = ScaledLinear( + layer3_channels * (((in_channels - 3) // 2 - 1) // 2), out_channels + ) + # set learn_eps=False because out_norm is preceded by `out`, and `out` + # itself has learned scale, so the extra degree of freedom is not + # needed. + self.out_norm = BasicNorm(out_channels, learn_eps=False) + # constrain median of output to be close to zero. + self.out_balancer = ActivationBalancer( + channel_dim=-1, min_positive=0.45, max_positive=0.55 + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """Subsample x. + + Args: + x: + Its shape is (N, T, idim). 
+ + Returns: + Return a tensor of shape (N, ((T-3)//2-1)//2, odim) + """ + # On entry, x is (N, T, idim) + x = x.unsqueeze(1) # (N, T, idim) -> (N, 1, T, idim) i.e., (N, C, H, W) + x = self.conv(x) + # Now x is of shape (N, odim, ((T-3)//2-1)//2, ((idim-3)//2-1)//2) + b, c, t, f = x.size() + x = self.out(x.transpose(1, 2).contiguous().view(b, t, c * f)) + # Now x is of shape (N, ((T-3)//2-1))//2, odim) + x = self.out_norm(x) + x = self.out_balancer(x) + return x + + +class RandomCombine(nn.Module): + """ + This module combines a list of Tensors, all with the same shape, to + produce a single output of that same shape which, in training time, + is a random combination of all the inputs; but which in test time + will be just the last input. + + The idea is that the list of Tensors will be a list of outputs of multiple + conformer layers. This has a similar effect as iterated loss. (See: + DEJA-VU: DOUBLE FEATURE PRESENTATION AND ITERATED LOSS IN DEEP TRANSFORMER + NETWORKS). + """ + + def __init__( + self, + num_inputs: int, + final_weight: float = 0.5, + pure_prob: float = 0.5, + stddev: float = 2.0, + ) -> None: + """ + Args: + num_inputs: + The number of tensor inputs, which equals the number of layers' + outputs that are fed into this module. E.g. in an 18-layer neural + net if we output layers 16, 12, 18, num_inputs would be 3. + final_weight: + The amount of weight or probability we assign to the + final layer when randomly choosing layers or when choosing + continuous layer weights. + pure_prob: + The probability, on each frame, with which we choose + only a single layer to output (rather than an interpolation) + stddev: + A standard deviation that we add to log-probs for computing + randomized weights. + + The method of choosing which layers, or combinations of layers, to use, + is conceptually as follows:: + + With probability `pure_prob`:: + With probability `final_weight`: choose final layer, + Else: choose random non-final layer. + Else:: + Choose initial log-weights that correspond to assigning + weight `final_weight` to the final layer and equal + weights to other layers; then add Gaussian noise + with variance `stddev` to these log-weights, and normalize + to weights (note: the average weight assigned to the + final layer here will not be `final_weight` if stddev>0). + """ + super().__init__() + assert 0 <= pure_prob <= 1, pure_prob + assert 0 < final_weight < 1, final_weight + assert num_inputs >= 1 + + self.num_inputs = num_inputs + self.final_weight = final_weight + self.pure_prob = pure_prob + self.stddev = stddev + + self.final_log_weight = ( + torch.tensor( + (final_weight / (1 - final_weight)) * (self.num_inputs - 1) + ) + .log() + .item() + ) + + def forward(self, inputs: List[torch.Tensor]) -> torch.Tensor: + """Forward function. + Args: + inputs: + A list of Tensor, e.g. from various layers of a transformer. + All must be the same shape, of (*, num_channels) + Returns: + A Tensor of shape (*, num_channels). In test mode + this is just the final input. 
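+            In training mode the weights are random, as described in the class
+            docstring; for example (an illustrative calculation), with
+            ``num_inputs=3`` and ``final_weight=0.5`` the pre-noise mixing
+            weights are ``(0.25, 0.25, 0.5)``.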
+ """ + num_inputs = self.num_inputs + assert len(inputs) == num_inputs + if not self.training or torch.jit.is_scripting(): + return inputs[-1] + + # Shape of weights: (*, num_inputs) + num_channels = inputs[0].shape[-1] + num_frames = inputs[0].numel() // num_channels + + ndim = inputs[0].ndim + # stacked_inputs: (num_frames, num_channels, num_inputs) + stacked_inputs = torch.stack(inputs, dim=ndim).reshape( + (num_frames, num_channels, num_inputs) + ) + + # weights: (num_frames, num_inputs) + weights = self._get_random_weights( + inputs[0].dtype, inputs[0].device, num_frames + ) + + weights = weights.reshape(num_frames, num_inputs, 1) + # ans: (num_frames, num_channels, 1) + ans = torch.matmul(stacked_inputs, weights) + # ans: (*, num_channels) + + ans = ans.reshape(inputs[0].shape[:-1] + (num_channels,)) + + # The following if causes errors for torch script in torch 1.6.0 + # if __name__ == "__main__": + # # for testing only... + # print("Weights = ", weights.reshape(num_frames, num_inputs)) + return ans + + def _get_random_weights( + self, dtype: torch.dtype, device: torch.device, num_frames: int + ) -> torch.Tensor: + """Return a tensor of random weights, of shape + `(num_frames, self.num_inputs)`, + Args: + dtype: + The data-type desired for the answer, e.g. float, double. + device: + The device needed for the answer. + num_frames: + The number of sets of weights desired + Returns: + A tensor of shape (num_frames, self.num_inputs), such that + `ans.sum(dim=1)` is all ones. + """ + pure_prob = self.pure_prob + if pure_prob == 0.0: + return self._get_random_mixed_weights(dtype, device, num_frames) + elif pure_prob == 1.0: + return self._get_random_pure_weights(dtype, device, num_frames) + else: + p = self._get_random_pure_weights(dtype, device, num_frames) + m = self._get_random_mixed_weights(dtype, device, num_frames) + return torch.where( + torch.rand(num_frames, 1, device=device) < self.pure_prob, p, m + ) + + def _get_random_pure_weights( + self, dtype: torch.dtype, device: torch.device, num_frames: int + ): + """Return a tensor of random one-hot weights, of shape + `(num_frames, self.num_inputs)`, + Args: + dtype: + The data-type desired for the answer, e.g. float, double. + device: + The device needed for the answer. + num_frames: + The number of sets of weights desired. + Returns: + A one-hot tensor of shape `(num_frames, self.num_inputs)`, with + exactly one weight equal to 1.0 on each frame. + """ + final_prob = self.final_weight + + # final contains self.num_inputs - 1 in all elements + final = torch.full((num_frames,), self.num_inputs - 1, device=device) + # nonfinal contains random integers in [0..num_inputs - 2], these are for non-final weights. # noqa + nonfinal = torch.randint( + self.num_inputs - 1, (num_frames,), device=device + ) + + indexes = torch.where( + torch.rand(num_frames, device=device) < final_prob, final, nonfinal + ) + ans = torch.nn.functional.one_hot( + indexes, num_classes=self.num_inputs + ).to(dtype=dtype) + return ans + + def _get_random_mixed_weights( + self, dtype: torch.dtype, device: torch.device, num_frames: int + ): + """Return a tensor of random one-hot weights, of shape + `(num_frames, self.num_inputs)`, + Args: + dtype: + The data-type desired for the answer, e.g. float, double. + device: + The device needed for the answer. + num_frames: + The number of sets of weights desired. + Returns: + A tensor of shape (num_frames, self.num_inputs), which elements + in [0..1] that sum to one over the second axis, i.e. + `ans.sum(dim=1)` is all ones. 
+ """ + logprobs = ( + torch.randn(num_frames, self.num_inputs, dtype=dtype, device=device) + * self.stddev + ) + logprobs[:, -1] += self.final_log_weight + return logprobs.softmax(dim=1) + + +def _test_random_combine(final_weight: float, pure_prob: float, stddev: float): + print( + f"_test_random_combine: final_weight={final_weight}, pure_prob={pure_prob}, stddev={stddev}" # noqa + ) + num_inputs = 3 + num_channels = 50 + m = RandomCombine( + num_inputs=num_inputs, + final_weight=final_weight, + pure_prob=pure_prob, + stddev=stddev, + ) + + x = [torch.ones(3, 4, num_channels) for _ in range(num_inputs)] + + y = m(x) + assert y.shape == x[0].shape + assert torch.allclose(y, x[0]) # .. since actually all ones. + + +def _test_random_combine_main(): + _test_random_combine(0.999, 0, 0.0) + _test_random_combine(0.5, 0, 0.0) + _test_random_combine(0.999, 0, 0.0) + _test_random_combine(0.5, 0, 0.3) + _test_random_combine(0.5, 1, 0.3) + _test_random_combine(0.5, 0.5, 0.3) + + feature_dim = 50 + c = RNN(num_features=feature_dim, d_model=128) + batch_size = 5 + seq_len = 20 + # Just make sure the forward pass runs. + f = c( + torch.randn(batch_size, seq_len, feature_dim), + torch.full((batch_size,), seq_len, dtype=torch.int64), + ) + f # to remove flake8 warnings + + +if __name__ == "__main__": + feature_dim = 80 + m = RNN( + num_features=feature_dim, + d_model=512, + rnn_hidden_size=1024, + dim_feedforward=2048, + num_encoder_layers=12, + ) + batch_size = 5 + seq_len = 20 + # Just make sure the forward pass runs. + f = m( + torch.randn(batch_size, seq_len, feature_dim), + torch.full((batch_size,), seq_len, dtype=torch.int64), + warmup=0.5, + ) + num_param = sum([p.numel() for p in m.parameters()]) + print(f"Number of model parameters: {num_param}") + + _test_random_combine_main() diff --git a/egs/librispeech/ASR/lstm_transducer_stateless/model.py b/egs/librispeech/ASR/lstm_transducer_stateless/model.py new file mode 100644 index 000000000..efbc88a55 --- /dev/null +++ b/egs/librispeech/ASR/lstm_transducer_stateless/model.py @@ -0,0 +1,202 @@ +# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang, Wei Kang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from typing import Tuple + +import k2 +import torch +import torch.nn as nn +from encoder_interface import EncoderInterface +from scaling import ScaledLinear + +from icefall.utils import add_sos + + +class Transducer(nn.Module): + """It implements https://arxiv.org/pdf/1211.3711.pdf + "Sequence Transduction with Recurrent Neural Networks" + """ + + def __init__( + self, + encoder: EncoderInterface, + decoder: nn.Module, + joiner: nn.Module, + encoder_dim: int, + decoder_dim: int, + joiner_dim: int, + vocab_size: int, + ): + """ + Args: + encoder: + It is the transcription network in the paper. Its accepts + two inputs: `x` of (N, T, encoder_dim) and `x_lens` of shape (N,). + It returns two tensors: `logits` of shape (N, T, encoder_dm) and + `logit_lens` of shape (N,). 
+ decoder: + It is the prediction network in the paper. Its input shape + is (N, U) and its output shape is (N, U, decoder_dim). + It should contain one attribute: `blank_id`. + joiner: + It has two inputs with shapes: (N, T, encoder_dim) and + (N, U, decoder_dim). + Its output shape is (N, T, U, vocab_size). Note that its output + contains unnormalized probs, i.e., not processed by log-softmax. + """ + super().__init__() + assert isinstance(encoder, EncoderInterface), type(encoder) + assert hasattr(decoder, "blank_id") + + self.encoder = encoder + self.decoder = decoder + self.joiner = joiner + + self.simple_am_proj = ScaledLinear( + encoder_dim, vocab_size, initial_speed=0.5 + ) + self.simple_lm_proj = ScaledLinear(decoder_dim, vocab_size) + + def forward( + self, + x: torch.Tensor, + x_lens: torch.Tensor, + y: k2.RaggedTensor, + prune_range: int = 5, + am_scale: float = 0.0, + lm_scale: float = 0.0, + warmup: float = 1.0, + reduction: str = "sum", + ) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Args: + x: + A 3-D tensor of shape (N, T, C). + x_lens: + A 1-D tensor of shape (N,). It contains the number of frames in `x` + before padding. + y: + A ragged tensor with 2 axes [utt][label]. It contains labels of each + utterance. + prune_range: + The prune range for rnnt loss, it means how many symbols(context) + we are considering for each frame to compute the loss. + am_scale: + The scale to smooth the loss with am (output of encoder network) + part + lm_scale: + The scale to smooth the loss with lm (output of predictor network) + part + warmup: + A value warmup >= 0 that determines which modules are active, values + warmup > 1 "are fully warmed up" and all modules will be active. + reduction: + "sum" to sum the losses over all utterances in the batch. + "none" to return the loss in a 1-D tensor for each utterance + in the batch. + Returns: + Return the transducer loss. + + Note: + Regarding am_scale & lm_scale, it will make the loss-function one of + the form: + lm_scale * lm_probs + am_scale * am_probs + + (1-lm_scale-am_scale) * combined_probs + """ + assert reduction in ("sum", "none"), reduction + assert x.ndim == 3, x.shape + assert x_lens.ndim == 1, x_lens.shape + assert y.num_axes == 2, y.num_axes + + assert x.size(0) == x_lens.size(0) == y.dim0 + + encoder_out, x_lens, _ = self.encoder(x, x_lens, warmup=warmup) + assert torch.all(x_lens > 0) + + # Now for the decoder, i.e., the prediction network + row_splits = y.shape.row_splits(1) + y_lens = row_splits[1:] - row_splits[:-1] + + blank_id = self.decoder.blank_id + sos_y = add_sos(y, sos_id=blank_id) + + # sos_y_padded: [B, S + 1], start with SOS. 
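+        # For intuition, a toy example with hypothetical label values
+        # (assuming blank_id == 0, which is also used as the SOS symbol):
+        #   y            = [[5, 9, 3], [8, 2]]        (ragged, 2 utterances)
+        #   sos_y        = [[0, 5, 9, 3], [0, 8, 2]]
+        #   sos_y_padded = [[0, 5, 9, 3],
+        #                   [0, 8, 2, 0]]             (padded with blank)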
+ sos_y_padded = sos_y.pad(mode="constant", padding_value=blank_id) + + # decoder_out: [B, S + 1, decoder_dim] + decoder_out = self.decoder(sos_y_padded) + + # Note: y does not start with SOS + # y_padded : [B, S] + y_padded = y.pad(mode="constant", padding_value=0) + + y_padded = y_padded.to(torch.int64) + boundary = torch.zeros( + (x.size(0), 4), dtype=torch.int64, device=x.device + ) + boundary[:, 2] = y_lens + boundary[:, 3] = x_lens + + lm = self.simple_lm_proj(decoder_out) + am = self.simple_am_proj(encoder_out) + + with torch.cuda.amp.autocast(enabled=False): + simple_loss, (px_grad, py_grad) = k2.rnnt_loss_smoothed( + lm=lm.float(), + am=am.float(), + symbols=y_padded, + termination_symbol=blank_id, + lm_only_scale=lm_scale, + am_only_scale=am_scale, + boundary=boundary, + reduction=reduction, + return_grad=True, + ) + + # ranges : [B, T, prune_range] + ranges = k2.get_rnnt_prune_ranges( + px_grad=px_grad, + py_grad=py_grad, + boundary=boundary, + s_range=prune_range, + ) + + # am_pruned : [B, T, prune_range, encoder_dim] + # lm_pruned : [B, T, prune_range, decoder_dim] + am_pruned, lm_pruned = k2.do_rnnt_pruning( + am=self.joiner.encoder_proj(encoder_out), + lm=self.joiner.decoder_proj(decoder_out), + ranges=ranges, + ) + + # logits : [B, T, prune_range, vocab_size] + + # project_input=False since we applied the decoder's input projections + # prior to do_rnnt_pruning (this is an optimization for speed). + logits = self.joiner(am_pruned, lm_pruned, project_input=False) + + with torch.cuda.amp.autocast(enabled=False): + pruned_loss = k2.rnnt_loss_pruned( + logits=logits.float(), + symbols=y_padded, + ranges=ranges, + termination_symbol=blank_id, + boundary=boundary, + reduction=reduction, + ) + + return (simple_loss, pruned_loss) diff --git a/egs/librispeech/ASR/lstm_transducer_stateless/optim.py b/egs/librispeech/ASR/lstm_transducer_stateless/optim.py new file mode 120000 index 000000000..e2deb4492 --- /dev/null +++ b/egs/librispeech/ASR/lstm_transducer_stateless/optim.py @@ -0,0 +1 @@ +../pruned_transducer_stateless2/optim.py \ No newline at end of file diff --git a/egs/librispeech/ASR/lstm_transducer_stateless/pretrained.py b/egs/librispeech/ASR/lstm_transducer_stateless/pretrained.py new file mode 100755 index 000000000..2a6e2adc6 --- /dev/null +++ b/egs/librispeech/ASR/lstm_transducer_stateless/pretrained.py @@ -0,0 +1,352 @@ +#!/usr/bin/env python3 +# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +Usage: + +(1) greedy search +./lstm_transducer_stateless/pretrained.py \ + --checkpoint ./lstm_transducer_stateless/exp/pretrained.pt \ + --bpe-model ./data/lang_bpe_500/bpe.model \ + --method greedy_search \ + /path/to/foo.wav \ + /path/to/bar.wav + +(2) beam search +./lstm_transducer_stateless/pretrained.py \ + --checkpoint ./lstm_transducer_stateless/exp/pretrained.pt \ + --bpe-model ./data/lang_bpe_500/bpe.model \ + --method beam_search \ + --beam-size 4 \ + /path/to/foo.wav \ + /path/to/bar.wav + +(3) modified beam search +./lstm_transducer_stateless/pretrained.py \ + --checkpoint ./lstm_transducer_stateless/exp/pretrained.pt \ + --bpe-model ./data/lang_bpe_500/bpe.model \ + --method modified_beam_search \ + --beam-size 4 \ + /path/to/foo.wav \ + /path/to/bar.wav + +(4) fast beam search +./lstm_transducer_stateless/pretrained.py \ + --checkpoint ./lstm_transducer_stateless/exp/pretrained.pt \ + --bpe-model ./data/lang_bpe_500/bpe.model \ + --method fast_beam_search \ + --beam-size 4 \ + /path/to/foo.wav \ + /path/to/bar.wav + +You can also use `./lstm_transducer_stateless/exp/epoch-xx.pt`. + +Note: ./lstm_transducer_stateless/exp/pretrained.pt is generated by +./lstm_transducer_stateless/export.py +""" + + +import argparse +import logging +import math +from typing import List + +import k2 +import kaldifeat +import sentencepiece as spm +import torch +import torchaudio +from beam_search import ( + beam_search, + fast_beam_search_one_best, + greedy_search, + greedy_search_batch, + modified_beam_search, +) +from torch.nn.utils.rnn import pad_sequence +from train import add_model_arguments, get_params, get_transducer_model + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--checkpoint", + type=str, + required=True, + help="Path to the checkpoint. " + "The checkpoint is assumed to be saved by " + "icefall.checkpoint.save_checkpoint().", + ) + + parser.add_argument( + "--bpe-model", + type=str, + help="""Path to bpe.model.""", + ) + + parser.add_argument( + "--method", + type=str, + default="greedy_search", + help="""Possible values are: + - greedy_search + - beam_search + - modified_beam_search + - fast_beam_search + """, + ) + + parser.add_argument( + "sound_files", + type=str, + nargs="+", + help="The input sound file(s) to transcribe. " + "Supported formats are those supported by torchaudio.load(). " + "For example, wav and flac are supported. " + "The sample rate has to be 16kHz.", + ) + + parser.add_argument( + "--sample-rate", + type=int, + default=16000, + help="The sample rate of the input sound file", + ) + + parser.add_argument( + "--beam-size", + type=int, + default=4, + help="""An integer indicating how many candidates we will keep for each + frame. Used only when --method is beam_search or + modified_beam_search.""", + ) + + parser.add_argument( + "--beam", + type=float, + default=4, + help="""A floating point value to calculate the cutoff score during beam + search (i.e., `cutoff = max-score - beam`), which is the same as the + `beam` in Kaldi. + Used only when --method is fast_beam_search""", + ) + + parser.add_argument( + "--max-contexts", + type=int, + default=4, + help="""Used only when --method is fast_beam_search""", + ) + + parser.add_argument( + "--max-states", + type=int, + default=8, + help="""Used only when --method is fast_beam_search""", + ) + + parser.add_argument( + "--context-size", + type=int, + default=2, + help="The context size in the decoder. 
1 means bigram; "
+        "2 means tri-gram",
+    )
+    parser.add_argument(
+        "--max-sym-per-frame",
+        type=int,
+        default=1,
+        help="""Maximum number of symbols per frame. Used only when
+        --method is greedy_search.
+        """,
+    )
+
+    add_model_arguments(parser)
+
+    return parser
+
+
+def read_sound_files(
+    filenames: List[str], expected_sample_rate: float
+) -> List[torch.Tensor]:
+    """Read a list of sound files into a list of 1-D float32 torch tensors.
+    Args:
+      filenames:
+        A list of sound filenames.
+      expected_sample_rate:
+        The expected sample rate of the sound files.
+    Returns:
+      Return a list of 1-D float32 torch tensors.
+    """
+    ans = []
+    for f in filenames:
+        wave, sample_rate = torchaudio.load(f)
+        assert sample_rate == expected_sample_rate, (
+            f"expected sample rate: {expected_sample_rate}. "
+            f"Given: {sample_rate}"
+        )
+        # We use only the first channel
+        ans.append(wave[0])
+    return ans
+
+
+@torch.no_grad()
+def main():
+    parser = get_parser()
+    args = parser.parse_args()
+
+    params = get_params()
+
+    params.update(vars(args))
+
+    sp = spm.SentencePieceProcessor()
+    sp.load(params.bpe_model)
+
+    # <blk> and <unk> are defined in local/train_bpe_model.py
+    params.blank_id = sp.piece_to_id("<blk>")
+    params.unk_id = sp.piece_to_id("<unk>")
+    params.vocab_size = sp.get_piece_size()
+
+    logging.info(f"{params}")
+
+    device = torch.device("cpu")
+    if torch.cuda.is_available():
+        device = torch.device("cuda", 0)
+
+    logging.info(f"device: {device}")
+
+    logging.info("Creating model")
+    model = get_transducer_model(params)
+
+    num_param = sum([p.numel() for p in model.parameters()])
+    logging.info(f"Number of model parameters: {num_param}")
+
+    checkpoint = torch.load(args.checkpoint, map_location="cpu")
+    model.load_state_dict(checkpoint["model"], strict=False)
+    model.to(device)
+    model.eval()
+    model.device = device
+
+    logging.info("Constructing Fbank computer")
+    opts = kaldifeat.FbankOptions()
+    opts.device = device
+    opts.frame_opts.dither = 0
+    opts.frame_opts.snip_edges = False
+    opts.frame_opts.samp_freq = params.sample_rate
+    opts.mel_opts.num_bins = params.feature_dim
+
+    fbank = kaldifeat.Fbank(opts)
+
+    logging.info(f"Reading sound files: {params.sound_files}")
+    waves = read_sound_files(
+        filenames=params.sound_files, expected_sample_rate=params.sample_rate
+    )
+    waves = [w.to(device) for w in waves]
+
+    logging.info("Decoding started")
+    features = fbank(waves)
+    feature_lengths = [f.size(0) for f in features]
+
+    features = pad_sequence(
+        features, batch_first=True, padding_value=math.log(1e-10)
+    )
+
+    feature_lengths = torch.tensor(feature_lengths, device=device)
+
+    encoder_out, encoder_out_lens, _ = model.encoder(
+        x=features, x_lens=feature_lengths
+    )
+
+    num_waves = encoder_out.size(0)
+    hyps = []
+    msg = f"Using {params.method}"
+    if params.method == "beam_search":
+        msg += f" with beam size {params.beam_size}"
+    logging.info(msg)
+
+    if params.method == "fast_beam_search":
+        decoding_graph = k2.trivial_graph(params.vocab_size - 1, device=device)
+        hyp_tokens = fast_beam_search_one_best(
+            model=model,
+            decoding_graph=decoding_graph,
+            encoder_out=encoder_out,
+            encoder_out_lens=encoder_out_lens,
+            beam=params.beam,
+            max_contexts=params.max_contexts,
+            max_states=params.max_states,
+        )
+        for hyp in sp.decode(hyp_tokens):
+            hyps.append(hyp.split())
+    elif params.method == "modified_beam_search":
+        hyp_tokens = modified_beam_search(
+            model=model,
+            encoder_out=encoder_out,
+            encoder_out_lens=encoder_out_lens,
+            beam=params.beam_size,
+        )
+
+        for hyp in sp.decode(hyp_tokens):
+            
hyps.append(hyp.split()) + elif params.method == "greedy_search" and params.max_sym_per_frame == 1: + hyp_tokens = greedy_search_batch( + model=model, + encoder_out=encoder_out, + encoder_out_lens=encoder_out_lens, + ) + for hyp in sp.decode(hyp_tokens): + hyps.append(hyp.split()) + else: + for i in range(num_waves): + # fmt: off + encoder_out_i = encoder_out[i:i+1, :encoder_out_lens[i]] + # fmt: on + if params.method == "greedy_search": + hyp = greedy_search( + model=model, + encoder_out=encoder_out_i, + max_sym_per_frame=params.max_sym_per_frame, + ) + elif params.method == "beam_search": + hyp = beam_search( + model=model, + encoder_out=encoder_out_i, + beam=params.beam_size, + ) + else: + raise ValueError(f"Unsupported method: {params.method}") + + hyps.append(sp.decode(hyp).split()) + + s = "\n" + for filename, hyp in zip(params.sound_files, hyps): + words = " ".join(hyp) + s += f"{filename}:\n{words}\n\n" + logging.info(s) + + logging.info("Decoding Done") + + +if __name__ == "__main__": + formatter = ( + "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s" + ) + + logging.basicConfig(format=formatter, level=logging.INFO) + main() diff --git a/egs/librispeech/ASR/lstm_transducer_stateless/scaling.py b/egs/librispeech/ASR/lstm_transducer_stateless/scaling.py new file mode 120000 index 000000000..09d802cc4 --- /dev/null +++ b/egs/librispeech/ASR/lstm_transducer_stateless/scaling.py @@ -0,0 +1 @@ +../pruned_transducer_stateless2/scaling.py \ No newline at end of file diff --git a/egs/librispeech/ASR/lstm_transducer_stateless/scaling_converter.py b/egs/librispeech/ASR/lstm_transducer_stateless/scaling_converter.py new file mode 120000 index 000000000..3b667058d --- /dev/null +++ b/egs/librispeech/ASR/lstm_transducer_stateless/scaling_converter.py @@ -0,0 +1 @@ +../pruned_transducer_stateless3/scaling_converter.py \ No newline at end of file diff --git a/egs/librispeech/ASR/lstm_transducer_stateless/stream.py b/egs/librispeech/ASR/lstm_transducer_stateless/stream.py new file mode 100644 index 000000000..97d890c82 --- /dev/null +++ b/egs/librispeech/ASR/lstm_transducer_stateless/stream.py @@ -0,0 +1,148 @@ +# Copyright 2022 Xiaomi Corp. (authors: Fangjun Kuang, +# Zengwei Yao) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math +from typing import List, Optional, Tuple + +import k2 +import torch +from beam_search import Hypothesis, HypothesisList + +from icefall.utils import AttributeDict + + +class Stream(object): + def __init__( + self, + params: AttributeDict, + cut_id: str, + decoding_graph: Optional[k2.Fsa] = None, + device: torch.device = torch.device("cpu"), + LOG_EPS: float = math.log(1e-10), + ) -> None: + """ + Args: + params: + It's the return value of :func:`get_params`. + cut_id: + The cut id of the current stream. + decoding_graph: + The decoding graph. Can be either a `k2.trivial_graph` or HLG, Used + only when --decoding_method is fast_beam_search. 
+ device: + The device to run this stream. + LOG_EPS: + A float value used for padding. + """ + self.LOG_EPS = LOG_EPS + self.cut_id = cut_id + + # Containing attention caches and convolution caches + self.states: Optional[Tuple[torch.Tensor, torch.Tensor]] = None + + # It uses different attributes for different decoding methods. + self.context_size = params.context_size + self.decoding_method = params.decoding_method + if params.decoding_method == "greedy_search": + self.hyp = [params.blank_id] * params.context_size + elif params.decoding_method == "modified_beam_search": + self.hyps = HypothesisList() + self.hyps.add( + Hypothesis( + ys=[params.blank_id] * params.context_size, + log_prob=torch.zeros(1, dtype=torch.float32, device=device), + ) + ) + elif params.decoding_method == "fast_beam_search": + # feature_len is needed to get partial results. + # The rnnt_decoding_stream for fast_beam_search. + self.rnnt_decoding_stream: k2.RnntDecodingStream = ( + k2.RnntDecodingStream(decoding_graph) + ) + self.hyp: Optional[List[int]] = None + else: + raise ValueError( + f"Unsupported decoding method: {params.decoding_method}" + ) + + self.ground_truth: str = "" + + self.feature: Optional[torch.Tensor] = None + # Make sure all feature frames can be used. + # We aim to obtain 1 frame after subsampling. + self.chunk_length = params.subsampling_factor + self.pad_length = 5 + self.num_frames = 0 + self.num_processed_frames = 0 + + # After all feature frames are processed, we set this flag to True + self._done = False + + def set_feature(self, feature: torch.Tensor) -> None: + assert feature.dim() == 2, feature.dim() + # tail padding here to alleviate the tail deletion problem + num_tail_padded_frames = 35 + self.num_frames = feature.size(0) + num_tail_padded_frames + self.feature = torch.nn.functional.pad( + feature, + (0, 0, 0, self.pad_length + num_tail_padded_frames), + mode="constant", + value=self.LOG_EPS, + ) + + def get_feature_chunk(self) -> torch.Tensor: + """Get a chunk of feature frames. + + Returns: + A tensor of shape (ret_length, feature_dim). + """ + update_length = min( + self.num_frames - self.num_processed_frames, self.chunk_length + ) + ret_length = update_length + self.pad_length + + ret_feature = self.feature[ + self.num_processed_frames : self.num_processed_frames + ret_length + ] + # Cut off used frames. 
+ # self.feature = self.feature[update_length:] + + self.num_processed_frames += update_length + if self.num_processed_frames >= self.num_frames: + self._done = True + + return ret_feature + + @property + def id(self) -> str: + return self.cut_id + + @property + def done(self) -> bool: + """Return True if all feature frames are processed.""" + return self._done + + def decoding_result(self) -> List[int]: + """Obtain current decoding result.""" + if self.decoding_method == "greedy_search": + return self.hyp[self.context_size :] + elif self.decoding_method == "modified_beam_search": + best_hyp = self.hyps.get_most_probable(length_norm=True) + return best_hyp.ys[self.context_size :] + else: + assert self.decoding_method == "fast_beam_search" + return self.hyp diff --git a/egs/librispeech/ASR/lstm_transducer_stateless/streaming_decode.py b/egs/librispeech/ASR/lstm_transducer_stateless/streaming_decode.py new file mode 100755 index 000000000..d6376bdc0 --- /dev/null +++ b/egs/librispeech/ASR/lstm_transducer_stateless/streaming_decode.py @@ -0,0 +1,968 @@ +#!/usr/bin/env python3 +# +# Copyright 2021-2022 Xiaomi Corporation (Author: Fangjun Kuang, +# Zengwei Yao) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +Usage: +(1) greedy search +./lstm_transducer_stateless/streaming_decode.py \ + --epoch 35 \ + --avg 10 \ + --exp-dir lstm_transducer_stateless/exp \ + --num-decode-streams 2000 \ + --num-encoder-layers 12 \ + --rnn-hidden-size 1024 \ + --decoding-method greedy_search \ + --use-averaged-model True + +(2) modified beam search +./lstm_transducer_stateless/streaming_decode.py \ + --epoch 35 \ + --avg 10 \ + --exp-dir lstm_transducer_stateless/exp \ + --num-decode-streams 2000 \ + --num-encoder-layers 12 \ + --rnn-hidden-size 1024 \ + --decoding-method modified_beam_search \ + --use-averaged-model True \ + --beam-size 4 + +(3) fast beam search +./lstm_transducer_stateless/streaming_decode.py \ + --epoch 35 \ + --avg 10 \ + --exp-dir lstm_transducer_stateless/exp \ + --num-decode-streams 2000 \ + --num-encoder-layers 12 \ + --rnn-hidden-size 1024 \ + --decoding-method fast_beam_search \ + --use-averaged-model True \ + --beam 4 \ + --max-contexts 4 \ + --max-states 8 +""" +import argparse +import logging +import warnings +from pathlib import Path +from typing import Dict, List, Optional, Tuple + +import k2 +import numpy as np +import sentencepiece as spm +import torch +import torch.nn as nn +from asr_datamodule import LibriSpeechAsrDataModule +from beam_search import Hypothesis, HypothesisList, get_hyps_shape +from kaldifeat import Fbank, FbankOptions +from lhotse import CutSet +from lstm import LOG_EPSILON, stack_states, unstack_states +from stream import Stream +from torch.nn.utils.rnn import pad_sequence +from train import add_model_arguments, get_params, get_transducer_model + +from icefall.checkpoint import ( + average_checkpoints, + average_checkpoints_with_averaged_model, + find_checkpoints, + load_checkpoint, +) +from icefall.decode import one_best_decoding +from icefall.utils import ( + AttributeDict, + get_texts, + setup_logger, + store_transcripts, + str2bool, + write_error_stats, +) + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--epoch", + type=int, + default=28, + help="It specifies the checkpoint to use for decoding." + "Note: Epoch counts from 0.", + ) + + parser.add_argument( + "--iter", + type=int, + default=0, + help="""If positive, --epoch is ignored and it + will use the checkpoint exp_dir/checkpoint-iter.pt. + You can specify --avg to use more checkpoints for model averaging. + """, + ) + + parser.add_argument( + "--avg", + type=int, + default=15, + help="Number of checkpoints to average. Automatically select " + "consecutive checkpoints before the checkpoint specified by " + "'--epoch'. ", + ) + + parser.add_argument( + "--use-averaged-model", + type=str2bool, + default=False, + help="Whether to load averaged model. Currently it only supports " + "using --epoch. If True, it would decode with the averaged model " + "over the epoch range from `epoch-avg` (excluded) to `epoch`." + "Actually only the models with epoch number of `epoch-avg` and " + "`epoch` are loaded for averaging. 
", + ) + + parser.add_argument( + "--exp-dir", + type=str, + default="transducer_emformer/exp", + help="The experiment dir", + ) + + parser.add_argument( + "--bpe-model", + type=str, + default="data/lang_bpe_500/bpe.model", + help="Path to the BPE model", + ) + + parser.add_argument( + "--decoding-method", + type=str, + default="greedy_search", + help="""Possible values are: + - greedy_search + - modified_beam_search + - fast_beam_search + """, + ) + + parser.add_argument( + "--beam-size", + type=int, + default=4, + help="""An interger indicating how many candidates we will keep for each + frame. Used only when --decoding-method is beam_search or + modified_beam_search.""", + ) + + parser.add_argument( + "--beam", + type=float, + default=20.0, + help="""A floating point value to calculate the cutoff score during beam + search (i.e., `cutoff = max-score - beam`), which is the same as the + `beam` in Kaldi. + Used only when --decoding-method is fast_beam_search""", + ) + + parser.add_argument( + "--max-contexts", + type=int, + default=8, + help="""Used only when --decoding-method is + fast_beam_search""", + ) + + parser.add_argument( + "--max-states", + type=int, + default=64, + help="""Used only when --decoding-method is + fast_beam_search""", + ) + + parser.add_argument( + "--context-size", + type=int, + default=2, + help="The context size in the decoder. 1 means bigram; " + "2 means tri-gram", + ) + parser.add_argument( + "--max-sym-per-frame", + type=int, + default=1, + help="""Maximum number of symbols per frame. + Used only when --decoding_method is greedy_search""", + ) + + parser.add_argument( + "--sampling-rate", + type=float, + default=16000, + help="Sample rate of the audio", + ) + + parser.add_argument( + "--num-decode-streams", + type=int, + default=2000, + help="The number of streams that can be decoded in parallel", + ) + + add_model_arguments(parser) + + return parser + + +def greedy_search( + model: nn.Module, + encoder_out: torch.Tensor, + streams: List[Stream], +) -> None: + """Greedy search in batch mode. It hardcodes --max-sym-per-frame=1. + + Args: + model: + The transducer model. + encoder_out: + Output from the encoder. Its shape is (N, T, C), where N >= 1. + streams: + A list of Stream objects. 
+ """ + assert len(streams) == encoder_out.size(0) + assert encoder_out.ndim == 3 + + blank_id = model.decoder.blank_id + context_size = model.decoder.context_size + device = next(model.parameters()).device + T = encoder_out.size(1) + + encoder_out = model.joiner.encoder_proj(encoder_out) + + decoder_input = torch.tensor( + [stream.hyp[-context_size:] for stream in streams], + device=device, + dtype=torch.int64, + ) + # decoder_out is of shape (batch_size, 1, decoder_out_dim) + decoder_out = model.decoder(decoder_input, need_pad=False) + decoder_out = model.joiner.decoder_proj(decoder_out) + + for t in range(T): + # current_encoder_out's shape: (batch_size, 1, encoder_out_dim) + current_encoder_out = encoder_out[:, t : t + 1, :] # noqa + + logits = model.joiner( + current_encoder_out.unsqueeze(2), + decoder_out.unsqueeze(1), + project_input=False, + ) + # logits'shape (batch_size, vocab_size) + logits = logits.squeeze(1).squeeze(1) + + assert logits.ndim == 2, logits.shape + y = logits.argmax(dim=1).tolist() + emitted = False + for i, v in enumerate(y): + if v != blank_id: + streams[i].hyp.append(v) + emitted = True + if emitted: + # update decoder output + decoder_input = torch.tensor( + [stream.hyp[-context_size:] for stream in streams], + device=device, + dtype=torch.int64, + ) + decoder_out = model.decoder( + decoder_input, + need_pad=False, + ) + decoder_out = model.joiner.decoder_proj(decoder_out) + + +def modified_beam_search( + model: nn.Module, + encoder_out: torch.Tensor, + streams: List[Stream], + beam: int = 4, +): + """Beam search in batch mode with --max-sym-per-frame=1 being hardcoded. + + Args: + model: + The RNN-T model. + encoder_out: + A 3-D tensor of shape (N, T, encoder_out_dim) containing the output of + the encoder model. + streams: + A list of stream objects. + beam: + Number of active paths during the beam search. + """ + assert encoder_out.ndim == 3, encoder_out.shape + assert len(streams) == encoder_out.size(0) + + blank_id = model.decoder.blank_id + context_size = model.decoder.context_size + device = next(model.parameters()).device + batch_size = len(streams) + T = encoder_out.size(1) + + B = [stream.hyps for stream in streams] + + encoder_out = model.joiner.encoder_proj(encoder_out) + + for t in range(T): + current_encoder_out = encoder_out[:, t].unsqueeze(1).unsqueeze(1) + # current_encoder_out's shape: (batch_size, 1, 1, encoder_out_dim) + + hyps_shape = get_hyps_shape(B).to(device) + + A = [list(b) for b in B] + B = [HypothesisList() for _ in range(batch_size)] + + ys_log_probs = torch.stack( + [hyp.log_prob.reshape(1) for hyps in A for hyp in hyps], dim=0 + ) # (num_hyps, 1) + + decoder_input = torch.tensor( + [hyp.ys[-context_size:] for hyps in A for hyp in hyps], + device=device, + dtype=torch.int64, + ) # (num_hyps, context_size) + + decoder_out = model.decoder(decoder_input, need_pad=False).unsqueeze(1) + decoder_out = model.joiner.decoder_proj(decoder_out) + # decoder_out is of shape (num_hyps, 1, 1, decoder_output_dim) + + # Note: For torch 1.7.1 and below, it requires a torch.int64 tensor + # as index, so we use `to(torch.int64)` below. 
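+        # hyps_shape.row_ids(1) maps each active hypothesis to the index of
+        # the stream it came from, so the index_select below repeats every
+        # stream's encoder frame once per hypothesis. A hypothetical example:
+        # with 3 hypotheses for stream 0 and 2 for stream 1, row_ids(1) is
+        # [0, 0, 0, 1, 1] and current_encoder_out ends up with shape
+        # (5, 1, 1, encoder_out_dim).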
+ current_encoder_out = torch.index_select( + current_encoder_out, + dim=0, + index=hyps_shape.row_ids(1).to(torch.int64), + ) # (num_hyps, encoder_out_dim) + + logits = model.joiner( + current_encoder_out, decoder_out, project_input=False + ) + # logits is of shape (num_hyps, 1, 1, vocab_size) + + logits = logits.squeeze(1).squeeze(1) + + log_probs = logits.log_softmax(dim=-1) # (num_hyps, vocab_size) + + log_probs.add_(ys_log_probs) + + vocab_size = log_probs.size(-1) + + log_probs = log_probs.reshape(-1) + + row_splits = hyps_shape.row_splits(1) * vocab_size + log_probs_shape = k2.ragged.create_ragged_shape2( + row_splits=row_splits, cached_tot_size=log_probs.numel() + ) + ragged_log_probs = k2.RaggedTensor( + shape=log_probs_shape, value=log_probs + ) + + for i in range(batch_size): + topk_log_probs, topk_indexes = ragged_log_probs[i].topk(beam) + + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + topk_hyp_indexes = (topk_indexes // vocab_size).tolist() + topk_token_indexes = (topk_indexes % vocab_size).tolist() + + for k in range(len(topk_hyp_indexes)): + hyp_idx = topk_hyp_indexes[k] + hyp = A[i][hyp_idx] + + new_ys = hyp.ys[:] + new_token = topk_token_indexes[k] + if new_token != blank_id: + new_ys.append(new_token) + + new_log_prob = topk_log_probs[k] + new_hyp = Hypothesis(ys=new_ys, log_prob=new_log_prob) + B[i].add(new_hyp) + + for i in range(batch_size): + streams[i].hyps = B[i] + + +def fast_beam_search_one_best( + model: nn.Module, + streams: List[Stream], + encoder_out: torch.Tensor, + processed_lens: torch.Tensor, + beam: float, + max_states: int, + max_contexts: int, +) -> None: + """It limits the maximum number of symbols per frame to 1. + + A lattice is first obtained using modified beam search, and then + the shortest path within the lattice is used as the final output. + + Args: + model: + An instance of `Transducer`. + streams: + A list of stream objects. + encoder_out: + A tensor of shape (N, T, C) from the encoder. + processed_lens: + A tensor of shape (N,) containing the number of processed frames + in `encoder_out` before padding. + beam: + Beam value, similar to the beam used in Kaldi.. + max_states: + Max states per stream per frame. + max_contexts: + Max contexts pre stream per frame. 
+ """ + assert encoder_out.ndim == 3 + + context_size = model.decoder.context_size + vocab_size = model.decoder.vocab_size + + B, T, C = encoder_out.shape + assert B == len(streams) + + config = k2.RnntDecodingConfig( + vocab_size=vocab_size, + decoder_history_len=context_size, + beam=beam, + max_contexts=max_contexts, + max_states=max_states, + ) + individual_streams = [] + for i in range(B): + individual_streams.append(streams[i].rnnt_decoding_stream) + decoding_streams = k2.RnntDecodingStreams(individual_streams, config) + + encoder_out = model.joiner.encoder_proj(encoder_out) + + for t in range(T): + # shape is a RaggedShape of shape (B, context) + # contexts is a Tensor of shape (shape.NumElements(), context_size) + shape, contexts = decoding_streams.get_contexts() + # `nn.Embedding()` in torch below v1.7.1 supports only torch.int64 + contexts = contexts.to(torch.int64) + # decoder_out is of shape (shape.NumElements(), 1, decoder_out_dim) + decoder_out = model.decoder(contexts, need_pad=False) + decoder_out = model.joiner.decoder_proj(decoder_out) + # current_encoder_out is of shape + # (shape.NumElements(), 1, joiner_dim) + # fmt: off + current_encoder_out = torch.index_select( + encoder_out[:, t:t + 1, :], 0, shape.row_ids(1).to(torch.int64) + ) + # fmt: on + logits = model.joiner( + current_encoder_out.unsqueeze(2), + decoder_out.unsqueeze(1), + project_input=False, + ) + logits = logits.squeeze(1).squeeze(1) + log_probs = logits.log_softmax(dim=-1) + decoding_streams.advance(log_probs) + + decoding_streams.terminate_and_flush_to_streams() + + lattice = decoding_streams.format_output(processed_lens.tolist()) + + best_path = one_best_decoding(lattice) + hyps = get_texts(best_path) + + for i in range(B): + streams[i].hyp = hyps[i] + + +def decode_one_chunk( + model: nn.Module, + streams: List[Stream], + params: AttributeDict, + decoding_graph: Optional[k2.Fsa] = None, +) -> List[int]: + """ + Args: + model: + The Transducer model. + streams: + A list of Stream objects. + params: + It is returned by :func:`get_params`. + decoding_graph: + The decoding graph. Can be either a `k2.trivial_graph` or LG, Used + only when --decoding_method is fast_beam_search. + + Returns: + A list of indexes indicating the finished streams. 
+ """ + device = next(model.parameters()).device + + feature_list = [] + feature_len_list = [] + state_list = [] + num_processed_frames_list = [] + + for stream in streams: + # We should first get `stream.num_processed_frames` + # before calling `stream.get_feature_chunk()` + # since `stream.num_processed_frames` would be updated + num_processed_frames_list.append(stream.num_processed_frames) + feature = stream.get_feature_chunk() + feature_len = feature.size(0) + feature_list.append(feature) + feature_len_list.append(feature_len) + state_list.append(stream.states) + + features = pad_sequence( + feature_list, batch_first=True, padding_value=LOG_EPSILON + ).to(device) + feature_lens = torch.tensor(feature_len_list, device=device) + num_processed_frames = torch.tensor( + num_processed_frames_list, device=device + ) + + # Make sure it has at least 1 frame after subsampling + tail_length = params.subsampling_factor + 5 + if features.size(1) < tail_length: + pad_length = tail_length - features.size(1) + feature_lens += pad_length + features = torch.nn.functional.pad( + features, + (0, 0, 0, pad_length), + mode="constant", + value=LOG_EPSILON, + ) + + # Stack states of all streams + states = stack_states(state_list) + + encoder_out, encoder_out_lens, states = model.encoder( + x=features, + x_lens=feature_lens, + states=states, + ) + + if params.decoding_method == "greedy_search": + greedy_search( + model=model, + streams=streams, + encoder_out=encoder_out, + ) + elif params.decoding_method == "modified_beam_search": + modified_beam_search( + model=model, + streams=streams, + encoder_out=encoder_out, + beam=params.beam_size, + ) + elif params.decoding_method == "fast_beam_search": + # feature_len is needed to get partial results. + # The rnnt_decoding_stream for fast_beam_search. + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + processed_lens = ( + num_processed_frames // params.subsampling_factor + + encoder_out_lens + ) + fast_beam_search_one_best( + model=model, + streams=streams, + encoder_out=encoder_out, + processed_lens=processed_lens, + beam=params.beam, + max_contexts=params.max_contexts, + max_states=params.max_states, + ) + else: + raise ValueError( + f"Unsupported decoding method: {params.decoding_method}" + ) + + # Update cached states of each stream + state_list = unstack_states(states) + for i, s in enumerate(state_list): + streams[i].states = s + + finished_streams = [i for i, stream in enumerate(streams) if stream.done] + return finished_streams + + +def create_streaming_feature_extractor() -> Fbank: + """Create a CPU streaming feature extractor. + + At present, we assume it returns a fbank feature extractor with + fixed options. In the future, we will support passing in the options + from outside. + + Returns: + Return a CPU streaming feature extractor. + """ + opts = FbankOptions() + opts.device = "cpu" + opts.frame_opts.dither = 0 + opts.frame_opts.snip_edges = False + opts.frame_opts.samp_freq = 16000 + opts.mel_opts.num_bins = 80 + return Fbank(opts) + + +def decode_dataset( + cuts: CutSet, + model: nn.Module, + params: AttributeDict, + sp: spm.SentencePieceProcessor, + decoding_graph: Optional[k2.Fsa] = None, +): + """Decode dataset. + + Args: + cuts: + Lhotse Cutset containing the dataset to decode. + params: + It is returned by :func:`get_params`. + model: + The Transducer model. + sp: + The BPE model. + decoding_graph: + The decoding graph. Can be either a `k2.trivial_graph` or LG, Used + only when --decoding_method is fast_beam_search. 
+ + Returns: + Return a dict, whose key may be "greedy_search" if greedy search + is used, or it may be "beam_7" if beam size of 7 is used. + Its value is a list of tuples. Each tuple contains two elements: + The first is the reference transcript, and the second is the + predicted result. + """ + device = next(model.parameters()).device + + log_interval = 300 + + fbank = create_streaming_feature_extractor() + + decode_results = [] + streams = [] + for num, cut in enumerate(cuts): + # Each utterance has a Stream. + stream = Stream( + params=params, + cut_id=cut.id, + decoding_graph=decoding_graph, + device=device, + LOG_EPS=LOG_EPSILON, + ) + + stream.states = model.encoder.get_init_states(device=device) + + audio: np.ndarray = cut.load_audio() + # audio.shape: (1, num_samples) + assert len(audio.shape) == 2 + assert audio.shape[0] == 1, "Should be single channel" + assert audio.dtype == np.float32, audio.dtype + # The trained model is using normalized samples + assert audio.max() <= 1, "Should be normalized to [-1, 1])" + + samples = torch.from_numpy(audio).squeeze(0) + feature = fbank(samples) + stream.set_feature(feature) + stream.ground_truth = cut.supervisions[0].text + + streams.append(stream) + + while len(streams) >= params.num_decode_streams: + finished_streams = decode_one_chunk( + model=model, + streams=streams, + params=params, + decoding_graph=decoding_graph, + ) + + for i in sorted(finished_streams, reverse=True): + decode_results.append( + ( + streams[i].id, + streams[i].ground_truth.split(), + sp.decode(streams[i].decoding_result()).split(), + ) + ) + del streams[i] + + if num % log_interval == 0: + logging.info(f"Cuts processed until now is {num}.") + + while len(streams) > 0: + finished_streams = decode_one_chunk( + model=model, + streams=streams, + params=params, + decoding_graph=decoding_graph, + ) + + for i in sorted(finished_streams, reverse=True): + decode_results.append( + ( + streams[i].id, + streams[i].ground_truth.split(), + sp.decode(streams[i].decoding_result()).split(), + ) + ) + del streams[i] + + if params.decoding_method == "greedy_search": + key = "greedy_search" + elif params.decoding_method == "fast_beam_search": + key = ( + f"beam_{params.beam}_" + f"max_contexts_{params.max_contexts}_" + f"max_states_{params.max_states}" + ) + else: + key = f"beam_size_{params.beam_size}" + + return {key: decode_results} + + +def save_results( + params: AttributeDict, + test_set_name: str, + results_dict: Dict[str, List[Tuple[List[str], List[str]]]], +): + test_set_wers = dict() + for key, results in results_dict.items(): + recog_path = ( + params.res_dir / f"recogs-{test_set_name}-{key}-{params.suffix}.txt" + ) + store_transcripts(filename=recog_path, texts=sorted(results)) + logging.info(f"The transcripts are stored in {recog_path}") + + # The following prints out WERs, per-word error statistics and aligned + # ref/hyp pairs. 
+    errs_filename = (
+        params.res_dir / f"errs-{test_set_name}-{key}-{params.suffix}.txt"
+    )
+    with open(errs_filename, "w") as f:
+        wer = write_error_stats(
+            f, f"{test_set_name}-{key}", results, enable_log=True
+        )
+        test_set_wers[key] = wer
+
+    logging.info("Wrote detailed error stats to {}".format(errs_filename))
+
+    test_set_wers = sorted(test_set_wers.items(), key=lambda x: x[1])
+    errs_info = (
+        params.res_dir
+        / f"wer-summary-{test_set_name}-{key}-{params.suffix}.txt"
+    )
+    with open(errs_info, "w") as f:
+        print("settings\tWER", file=f)
+        for key, val in test_set_wers:
+            print("{}\t{}".format(key, val), file=f)
+
+    s = "\nFor {}, WER of different settings are:\n".format(test_set_name)
+    note = "\tbest for {}".format(test_set_name)
+    for key, val in test_set_wers:
+        s += "{}\t{}{}\n".format(key, val, note)
+        note = ""
+    logging.info(s)
+
+
+@torch.no_grad()
+def main():
+    parser = get_parser()
+    LibriSpeechAsrDataModule.add_arguments(parser)
+    args = parser.parse_args()
+    args.exp_dir = Path(args.exp_dir)
+
+    params = get_params()
+    params.update(vars(args))
+
+    assert params.decoding_method in (
+        "greedy_search",
+        "fast_beam_search",
+        "modified_beam_search",
+    )
+    params.res_dir = params.exp_dir / "streaming" / params.decoding_method
+
+    if params.iter > 0:
+        params.suffix = f"iter-{params.iter}-avg-{params.avg}"
+    else:
+        params.suffix = f"epoch-{params.epoch}-avg-{params.avg}"
+
+    if "fast_beam_search" in params.decoding_method:
+        params.suffix += f"-beam-{params.beam}"
+        params.suffix += f"-max-contexts-{params.max_contexts}"
+        params.suffix += f"-max-states-{params.max_states}"
+    elif "beam_search" in params.decoding_method:
+        params.suffix += (
+            f"-{params.decoding_method}-beam-size-{params.beam_size}"
+        )
+    else:
+        params.suffix += f"-context-{params.context_size}"
+        params.suffix += f"-max-sym-per-frame-{params.max_sym_per_frame}"
+
+    if params.use_averaged_model:
+        params.suffix += "-use-averaged-model"
+
+    setup_logger(f"{params.res_dir}/log-streaming-decode")
+    logging.info("Decoding started")
+
+    device = torch.device("cpu")
+    if torch.cuda.is_available():
+        device = torch.device("cuda", 0)
+
+    logging.info(f"Device: {device}")
+
+    sp = spm.SentencePieceProcessor()
+    sp.load(params.bpe_model)
+
+    # <blk> and <unk> are defined in local/train_bpe_model.py
+    params.blank_id = sp.piece_to_id("<blk>")
+    params.unk_id = sp.piece_to_id("<unk>")
+    params.vocab_size = sp.get_piece_size()
+
+    params.device = device
+
+    logging.info(params)
+
+    logging.info("About to create model")
+    model = get_transducer_model(params)
+
+    if not params.use_averaged_model:
+        if params.iter > 0:
+            filenames = find_checkpoints(
+                params.exp_dir, iteration=-params.iter
+            )[: params.avg]
+            if len(filenames) == 0:
+                raise ValueError(
+                    f"No checkpoints found for"
+                    f" --iter {params.iter}, --avg {params.avg}"
+                )
+            elif len(filenames) < params.avg:
+                raise ValueError(
+                    f"Not enough checkpoints ({len(filenames)}) found for"
+                    f" --iter {params.iter}, --avg {params.avg}"
+                )
+            logging.info(f"averaging {filenames}")
+            model.to(device)
+            model.load_state_dict(average_checkpoints(filenames, device=device))
+        elif params.avg == 1:
+            load_checkpoint(f"{params.exp_dir}/epoch-{params.epoch}.pt", model)
+        else:
+            start = params.epoch - params.avg + 1
+            filenames = []
+            for i in range(start, params.epoch + 1):
+                if i >= 1:
+                    filenames.append(f"{params.exp_dir}/epoch-{i}.pt")
+            logging.info(f"averaging {filenames}")
+            model.to(device)
+            model.load_state_dict(average_checkpoints(filenames, device=device))
+    else:
+        if 
params.iter > 0: + filenames = find_checkpoints( + params.exp_dir, iteration=-params.iter + )[: params.avg + 1] + if len(filenames) == 0: + raise ValueError( + f"No checkpoints found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + elif len(filenames) < params.avg + 1: + raise ValueError( + f"Not enough checkpoints ({len(filenames)}) found for" + f" --iter {params.iter}, --avg {params.avg}" + ) + filename_start = filenames[-1] + filename_end = filenames[0] + logging.info( + "Calculating the averaged model over iteration checkpoints" + f" from {filename_start} (excluded) to {filename_end}" + ) + model.to(device) + model.load_state_dict( + average_checkpoints_with_averaged_model( + filename_start=filename_start, + filename_end=filename_end, + device=device, + ) + ) + else: + assert params.avg > 0, params.avg + start = params.epoch - params.avg + assert start >= 1, start + filename_start = f"{params.exp_dir}/epoch-{start}.pt" + filename_end = f"{params.exp_dir}/epoch-{params.epoch}.pt" + logging.info( + f"Calculating the averaged model over epoch range from " + f"{start} (excluded) to {params.epoch}" + ) + model.to(device) + model.load_state_dict( + average_checkpoints_with_averaged_model( + filename_start=filename_start, + filename_end=filename_end, + device=device, + ) + ) + + model.eval() + + if params.decoding_method == "fast_beam_search": + decoding_graph = k2.trivial_graph(params.vocab_size - 1, device=device) + else: + decoding_graph = None + + num_param = sum([p.numel() for p in model.parameters()]) + logging.info(f"Number of model parameters: {num_param}") + + librispeech = LibriSpeechAsrDataModule(args) + + test_clean_cuts = librispeech.test_clean_cuts() + test_other_cuts = librispeech.test_other_cuts() + + test_sets = ["test-clean", "test-other"] + test_cuts = [test_clean_cuts, test_other_cuts] + + for test_set, test_cut in zip(test_sets, test_cuts): + results_dict = decode_dataset( + cuts=test_cut, + model=model, + params=params, + sp=sp, + decoding_graph=decoding_graph, + ) + + save_results( + params=params, + test_set_name=test_set, + results_dict=results_dict, + ) + + logging.info("Done!") + + +if __name__ == "__main__": + torch.manual_seed(20220810) + main() diff --git a/egs/librispeech/ASR/lstm_transducer_stateless/test_model.py b/egs/librispeech/ASR/lstm_transducer_stateless/test_model.py new file mode 100755 index 000000000..03dfe1997 --- /dev/null +++ b/egs/librispeech/ASR/lstm_transducer_stateless/test_model.py @@ -0,0 +1,92 @@ +#!/usr/bin/env python3 +# Copyright 2022 Xiaomi Corp. (authors: Fangjun Kuang) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+
+"""
+To run this file, do:
+
+    cd icefall/egs/librispeech/ASR
+    python ./lstm_transducer_stateless/test_model.py
+"""
+
+import os
+from pathlib import Path
+
+import torch
+from export import (
+    export_decoder_model_jit_trace,
+    export_encoder_model_jit_trace,
+    export_joiner_model_jit_trace,
+)
+from lstm import stack_states, unstack_states
+from scaling_converter import convert_scaled_to_non_scaled
+from train import get_params, get_transducer_model
+
+
+def test_model():
+    params = get_params()
+    params.vocab_size = 500
+    params.blank_id = 0
+    params.context_size = 2
+    params.unk_id = 2
+    params.encoder_dim = 512
+    params.rnn_hidden_size = 1024
+    params.num_encoder_layers = 12
+    params.aux_layer_period = 0
+    params.exp_dir = Path("exp_test_model")
+
+    model = get_transducer_model(params)
+    model.eval()
+
+    num_param = sum([p.numel() for p in model.parameters()])
+    print(f"Number of model parameters: {num_param}")
+
+    convert_scaled_to_non_scaled(model, inplace=True)
+
+    if not os.path.exists(params.exp_dir):
+        os.mkdir(params.exp_dir)
+
+    encoder_filename = params.exp_dir / "encoder_jit_trace.pt"
+    export_encoder_model_jit_trace(model.encoder, encoder_filename)
+
+    decoder_filename = params.exp_dir / "decoder_jit_trace.pt"
+    export_decoder_model_jit_trace(model.decoder, decoder_filename)
+
+    joiner_filename = params.exp_dir / "joiner_jit_trace.pt"
+    export_joiner_model_jit_trace(model.joiner, joiner_filename)
+
+    print("The model has been successfully exported using jit.trace.")
+
+
+def test_states_stack_and_unstack():
+    layer, batch, hidden, cell = 12, 100, 512, 1024
+    states = (
+        torch.randn(layer, batch, hidden),
+        torch.randn(layer, batch, cell),
+    )
+    states2 = stack_states(unstack_states(states))
+    assert torch.allclose(states[0], states2[0])
+    assert torch.allclose(states[1], states2[1])
+
+
+def main():
+    test_model()
+    test_states_stack_and_unstack()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/egs/librispeech/ASR/lstm_transducer_stateless/test_scaling_converter.py b/egs/librispeech/ASR/lstm_transducer_stateless/test_scaling_converter.py
new file mode 100644
index 000000000..7567dd58c
--- /dev/null
+++ b/egs/librispeech/ASR/lstm_transducer_stateless/test_scaling_converter.py
@@ -0,0 +1,257 @@
+#!/usr/bin/env python3
+# Copyright 2022 Xiaomi Corp. (authors: Fangjun Kuang)
+#
+# See ../../../../LICENSE for clarification regarding multiple authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +""" +To run this file, do: + + cd icefall/egs/librispeech/ASR + python ./lstm_transducer_stateless/test_scaling_converter.py +""" + +import copy + +import torch +from scaling import ( + ScaledConv1d, + ScaledConv2d, + ScaledEmbedding, + ScaledLinear, + ScaledLSTM, +) +from scaling_converter import ( + convert_scaled_to_non_scaled, + scaled_conv1d_to_conv1d, + scaled_conv2d_to_conv2d, + scaled_embedding_to_embedding, + scaled_linear_to_linear, + scaled_lstm_to_lstm, +) +from train import get_params, get_transducer_model + + +def get_model(): + params = get_params() + params.vocab_size = 500 + params.blank_id = 0 + params.context_size = 2 + params.unk_id = 2 + params.encoder_dim = 512 + params.rnn_hidden_size = 1024 + params.num_encoder_layers = 12 + params.aux_layer_period = -1 + + model = get_transducer_model(params) + return model + + +def test_scaled_linear_to_linear(): + N = 5 + in_features = 10 + out_features = 20 + for bias in [True, False]: + scaled_linear = ScaledLinear( + in_features=in_features, + out_features=out_features, + bias=bias, + ) + linear = scaled_linear_to_linear(scaled_linear) + x = torch.rand(N, in_features) + + y1 = scaled_linear(x) + y2 = linear(x) + assert torch.allclose(y1, y2) + + jit_scaled_linear = torch.jit.script(scaled_linear) + jit_linear = torch.jit.script(linear) + + y3 = jit_scaled_linear(x) + y4 = jit_linear(x) + + assert torch.allclose(y3, y4) + assert torch.allclose(y1, y4) + + +def test_scaled_conv1d_to_conv1d(): + in_channels = 3 + for bias in [True, False]: + scaled_conv1d = ScaledConv1d( + in_channels, + 6, + kernel_size=1, + stride=1, + padding=0, + bias=bias, + ) + + conv1d = scaled_conv1d_to_conv1d(scaled_conv1d) + + x = torch.rand(20, in_channels, 10) + y1 = scaled_conv1d(x) + y2 = conv1d(x) + assert torch.allclose(y1, y2) + + jit_scaled_conv1d = torch.jit.script(scaled_conv1d) + jit_conv1d = torch.jit.script(conv1d) + + y3 = jit_scaled_conv1d(x) + y4 = jit_conv1d(x) + + assert torch.allclose(y3, y4) + assert torch.allclose(y1, y4) + + +def test_scaled_conv2d_to_conv2d(): + in_channels = 1 + for bias in [True, False]: + scaled_conv2d = ScaledConv2d( + in_channels=in_channels, + out_channels=3, + kernel_size=3, + padding=1, + bias=bias, + ) + + conv2d = scaled_conv2d_to_conv2d(scaled_conv2d) + + x = torch.rand(20, in_channels, 10, 20) + y1 = scaled_conv2d(x) + y2 = conv2d(x) + assert torch.allclose(y1, y2) + + jit_scaled_conv2d = torch.jit.script(scaled_conv2d) + jit_conv2d = torch.jit.script(conv2d) + + y3 = jit_scaled_conv2d(x) + y4 = jit_conv2d(x) + + assert torch.allclose(y3, y4) + assert torch.allclose(y1, y4) + + +def test_scaled_embedding_to_embedding(): + scaled_embedding = ScaledEmbedding( + num_embeddings=500, + embedding_dim=10, + padding_idx=0, + ) + embedding = scaled_embedding_to_embedding(scaled_embedding) + + for s in [10, 100, 300, 500, 800, 1000]: + x = torch.randint(low=0, high=500, size=(s,)) + scaled_y = scaled_embedding(x) + y = embedding(x) + assert torch.equal(scaled_y, y) + + +def test_scaled_lstm_to_lstm(): + input_size = 512 + batch_size = 20 + for bias in [True, False]: + for hidden_size in [512, 1024]: + scaled_lstm = ScaledLSTM( + input_size=input_size, + hidden_size=hidden_size, + num_layers=1, + bias=bias, + proj_size=0 if hidden_size == input_size else input_size, + ) + + lstm = scaled_lstm_to_lstm(scaled_lstm) + + x = torch.rand(200, batch_size, input_size) + h0 = torch.randn(1, batch_size, input_size) + c0 = torch.randn(1, batch_size, hidden_size) + + y1, (h1, c1) = scaled_lstm(x, (h0, c0)) + y2, (h2, c2) 
= lstm(x, (h0, c0)) + assert torch.allclose(y1, y2) + assert torch.allclose(h1, h2) + assert torch.allclose(c1, c2) + + jit_scaled_lstm = torch.jit.trace(lstm, (x, (h0, c0))) + y3, (h3, c3) = jit_scaled_lstm(x, (h0, c0)) + assert torch.allclose(y1, y3) + assert torch.allclose(h1, h3) + assert torch.allclose(c1, c3) + + +def test_convert_scaled_to_non_scaled(): + for inplace in [False, True]: + model = get_model() + model.eval() + + orig_model = copy.deepcopy(model) + + converted_model = convert_scaled_to_non_scaled(model, inplace=inplace) + + model = orig_model + + # test encoder + N = 2 + T = 100 + vocab_size = model.decoder.vocab_size + + x = torch.randn(N, T, 80, dtype=torch.float32) + x_lens = torch.full((N,), x.size(1)) + + e1, e1_lens, _ = model.encoder(x, x_lens) + e2, e2_lens, _ = converted_model.encoder(x, x_lens) + + assert torch.all(torch.eq(e1_lens, e2_lens)) + assert torch.allclose(e1, e2), (e1 - e2).abs().max() + + # test decoder + U = 50 + y = torch.randint(low=1, high=vocab_size - 1, size=(N, U)) + + d1 = model.decoder(y) + d2 = model.decoder(y) + + assert torch.allclose(d1, d2) + + # test simple projection + lm1 = model.simple_lm_proj(d1) + am1 = model.simple_am_proj(e1) + + lm2 = converted_model.simple_lm_proj(d2) + am2 = converted_model.simple_am_proj(e2) + + assert torch.allclose(lm1, lm2) + assert torch.allclose(am1, am2) + + # test joiner + e = torch.rand(2, 3, 4, 512) + d = torch.rand(2, 3, 4, 512) + + j1 = model.joiner(e, d) + j2 = converted_model.joiner(e, d) + assert torch.allclose(j1, j2) + + +@torch.no_grad() +def main(): + test_scaled_linear_to_linear() + test_scaled_conv1d_to_conv1d() + test_scaled_conv2d_to_conv2d() + test_scaled_embedding_to_embedding() + test_scaled_lstm_to_lstm() + test_convert_scaled_to_non_scaled() + + +if __name__ == "__main__": + torch.manual_seed(20220730) + main() diff --git a/egs/librispeech/ASR/lstm_transducer_stateless/train.py b/egs/librispeech/ASR/lstm_transducer_stateless/train.py new file mode 100755 index 000000000..3bc8d7843 --- /dev/null +++ b/egs/librispeech/ASR/lstm_transducer_stateless/train.py @@ -0,0 +1,1132 @@ +#!/usr/bin/env python3 +# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang, +# Wei Kang, +# Mingshuang Luo,) +# Zengwei Yao) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +Usage: + +export CUDA_VISIBLE_DEVICES="0,1,2,3" + +./lstm_transducer_stateless/train.py \ + --world-size 4 \ + --num-epochs 30 \ + --start-epoch 1 \ + --exp-dir lstm_transducer_stateless/exp \ + --full-libri 1 \ + --max-duration 300 + +# For mix precision training: + +./lstm_transducer_stateless/train.py \ + --world-size 4 \ + --num-epochs 30 \ + --start-epoch 1 \ + --use-fp16 1 \ + --exp-dir lstm_transducer_stateless/exp \ + --full-libri 1 \ + --max-duration 550 +""" + +import argparse +import copy +import logging +import warnings +from pathlib import Path +from shutil import copyfile +from typing import Any, Dict, Optional, Tuple, Union + +import k2 +import optim +import sentencepiece as spm +import torch +import torch.multiprocessing as mp +import torch.nn as nn +from asr_datamodule import LibriSpeechAsrDataModule +from decoder import Decoder +from joiner import Joiner +from lhotse.cut import Cut +from lhotse.dataset.sampling.base import CutSampler +from lhotse.utils import fix_random_seed +from lstm import RNN +from model import Transducer +from optim import Eden, Eve +from torch import Tensor +from torch.cuda.amp import GradScaler +from torch.nn.parallel import DistributedDataParallel as DDP +from torch.utils.tensorboard import SummaryWriter + +from icefall import diagnostics +from icefall.checkpoint import load_checkpoint, remove_checkpoints +from icefall.checkpoint import save_checkpoint as save_checkpoint_impl +from icefall.checkpoint import ( + save_checkpoint_with_global_batch_idx, + update_averaged_model, +) +from icefall.dist import cleanup_dist, setup_dist +from icefall.env import get_env_info +from icefall.utils import ( + AttributeDict, + MetricsTracker, + display_and_save_batch, + setup_logger, + str2bool, +) + +LRSchedulerType = Union[ + torch.optim.lr_scheduler._LRScheduler, optim.LRScheduler +] + + +def add_model_arguments(parser: argparse.ArgumentParser): + parser.add_argument( + "--num-encoder-layers", + type=int, + default=12, + help="Number of RNN encoder layers..", + ) + + parser.add_argument( + "--encoder-dim", + type=int, + default=512, + help="Encoder output dimesion.", + ) + + parser.add_argument( + "--rnn-hidden-size", + type=int, + default=1024, + help="Hidden dim for LSTM layers.", + ) + + parser.add_argument( + "--aux-layer-period", + type=int, + default=0, + help="""Peroid of auxiliary layers used for randomly combined during training. + If set to 0, will not use the random combiner (Default). + You can set a positive integer to use the random combiner, e.g., 3. + """, + ) + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--world-size", + type=int, + default=1, + help="Number of GPUs for DDP training.", + ) + + parser.add_argument( + "--master-port", + type=int, + default=12354, + help="Master port to use for DDP training.", + ) + + parser.add_argument( + "--tensorboard", + type=str2bool, + default=True, + help="Should various information be logged in tensorboard.", + ) + + parser.add_argument( + "--num-epochs", + type=int, + default=35, + help="Number of epochs to train.", + ) + + parser.add_argument( + "--start-epoch", + type=int, + default=1, + help="""Resume training from this epoch. It should be positive. 
+ If larger than 1, it will load checkpoint from + exp-dir/epoch-{start_epoch-1}.pt + """, + ) + + parser.add_argument( + "--start-batch", + type=int, + default=0, + help="""If positive, --start-epoch is ignored and + it loads the checkpoint from exp-dir/checkpoint-{start_batch}.pt + """, + ) + + parser.add_argument( + "--exp-dir", + type=str, + default="lstm_transducer_stateless/exp", + help="""The experiment dir. + It specifies the directory where all training related + files, e.g., checkpoints, log, etc, are saved + """, + ) + + parser.add_argument( + "--bpe-model", + type=str, + default="data/lang_bpe_500/bpe.model", + help="Path to the BPE model", + ) + + parser.add_argument( + "--initial-lr", + type=float, + default=0.003, + help="""The initial learning rate. This value should not need to be + changed.""", + ) + + parser.add_argument( + "--lr-batches", + type=float, + default=5000, + help="""Number of steps that affects how rapidly the learning rate decreases. + We suggest not to change this.""", + ) + + parser.add_argument( + "--lr-epochs", + type=float, + default=10, + help="""Number of epochs that affects how rapidly the learning rate decreases. + """, + ) + + parser.add_argument( + "--context-size", + type=int, + default=2, + help="The context size in the decoder. 1 means bigram; " + "2 means tri-gram", + ) + + parser.add_argument( + "--prune-range", + type=int, + default=5, + help="The prune range for rnnt loss, it means how many symbols(context)" + "we are using to compute the loss", + ) + + parser.add_argument( + "--lm-scale", + type=float, + default=0.25, + help="The scale to smooth the loss with lm " + "(output of prediction network) part.", + ) + + parser.add_argument( + "--am-scale", + type=float, + default=0.0, + help="The scale to smooth the loss with am (output of encoder network)" + "part.", + ) + + parser.add_argument( + "--simple-loss-scale", + type=float, + default=0.5, + help="To get pruning ranges, we will calculate a simple version" + "loss(joiner is just addition), this simple loss also uses for" + "training (as a regularization item). We will scale the simple loss" + "with this parameter before adding to the final loss.", + ) + + parser.add_argument( + "--seed", + type=int, + default=42, + help="The seed for random generators intended for reproducibility", + ) + + parser.add_argument( + "--print-diagnostics", + type=str2bool, + default=False, + help="Accumulate stats on activations, print them and exit.", + ) + + parser.add_argument( + "--save-every-n", + type=int, + default=4000, + help="""Save checkpoint after processing this number of batches" + periodically. We save checkpoint to exp-dir/ whenever + params.batch_idx_train % save_every_n == 0. The checkpoint filename + has the form: f'exp-dir/checkpoint-{params.batch_idx_train}.pt' + Note: It also saves checkpoint to `exp-dir/epoch-xxx.pt` at the + end of each epoch where `xxx` is the epoch number counting from 0. + """, + ) + + parser.add_argument( + "--keep-last-k", + type=int, + default=20, + help="""Only keep this number of checkpoints on disk. + For instance, if it is 3, there are only 3 checkpoints + in the exp-dir with filenames `checkpoint-xxx.pt`. + It does not affect checkpoints with name `epoch-xxx.pt`. + """, + ) + + parser.add_argument( + "--average-period", + type=int, + default=100, + help="""Update the averaged model, namely `model_avg`, after processing + this number of batches. 
`model_avg` is a separate version of model, + in which each floating-point parameter is the average of all the + parameters from the start of training. Each time we take the average, + we do: `model_avg = model * (average_period / batch_idx_train) + + model_avg * ((batch_idx_train - average_period) / batch_idx_train)`. + """, + ) + + parser.add_argument( + "--use-fp16", + type=str2bool, + default=False, + help="Whether to use half precision training.", + ) + + add_model_arguments(parser) + + return parser + + +def get_params() -> AttributeDict: + """Return a dict containing training parameters. + + All training related parameters that are not passed from the commandline + are saved in the variable `params`. + + Commandline options are merged into `params` after they are parsed, so + you can also access them via `params`. + + Explanation of options saved in `params`: + + - best_train_loss: Best training loss so far. It is used to select + the model that has the lowest training loss. It is + updated during the training. + + - best_valid_loss: Best validation loss so far. It is used to select + the model that has the lowest validation loss. It is + updated during the training. + + - best_train_epoch: It is the epoch that has the best training loss. + + - best_valid_epoch: It is the epoch that has the best validation loss. + + - batch_idx_train: Used to writing statistics to tensorboard. It + contains number of batches trained so far across + epochs. + + - log_interval: Print training loss if batch_idx % log_interval` is 0 + + - reset_interval: Reset statistics if batch_idx % reset_interval is 0 + + - valid_interval: Run validation if batch_idx % valid_interval is 0 + + - feature_dim: The model input dim. It has to match the one used + in computing features. + + - subsampling_factor: The subsampling factor for the model. + + - num_decoder_layers: Number of decoder layer of transformer decoder. + + - warm_step: The warm_step for Noam optimizer. 
+ """ + params = AttributeDict( + { + "best_train_loss": float("inf"), + "best_valid_loss": float("inf"), + "best_train_epoch": -1, + "best_valid_epoch": -1, + "batch_idx_train": 0, + "log_interval": 50, + "reset_interval": 200, + "valid_interval": 3000, # For the 100h subset, use 800 + # parameters for conformer + "feature_dim": 80, + "subsampling_factor": 4, + "dim_feedforward": 2048, + # parameters for decoder + "decoder_dim": 512, + # parameters for joiner + "joiner_dim": 512, + # parameters for Noam + "model_warm_step": 3000, # arg given to model, not for lrate + "env_info": get_env_info(), + } + ) + + return params + + +def get_encoder_model(params: AttributeDict) -> nn.Module: + encoder = RNN( + num_features=params.feature_dim, + subsampling_factor=params.subsampling_factor, + d_model=params.encoder_dim, + rnn_hidden_size=params.rnn_hidden_size, + dim_feedforward=params.dim_feedforward, + num_encoder_layers=params.num_encoder_layers, + aux_layer_period=params.aux_layer_period, + ) + return encoder + + +def get_decoder_model(params: AttributeDict) -> nn.Module: + decoder = Decoder( + vocab_size=params.vocab_size, + decoder_dim=params.decoder_dim, + blank_id=params.blank_id, + context_size=params.context_size, + ) + return decoder + + +def get_joiner_model(params: AttributeDict) -> nn.Module: + joiner = Joiner( + encoder_dim=params.encoder_dim, + decoder_dim=params.decoder_dim, + joiner_dim=params.joiner_dim, + vocab_size=params.vocab_size, + ) + return joiner + + +def get_transducer_model(params: AttributeDict) -> nn.Module: + encoder = get_encoder_model(params) + decoder = get_decoder_model(params) + joiner = get_joiner_model(params) + + model = Transducer( + encoder=encoder, + decoder=decoder, + joiner=joiner, + encoder_dim=params.encoder_dim, + decoder_dim=params.decoder_dim, + joiner_dim=params.joiner_dim, + vocab_size=params.vocab_size, + ) + return model + + +def load_checkpoint_if_available( + params: AttributeDict, + model: nn.Module, + model_avg: nn.Module = None, + optimizer: Optional[torch.optim.Optimizer] = None, + scheduler: Optional[LRSchedulerType] = None, +) -> Optional[Dict[str, Any]]: + """Load checkpoint from file. + + If params.start_batch is positive, it will load the checkpoint from + `params.exp_dir/checkpoint-{params.start_batch}.pt`. Otherwise, if + params.start_epoch is larger than 1, it will load the checkpoint from + `params.start_epoch - 1`. + + Apart from loading state dict for `model` and `optimizer` it also updates + `best_train_epoch`, `best_train_loss`, `best_valid_epoch`, + and `best_valid_loss` in `params`. + + Args: + params: + The return value of :func:`get_params`. + model: + The training model. + model_avg: + The stored model averaged from the start of training. + optimizer: + The optimizer that we are using. + scheduler: + The scheduler that we are using. + Returns: + Return a dict containing previously saved training info. + """ + if params.start_batch > 0: + filename = params.exp_dir / f"checkpoint-{params.start_batch}.pt" + elif params.start_epoch > 1: + filename = params.exp_dir / f"epoch-{params.start_epoch-1}.pt" + else: + return None + + assert filename.is_file(), f"{filename} does not exist!" 
+ + saved_params = load_checkpoint( + filename, + model=model, + model_avg=model_avg, + optimizer=optimizer, + scheduler=scheduler, + ) + + keys = [ + "best_train_epoch", + "best_valid_epoch", + "batch_idx_train", + "best_train_loss", + "best_valid_loss", + ] + for k in keys: + params[k] = saved_params[k] + + if params.start_batch > 0: + if "cur_epoch" in saved_params: + params["start_epoch"] = saved_params["cur_epoch"] + + if "cur_batch_idx" in saved_params: + params["cur_batch_idx"] = saved_params["cur_batch_idx"] + + return saved_params + + +def save_checkpoint( + params: AttributeDict, + model: Union[nn.Module, DDP], + model_avg: Optional[nn.Module] = None, + optimizer: Optional[torch.optim.Optimizer] = None, + scheduler: Optional[LRSchedulerType] = None, + sampler: Optional[CutSampler] = None, + scaler: Optional[GradScaler] = None, + rank: int = 0, +) -> None: + """Save model, optimizer, scheduler and training stats to file. + + Args: + params: + It is returned by :func:`get_params`. + model: + The training model. + model_avg: + The stored model averaged from the start of training. + optimizer: + The optimizer used in the training. + sampler: + The sampler for the training dataset. + scaler: + The scaler used for mix precision training. + """ + if rank != 0: + return + filename = params.exp_dir / f"epoch-{params.cur_epoch}.pt" + save_checkpoint_impl( + filename=filename, + model=model, + model_avg=model_avg, + params=params, + optimizer=optimizer, + scheduler=scheduler, + sampler=sampler, + scaler=scaler, + rank=rank, + ) + + if params.best_train_epoch == params.cur_epoch: + best_train_filename = params.exp_dir / "best-train-loss.pt" + copyfile(src=filename, dst=best_train_filename) + + if params.best_valid_epoch == params.cur_epoch: + best_valid_filename = params.exp_dir / "best-valid-loss.pt" + copyfile(src=filename, dst=best_valid_filename) + + +def compute_loss( + params: AttributeDict, + model: Union[nn.Module, DDP], + sp: spm.SentencePieceProcessor, + batch: dict, + is_training: bool, + warmup: float = 1.0, +) -> Tuple[Tensor, MetricsTracker]: + """ + Compute RNN-T loss given the model and its inputs. + + Args: + params: + Parameters for training. See :func:`get_params`. + model: + The model for training. It is an instance of Conformer in our case. + batch: + A batch of data. See `lhotse.dataset.K2SpeechRecognitionDataset()` + for the content in it. + is_training: + True for training. False for validation. When it is True, this + function enables autograd during computation; when it is False, it + disables autograd. + warmup: a floating point value which increases throughout training; + values >= 1.0 are fully warmed up and have all modules present. 
+    """
+    device = (
+        model.device
+        if isinstance(model, DDP)
+        else next(model.parameters()).device
+    )
+    feature = batch["inputs"]
+    # at entry, feature is (N, T, C)
+    assert feature.ndim == 3
+    feature = feature.to(device)
+
+    supervisions = batch["supervisions"]
+    feature_lens = supervisions["num_frames"].to(device)
+
+    texts = batch["supervisions"]["text"]
+    y = sp.encode(texts, out_type=int)
+    y = k2.RaggedTensor(y).to(device)
+
+    with torch.set_grad_enabled(is_training):
+        simple_loss, pruned_loss = model(
+            x=feature,
+            x_lens=feature_lens,
+            y=y,
+            prune_range=params.prune_range,
+            am_scale=params.am_scale,
+            lm_scale=params.lm_scale,
+            warmup=warmup,
+            reduction="none",
+        )
+        simple_loss_is_finite = torch.isfinite(simple_loss)
+        pruned_loss_is_finite = torch.isfinite(pruned_loss)
+        is_finite = simple_loss_is_finite & pruned_loss_is_finite
+        if not torch.all(is_finite):
+            logging.info(
+                "Not all losses are finite!\n"
+                f"simple_loss: {simple_loss}\n"
+                f"pruned_loss: {pruned_loss}"
+            )
+            display_and_save_batch(batch, params=params, sp=sp)
+            simple_loss = simple_loss[simple_loss_is_finite]
+            pruned_loss = pruned_loss[pruned_loss_is_finite]
+
+            # If either all simple_loss or pruned_loss is inf or nan,
+            # we stop the training process by raising an exception
+            if torch.all(~simple_loss_is_finite) or torch.all(
+                ~pruned_loss_is_finite
+            ):
+                raise ValueError(
+                    "There are too many utterances in this batch "
+                    "leading to inf or nan losses."
+                )
+
+        simple_loss = simple_loss.sum()
+        pruned_loss = pruned_loss.sum()
+        # after the main warmup step, we keep pruned_loss_scale small
+        # for the same amount of time (model_warm_step), to avoid
+        # overwhelming the simple_loss and causing it to diverge,
+        # in case it had not fully learned the alignment yet.
+        pruned_loss_scale = (
+            0.0
+            if warmup < 1.0
+            else (0.1 if warmup > 1.0 and warmup < 2.0 else 1.0)
+        )
+        loss = (
+            params.simple_loss_scale * simple_loss
+            + pruned_loss_scale * pruned_loss
+        )
+
+    assert loss.requires_grad == is_training
+
+    info = MetricsTracker()
+    with warnings.catch_warnings():
+        warnings.simplefilter("ignore")
+        # info["frames"] is an approximate number for two reasons:
+        # (1) The actual subsampling factor is ((lens - 1) // 2 - 1) // 2
+        # (2) If some utterances in the batch lead to inf/nan loss, they
+        #     are filtered out.
+        info["frames"] = (
+            (feature_lens // params.subsampling_factor).sum().item()
+        )
+
+        # `utt_duration` and `utt_pad_proportion` would be normalized by `utterances`  # noqa
+        info["utterances"] = feature.size(0)
+        # averaged input duration in frames over utterances
+        info["utt_duration"] = feature_lens.sum().item()
+        # averaged padding proportion over utterances
+        info["utt_pad_proportion"] = (
+            ((feature.size(1) - feature_lens) / feature.size(1)).sum().item()
+        )
+
+    # Note: We use reduction=sum while computing the loss.
+ info["loss"] = loss.detach().cpu().item() + info["simple_loss"] = simple_loss.detach().cpu().item() + info["pruned_loss"] = pruned_loss.detach().cpu().item() + + return loss, info + + +def compute_validation_loss( + params: AttributeDict, + model: Union[nn.Module, DDP], + sp: spm.SentencePieceProcessor, + valid_dl: torch.utils.data.DataLoader, + world_size: int = 1, +) -> MetricsTracker: + """Run the validation process.""" + model.eval() + + tot_loss = MetricsTracker() + + for batch_idx, batch in enumerate(valid_dl): + loss, loss_info = compute_loss( + params=params, + model=model, + sp=sp, + batch=batch, + is_training=False, + ) + assert loss.requires_grad is False + tot_loss = tot_loss + loss_info + + if world_size > 1: + tot_loss.reduce(loss.device) + + loss_value = tot_loss["loss"] / tot_loss["frames"] + if loss_value < params.best_valid_loss: + params.best_valid_epoch = params.cur_epoch + params.best_valid_loss = loss_value + + return tot_loss + + +def train_one_epoch( + params: AttributeDict, + model: Union[nn.Module, DDP], + optimizer: torch.optim.Optimizer, + scheduler: LRSchedulerType, + sp: spm.SentencePieceProcessor, + train_dl: torch.utils.data.DataLoader, + valid_dl: torch.utils.data.DataLoader, + scaler: GradScaler, + model_avg: Optional[nn.Module] = None, + tb_writer: Optional[SummaryWriter] = None, + world_size: int = 1, + rank: int = 0, +) -> None: + """Train the model for one epoch. + + The training loss from the mean of all frames is saved in + `params.train_loss`. It runs the validation process every + `params.valid_interval` batches. + + Args: + params: + It is returned by :func:`get_params`. + model: + The model for training. + optimizer: + The optimizer we are using. + scheduler: + The learning rate scheduler, we call step() every step. + train_dl: + Dataloader for the training dataset. + valid_dl: + Dataloader for the validation dataset. + scaler: + The scaler used for mix precision training. + model_avg: + The stored model averaged from the start of training. + tb_writer: + Writer to write log messages to tensorboard. + world_size: + Number of nodes in DDP training. If it is 1, DDP is disabled. + rank: + The rank of the node in DDP training. If no DDP is used, it should + be set to 0. + """ + model.train() + + tot_loss = MetricsTracker() + + cur_batch_idx = params.get("cur_batch_idx", 0) + + for batch_idx, batch in enumerate(train_dl): + if batch_idx < cur_batch_idx: + continue + cur_batch_idx = batch_idx + + params.batch_idx_train += 1 + batch_size = len(batch["supervisions"]["text"]) + + try: + with torch.cuda.amp.autocast(enabled=params.use_fp16): + loss, loss_info = compute_loss( + params=params, + model=model, + sp=sp, + batch=batch, + is_training=True, + warmup=(params.batch_idx_train / params.model_warm_step), + ) + # summary stats + tot_loss = (tot_loss * (1 - 1 / params.reset_interval)) + loss_info + + # NOTE: We use reduction==sum and loss is computed over utterances + # in the batch and there is no normalization to it so far. 
+ scaler.scale(loss).backward() + scheduler.step_batch(params.batch_idx_train) + scaler.step(optimizer) + scaler.update() + optimizer.zero_grad() + except: # noqa + display_and_save_batch(batch, params=params, sp=sp) + raise + + if params.print_diagnostics and batch_idx == 30: + return + + if ( + rank == 0 + and params.batch_idx_train > 0 + and params.batch_idx_train % params.average_period == 0 + ): + update_averaged_model( + params=params, + model_cur=model, + model_avg=model_avg, + ) + + if ( + params.batch_idx_train > 0 + and params.batch_idx_train % params.save_every_n == 0 + ): + params.cur_batch_idx = batch_idx + save_checkpoint_with_global_batch_idx( + out_dir=params.exp_dir, + global_batch_idx=params.batch_idx_train, + model=model, + model_avg=model_avg, + params=params, + optimizer=optimizer, + scheduler=scheduler, + sampler=train_dl.sampler, + scaler=scaler, + rank=rank, + ) + del params.cur_batch_idx + remove_checkpoints( + out_dir=params.exp_dir, + topk=params.keep_last_k, + rank=rank, + ) + + if batch_idx % params.log_interval == 0: + cur_lr = scheduler.get_last_lr()[0] + logging.info( + f"Epoch {params.cur_epoch}, " + f"batch {batch_idx}, loss[{loss_info}], " + f"tot_loss[{tot_loss}], batch size: {batch_size}, " + f"lr: {cur_lr:.2e}" + ) + + if tb_writer is not None: + tb_writer.add_scalar( + "train/learning_rate", cur_lr, params.batch_idx_train + ) + + loss_info.write_summary( + tb_writer, "train/current_", params.batch_idx_train + ) + tot_loss.write_summary( + tb_writer, "train/tot_", params.batch_idx_train + ) + + if batch_idx > 0 and batch_idx % params.valid_interval == 0: + logging.info("Computing validation loss") + valid_info = compute_validation_loss( + params=params, + model=model, + sp=sp, + valid_dl=valid_dl, + world_size=world_size, + ) + model.train() + logging.info(f"Epoch {params.cur_epoch}, validation: {valid_info}") + if tb_writer is not None: + valid_info.write_summary( + tb_writer, "train/valid_", params.batch_idx_train + ) + + loss_value = tot_loss["loss"] / tot_loss["frames"] + params.train_loss = loss_value + if params.train_loss < params.best_train_loss: + params.best_train_epoch = params.cur_epoch + params.best_train_loss = params.train_loss + + +def run(rank, world_size, args): + """ + Args: + rank: + It is a value between 0 and `world_size-1`, which is + passed automatically by `mp.spawn()` in :func:`main`. + The node with rank 0 is responsible for saving checkpoint. + world_size: + Number of GPUs for DDP training. 
+      args:
+        The return value of get_parser().parse_args()
+    """
+    params = get_params()
+    params.update(vars(args))
+    if params.full_libri is False:
+        params.valid_interval = 800
+
+    fix_random_seed(params.seed)
+    if world_size > 1:
+        setup_dist(rank, world_size, params.master_port)
+
+    setup_logger(f"{params.exp_dir}/log/log-train")
+    logging.info("Training started")
+
+    if args.tensorboard and rank == 0:
+        tb_writer = SummaryWriter(log_dir=f"{params.exp_dir}/tensorboard")
+    else:
+        tb_writer = None
+
+    device = torch.device("cpu")
+    if torch.cuda.is_available():
+        device = torch.device("cuda", rank)
+    logging.info(f"Device: {device}")
+
+    sp = spm.SentencePieceProcessor()
+    sp.load(params.bpe_model)
+
+    # <blk> is defined in local/train_bpe_model.py
+    params.blank_id = sp.piece_to_id("<blk>")
+    params.vocab_size = sp.get_piece_size()
+
+    logging.info(params)
+
+    logging.info("About to create model")
+    model = get_transducer_model(params)
+
+    num_param = sum([p.numel() for p in model.parameters()])
+    logging.info(f"Number of model parameters: {num_param}")
+
+    assert params.save_every_n >= params.average_period
+    model_avg: Optional[nn.Module] = None
+    if rank == 0:
+        # model_avg is only used with rank 0
+        model_avg = copy.deepcopy(model)
+
+    assert params.start_epoch > 0, params.start_epoch
+    checkpoints = load_checkpoint_if_available(
+        params=params, model=model, model_avg=model_avg
+    )
+
+    model.to(device)
+    if world_size > 1:
+        logging.info("Using DDP")
+        model = DDP(model, device_ids=[rank])
+
+    optimizer = Eve(model.parameters(), lr=params.initial_lr)
+
+    scheduler = Eden(optimizer, params.lr_batches, params.lr_epochs)
+
+    if checkpoints and "optimizer" in checkpoints:
+        logging.info("Loading optimizer state dict")
+        optimizer.load_state_dict(checkpoints["optimizer"])
+
+    if (
+        checkpoints
+        and "scheduler" in checkpoints
+        and checkpoints["scheduler"] is not None
+    ):
+        logging.info("Loading scheduler state dict")
+        scheduler.load_state_dict(checkpoints["scheduler"])
+
+    # # overwrite it
+    # scheduler.base_lrs = [params.initial_lr for _ in scheduler.base_lrs]
+    # print(scheduler.base_lrs)
+
+    if params.print_diagnostics:
+        diagnostic = diagnostics.attach_diagnostics(model)
+
+    librispeech = LibriSpeechAsrDataModule(args)
+
+    train_cuts = librispeech.train_clean_100_cuts()
+    if params.full_libri:
+        train_cuts += librispeech.train_clean_360_cuts()
+        train_cuts += librispeech.train_other_500_cuts()
+
+    def remove_short_and_long_utt(c: Cut):
+        # Keep only utterances with duration between 1 second and 20 seconds
+        #
+        # Caution: There is a reason to select 20.0 here.
Please see + # ../local/display_manifest_statistics.py + # + # You should use ../local/display_manifest_statistics.py to get + # an utterance duration distribution for your dataset to select + # the threshold + return 1.0 <= c.duration <= 20.0 + + train_cuts = train_cuts.filter(remove_short_and_long_utt) + + if params.start_batch > 0 and checkpoints and "sampler" in checkpoints: + # We only load the sampler's state dict when it loads a checkpoint + # saved in the middle of an epoch + sampler_state_dict = checkpoints["sampler"] + else: + sampler_state_dict = None + + train_dl = librispeech.train_dataloaders( + train_cuts, sampler_state_dict=sampler_state_dict + ) + + valid_cuts = librispeech.dev_clean_cuts() + valid_cuts += librispeech.dev_other_cuts() + valid_dl = librispeech.valid_dataloaders(valid_cuts) + + if not params.print_diagnostics: + scan_pessimistic_batches_for_oom( + model=model, + train_dl=train_dl, + optimizer=optimizer, + sp=sp, + params=params, + warmup=0.0 if params.start_epoch == 1 else 1.0, + ) + + scaler = GradScaler(enabled=params.use_fp16) + if checkpoints and "grad_scaler" in checkpoints: + logging.info("Loading grad scaler state dict") + scaler.load_state_dict(checkpoints["grad_scaler"]) + + for epoch in range(params.start_epoch, params.num_epochs + 1): + scheduler.step_epoch(epoch - 1) + fix_random_seed(params.seed + epoch - 1) + train_dl.sampler.set_epoch(epoch - 1) + + if tb_writer is not None: + tb_writer.add_scalar("train/epoch", epoch, params.batch_idx_train) + + params.cur_epoch = epoch + + train_one_epoch( + params=params, + model=model, + model_avg=model_avg, + optimizer=optimizer, + scheduler=scheduler, + sp=sp, + train_dl=train_dl, + valid_dl=valid_dl, + scaler=scaler, + tb_writer=tb_writer, + world_size=world_size, + rank=rank, + ) + + if params.print_diagnostics: + diagnostic.print_diagnostics() + break + + save_checkpoint( + params=params, + model=model, + model_avg=model_avg, + optimizer=optimizer, + scheduler=scheduler, + sampler=train_dl.sampler, + scaler=scaler, + rank=rank, + ) + + logging.info("Done!") + + if world_size > 1: + torch.distributed.barrier() + cleanup_dist() + + +def scan_pessimistic_batches_for_oom( + model: Union[nn.Module, DDP], + train_dl: torch.utils.data.DataLoader, + optimizer: torch.optim.Optimizer, + sp: spm.SentencePieceProcessor, + params: AttributeDict, + warmup: float, +): + from lhotse.dataset import find_pessimistic_batches + + logging.info( + "Sanity check -- see if any of the batches in epoch 1 would cause OOM." + ) + batches, crit_values = find_pessimistic_batches(train_dl.sampler) + for criterion, cuts in batches.items(): + batch = train_dl.dataset[cuts] + try: + with torch.cuda.amp.autocast(enabled=params.use_fp16): + loss, _ = compute_loss( + params=params, + model=model, + sp=sp, + batch=batch, + is_training=True, + warmup=warmup, + ) + loss.backward() + optimizer.step() + optimizer.zero_grad() + except RuntimeError as e: + if "CUDA out of memory" in str(e): + logging.error( + "Your GPU ran out of memory with the current " + "max_duration setting. We recommend decreasing " + "max_duration and trying again.\n" + f"Failing criterion: {criterion} " + f"(={crit_values[criterion]}) ..." 
+ ) + raise + + +def main(): + parser = get_parser() + LibriSpeechAsrDataModule.add_arguments(parser) + args = parser.parse_args() + args.exp_dir = Path(args.exp_dir) + + world_size = args.world_size + assert world_size >= 1 + if world_size > 1: + mp.spawn(run, args=(world_size, args), nprocs=world_size, join=True) + else: + run(rank=0, world_size=1, args=args) + + +torch.set_num_threads(1) +torch.set_num_interop_threads(1) + +if __name__ == "__main__": + main() diff --git a/egs/librispeech/ASR/pruned_transducer_stateless/model.py b/egs/librispeech/ASR/pruned_transducer_stateless/model.py index e2c9eb789..73b651b3f 100644 --- a/egs/librispeech/ASR/pruned_transducer_stateless/model.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless/model.py @@ -15,6 +15,8 @@ # limitations under the License. +from typing import Tuple + import k2 import torch import torch.nn as nn @@ -67,7 +69,7 @@ class Transducer(nn.Module): am_scale: float = 0.0, lm_scale: float = 0.0, reduction: str = "sum", - ) -> torch.Tensor: + ) -> Tuple[torch.Tensor, torch.Tensor]: """ Args: x: diff --git a/egs/librispeech/ASR/pruned_transducer_stateless/train.py b/egs/librispeech/ASR/pruned_transducer_stateless/train.py index c2e0f1f98..193c5050c 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless/train.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless/train.py @@ -560,17 +560,15 @@ def compute_loss( simple_loss = simple_loss[simple_loss_is_finite] pruned_loss = pruned_loss[pruned_loss_is_finite] - # If the batch contains more than 10 utterances AND - # if either all simple_loss or pruned_loss is inf or nan, + # If either all simple_loss or pruned_loss is inf or nan, # we stop the training process by raising an exception - if feature.size(0) >= 10: - if torch.all(~simple_loss_is_finite) or torch.all( - ~pruned_loss_is_finite - ): - raise ValueError( - "There are too many utterances in this batch " - "leading to inf or nan losses." - ) + if torch.all(~simple_loss_is_finite) or torch.all( + ~pruned_loss_is_finite + ): + raise ValueError( + "There are too many utterances in this batch " + "leading to inf or nan losses." + ) simple_loss = simple_loss.sum() pruned_loss = pruned_loss.sum() diff --git a/egs/librispeech/ASR/pruned_transducer_stateless2/model.py b/egs/librispeech/ASR/pruned_transducer_stateless2/model.py index 452102d21..ba7616c61 100644 --- a/egs/librispeech/ASR/pruned_transducer_stateless2/model.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless2/model.py @@ -15,6 +15,8 @@ # limitations under the License. +from typing import Tuple + import k2 import torch import torch.nn as nn @@ -79,7 +81,7 @@ class Transducer(nn.Module): lm_scale: float = 0.0, warmup: float = 1.0, reduction: str = "sum", - ) -> torch.Tensor: + ) -> Tuple[torch.Tensor, torch.Tensor]: """ Args: x: diff --git a/egs/librispeech/ASR/pruned_transducer_stateless2/scaling.py b/egs/librispeech/ASR/pruned_transducer_stateless2/scaling.py index e93d6e4a3..cc3caecc7 100644 --- a/egs/librispeech/ASR/pruned_transducer_stateless2/scaling.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless2/scaling.py @@ -1,4 +1,4 @@ -# Copyright 2022 Xiaomi Corp. (authors: Daniel Povey) +# Copyright 2022 Xiaomi Corp. 
(authors: Daniel Povey, Zengwei Yao) # # See ../../../../LICENSE for clarification regarding multiple authors # @@ -20,8 +20,9 @@ from itertools import repeat from typing import Optional, Tuple import torch +import torch.backends.cudnn.rnn as rnn import torch.nn as nn -from torch import Tensor +from torch import _VF, Tensor from icefall.utils import is_jit_tracing @@ -379,6 +380,156 @@ class ScaledConv2d(nn.Conv2d): return self._conv_forward(input, self.get_weight()) +class ScaledLSTM(nn.LSTM): + # See docs for ScaledLinear. + # This class implements LSTM with scaling mechanism, using `torch._VF.lstm` + # Please refer to https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/rnn.py + def __init__( + self, + *args, + initial_scale: float = 1.0, + initial_speed: float = 1.0, + **kwargs + ): + if "bidirectional" in kwargs: + assert kwargs["bidirectional"] is False + super(ScaledLSTM, self).__init__(*args, **kwargs) + initial_scale = torch.tensor(initial_scale).log() + self._scales_names = [] + self._scales = [] + for name in self._flat_weights_names: + scale_name = name + "_scale" + self._scales_names.append(scale_name) + param = nn.Parameter(initial_scale.clone().detach()) + setattr(self, scale_name, param) + self._scales.append(param) + + self._reset_parameters( + initial_speed + ) # Overrides the reset_parameters in base class + + def _reset_parameters(self, initial_speed: float): + std = 0.1 / initial_speed + a = (3 ** 0.5) * std + scale = self.hidden_size ** -0.5 + v = scale / std + for idx, name in enumerate(self._flat_weights_names): + if "weight" in name: + nn.init.uniform_(self._flat_weights[idx], -a, a) + with torch.no_grad(): + self._scales[idx] += torch.tensor(v).log() + elif "bias" in name: + nn.init.constant_(self._flat_weights[idx], 0.0) + + def _flatten_parameters(self, flat_weights) -> None: + """Resets parameter data pointer so that they can use faster code paths. + + Right now, this works only if the module is on the GPU and cuDNN is enabled. + Otherwise, it's a no-op. + + This function is modified from https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/rnn.py # noqa + """ + # Short-circuits if _flat_weights is only partially instantiated + if len(flat_weights) != len(self._flat_weights_names): + return + + for w in flat_weights: + if not isinstance(w, Tensor): + return + # Short-circuits if any tensor in flat_weights is not acceptable to cuDNN + # or the tensors in flat_weights are of different dtypes + + first_fw = flat_weights[0] + dtype = first_fw.dtype + for fw in flat_weights: + if ( + not isinstance(fw.data, Tensor) + or not (fw.data.dtype == dtype) + or not fw.data.is_cuda + or not torch.backends.cudnn.is_acceptable(fw.data) + ): + return + + # If any parameters alias, we fall back to the slower, copying code path. This is + # a sufficient check, because overlapping parameter buffers that don't completely + # alias would break the assumptions of the uniqueness check in + # Module.named_parameters(). 
+ unique_data_ptrs = set(p.data_ptr() for p in flat_weights) + if len(unique_data_ptrs) != len(flat_weights): + return + + with torch.cuda.device_of(first_fw): + + # Note: no_grad() is necessary since _cudnn_rnn_flatten_weight is + # an inplace operation on self._flat_weights + with torch.no_grad(): + if torch._use_cudnn_rnn_flatten_weight(): + num_weights = 4 if self.bias else 2 + if self.proj_size > 0: + num_weights += 1 + torch._cudnn_rnn_flatten_weight( + flat_weights, + num_weights, + self.input_size, + rnn.get_cudnn_mode(self.mode), + self.hidden_size, + self.proj_size, + self.num_layers, + self.batch_first, + bool(self.bidirectional), + ) + + def _get_flat_weights(self): + """Get scaled weights, and resets their data pointer.""" + flat_weights = [] + for idx in range(len(self._flat_weights_names)): + flat_weights.append( + self._flat_weights[idx] * self._scales[idx].exp() + ) + self._flatten_parameters(flat_weights) + return flat_weights + + def forward( + self, input: Tensor, hx: Optional[Tuple[Tensor, Tensor]] = None + ): + # This function is modified from https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/rnn.py # noqa + # The change for calling `_VF.lstm()` is: + # self._flat_weights -> self._get_flat_weights() + if hx is None: + h_zeros = torch.zeros( + self.num_layers, + input.size(1), + self.proj_size if self.proj_size > 0 else self.hidden_size, + dtype=input.dtype, + device=input.device, + ) + c_zeros = torch.zeros( + self.num_layers, + input.size(1), + self.hidden_size, + dtype=input.dtype, + device=input.device, + ) + hx = (h_zeros, c_zeros) + + self.check_forward_args(input, hx, None) + result = _VF.lstm( + input, + hx, + self._get_flat_weights(), + self.bias, + self.num_layers, + self.dropout, + self.training, + self.bidirectional, + self.batch_first, + ) + + output = result[0] + hidden = result[1:] + return output, hidden + + class ActivationBalancer(torch.nn.Module): """ Modifies the backpropped derivatives of a function to try to encourage, for @@ -727,8 +878,22 @@ def _test_double_swish_deriv(): torch.autograd.gradcheck(m, x) +def _test_scaled_lstm(): + N, L = 2, 30 + dim_in, dim_hidden = 10, 20 + m = ScaledLSTM(input_size=dim_in, hidden_size=dim_hidden, bias=True) + x = torch.randn(L, N, dim_in) + h0 = torch.randn(1, N, dim_hidden) + c0 = torch.randn(1, N, dim_hidden) + y, (h, c) = m(x, (h0, c0)) + assert y.shape == (L, N, dim_hidden) + assert h.shape == (1, N, dim_hidden) + assert c.shape == (1, N, dim_hidden) + + if __name__ == "__main__": _test_activation_balancer_sign() _test_activation_balancer_magnitude() _test_basic_norm() _test_double_swish_deriv() + _test_scaled_lstm() diff --git a/egs/librispeech/ASR/pruned_transducer_stateless2/train.py b/egs/librispeech/ASR/pruned_transducer_stateless2/train.py index c801bd2bd..5c2f67534 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless2/train.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless2/train.py @@ -621,17 +621,15 @@ def compute_loss( simple_loss = simple_loss[simple_loss_is_finite] pruned_loss = pruned_loss[pruned_loss_is_finite] - # If the batch contains more than 10 utterances AND - # if either all simple_loss or pruned_loss is inf or nan, + # If either all simple_loss or pruned_loss is inf or nan, # we stop the training process by raising an exception - if feature.size(0) >= 10: - if torch.all(~simple_loss_is_finite) or torch.all( - ~pruned_loss_is_finite - ): - raise ValueError( - "There are too many utterances in this batch " - "leading to inf or nan losses." 
- ) + if torch.all(~simple_loss_is_finite) or torch.all( + ~pruned_loss_is_finite + ): + raise ValueError( + "There are too many utterances in this batch " + "leading to inf or nan losses." + ) simple_loss = simple_loss.sum() pruned_loss = pruned_loss.sum() diff --git a/egs/librispeech/ASR/pruned_transducer_stateless3/export.py b/egs/librispeech/ASR/pruned_transducer_stateless3/export.py index 2bb518bcd..fb3db282a 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless3/export.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless3/export.py @@ -603,9 +603,9 @@ def main(): model.to("cpu") model.eval() - convert_scaled_to_non_scaled(model, inplace=True) if params.onnx is True: + convert_scaled_to_non_scaled(model, inplace=True) opset_version = 11 logging.info("Exporting to onnx format") encoder_filename = params.exp_dir / "encoder.onnx" @@ -637,6 +637,7 @@ def main(): all_in_one_filename, ) elif params.jit is True: + convert_scaled_to_non_scaled(model, inplace=True) logging.info("Using torch.jit.script()") # We won't use the forward() method of the model in C++, so just ignore # it here. @@ -660,6 +661,7 @@ def main(): export_joiner_model_jit_trace(model.joiner, joiner_filename) elif params.jit_trace is True: + convert_scaled_to_non_scaled(model, inplace=True) logging.info("Using torch.jit.trace()") encoder_filename = params.exp_dir / "encoder_jit_trace.pt" export_encoder_model_jit_trace(model.encoder, encoder_filename) diff --git a/egs/librispeech/ASR/pruned_transducer_stateless3/model.py b/egs/librispeech/ASR/pruned_transducer_stateless3/model.py index ece340534..0d5f7cc6d 100644 --- a/egs/librispeech/ASR/pruned_transducer_stateless3/model.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless3/model.py @@ -15,7 +15,7 @@ # limitations under the License. -from typing import Optional +from typing import Optional, Tuple import k2 import torch @@ -106,7 +106,7 @@ class Transducer(nn.Module): lm_scale: float = 0.0, warmup: float = 1.0, reduction: str = "sum", - ) -> torch.Tensor: + ) -> Tuple[torch.Tensor, torch.Tensor]: """ Args: x: diff --git a/egs/librispeech/ASR/pruned_transducer_stateless3/scaling_converter.py b/egs/librispeech/ASR/pruned_transducer_stateless3/scaling_converter.py index 992b71dd1..bb54c77a6 100644 --- a/egs/librispeech/ASR/pruned_transducer_stateless3/scaling_converter.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless3/scaling_converter.py @@ -29,7 +29,13 @@ from typing import List import torch import torch.nn as nn -from scaling import ScaledConv1d, ScaledConv2d, ScaledEmbedding, ScaledLinear +from scaling import ( + ScaledConv1d, + ScaledConv2d, + ScaledEmbedding, + ScaledLinear, + ScaledLSTM, +) def scaled_linear_to_linear(scaled_linear: ScaledLinear) -> nn.Linear: @@ -168,6 +174,38 @@ def scaled_embedding_to_embedding( return embedding +def scaled_lstm_to_lstm(scaled_lstm: ScaledLSTM) -> nn.LSTM: + """Convert an instance of ScaledLSTM to nn.LSTM. + + Args: + scaled_lstm: + The layer to be converted. + Returns: + Return an instance of nn.LSTM that has the same `forward()` behavior + of the given `scaled_lstm`. 
+ """ + assert isinstance(scaled_lstm, ScaledLSTM), type(scaled_lstm) + lstm = nn.LSTM( + input_size=scaled_lstm.input_size, + hidden_size=scaled_lstm.hidden_size, + num_layers=scaled_lstm.num_layers, + bias=scaled_lstm.bias, + batch_first=scaled_lstm.batch_first, + dropout=scaled_lstm.dropout, + bidirectional=scaled_lstm.bidirectional, + proj_size=scaled_lstm.proj_size, + ) + + assert lstm._flat_weights_names == scaled_lstm._flat_weights_names + for idx in range(len(scaled_lstm._flat_weights_names)): + scaled_weight = ( + scaled_lstm._flat_weights[idx] * scaled_lstm._scales[idx].exp() + ) + lstm._flat_weights[idx].data.copy_(scaled_weight) + + return lstm + + # Copied from https://pytorch.org/docs/1.9.0/_modules/torch/nn/modules/module.html#Module.get_submodule # get_submodule was added to nn.Module at v1.9.0 def get_submodule(model, target): @@ -218,6 +256,8 @@ def convert_scaled_to_non_scaled(model: nn.Module, inplace: bool = False): d[name] = scaled_conv2d_to_conv2d(m) elif isinstance(m, ScaledEmbedding): d[name] = scaled_embedding_to_embedding(m) + elif isinstance(m, ScaledLSTM): + d[name] = scaled_lstm_to_lstm(m) for k, v in d.items(): if "." in k: diff --git a/egs/librispeech/ASR/pruned_transducer_stateless3/train.py b/egs/librispeech/ASR/pruned_transducer_stateless3/train.py index be12e69ce..c2ef28c7b 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless3/train.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless3/train.py @@ -658,17 +658,15 @@ def compute_loss( simple_loss = simple_loss[simple_loss_is_finite] pruned_loss = pruned_loss[pruned_loss_is_finite] - # If the batch contains more than 10 utterances AND - # if either all simple_loss or pruned_loss is inf or nan, + # If either all simple_loss or pruned_loss is inf or nan, # we stop the training process by raising an exception - if feature.size(0) >= 10: - if torch.all(~simple_loss_is_finite) or torch.all( - ~pruned_loss_is_finite - ): - raise ValueError( - "There are too many utterances in this batch " - "leading to inf or nan losses." - ) + if torch.all(~simple_loss_is_finite) or torch.all( + ~pruned_loss_is_finite + ): + raise ValueError( + "There are too many utterances in this batch " + "leading to inf or nan losses." + ) simple_loss = simple_loss.sum() pruned_loss = pruned_loss.sum() diff --git a/egs/librispeech/ASR/pruned_transducer_stateless4/train.py b/egs/librispeech/ASR/pruned_transducer_stateless4/train.py index 2ba28acd4..13a5b1a51 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless4/train.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless4/train.py @@ -651,17 +651,15 @@ def compute_loss( simple_loss = simple_loss[simple_loss_is_finite] pruned_loss = pruned_loss[pruned_loss_is_finite] - # If the batch contains more than 10 utterances AND - # if either all simple_loss or pruned_loss is inf or nan, + # If either all simple_loss or pruned_loss is inf or nan, # we stop the training process by raising an exception - if feature.size(0) >= 10: - if torch.all(~simple_loss_is_finite) or torch.all( - ~pruned_loss_is_finite - ): - raise ValueError( - "There are too many utterances in this batch " - "leading to inf or nan losses." - ) + if torch.all(~simple_loss_is_finite) or torch.all( + ~pruned_loss_is_finite + ): + raise ValueError( + "There are too many utterances in this batch " + "leading to inf or nan losses." 
+ ) simple_loss = simple_loss.sum() pruned_loss = pruned_loss.sum() diff --git a/egs/librispeech/ASR/pruned_transducer_stateless5/train.py b/egs/librispeech/ASR/pruned_transducer_stateless5/train.py index cee7d2bff..1fa668293 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless5/train.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless5/train.py @@ -679,14 +679,13 @@ def compute_loss( # If the batch contains more than 10 utterances AND # if either all simple_loss or pruned_loss is inf or nan, # we stop the training process by raising an exception - if feature.size(0) >= 10: - if torch.all(~simple_loss_is_finite) or torch.all( - ~pruned_loss_is_finite - ): - raise ValueError( - "There are too many utterances in this batch " - "leading to inf or nan losses." - ) + if torch.all(~simple_loss_is_finite) or torch.all( + ~pruned_loss_is_finite + ): + raise ValueError( + "There are too many utterances in this batch " + "leading to inf or nan losses." + ) simple_loss = simple_loss.sum() pruned_loss = pruned_loss.sum() diff --git a/egs/librispeech/ASR/pruned_transducer_stateless6/model.py b/egs/librispeech/ASR/pruned_transducer_stateless6/model.py index 9de0769d9..06c4b5204 100644 --- a/egs/librispeech/ASR/pruned_transducer_stateless6/model.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless6/model.py @@ -15,16 +15,17 @@ # limitations under the License. +from typing import Tuple + import k2 import torch import torch.nn as nn from encoder_interface import EncoderInterface +from multi_quantization.prediction import JointCodebookLoss from scaling import ScaledLinear from icefall.utils import add_sos -from multi_quantization.prediction import JointCodebookLoss - class Transducer(nn.Module): """It implements https://arxiv.org/pdf/1211.3711.pdf @@ -91,7 +92,7 @@ class Transducer(nn.Module): warmup: float = 1.0, reduction: str = "sum", codebook_indexes: torch.Tensor = None, - ) -> torch.Tensor: + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ Args: x: diff --git a/egs/librispeech/ASR/pruned_transducer_stateless6/train.py b/egs/librispeech/ASR/pruned_transducer_stateless6/train.py index 294fd4c52..25d1c4ca6 100755 --- a/egs/librispeech/ASR/pruned_transducer_stateless6/train.py +++ b/egs/librispeech/ASR/pruned_transducer_stateless6/train.py @@ -655,14 +655,13 @@ def compute_loss( # If the batch contains more than 10 utterances AND # if either all simple_loss or pruned_loss is inf or nan, # we stop the training process by raising an exception - if feature.size(0) >= 10: - if torch.all(~simple_loss_is_finite) or torch.all( - ~pruned_loss_is_finite - ): - raise ValueError( - "There are too many utterances in this batch " - "leading to inf or nan losses." - ) + if torch.all(~simple_loss_is_finite) or torch.all( + ~pruned_loss_is_finite + ): + raise ValueError( + "There are too many utterances in this batch " + "leading to inf or nan losses." + ) simple_loss = simple_loss.sum() pruned_loss = pruned_loss.sum()
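
Note on the ScaledLSTM -> nn.LSTM conversion added in scaling_converter.py above: the converted module reproduces the scaled module's output because ScaledLSTM's forward already multiplies each flat weight by exp(scale), so baking that product into a plain nn.LSTM is exact. The following minimal sketch is not part of the patch; the helper name fold_scales_into_lstm and the toy scales dict are illustrative assumptions that only isolate the weight-folding step.

import torch
import torch.nn as nn


def fold_scales_into_lstm(lstm: nn.LSTM, scales: dict) -> nn.LSTM:
    """Return a plain nn.LSTM whose weights are lstm's weights * exp(scale).

    `scales` maps each flat-weight name (e.g. "weight_ih_l0") to a 0-dim
    log-scale tensor, mimicking the *_scale parameters stored by ScaledLSTM.
    """
    folded = nn.LSTM(
        input_size=lstm.input_size,
        hidden_size=lstm.hidden_size,
        num_layers=lstm.num_layers,
        bias=lstm.bias,
        batch_first=lstm.batch_first,
        dropout=lstm.dropout,
        bidirectional=lstm.bidirectional,
        proj_size=lstm.proj_size,
    )
    with torch.no_grad():
        # _flat_weights_names is a private torch attribute; the patch above
        # relies on it in the same way.
        for name in lstm._flat_weights_names:
            scaled_weight = getattr(lstm, name) * scales[name].exp()
            getattr(folded, name).data.copy_(scaled_weight)
    return folded


if __name__ == "__main__":
    torch.manual_seed(0)
    base = nn.LSTM(input_size=4, hidden_size=8)
    # Log-scales of 0.0 mean scale factors of 1.0, so the folded copy must
    # behave exactly like the base LSTM.
    scales = {name: torch.zeros(()) for name in base._flat_weights_names}
    folded = fold_scales_into_lstm(base, scales)
    x = torch.randn(10, 3, 4)  # (seq_len, batch, input_size)
    y_base, _ = base(x)
    y_folded, _ = folded(x)
    assert torch.allclose(y_base, y_folded)

With all log-scales at zero the folded LSTM matches the base LSTM numerically, which mirrors the allclose checks in test_scaling_converter.py above.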


7~};6JkNzvDHHW4*;e5)>`Og48+qE-*eSBT<0jTN9@LZ6Qh(z__bWpFa^d8*=3re*q}310@7;n*g;B^*7)W^qbNI(FME`IPf5{{jK~O`LE_nK&mZ&-P{4- zciCZ+J74J89y(C=7J`Y~uYC3|Z{Wh)NJcu|v)c+8OJfE1eSZDja=K86iSTfJuo6!W zzhYL;C!s54qzLtb=@k9)mnF>n7qQuPAT%sF1O| zVq0%^v>TaZOd8i}wI?3$V%g{7Pg7K9 zN<(mZ`yrcS9q&t=&(-MDiL#h`{(93$S+Ze)zF7*^*ghOMfk#MU_q>UOyvNhQg}C3s zlS{#mLUVl+$M3?MG$-n~%pw8d%RgCZKr$5m?`J(P-OE=FSVoiSx+w;7@;2Ec%!v{p zJx%G$cNZmd0wvn~d%#3S3HNwEdGmUm89n9VU_k+h$q7y7ER)0f6*s!72V@WIu-e5= z4RU3wOIiTBh@g=S?s$id<{p}8Id9RFO}ae$(SyMjQ{EL!bJIahSw|QnWHwW`#(Vk! z`s%1!K~Q*s2V4{Cco*)Uz`hF}?;4wLD9dYL?a|nd2k9uRQ4Ea-QP{hU_cs8o{^E4u z#Gd;nmXRSBZ^Otg?~{}IxJWn3%^GY8o>M)swWqI?lCOb~pKsW_7G}1Mn`Ke#{?r3b z9BjoM*ONZMH5E5ccZo^n8Bzpi<9>#o?I{}qd~>&?T`tI>}D| zKh^N$-_MXe;{68=WF8Wiit2o!}pzw^Mh01kJ*8o*L zk%a7YG{2oged){%UILzcfC6vyiS|DR(SN!kN#0-G+>7jk(eG~lf&uuNR{_Az3yF5+ z=QJPjc;DF4d}OZq5mXbpDW6$VKClu;KNx_=VIm~b3`7Y6W-8)!;LZQ$KCl?IwKw6m zqvfuzL6RDNNak0+A4ByqD)5i!a?1s@1ST?!E@ov$!+A9$RgtLeZ`qsKK$1W2*k~D` z!>2&)fMXEU=q8UdaVC=QheNTe`Bp>9N%IMb&|T=t^U&sXF`FCXk#guA>!G+K zp`CTXAdmltpmSO_|0@4}x5Nn7=w9>rDK(n_8CLxaqW!J@UZT%GZeEI5-{33`vsT34?E*Ze16x&@%jQx&e zaBkCmXuU@$bXVrQmlYwfjH@X)y|G@rjivDVOu%M28JNzf{GBGA5kojTKti2B^Pv7NV=-Y_l%e}wyV8!vcnEK?0OO(6gV9M% zOq=WW5|7aAj+y-xFF&g+fxd3r=(rB9tw4^MWVyS>%2*IlrcVp6hWCqrzNwv-Z=3k7 zN`p##gfK0D-Bn#=YmeBRPsH%vX6a;!P=OPMf7tWYFEL#&PcF4oAfE6H*~mn#{AVBf ze@DvI)6dVlR{lhES-~=cDONBkQRnA}I5+aFG^=;WZ7|v5G2}Wav0rpcPfzr}6hK&} z>-m!faE!=qtDI6*ET#GF6|PevW&|CCj~Z;%PCk%Mk1r|CKif$GvP&+O06WA|x-yWS zKUusOLYv5&{&Y_YRyb*)C=$7<`GoT?LgUX>B1H3-wEs#Xs!wad5(UMrLTW+d?=?4X&4|rc&tHKB?j_T&s?{F2(@5vOi73PpEIZThE)+TBI*S>+f zxlO4Xjoz?C!dF$_=(E}QEqxu*t`~Xi>0KMPI7LbE^}N_q!4u>d&(5HTb2{CcP4|al6iPWvW)NIepK@kIs+Imnd6o1=@{NNDG*6>ph@sKj_;Wj#x zQbN}xE^>HZu1~MPh~If7n~@&?#O>j@w)D@RrzhG-?GCQA~;}QR`d3G35Fh6&-X3 zNGxH{H*YzuHA2%Bmrz+u8=?S8SnwJ3OG_0lO8QV3=U3n}w^A`^wvF2!azeabSBNcl z11UA){kY?8CEc1o@3-d-(c5!bbjCWK7YQ%W+${QS)mcsxX;oVA8#i2$TIiHF%KHma zw@6&fsgDkO-=b-q!}lj*85*?sZ0^_Xa4rApGKxC%D0;XCzTl>5fA8s%>HBaUBu_h@ zz5uvxavpl@mzRY-vgFY22ZfV^h#Mn|)#&63{y+AB_N%f?(Xj9yRo15d%yRb^W!<=`|*sihXZ4KuY0YzW?u7}5gMNx z=0qw`a(DHjz^hFzLap<+0dDvxjun=A&JugWuE)xln2rj zFR$FKTXTvcvdg?57B!zF#|_-H^=8k-`Q9r3 z$>Om)4ZeV4;fUkCR8)Df^RAC3-zqW0IgxdVZ6RR%JS*8T!rt20*w>dSnWV8*(_l1T zx$5C?p0va%UO*ebp178fX|0198Fv2O#VmRJcw^)P4KN|_^=w6rVPTcBS>WpjOJbX^ zPp6#N03HuTxV2zp&U;YOEx*@^~m_V2dAPzq{b$KdE}jIu5%oE_(m02T_YY#CT3aNwTG;ZzBs|wZgWb z9~Vw_Y#d$;j>%W~Y+Fm@0vO_+0zml*CkEV9ThrTsxE~`|J@qA;j^cS&1l4O{YHY=Z z`EwqW=RBSCqxB~Y)FWAGkx$G=q&qkHakDokOVnc6554tcq2}}BM#&VjT`j&{a#phl zuEuhQ=x6>~Z5;r_%155FB2asNqYRVL4nl{shAYPo=<5IE!=ZQ!OkjJ`F$=@|Q1PCH z_2YYZ)hB7UPUnwcm1a@%AeNQt-xp^vBg~Um3FxrbT|9uZPW$Rb9~Ea+ap?wc7zZHX8v zvsVc(W*if^CNQ*4X*gg7TOpmw1xKHWPCoAv&T(YcbGwJ-j5m`ew!?@qOmv&OiRp=Q z?O`w3dMO_Kv(Ml$$$!WLW(PO!qjH)pq=;o)U_T)n;UNAbg z<~v*?z#8yoX%II5Xm2lvHeu*-f0ibYbc#W8xon{&-dDnHM7sER0alul&58SW6)r@# zkE52~<3iHC7j{g}ow%Z#0Z(8Tv;n3AYH?H@+)`nDiswctRXJ7($eLH?)R)bBKP zmd5mt{4Pk}vg!rt<fm(EP5MF4IG zh2Bn9KC_#qk}$mY!-@4a1K%ZCFdpCeaHhvJ5BUDZHXl@ZmUvf#2Y%bYXIFV-J=i7z zbjHG#KdZvu+Hv|VDtGF<2iW2nHy^wlfa8k!2>og*%npq@ykfurk*q2Yt&UuFK!uMDl-`a?)PE|Ps%Ms+UZI0xnix;Z6F(SbY5x(eEDJ2HNVq62c!tAnz ziJm%-Bc?a{MyddFAfqo||CU0HGEDVp5*zV?v_arjMd2BM?jY(edm*kSNZ(KN)|2(K z{cN3$w4pYPCqo+RGk-XM?e&#c5=#l0%KG4}8Lt)u3Zt*t_mUgM*&*@wh z4iB#?O}4pteL1{NovmyWmmpYU6C-w8>Xb$7c9SMvqpqXZ?h{4E;eF`ANqw4il!MJ-AQS4ngSdGaZ%R#DqF8#_B&9(CGKff$Gs=>kB?OP{-K#=!@sj!do{dA#1d zI`f5fq%|WS=Z`o(9M0JT6!t9&eWVA+J4Yyq>bJEB4Duq)|# zL1e6TmWxp}TgLDVz6D4W>a)%i-?Qh4W%1BKaK_!+1)>7`&R{ag(%hYyI)!;>_kwy> zShY09-dE3M^cE6O;LLH^q6t&}x^y0(I}R5yT_nV~)OR88R^K_fK`(=`O0`H8-W>|R z0gNQ^aN!f{`?K|c 
z4#PR*E__E-m``HPqhCnp!#jNL82_&wnbe$HwE?E+0yx~XVffkmZ-cJa2GTa97Q8N! z;^i4qmoEX%CTNRA#Z6FN=^H3rvR8Q=F6KbT={n`fMThAvoNL!X$wh%VU*LQKOQAeA zhs9avL)0sP3Ky*IlGuMPtzq=eacjIr{d6E6BkOA)6Dd9Ej3bCU0bl}b+;2LoIsdvj zd^mC$kENAq$ysO}v+s?FmvYY=5p7R=XBSlTJ11yX^DhZ)ar56qebG_y_Kv9Y0HB9B zLB!nLR%yf}B%Mk2;~;*xl3CMJK#?zPNHV8W zGZ&R@&|SAAT>J0z{F6H*L$SQg4tZuQ8yNz@Si;0X$ExMRkgzV~o7WZ~i@=#Ne^lzG zRb(ihphXKfM*UBJ{x>;(+#)^m*bKYevlPVHYG^K>7&)81G@S1UvcBJYD<)0TYaTk1 z{#YiHJ|QOGl$o4gq`5~VdxLcTFfclr88rrtf-q1gsyG(lDcSxDzMKf&~0E6ji4 zawow8s0(G#MC1V?1gp5W$?`e)+w|C_;B4S{47;uKXM%zcH?c^;Lqhi5vwROl9$YNsc7pk{WAvpkvZzshIUs4cPMid zlkqr+Q(HZ@x^?qVUR>GIPa{Trc5`z{AY1%;y1g5pNRBcx^XoA$whDWgh$X8aVY^vE zZ%Ww8pC;%12w@1S1#=!Rg}iAhHF_S1_35Ya$&&l%$r$fYbNiN9$ElfSxn;g0`I3mn zNw5JfymE#Zk0t@$gjy>K{5-e{OGw@9EB|X4Hw(O9B;AX#{{cTk2yf3BkEK`)fQ_HO znqyN21@{Ucx%fS_?I6KYIY{ZXU#iS01#(7KV{^`GrQ`IJZnjXo-$^0$j)3Me9=>aQ zpwSbX@v{RTGt?as`J*QGg>aT ziuU}fq)1D+ME@O3iR!R`GWj5oDU7VzPtY%?=8;#0OAgf&QTJ`(yPGIL==qBy%S`+~ zHHhAEWFgRx+jJgr=bmt^_i+WQn5VS9R46diFC4tO%TZY}`?%mu&}XtM@1wF>{&=%we4})@(sQLlOoKFYB z>rs7uJ?|Rg;eN|q+<%a}k8pt8VcNdZ+gGhMDjV6+!*c$YM%NR)b3C93@lmqiMph=2 z%VuYW*Thqjes%kl5nN}`WWSyf?OC=n>vEe>9XQc{l5C?NjzL?HfQCu?FQu+;7nM00 z1s)yD%?oDwk0aA)#wpY$B-}4w@qsmt^d|p_XfVtlATP>s zBWdmk_Yg7Wl08+oe4~@UOega-z^wy|XXj29sxHFSftDodNvj>vyA~6Pg9}lAa^4RQ zO*%;al=7b~y%zV~mXrYvbe*vNda%C)!fgq=O_v9p{@~{wyYnT|RiL#HqfJcn>BWA& zo6zAe!llO{|D{GHQDWr*?tRB_GAfB9KGzKv2BnNlnv+w~rryX<=sL8W{)c~azkQ#E zOc_?U7(Pb@PN5M2@<|&A?yts1m*9@J@p8v^Na*LnkfC4yUfJmXWMu#y;CM~2{V4&@ zWc@Wy+H{IIoqc^f`2Ua9X^=RYI3B7^T+Pc*)Gz8@oLnYTMF50hDL@BIDEM&+s;}Vh z9r$5gpxrDUx-EggiJI?4EMVMZc>tc@PmMHQf7a|mknd(Rz;Sj^{jP;|`212=xqrQe z#Bjv1--)zDY21@wd~AE+f%y!{s@LT5Uvy7rn%5G=V1*{#??X z+!2cGFeJXcdtgT}zyrS2rYQ2~za!KGqz#Kcm00rk8@a#!MFV~;@*gy)ISMc~LM|)= z?bout4LHG4cjg76Lcu$J53d7@K6d^`8}cc+=2{hB{=&%d!*On`3gR{|T<^7V%(!;v z79k{DWG0b{diCc!z+nRX$T0p{LcfdqZga?UKXC*-|EU*3^-j%4YOO;3_LcO>WaaD6J}04&|OVU#*VHl*E?6QVr?2V~h9&8u+zMy-Ltw4&pYwP?V3bprw(4 zt3eJ|j!yFX4Bff8??&1X67mQ9zFE+}hYC=}{0IHIbjN7=>6s|mca0(g^vHa$J*m~E zx7g9RE&=G6ppBCk{@jgU>uO1_IRfk*2Vuz6tj^#)fx-KKF20u$I9Z$WX_TBla`-QP zDZi-r|G}D&J^PZ&chjo_C-qu#WvTeZv4_OS)ceqnK z8{m_{CHZ&w!e0Gvrzzn<83>#v+}&wHm!ki zA#>c15CowHrv@}@tKK618LvH!9)Grpz6LN>I$%UBw=l9n=$#v)=@GsnF4#acoLs2?9> zF39fr1zqp3Uw_m=LI@!IVB%6G!zV}$B)8t;Btk$Z4?JOEG13qxd-)YF9_W~I9Xk~L zh_#qOG-)QnZF1y%oE5gb*?)5ln>uJPSxgj3srK5RSGPy)b$Div6}81iPL@=IZc?g> zN%af^0V>b`wMdk2i!5p`bLW4pnxM{x% z@GvSsnWqW5n74ZJ{XD;)!Uy*ZcwLxcB`I>`JwAxVF*F*j`#=8p)-EC6A%KO$2Q6N3Kr zF5iD}<3??yQzMLriG+fbgnkjkG5=cU_m;px1L^{5;9)lO&l7hAtd$HZhMpg%xJ2kb z$NSIm{&RcpZt_33_aEi_k8%cl$bW3_KeqQD*Xo-G_4l||^Cud4*u`$&{r>;quehtw zfa~^Q6O3ABldyPld_Z`1l!f$u2*PB5=o;_!DT_(e!Tu=>BeT|#6rsF-G$5;ZBxU*0Kk!wR{EV9e_*(q~}}UckdN|Hg;GaRz&HLNsN0 z%a{5xaG;MW88d2qp)E(nFz~D6+ZJ;A+%p5+pImHnxw!G$ZT}YcO2^;5R?W4P1M*uS zep-Y5{Zb+JfLkwG$H*J_<2!^k$a{uK#MYiTmIkkh&sE?!IbM|ieBQUP>0XE7CSgM$ z#6@`EgN1+DKyF}AcC zS(MEF9C+U)Q=+q@#<4$y!H47Fv%-9jD@Ld_Nk$5vdq2PRBTjc4B6QWQ9?~NlLWQ0J zg7}&N9_*h{0t6g%I1nE<8DR}Q_;t+geTykb+A&w9_Y3&9h*;g$Jo&$N{_4`KsN2Sq{D0-VT;~pL*X|)Z+UxQE6m?$G zD-FswDVbxULqS8ZlUw2t*W?ZrTm|^Qw*A`=gk|u6WRy)JmBB)ix|#(z|I8Br!k8ln z#L`2CbVHASe5V)z^4ry_sBgLD5y4GJ0GNyhdG?>3Js{9D{Kf?p{BSMH6wEXeW1Lxvxz z0=KQ#jfHl8V<;y7D_5(rfdH$^@gG(Ahs^(@3L%mIQHB4g!k?D%-F1#PoO z1g4a~n<%!zNsOSx0;tnqnmj#~NxT|q8*U_A0A@27grzL7Bo{ULs_THJ;M`v5)6)6) zz-p*f%yz*mu7S^nr}!6|ZON7^Pwbw%)YtmP@!BCnxS@mDV#h-O%hlnO$V@VUe##?`*Zo3t%qgGr8CL5+SC>^ke-=K=-; z6Ia(#5U|^ZJiw&l&Ps4BcEMUo%NUiXOmRqVnjrS)AR`~3gMxE) zO>|X1L@#=(>n0&C^vngJDGT;1Q1bC!V9otr@_9#SXNQ?@5}B~LoD=m7mlA~(uje1 z3tcR92+eGk+ncyuh^zCpJP&WL4v~O?%?>RA_|9`Ctde$5ABj9av3)era0^jO1DnAk 
zg>-Niyg~mF44i19YL?8OLLfUl7Q)BIQhJKY?c(QiAO3d?_}vjlwgsF^1V^+KcS7Vy zpt>i^*Zzlx?)LOO@Vd={l9_h?@=_%NWjVmbBr=+?+TbT}J{o_C<#;~>pXj~x)!Vk@ znrzDqdM6Ciyad52i{yv}1<#F1xHIaryx5UA=cy~SK!WjP6DrGmi%y#vb z#ZcZ5m}Cjz?^oyXWDklUAwPM9a1D_2oK`7@QnUu_@P2Q&+iC7+P=L;E}649O{zQUqHTDy6K=6dl5DZk{X~QIYOX!h zW$kp^rN1|^CgK-NE_^*HhCO% z6*d6fi!OI$L)=-3_69?TwPyzg8*J~>PmIR01Yr%GcL*i{3TNxx8Bv%AOBq+^2g-+b zzG5varVY0|E?4_xi7q!)mM2QO0vyKdsx8kEFqOs>KXW^G#-AGbT2CL887mwwW>i*n zSHT>#F0Iptnp3S^sU_|Pu-mPG!R@)8u^(~0pE1l4Vsn3?A48Tk7Y4-?p7qj zgrauZR%@Ba_Dak3S)?`{anyK`CRt>Gs`}_tC7`phEr`ozFj7HPgvERbu1beW+C1SlC!uMs}_?RmQk&nfoI*UM=1+KUWzEM#OHlCr*6eA_`Rg&8LNqSddi zrXs7xZ;C>=bUDkI-e8K}-lj)T0(TePX%By6tblyE!@^$ZDuvfuzt>%x-gz)D#1?a3 zj!GfylL9#}poL%A;l|aD! zpoR5neDNEO?{DJu&Nk^039=dTfJGWWpj6M2%bgwR%82(esh(H1&zwXPfs2{--8r3l zu6#Xm8F85&t~bABc2S+kr|BK7KAR0~KMHb!0h$3-vOcdFUo9zJI10#9(}?HFMB0T-uy+f~Y&*fgMT{WuthrzP?lHR_Z2jIS8jp1rGD^2R7rahnS= zZAh8P5!q(IRGTB9i{KdOd2QO4*|AGoe}n)N%pTiVo7k$$9;lj}T-rf9lhPH21=R@`CtE_R$u-pm+fnWovNWWmV_4(Ch z>$rm6u2UHJDOlK}_18wT4;_)_ILmemuVu?L^bI29r^QAt0fve+;ZMnCip++%iB>C5 zs&^z$vH5DOcLz?@6SFzH%_{UxY*`Nj*u(e$`EG--<|Y*QuCEd>Ss92zuc(1&|M6r? z9KPRX-gnf4x7$*gRmiLW#C3-NjM=~H;t*^Ac@T}g?WyeUiUS}Im{^5==f2$DqB6@{ zROZlO^?P=dL^c*vl^H1axmlbxFDE)pA|gE7t;zC{xHzuZ9{Q-{jqDbt^nw2kTHC{P zN4q|=RHdc%)OMkic4H>+=T-Z#XnVe54031iO1|vV{78uch1^#R1?o?qQTo5h>QT;K zG^yXW$7ZWF6}r&ftWU+cx70fdspUEcFT(8RP~N$MFtmp86@D+F|k2K zqmdLtb-}&%?KXVg@yQJ8+6H^yARQt?1Spo;(~AY?aq%=*{&}`yzuCD?+OZgQ&$pXV8?xc?>_8isHtg z*#;nLBkRzZj$34yxzS_@5JL9(p34aZ|2^rjHUmV3taoh74CZKo_!DF;G7HtmIOy4y z1|yld_pPTH6r?v-1o=e)1w^vk>x6n;Wr4+Da*lqT7iFeP^wa^qb~9*Fw!;#bL(DLg z)`zTCA9gTt&bKLGpL{6)X2zyakhcD+`k;V0`D^xtYJQ7N(FiY& z|6-b~m%VaA$0(D&(T8lY?)Fy)=2sI4CA{q0W*q#`<@U>}tSmMr8Yv3n6pza00{As- zu68l2j|LHri+8pd!s_muzla`L*>o;7GREthc{1)`U>n1t`!dkZ`9n|=m*zt?+a|lC zgHKF33b*sD1+QZTbF2gX?BUH;x>748IaMTO$R4JECA(^CFC7dyM^bx%uK1>XOo~-p z788o1Qy#>v#Imi$)-v00uju@wt%AZpn1HFIJZUgbLws!5> zau_3e008XCaw+B4shH#KR@S|Egx8;FW3|`Wihk`ISWb<-=Il5qT&36Jo!sA;viQsw zvl}-znmgL(7#M#bu52~pP4#%0q_VL3Raz2Qmpb*yr)}n=?EyD@`#F=kK7#!yG}?G0 z5B>oCvfTbe{s*~fXr+Q91B+P$ADySycGyf8skCb9l>x4(CPx2Zdy3VMPGtXvNVARFzV$!RwjcVNzG=pO_S=9@REe*I#Q=#cWOMwz&Z_X zX}Cq^wm30HQ_#tdp!$X)tp3|+do(;2bI%U}jx+mnT}d+I;aQ%xFv+SWi>L8=hf&5s zaJ7!>sIpgFNVQ~1w!|3KI|fd1wwJ*}C@RQ%BHuETp@)A{sHuT?I=MTU<5>1px&7#1 z@QLMuFAH503Q{}?z8$weOEcXO9<#LnE^|_JvrcH{6EU+m+agUOz38NSR^{&@F$Hf9 z`?!ceXUsIp9L$pJ^DHDwD#Z&JYDdG%bn8c8N$_Qj^>#`a#>R#dW=v6AZ&M#9UU2}2 z|GUxrTV)3ftv(;rG0|B&7zDr_N+x+@U!wre)}>&Y*Q!4cC9~3`$~@L$N!(3sc|Ck| zl?}-{3ycHRZwkXYuKzp?d+RjDEOOyWq^UoSP>70v)1#foQU_2x+Q+^>nd?ka?fEQQ zgu1`@2}?)6FF9<7^ekX>1&I3#_)8T)&N&GJrgk}}BijyoZzu(0toHAuZQt(VbOW7u zP3v+iFsg3QJX^IMz07b}YWMbXw?UStY<+jqMsI(4i~K1sUsFPk#VMq{omn}D(?Y*z zr?FgDk3x=jh%Y$qdq3hU)TbSgTwJ$V`Iw#8%0?j2vn_c~Vb(%DOk z`<30vRdrvQ_Z4hE6FwccWq0&*Gp)Z&+Fv(pd@p#$ zp_z_j+7P+!X2oHsOp{V0IM?J(t5kW%tlmf*ci7(^cL?YYm$k{zCAK!5qUV21^W$)& zJ>G~KaAEnPwMcqani$D&4DG$GTZ#P z(idA7pAvjNWk0D^I7)FKra{}5M_T1G+xk4(gL+Gy)tf4g$zuMP-iVzVbaUrZc^s^= zd0%|ePN_<=$O;}=uT^EGHSK*@to-0cx$K#v?b~-G{sZ+lE4O;rqqBpjv!XpXjGinefhH-O$|u?B>KDeR?;=35w5+d7|=s{%j*oPt5P5{_p< zw(RtUf(N+rqHOB(SD^<3OjT0aEm`e;Us#Z^ zw@6yHWeXV8U@BMw)2bk~%Yhn0&cX5(2^pt@RD`4YNkOVnCbJD(jhE!G@fBmd-iTn2 zVEKV{N=&iLTD!iH%TP`Y6vyJXS7ggYLzoAN7UdQ}!=`nT(@Ay`x;R|LT+@nc6CuP~ zYGta!4})`Sha9HkVqx+Vde3*hnbeS%CgVOh@p`Mv2jVI=Bfw=Lej6lNK1Vlm(>z+U z*r%r2D(1Eeo0VX0o@vYH>iS)FT4?^3{9nkhMWxf_%^(Or_G>Ps&cJ$Bq5;ezX{n=W zPumdS+#0f|XyxkYPl%Fc742nP1Vgqg@WR(7Sb7x)>zl^vCWQ*79*^YX6FpgEMjnW& zS}KQY`HHwJsGDqcUoLl2S_Sm#)Q+`+n=_8yxZr|I(qY3u^0iJjkArp^hr9tGdg4#|EQHj<(HsLgjXt>1YVtw?VQ>a&vXqE0@qE 
zT{YOsyFs$PNY<08UxH~M>9d9MlHCVO8q}G@?`0RT+{DH)ojhY_O1;m5*^7-OO&>NF zPSO%CcQJQrYX6Q5&Dskw@TM+~#86vAddd z7Z)?^@~+$x{a*YG`&-lmw|w9Hj1=gwnxfE-5hX>qU0ko@H9IDR`P(KmhcA{07+uln zmk((_eoNOK-UtHoD2tI_s=Hm;>Ay{;u{@TGW?PxsnAh^N(B0_vqJL-p0d4~bgnU{* z%Na0@j?CC=*DJeU^3`>p_o~(8IIJ?ZboKiS(NE!!IUH|syoIhS-k?@X;tyXIiT?P7 z32QJw@qO9*bx~Go)w?|#6@eI)%jy6e_ieq5>;adrC)+7FUuZ;LgRi zedz}6y3Z5~XyfK&Cd#$J)v{?KEZ|xzF3ct}hZ^moV(Rqaf(0rZq$XX21!vO_wwHYf z1`jP%CZT36XdAXPR8SYVxzWC*f1xS=uKUgm(um`CPmF-b2ZDpAug%buoin0FO@>m# zX(m(DWGD(f1NT1!&YDW6c8&L%4|^3f#ED6|n;iR`CP??z2Ju{1lbgoY9HH)HJ}$rg zI8eHfFK>*YGruI6?WmIyxzG=pm z_%YzjKDGBjmrRoGotkJ#N-zOUIuz1ozEAW}h);GbPg3THExKa#C%@zZlebJ}`$U~j zF#?Bb;q&#|sR|n&Jaf8q;o2)GNx$4W7u6rC)MqtWNuPg-u(Ci+mLz@dnTe&S<_1Sl zq?#N*7k;U|?x#QDzm-S+wDdj8mz;z(AQ>pJ@|-P)<}6P&1#n4YiSV$g^fK};ZWJF8E)=kF|r#6 z>%m*$b$f6i+#GD~DnFd8w!@aol@-9UGd`{vs_T2ZtrW^J&tMQsWSRIQHVsljf%!nY zPxVCXjaKDc_#r!&Hb4sZQe9VfH+7r!cu-UTi`Jg&cxytUyVB&A(>7$xpA128oOcyy zyHC72#LWLVpGIbAM&oa3JRdnaBftG3Vhn81MS8qe6Uf!C-s!(~JgUaer0PPgX1)(o zu2BF)twuqyFA{Zn)l&P82N(2X*(1MFoJDjBETH-mngdyD+`>XCPCSP63dS0p?JWB$ zdrqI>OOn;Vim7%p8U5ZXZ6kMkD~B+^kt>|Bx%64S&_E$5)*bXJ39?tQV$-$8ay;Nz zt{0@x=bHz)dQDdP(&=Ap@n7tbKg8L(1UmP)AAr~^&WdQz$L;`o z;rLZkX2Mevm&S+9e2D;?T?2~RVO~y+7TlbrK$A&0xDpuZ)ed9;BntEFMU%UZ{JM^9 zOI1H8#-L&K#d5>+^%NdfIkl+lY?|H#V>9+4gGwMQHav+eFZ8oMBp4K;YGmcc&>bWc zEcR>~j$|_q7l4SKaS>F)bCO9WOKz2wM^TKbMh26_EN198D+5eIt%`n1hv})_lJz0L zT)Xv4xLLMn+8FeO_0wx4&Xg0i3+7RrnFb|GP>}#yBB;29cPL-Rkc@V>Wzyw!z#IfL z`}Y#x($U{y=v`+mm`}LRflT+hKOS8k(zLk@(drjF-@Ph?Xc>TmD#xVfA+4o^##j*C zF>_orE$y2vNLJf5q?qpfDE3@d0lK!|p}XJ82qIsdTwi9PC`n39;z z2*(x_j}tuRFp#4iL(ynwbr}VDa3~fk`J3qvri?&y=XI%&U!7>2MyK9!Kbhp5=5ns7 z)UK`|Gp3Pf(x{lBIG4#yM~6&GciK?B@rUQ4#44Qor6=tjZB%29XH7Nhd-G3~9nYuw z%`PUM6#~(={sM{Yfti{-O{_qb`EuBEjR7O&MY7|;?ZC~^kJ7=MV&{j;BqthG0=^y9 zoQDa$muH&%IilWC_Q^0=B$PwLm$?eP8Q~VI)?2Y`fWw^$r1AtDV7IfP-sleO`I04N zmyDGnNRNo`JqZ>+vFyZ`WRcBRjN2H^c@4PW51ewNYVa9#!oUqIYCs|A{Y#ioBY&?l zAQ_*%JWiW3>pUv~^lpOj)mZ}T91!vPInt@c_VN8MS#9)q?$UjUtBr0A>vaqSU?8#J zqm-mVNDx0jG-FPxdonjsCPP_$5=XgiYE=--dF_55DVduIz52ubqn*4yO^`afyX5+* zruV4~@%H9TuG89w%jryU!M3jwB+RSJt1Rpn?H;oH)P}wDp13S)8U^CqjcYLD zR=Z=v7q+=p?23?}KtF0-CckT4Tg(0wVM^coTAR83h93Bwz6MGG2-Hh3|B7*aivpCo zK2O{$gLJb6Fs^O9kFxws6o|nci2bo(iBl z9Y0+CO2D|DE#%$@BMM4Sc^Yhwm@jqm(qJ~TNdE()bA#{1bJ`@*#rG>z=s0wx!HXFY z9J0l2{jfWA;TNZ!iea2BYc8<-H2U{dF6+P9hKEWONz4eCF{v z7mGTsKPgW?Ec%pBvy~DRUB5yI*s{tPccDqUXF(hW68KC$Ef$$Ahf#G?#T6=ao)xhE z@<5r%RL3`Ws^RuV)aibJC0{nt{ady<#yAE$Pt2IO(N(#<4)+}qhWHIe7a*!arj1;& ztC9pJmGlPqRrV#B+TpffOUvz@pJS=NIV_*fi|*K#0sPTzmPk5J1WuM9m<3V!+AlRk z@2=W!EHzUMtyF8TX<7&o|LZzXv>NQ-9U>r^jn3H$7_<09JqR>O9(6UFt&W4oYN>dg zx{ZE^gV7)l3$qIvy@`;!eij?pyEA(`(UANAvt2SP*;)COBu2Dx!ED<`v@Tc9%Z>($ z+KW$JKpNO24R>P{1iE*7;Ohe4>E=SMd!rGwSlng1WXR8a1NZUveu=otY`urGTtmT9 zql;c=;$`|_tqP^L7F$nnF3u0Xp+GM1U+s1Rs%<_MgwAuhD@9O(Z8y1T54-H_g7;O_!iK z9?M`#9*EB+j(A+&f2<$_6dsS+3Q6->H!FqQC1pOgJ$Y&;ueHC`T_viQgX$IgMUtj@ z)j^s19(6ZxtiDvpn=@5`C)Mp{<;c#?D)_#drUkVvEU^2yUEgyeWG#jkJrj~S!3VVl zAc9jA5OJ-RuQ%Si5;)FtPf%5im1wS-*U9xU7PaD4s*KGHx%p(zMEs@~Rst5PDSqwY zj51LA)XC(eUZ@vY7FBLd-H%w@f8l&!q+DyH?IRVp*^JMil3gDK;0i2{t;tQ}NxF1x zt&IV|)7Yez?TgDia!&M=f^~AnS}KArr`9M;eBGJgFJPm#?kq6)+G=pAs88QCQ>Px! 
zWhZqoa?ldp;js>pwrxm}Q`@#znYX;KjV9W5t&vAvj5z@YPt925HULwRaDojA!+x*?|Hr-v*1`{ zKsz&UGjuWEBy%&3xTR=sXk1X&hz)+x`HJGQ6tjSy4TTw$yk7k2(XMl4Ehk1pzg93koRT16+-PuKZbuOp+SmOe(|bdI zPzcnb?l~Z~rFQN{obrNJR6NyHv(`6uZ>U5x2eAAbG#*g}0F&Z+C@$eQoSMcBVs{Vo zH3sn5tOxrVZoX2MqB$_L-_}AE>?$ZFoC40_+U$|b@!$^QuCETBX1=%5isIJpk7CalylMe#WBe@fT4A~E^ZUXD{B&0NW;Z3@lEt2?*i+M>-PCB~Ek`+F?W}V6n zRKCCNw(;x18BGC7M&Kq7?<1=N7Fy1^rgd+i5sOZ0OdZWV5fj>`}2wt`@PA z)q#AU=XMFxP`G@0xkb31iqKbba%+1jnRm?133R4>L--Q-QZ-dlIGE~VX3 z6dV7B2uV`E=c^{_f_)Ee(vJYnZLYO^k!FQa80iHG+Urd7KTuYMltMLmGLPXYJ9uxSfU0Y7=P|lZri_OLPRi zgl7+V&<@?IWFc;)ivgGRWiLf;d_`mkKOxT5GvWiHLboGQ+gRRp7yZ8 zayf)jf#zy%{kikjZ!iqEDs-_1VjG62;Xyj+{^XI*V+F5!vOWr}6E|&$k_@cdLo&V` z+uG5MAh>ZHqC#sH6TS}>e3NB`j>nHS^4SfF6-OPDA1B%Lf8M_XgRt7q9KBTb!Auei z3V$bKbXgR5A~6tbk)HYpb0g}xii*N@8J3O%Dn;hPacVE1-*>*_DOuvxSs9AFjkWGb z7IebNTS-T{BgjQOe~?Wugj^tDi&KK^sXHUJp9SO1L<3d(i!zJnh0HQa00C13i(rsTLDbN z8;u)0*__wp)w%8Y^D8C8W~o#q7*vZIcIQdo5Rk(JpUxH5oIhwCOLDC!ID>Sl%0VjE zG>`BY2F_FblrjP2k8Gj5Heg?$(P3W_hM0dhrv`9voF#siD~H^Bb`iqz0h-RBrBwmM ztL7d621$?)aq`>I10o%?c0ZP>-HqMg{UWrP&;WE*hb%fwR2Ykt{k8 zx8|&HM(wInavex9oE1|VHzanD3}E;i6W12wsR)z&Be)<9&r;^PtQ_jtJn@PwlxbKM zSl95luu7}ht?kM66+2JkNYIYnkAV;HhPzpw0dg_CWJ}C$K(3Q(y(`c*Te-Z??*^xv zCL~*^Xl#)|4=CUb{lFBypnc_U5c#}|>XXi##G%;jyQ9ue71=FMky^Hi*arDYa$QJS z8~kKvwOa1Vq(@f~EcC`BuiMyf=P#GSE!en_7okBL8@?lf3G8E#ZnDqb*K&fj4#&6lyv3H;{)@`e{c7450=57jRi6PeaTz{AF$& z?Gm>D98yXLcxB*A2JL#K@-U^;p}-RmVWQYOwvNwFto=L@)9Pfz+HD_T?Tp)G+e@A? z&M2k7$4T8FoFn^gvo}_vCB(Ph;lu(>NulFfB1OIoblQHX@4ll(ogY1u**l-v^hZM# z7HC=&HJ7i`>-}~`!)XJzJVN;{JJ@y$GjuN=Sr~jt#YY4|CpX-btI;~`JbuGg1h_#& z6#yJmZ7VSH5Ng%RU?vrV-tkCFtpSFv&A_e9E^vSA`$c?-Z~Yw25o)cW$taDF&+nFE zx91edYtcS*mrUhTykBCYC7ut!O*1%LFRx)QwNCo@0ENoSxJb1b5q+X#T)p7|JB-w% z%$Os3hx6iGA?UZ<7)b{3rs14AyU1H4P>|5lk&XKoV$pKjkWISsp}mgyKuhH-;T2|1 zAQfUTl$r**FWU>(lois5`5R2QKo-)?k}$MUVx^vAH+5>c*~8R7d^YLzmSv~H8;Ky3 z-@xjlD%nb^-!i^M9mx9rVCpoubl+QCf)g}T*Kef&PO5V#vv?w`Csjc)oUars!TvIf zPpP8gvp&b0gwTN*@~`@g%l<_cZizKE;Ivl!bikfIT`q*h0>HN+kG~&~!Qy{$Kn*a- zceMtm+h(0@j#c+V1nJ&{op1qxPHpF@qi;YBn%X&b*6SlMahSvrKizH0A_zF&69lIS3$A=;emCz!G`%88D{@?+yBt?Cg$=xo$;jY<4rC zU=BW9y{4Xu8JC<KRY^GKh3;wAvb(PM=WXon4yNX-(y1C24eJZYl#! z0*NDH*TtX9FT7HoXt84gB~t7YRY|&zL+3oJSC4L;_6V|sOj50`$*rb$ z{cMXv-<_W|z1zqB6iF>TYP!U!FUBfRn+p_wH^%Ea<)Y(!HCc43FwIMGw}O#P^~9Ti zX*0v1S7DX}D7;WLIP`%o_yfzPIz4b2S^}2Y+C^`f`dTj?h6rSw<(xd4wWC3NKe^-Ht&klH#&JJo?nW^VyWMKNAa-;3$02$?dPl%sPpOF)l6}Gf2yH{&6=bCL)dpl zHMw=&o}(xV3MvB9!2(K^E?p^t(xi7#5Rl$WD2bqeNR<|;k=}c+5$QG3I{^X&2tCw< zko$1Xd(QiPKkmJM7!C#w20MGLz1CcF&J{zxzC2q$u>1v&KO!j1Xu}uq(oyePa>U3J zM!L(j+!Id)JJ(JX$#Za?bj zNtzFU*EP_D*22FGdG8>(>&~!<)Por($eK)1YOZ{6jqi7E6Y9c+*i!4h1cwc2xBDh} z+fLtB`AzY;P~RV7a94c#ciQA%GZ?Sn1Jc?ZtE1N~0H%N=l@J@18n}_eT4hog>=S8~ zW5GhA8TwoRwtmw1Xx!R^L9oRd>&1fT+g}!w(n=^wX)F)9d#=Gur$Ml3QH0W1Pz9~^bRATuuQJijKbRag0Au9~<#~n(3HQidSwDO2MkNSkNmJT-bd_ z@cS@Pb;8ta2dH)VI=<0fWQ~d|f5+^P(`+p&-^rKkfECkwkwne2TSJro)Ui)Yfgu)K zkzsEA>NCP=dOM%`31v5n-$A13lJ+0RYqPq1!mM4Z2_3#?OWr_pIq-pJm;b>w98Iw? zFJ`g+q?UHTf?XxIqt9$yL9h5b_5E;h_qDR5P2SACSgXXG>JRxDhP~xJ@dEC>Jsc7? 
z_kY(Pm(1i_Pk0KsY#95j2p{&{v&b-RcBQkOuTxVAyx-;g)AMXY{U18B+1$tZ)b*NQ zqQS=NxiH8prpCh&7r-%6i;~7tcK9^gzhgsnZ6RH5W5P0CmIRU_Vn6!O2Ph%raJqHg zPmf;#!@{s!+eRxH&{oh?10exk#hGb74}A5+-}x4;cAqqs>S z8cJAdBju0Uc>*xSvdM+6Civ`*7e^^epI@1Xqr)DaK8!S=2DsT1cYi)}M&HjGa>A5iW;){@n3p}20vP-HA_5~fbUEGf@jreBTQiZ-KgO-y#IOGsNk z`g|vDm6pnfVQa!$fEh*Y*Qv!(a<7&EER#Ms{7$_JwXpFh7=ECz<0db;D&+!9vv8)Z zTZcW?7>}WtN6ZZ%2(hh0uk$orly0e^=$9Y%9Nt;9{^r;V(xFe=}zs1EN0+!^u+?EhV5xC zo2sSGi3b6t^#>Lmc|rSwH0#h@?zKhm*>+M?*7tc=_|9ZWzRug9bmm(`wwGhAdJHX@ zXb!}9hNEBv{zG!Y%`ecK39JNB10+Z2ZYG+_Y?2;YmjAgo#@=yJx7dPm4?jn2=w{;d8ehsPoVnR>$Tnn?89Emvu8}LTL_si zPcKIariIM8O%VG(_?PYvo2jIQ&cmm)roQmU9#Zo5zm@{f+R>~Rs)SEuG(N7xMHWk? zr}#HJ+gqXN$BcE3XL4c3^Th7GmrpETL%CmaYu{b0gq{Nk$^7MkQyqXgpqFe47hGD0 zyEd;T1SuU$X}(g7=Q9Zu&)Xp*xuv9Y2`n{|IKhIbZ0LWL=2Ia?DBV>RvOjp(9CvSK zg@wGQlAgu-aModL!|TJri4EJaY1#<3DFQwNDrt3NPQ#Zu!(YxN9E9B2sr+N;?9ay& zCYQ)J3&(BJVeblcI8|?Abf8&vHOO-$rI7nFHZd*Tv3UicIh55LcBJS@%-fg83ggBgs4SMnv>JoW6r^x=9-o`aA@3>YlMrV)b>l-<8|+ zJ?%+8J&>sWqo)%JzE~d)aM5I=%XZDDi-N=>rCxs~5B4@7+oog;=Aw+O3i4}4uEl-=Q_=PX z2r2hNmJCVW5Z`JG#0&1HUq*MlQ`8v+pT_F3@ezZ$h{%E@JswGFBY~w-?X+2aq#&`rXQInF9*8i$A!x zf{yL@>flfFvd9hJw_6+-ffeYr8OcvwERa%pdI`;h+8m|Ua7v_gx>F_%9roL}~~C){)ab_kH1`Q*fgMI0NLtUi=f1QS0+$$bu^oQKdJ zVx}};_Y2XE+((;3)?bWG+l#MBi?sn{-D>N*^HDj1b*rtg}kO zN40fEtf`+}7Qb#PR-w4;jXriIqFJ7r6f{jEv@=w$pAn7=t3WR9fu{Fa^LXI9L!%oTAGDQ*J4 zC?-ZXIS0`MxY83B!EcegF{(}&A^Th@vi-h*#mm0p^PHxBrykWQhmTWpT?k1B-EGIV z2!*D8AyUxLPyAwFt&+_qTdC&2jZ&$zz^vx|`(7O@(6wyuQ@_|;L@vJ^{=?ASnH5&P zLV-x7FulpxJA6}prYbN7aWxJXf|;?9uqD*%xKak#o~uq?GKrebDyQqKxGrN*qa8Vg zVXut+>A-sb#~2q{e7@j9Y`PgAec5;2qSRh;P~UrWqi5Uj^REnf@GHa~_wSXrM^~h+ z7A?oVW_vJH)+T+k$lb8)e>UJ(-qp3(8nzJI+$y<;)ZF&-Ka&lDmQaZdk-&C}PxZ-- zr^5R3*qYl4@3@*Q5@5~lyB=^BzwmkQtmu1I0Jl5A$9usw`2e`UoB;NK^YhILOMz14l)7KM&Jx}_UZ@YfpZ zCPF{I9r6Se4jzqVWtd3fT!!g2(>$mL3~<~=u!`xU!tqbcI_Bv{Ls7>LaSfIul5^GQsUQbw_x9h)kpUkYW=>RH1!9K>@=W6 zUh5I({d^;^t}@2`sA?i6OozovEE2Vo?;G#c>PEht2`oqqym}M%JX^cbg#{YDxfAP* zH$6!tbmo^XnE|xgwH-L+FpsE7Y4aKT{b_O;(poK475xF+$VrngJ?i(ZwpeQ4@Fm}B z5}Nnr&p$ahzb)B}mpJaJw`mh4=8n{D8ElGM3_p!|lzh74$+4niDEX!GafxeCS3c00 zQ$a{~2};A0jt|*La}xu0I2hna`Q)*RX#@pxX6&Tk+dcDR41sDaX3vj;VsT=?WmbgU zh~|{T2l2j<(LD7NsJZr0ufVsKHn_agP)*^3qh(+7P;<)P!W;PDW&;YnG?X_8`)vas z6SS~*L<`?WUf^~z+Y8bc)ZlZ*Q}h=aT?z(a4bVV|%8E9`pceeew)`*6jiW@GXtiTn z&haS<=srfVuWvi=`?s|0a}M~2NsWMuapynYp_q7G5y!(a1HJ~_Zhz#bh{4HVA?+3; zKu0!l&UIMh0OIW-#9mHtue~j6%z1kkn;2OtF>igiD|Q`;)SvZQp@wP>GKy6xZWpCB zmMgl@h`oM`_8=giHhvKwG{m{ufEfc1jShS3yuLf=Mo@=-@7=aerA^P=C7-!mw9dbL9 zmq*7*rSBg*1DS3eSg(lU#`%_gwI$AsrZ|P{Cy4vbYx>9yAgooHw~J10Nt^8psE=}p zMM|GBcZFL4RLNcj47aMyd`!xw$o)v(}Q+JSGw`Uh>wUtMqQC5;Bvo|(%g+>Q0dT2hnyY>XG(tB`N% zaX5aU)2G?^U!;@&#d8Xjo*c`1YQ^_oSbI?dE*0rF*7 zdtHA_J;JUvAx^Xwz~cH$y>)y)E$(kV3cmtjG^h6a%8>}@k1q6&n2Oa9kckx$*|0@h zpxIa-tJAbEcH8!^Ey*v_Mm*xgq(OdeEUQpEtLSWC-90YRRGxN_9{t5Wo10u)KiYM0 z%KBB@{I(97uXOwRnXIs%eey9CfJOQ3Uz9T)PYF{~;A$ESJ9_h-m zSt@xL8e%5K5r!$4u*;`B?O0}fl$EeI>$RX{ooK!1$+_ZfN0y$=gxVrniPg+iTPQyY z=IL!zduz)5TNtjjEZvzHv*&;R2(@T^#83iTf0r2b=;8foK}748s1oRA%NtN}{9CdX zef`-=JBe4r4t);&>Hah9?MrroWu!xE?~Q+frg?g_RC_!4cuh`<~l{ zg}I1ie={{jt|(AF)xf{(Bm+Ehgp2OjKl>ED`_@!6SHYYCL3nZoraDixJ<{2xU1e0+ zCN<`uqiW(~JDX2<+$WEV6l+mV@cz=^L3ZfZ=^^F48;vD&m>+oKl%FBIw~?!}BF1kA z?c3^IXXhxP=$jWV13bDDr0-aoByl{V)ETjAz0@(k+}Ll2kL$|(SlCg!K7`Ail4%i)m8>zP`HsbFcLZSoT8XjMYuA2VTK5s>!M8v zaw6C+Y4KrHNLgj*uJ`;e;z|gQy*P8EF>l(b6jX4Bc$OldP%QA%xW8S*&w6V{HfM8HpCu9!)j_hv!lZDIwMP|>~ z7LFMQVTP?_|Mj!Db$X#H|24{&nl#xq9`w@;LWp-fH>-6t!HpLijnq0J(^@RLA`bw& zb1fpW-p#t<3|Vcb2YhYz^=deUAkw7P&mcSV&4WaL{84r?aR9r8gv*$mvpjQVojW5Q 
zBO$rTL`r7K@4Q4xCh<5&Jmh3|(R~K8!p13e2KOmX+FpVCXe6esK@Js9RNl?-&5R5X z69v4(Zy7O%DfE|s=V0Ioxs{A_O3^Yx6q8!w6G58K92iu9T&FlFF83)%)c(doJLGh% zR;_8X->B}_E3~I@+rlDMijX6pp!{n*pwm}N1iYfXSE{uX#gbi=N6?nkk3A0T{opZtteA8=D*_{^}#-?lZHHKxV( z3K&ui?fFFzKl`JVU*;!^(Y_lW-tK*n`~K$C{=FE#`AI)v0n2{WGT~xY%pYaL9 z(hFA0YbU2F(C!N``~o&h1TY&unq{`qC=3FyRN?8ZiypBiW-bxP$m1d~dc zBGj_OP9InMrfa@-Lu`Vly>DoMJy#(K>6Qc{0yk4sj8iLm=iRD}G|T(m5Axr!k4mxf zZT&9g81ysgtaHy^H$F%5w3$2>6Y=mkef=)1x59s^))#9FKEmAU+HC`e{{>)KZ~aFR z>ab?u9H(+=cX$1M@J)4rX5r7d9X|Gzi?vGzo2S_+B2qd>T4`gW1(S)g04rrJdwPIm zgdt|&0SkKNQ3F68r^unrFKs!Gq_5u0LbWw(x-4hexQ!7((-!h|)*2&~pVt?o`|7hp zyKW42r9JQ#voX~@jEhxY%~N@?Uuhll4sqBYH>iAE5h6iB@bws0Zn5r-yL20T#BsEs zpJ5*ylAmM?s-;mCX!6;w$lSQnyl<~}A?s2EJ)p~;dAofuloai=UIMj7f9kP)a$+|& z3}K`w%m!aD1b5tggM3x_XIN(iV~xyyUmXjXqI>`vU2ZD(K3UG5jh}q&EBQW(qcS%# zOAyihAopueTkdGr>X^a}g$7>JODWK;)t`OcW`Nz3W_yEhv-|$N8^@hJL6gbvOV8Up zPJwCK@D()eDBj8-IYd46m48_+-%(4KGNOctGs2zY2d4YJWWfO~Sr&NvV9|G+*hV*{IgM))poGz|=3R^qajjAF_K9?qF+xZlo$h|*$2VfQ46WMagODowH*MGHPj(Y%M!;>8i z(&F5Ob2hdl|3Pb1*B*dc2o^;hJ?!3FWV_85u`2cppOcbngBd*NfFeIQrBvp;_YT&i zdf$-y;Y#WXAe+v7X-nsA^yb&gB}+ZkQsX5t{@KQC18{<&ZXDEQ<*m<2cu?civH9Na zdZ+Hj^2!B<*)-VNG(U6aUC#uK6Ox}!<^r2+b_@2hSfp-I&4PcIq^gG`BZp>>y?NW` z&9DG5si^^{ABDR2<{vMB$p+9#T;S#_$4@wEg62T_6xZhJfKlqBIneq^c^m%qcpmeI z0pCQZFDvY4(@Owi{GwVPQcn1Qq%+0y&D@~eG$~PQtC=o@ISkUIv2?LUlaZYH<-}Uw zX>lw?YzU-*OuMmdyKm=bD$TcRKCHi98S)J*dD-Quns2;*XHjRk%T-1MeJ7>t!t$ypk%XX;|ZKC@-hA z8dN`RPnMbyhKHQa_dEk81i`?t5g_kG0P0X1pQPc9NhD<-YqR5#Em z3-$?Y+}~X}^1hJdK^6be^77>l$aYE+d0q`#ZyvZqe9+C+<4}7?{};A}OyyQ`g7feFvI zeb(22?*H*StgY>dJZRU9L{0(3q#?(l&9mE>Y6)mWl5n_3gpC^bQ!fRiqM<9}co#6JEFXmt!Z5Dh@dVuA&lGJPsHyP7t zDFf599|`(A@h+4rVpqd->{}UWa~1??wsU^pr)jl5(HFr`0af}mA$UtM>643L(Q%Eu z!G8VHmc;6b<4A_I4!~v~P`Ckf^CI^mtc#AFs>%MmNhkI#PO&c9r{YItWJ8yk=XwX* z-u{?5F@?S9|z&|eL0u*RM|Kop`;rw6eVG1N6iFYLbi#M+8Uz4WGe^ONk zAhp^x+Af-XTEl=?XuWGd#%6>2RuiDmkv06erSQp!p5YySr-^ubz@&^@zDb)EVoE=m zOK!5&YGL>=KuyU~YvJe4xsb=I?4$kE?9wwpQTYMc0?z}CtZ*mAdaEOVLP04?Pi5qi z_|bR=AFKRB!-H1eJg?&;w$VuxL$2Ff%DV&f6T0_XW7j(3OYLso?yBU_pxH`dGc;Rh zvZd1~D3+9T$up|FBWx4B*sP#p9TW1aS0+_hws$I zgPeG?f{eL8nA9CSO0_bD2$e^a@n4zT)TyqyGt4aMt~4VkbCRU*-N!aB9x8RT**5QM zHE6(i|E*3U46}MFKi{D08;5S zhJc?WO=^*oCVv^&vb1Ruffhh#E)%H)upomnzn!_-k(Jj~?ab7T$Ai1iZK?WCK=2i} zO6c1on*Mj)m2>NZ_k*hM#Rnl7FFXadUg)#PpXBMcJel}jqT z>tb+wN2|#t6M^JfS^59~Ia+DgN$w=bw}VK7$Gwc`+ugPC;g1aR6?(xm6SdNSYL-!G zMDCu>k-$=z`Vkf|9xiBtdlz4-$%M0OGcW<*&G{=?G{>4Hz}ahCLB5 z7m#j~5)AwKIOZ>px6vk5J!{i`nVcuslLzi|V|v5cP*bIwN#`=Y=s$TCzEv+~Dm~47 z`zyr+yFovZQkl2R%9^31DK8&$>C(%)S8l(!(!+Vl99X88tT!R7*njW#)ATR6<9rMq z=4k7>)SPH|)1l<-Mu~F)ZmS^8b~Rb6ITebi3sff>(0JX9c3_HW&lwKICZ?~D)4>e~ zDH|N?9`4$DA_!3p%a3>Vz@SLBH?K*C!MR*VCQ`^G`JiB(dQa>X*a&-g%z((;Frva|TrN74#QzI+sX4EV@uB?HPFC^-H9ZBk1^%tQ? zmTem9DpxeP_WX}aw?6(s!dljJOUdqLZPV_OINKk8{`KuV@k=(vTK$oSl02zNQx68! z05*iD2(@cQdlEI-%BwFG-;PM0`r4>wZQSwO7;oo1!@}El_aa+tmC6)$VazCQz6si! 
z_4`FF9UF(wo1>;hCO5X^n!)T@z1vQvSz|PH>SHxT6_$R>OOp%`87wI zH}!plFAzr}`|DGb<94sv@4~PY=gmdx--#RuGP)PCag$no)B-eVKC4&6N|A4@k7k)u zvtFSrW?fFy*#Z%5k#_MO+#y?2rOnrv@-QAB)nn1%2uBi6)k_KXS))W>G)84fa!FF^U zJ+apdGC-W8AJtnOs3tnNd(3#wo$JX-x4xM)Epdan)Xlyi-F)yVaBCJbns36(g@k6i zIJE2{_2i|PY~Q^2{pb@u4*MEE2~UeT|5WOHsu8FT-Rpv7A&i6ttloy49`L4 z*JzR(#;n-VC--*3SYDcfzipmInULG?K3F}v(g*Iyf^nQmmsS-&1}CLC&3>S}$v)jz zb7)aw_c$vek8fWmr5ed%)SyUU{ZV8+1ldYtr1^E4O~9}Fo*eYOmrzsQHQ2=FN267x zTXRi}TC9yU6FMT9Aj~G;}>1hocw6;H%2=N2p8lC_)hW)FXywKXnCwNkqxET;;R0{a{o5#t{!Xjk%US4&IVvSQ?r`CQ9U#3%Y28x77}b%%E(Qe zbU=5{I3Q4uTXS{G=I$PZ6hKX8MX}rmUe#ZOQdnJj)Ma`;QOdAocB=I^Sx@6C^&?-6 z|8@IA&(Y2&MBO44ZR1UG{$Abu+-%{f5EE%mEGk$w(b%nnb;{z{gp5Z`?B4+RBS7X_ zR(0^@$0V%(x&KPZ&kZqmD=i4h%DGlYUn;2phG?p4P|D+Ss~}m>oecXdsXzNo4XJwC zy?kuWX=_2e)vPXByyXGly$rLTYqTXHOck&u+_aIV(~3{U$k9x%nuI;aP5L70sUTxB zq2=B%fg@j5b~;JFEXk8@DqeiajMHFM@=jlY@lrxwjjgTWH0qU<5EIDU=r}vhhew<8 z(FRkwB9vk{qvtxsCr1{~#oxCblRXcMrt^&wp9{Tq2Dr$NCKeX;!hGIsd0P2`@N=Mj zxdj&&Jlugt!Vzww>%r|6m>}M>@V$iItDOFJv*~7#-4_O z=}|JLDoHTIRcbwj-$-s_ZrC7^d)Kk99(&d>S7RiOkOspJo=rfrJ0=+g9W`HOx$hJi z38bkM&piuNFOFRX6(ZtS|HPic{1=lT%W2O?8U0NC@N>tm+Ph6d3t_z`vXL?!kq|<_ z&r?E(MBnLZ0n*PBlD1q4i+~;RPt@&|E~d7YiK(_6Z z7&iaqpM;~qxF=Z=z#XK2TK?nfB6{94)=tLaSy#Z=$o=Di#D{hzgvsAQRwAq)G27NnglBxB4wZc? z4J_dj<=^@Z6r#nz{#28VxcdSP6~s726k<3`6oWnu5Xu$=ktf_oba`8N80y?t?oRR| zW*BP>qtjTaj4Ej)*HV zA>7mxNt|=1Oxts9>b@8;9SEIya0ybXkrS!=zmCaasqn#||N1lu}cY;OC zp??h;4%uQ4AFJB+%`h93;eFp6=a9NKbIQe-D;xzes8Io*ucM2U+`(MOFByu^V5QtE zhjRtGX(loi^barp{lWlI#oa|RY}m%rb*!be}RlJ6B>d)cLvSHi)p6#$I9>KvAZ+mIQeNT|*=X|5mr4{y+WR#Oda zBurodUXeeY74vCTG~W;Hs5IP_Br1iN5Ht~|(qw^-bk zVDFx`8{^Yc#E<2mrg&9r$2Tz7cKA_10_!+hA323J@>q#Z)h*FP zXk5yvB9Ys;t_BakN$%}BAIB>ASD;jZinitj?J}r77_qpYh1Yqe%z}xQ7Ox29K`p*XXO!tVwO12njs4aUrnB^xiF^=R_tkA$O&q9T`al$lJUd*N%W ziqv{%p+Z*!i?(sT`NGX{GtpS%DHw+p{2sWzQ+4%H;AJybmidN*Saq3L!Ny|`gimp7 z&H^@4?6|%>$FnkYUWkvegi`XCTAFm2CS? 
z_r*9}>xN~ENJ;&x2kWsv?fsgPAQfdN1&7iqK|KJac;3qW&h% z6#s4Ov=l^hK>7tICx5fSie;2&Y~T_^JzGy0VsS36b!_Xmwq&!@Ac4K)OPw6Invy)u z9U;^G=hgc7bN>bFfLHm-r+a$W|1RV{KG(UeWS460S%kD%AMIy$@w^0RS1R+PsUD3N zy?LxI8y8W<@o`Go?*3^Y;x?3`9~1<5C?*Qom(_4-bwg28?N@Ot+v#0Z?w%B4&d8s3 zLpH;2w{VFLMVZeGyE@gs^HQ-xL2LEc8V1YM;G5iB80f8%uC%^u;VZo5XH};6tZt{ZYk5hzbRbL7L2JH2juJ#{ixRZdZcjIl zbbb@?ne5R)-|x{dh|+VGjSjUju!OUR2+|43r6iXM6va$~!?o%rcy{wwRou}wr|%Ub z?ZQdNS8vcgxx@z}PVfqL zy2H+Ii@vn$8Wt~_e>U*F_=HLF>{}x zERvJ~Wei;$yqe3% zmzz4xy?a(s(R6s^E8yC%x(%-DUx?y6QQ2?O@fG((Y>#Vr&Vleh1qU?ichb8oM-Qot zwZzq&8qUAEF1;rV*J({iCzfY3+l|F;7ZesPeI6)n28=7uQ=R)4SK5DcL=r<-$R+QZ zN&Y%igz; z8DPK$2HZSLOMq^J%hkada^N;9n*G@cPQcN`VGSkl8|5rer}&ii62pUaNScDbn?T&$+G$YWuCsUzp0qH1-*%+KAD7Eu+!4{q7;r zuRcFV7knivS*kxYNGnH5ZazN+t!rx7sLU2TMw)i@+LT3njwc9EQ7Tz0_lqx zX#8jdE^^MQVF;~-;Gp$Oew5d1)-z_RYd?8zH0RZ>QJTeRJQFg6G#90z+APIFo~u5H zUM1ds2-2$+e#P5rty4PZSZ7l(E8vmSXxT8xIC*S;ESV}p40~G}5~3glHtJ?S8TIpr zCi+iY)~)(gDH^NM9sr(S$CsCQAg+(M;v5SaU#wa0h9|lyK(Y!JfBo3ONf)Hwi|i=b zsg@#|2%a$)oYEg)8+=R21!Z=%qIOII=XgYBo&8TY3QJeg-{w*h-X(+8Ht<2g&oJ2o zqH~j=Gf!$_AY?eDtA3aDq+cOK@}GyE^;Xn#o#Mr%!>_}CHJlekHtPwo4FUJO7qijZ zAIYz3i`cw{=J>cED3hrYYAlZizFP)$x%pn%Uo3T0tV+b_DSB@NiHCaYuryY%w+STRjyDwH61#327_Hn zT1iNJW{GFpYNOd*^!xq>K?{}PTBAt75W`T7tmbL0^V$nyTo|7uLd~SetOWzzrE%xd znj02k^h;NIrj9r?EqJQZbxJKpIiemzPLvwLWho;CU%X+B$v zKm4V(iso0+))(>AhgV$qQD!qoQjx)Ub2O9^!-3zb43Q|_L{L>aIar5fo~+P|wxsSr zPBbE)we(lU!yLQjBx(`f?yt3W^N&1?4>tv$p(3Fr($#AEC+Rlyj`uu5H>=t_uUxs@VfrIS`WXKZie^cv-m#CAeh`cwr@K+gQa;(D2YC4pH|AD0&sV_q_gbuLO0LP|5?r0G5@A0us^sY7P40*&nPD}m zjw9(ch9Q>dya{jehNJO04P8ok49$`&x0Sz>!-@}OwE#WF6Kj3A292#X?fcBo3x2NC zZ8!>MrK(Olp5Z-2RiRx(0L@$FH#Vk!>*;omH#&B8CsLVL|7lwLBm%0&pICKy=VWUM z`nFG92ZC@gM^-IPzfv|`^7_Bgq+PV&rm=9i`#pAA?7mckyR%Vo@LpbRIL5KA<55U0 ziLSchsxZGXpP#h%&w5vHR={BR66Imv9_!qa z%NPA#VqN1l?_S*0Nd)hU(fOj{T>XSh=&EhU^^;QE!?D5HHb8t?>%YxDJ#U(DyD4E0 zk8mfSHs#;_T3{q&ca2>7Fl>;kq`vjgNI~HIsHVZA*j2z~d`qhYdXu4eXY=lU?sw_@ zTMD(ZFsd(|ev7#^xf6m7P~E}f^@7dbEwP0!>?PN!{5D?kBaEm>^F%Wt z+gM6;!~iWeUo^+f*F8q+GG*1&FF>nu*E=cYF+!S%BvUx(!`B2vcfrkcA8()3`fTVp zpMADjY56iCNo{(#IsfV)%%1#6vZx3V`|!*)f&l~XD21O!Vr$l;NC``X@W|qnw{tZd zln{2B;ouTd_a;mHeLzkNaSj}A-1eVrhz<3c@lag)r3}vLxGDUO-zo#lif>!+>4bRG zy<`^|!KH8{v3b8Ch5Abq*-fSyVQA!5Z;dK%6~zkc8?9?u1U0?QMxPepC`2LWgd=XV z?*6US5XimUU)pn)vND1@P52i>US0D6(Sk;0m=Ql>rLfWIvvrvNd3HGYcmmG&Xrk3t z4)Xk>@z^GC+Y9n`nU5b&TeTd&ft!)jaNi}IY4Yc?9AMu-veI|HZ(K2wW5Z@$>Kbreq$z)o8Ob*!Qvr8)2KksS4ei_Tw=kHL^_(X_Tz(vcy28fcEpJi(+bnGIdVq8S9Ut9xP{>j2-)v2DdxMh!}?Lrm#nr_WZr zZm&6=hNYYOWJ-$nvFq(HyvlqQ1Nv=vgz6PsF#ku4tx@r^dtPC=sZQu z*AiO$kVcPsS57j}gIg&C#<(F`%Q+uuwZ}E;)19K^wDk@ky9v`*HIsg^ubCt`Sx5L3 z59^=R5$pF+F^?SWT(?3@KS{%L=0rFi2px1J7_xIdEU$N8=NlxCb=;|M#bFY#Kkkhs zz;xHJ<0N6eTYFyhxC-()^hd{uGtdH$H^GCUVXx49Q8p0VDoMoh1tO?iX3RyV-5TWpqT_v)wB9`LDxPjQ)-gy%AF$wB0)AIFdL5=$NFPd9ip zi6bho%Q$Z2hqjw!Nl`dhjA7}!Pu1^H&z~D_R`U4W6!$!kkvMdF-L=-q;Bd<>@_X|` z1V^QYigABsv3`P0IFls-@I-3B*A4|oaGrf+>;Eyicc+Hx*l()BY~@pwbb`}mjk5D+ z@0vX(dTeB`Qi@Z9Fh<{1-jV2`IpvSLv1@}dnw6nfH zu+LV4^L!N1SUq0Ahil6y8O!E|43rR>rj zb&|UNjRdGRzukKUF(64YZQFO3zy8@SZ{9JnM1uW2 z!L?gb`*DB~M}$jY^3n%we;#Za)g#*y-288I52`mq4kIF#rnc6z0^-9DvXAx_yUx{l zo9gTt+RxEmu@+I{LUt2eZC2x*U;Cf7nzwApgb5L$rB~tbGoIB;{~6VYUC%mI>uU57 zvV~yw%0%6Y(`5(HW+$+-CU={v0PcJS$Fdgu3xfpv80dpbHiM{@YYb4be~+sm{^x#0 zaNnbos=2L$m&P9JK^4bo5cQR`lV3d?EQ#^clqXv2#uN|0@Y{Z?e<;xWl~T0D|1LDd zjeEs~8-q1NZ~JxDX=#`b+#53)ddy&>{zs)=FIA7}ABI9=Y?EIrXEsH|B@WxL2_2n#5-C|;4AcHy?&vbV60SwgshkcM2@pc>nI+%V zBmMV$Y$+~o6I6LOzfo*M?KXW$qfg$MKa@$c{zZ%PomOj3CH_l){cs=M-)FKLU!gWo zlCEt|0^{G-j?))>>7gg|-wPzPX$#u@#=%(0cG;)i%&!f~P-9$08DRew% 
zrDr%>>x^$o;A-K?^X5rrr}ZipJi&2iCljBHEE04d|(_CzESewZww1~!VZokuvq}Aw~vZa z7Wgt2hI+~1zzXv}%5DPsGiXaMCzmpOer`GX^6@|%iq+)e4(3DSN~B1wIzO4#JMOf! zA#1)l!SQOQ;KP9%;gMTO%8lcymf7*PmczEVfUn2f*Cd6g8aZ1y+TJLS<{Rj1X^Y`>tgXjN#ScjomrjogHB8uBB zJKxB@*G`*PK3}LRu5y~GN>S`0yYfm&u?m9}ijZ5vHhp<2!A5ufMD^SSgGP$GJ%&2E|#-=5)y=94o z&2=CgOca>&NPh2{>9xrf2{ZDq=c9zLfUo`Wci1MlU9y3X?3M6l1R|0-=IFh14KzIG zT^ir&#ms5WBsq(*lo*3)F4-VZ+yl`!{tZ*s==%Y^rfvQVelkaUU0BQF>hZY42{Tc& znwgEs_%V{m3IiW0Eabg^H02(9Gh4UVa9g55V?y0obyfrvZeO){RYCapFwkikFi}fr z@VIzk4vQLkISsZ>0ui~)saw7IZedX}iG0eJtvQn&E52bYbT69*Z+Gubq0M@|kdU1g zH&d_Fs})OMxqKc~u$<#Cr6maR7K6vYgs1`IwKZK`srzNaYy6kO`}Qq$Z7RA*<;#)D zG=so=lFdeja8`+ezwc3ZR9R`*37X8j@zmF^V431YX|xLE0-W_m%?ZuT;zdhAX*aYcr?aM3&Bf;g*TEzC4&BYw0{I- zT-P3#VA|PGK;Ge$WfckNP3kI_kiGIRy7Nas;C1azdNr@-DV~_reV|Y5hMMs_o`3@L zurISb+cgr~2`*GQ^%a%5QL$rsX2r%*1n^cj)l5TG4hIvLwWqxaubKKmW9^WrN*pOO zPOrWRm__~yW$c=N){zag=N6r+F$QisULeXjVPRKgFT#tto1&^765mfd$I2BZ1l8gX z$4$D_J-b?*_ZJ&;dm^Z;yJvk_IBVUOrCTYnxKyMGeGwC(Ih5>1*SwH|BV~%+$%`v6 z04Oz9dv@e3Q~zskt8(XCXsi<-I{YOZDtX$TxpLsilckpW^_Myv$*wKjhKrnQL_P%_ zM%0=Nuich=55U9~n@ukKgmS#wayqFJdSs^iZ*|!Z>_}tdTYDnzxsMTW>NUKcUK`u=)a%JUms!ii;rFn z!*%KhcP`amdm807#76mVTQRC~$)FNQjgL zNJ{r02+|=8q-7!dacM2#&BQbP0Lo*B=_cMZ?bAIct`_K1y|G4X}J#fSZ>_{{e2&niWi4Cc-+Cgi;}tsC;n z-sD^Ap1J}~psc~Kq&JQDL^qu(iDuBZ@Y&MFD)GriuNq?5dI?%Aqwr5ya6vR9)!}sr) z{F(&CK9er+M40Jei2pS>7ZKU+pVrKa?k6GpB&NqEb{$#8_`1tvmEnZavJoZ~V-p+J z2d7vU;Xwa*kuuAty}EKz!4$3TiAS$)r)wnWrnkS}3EO)031k1>!u$uHMw7=t1l2H2 zvNocUh0mjdpppiaR!o38w(-o-E}Y+XoO?v=3=PR&>ozZpYb5!hLNr+O5?(`zWDLK$ z9875{sdm=4KPaj1pFu5b$#)*plD6fb$vP?V z=&iVzgMGTPB&ussoqS~zC5D25s*nkn68y0p{!rA(dUwT~?oU7FTpjU%jitMP*IaRC`q>1JJTA`$JOLSYIu+eXYNwo8AY zGTOMSnUdaKaHG6l7vZF(#>;{5;K-y&nTyAw--%>$MEmvJ(V-XE=fE15D2~j;L_E`; zB<3j$Oz5}YaIhi>y@I8&2zafj0$aQ6P~Bkit7wLq-K48kYs-ocg$pgixSf>^)aav1 zMAmTncuel{n>#IN2ce$()8k}o6hKTn1mqZ;=~lrF1zo1Kn>5PWuAzfk2I*3X8fXUl zqV;|pSi+@r1w}#`?av{+X0>ud!2ISS@NKpDVXyiaR@f(iK68wZTr}>fTu`}Tb#=;d70}ce268c+|_UQ#XZdM2TgppYtLt@Roh4LuC6E1w84{Fn}0I>Lh z7Li^BKiGL*Y}C?Un$>pWEiB(^q2__9MWz=f2uim)T8~!E^R72kPq|W4pnCDXjD2>J zqz$hlzR+=VdshA&)Z0NAIxp$H7kcx64qq#$>WxSgi7D8!9tleA)-RA?o4RMP%&Nxr z_U8~t_4)&98JxPfCfLfjM2wN1$Z2>2)^#~d(Auj4~g2YpKL51hXa}%_2D(beUcvDWhpr( zFv_oZo~3pr$0JXS3(YzuZqmQq`d`Zdk=5RS{I0m}_*i`xW@Of`-o&x6Rd*oR`gjr9 z7z1Eh=L_*ihZ6@O;R$2}8X)I`vH#hF91#A;ex6;zgTPRU%@ze_mU4AlD9T| zm9nb?6l%nxzU(>*b|R_2WL@O=&o(E&I^VnADu0gv7TBn;Y|d@b*R?Pq8_-|i21tvWzrQj}C??3eDOwhKN_mF98;}lCCYu&^%$G~Ma6`rysGYEq)5GlVb<-Y%nH^lw_d67^QN-#rM8t~ zUGDsWH*_2QJ{vL~< zK2Yo}bOq(>_RX0s#)NmE_(Mn?if*qAv=oMMpEGQu)J#((y|BADkzq0XjvK8%-sHn? 
zr$>8kVXoEqb$5`Jsj8HkJmDE#zCtJ2$r<)=AAFRxwcT;806&r-HcHR!^kklM$%?NW z`m7(&5Ha#*&&UTl1Cj@sJs#a-`lH=k)L6a9qaQbW!;cYI(^DY%*qgtuKh2j*p;Tas zJnP*^3V@hu;`)y|AV$n$3wtpY{drPKR>-aYaM%~gZ0OrE$|MKAT0)dW*Ox39c^3ZU zK=(Lt%5L_F6p?9(aE`=92u1W4zdME1mLUK@)r-Z_E_(Zwbb61?E=h9R;eZVP!$vtw zAGX?5*mzERLNWC{on2tjp1am4mQ0#!y-KM-8j+Vx&XFp_Ob&7h&GLJuI739>?7om6 z-r5GxkTZTqtD~R&XsZ-v)4h?c3#!0iJngpFwK(S_m@mA!L7$hmp0P?rVI^y}_{204 zsKct}x50#&wvoZ0*sV>)3Wmw@o_1>TjbmueOFRAdjV}8kojwSx_)f~13CdmFU0k6a+}6YHAu^I&zC7F zeZIneRF~NA;BR2RC6XQ@CM{bF?a>RcQ&jCo?POv?XPG!#pMZJ`KRN-#lFGO5CmB(Ag6VkP7j#xENplNd#BKH#LtU1UPPh#HJ8J6 z7Xy?(Co6D4PaCCZ297r#6w{Nc=_&d0E4fqZUgkF&;FX~NdVVt~m|dEJhiLG;52*b~ z)3nTgAV7kCPqg~9Xa;`nLmvGwT4R2faRn*@)Pmc$pA(E|KA*Yku<3PCT~>6sd;DUC z>j=kPXQV($<#fZYU;mrA+z!}#oGLgpkb@rHoW0!et&iwlP7FM&)giHr~ zX@%?S*@=!Fr@am4x~C>X(}@S}3674VL;J9IHm5exk?`Fkry_z=k(PomABr;pWxKXd zT9zXZPpWm!<_}K3)@F7f^*Xyzr+eXLw}n$^tq0G0s&vcp8jcNP9cu{AF8C33aC5`) zF(bXWGv7|RBQ^s~TiGD4y=(Ue8w31p#)yi&QNciUG2sw32~Y+3_EezE14>SEWmR2W zhr>;S-#HYZ&MwCTu>a0^ou4!grx^k2JR!wx&TS9!5}?J>0con3g}s9pI8pSxK*wU8 zc45MOcI(EgJG4}T8P&v8tp7a?_D;UuK<#ztnuCi^O@1r8bfu!% z@bNu^qJ6a#AvuDc21E~^a-C#mML$5*DnWlVC`(TgHuP5Ow`Vj(Eo!?UY-X~p;8tl7iP5?xE{d3n|Z8>vm80ly2pBh;aJz)w&6reX#Qq&P7)8=z&nW=kdu zA-DOGH&Eoh;^J@|OO$poL#I(b9|Tf3Mlix73~jG&CI8ha-~KzN9L0DhVokRTY#HF% zeLt+3>HaGhIuj8(8+w^8V|SsGw%iKhJAHAqFwmQj{G9%9u%z^+Y`pU6tO2Wra<5{D z!PIl0{w*(XY_2n3LFW&+P5WArDQ?W0!g)3gOb*(F&fn%v-|3tEa5M{{NEa>iXBmeo z)8IZSGaFD)2qDIX{eZ*dI6Jzy54=lRmTNCusH-gxKUq!*un%n6scdH-Eh8S#%A%%- z-l=e@aeT=SG~*eiFA^-&){@QKk}{!SshfCcqg*lxmbC%3++$dojxQ$9&D42kGnoz* zuJ4Yz6ZLxe6Y8t=pR9v(53Qw&TuwRE%|f5GT>k}d(j+elzNxQ&A+2w)Pe8!VR4*CH zVt4kkl=GXpCZkTvY`fE^MSv$;doPqzP{05|2`IFCeH%Ds2}aquSBVXX&>#5Rrg(+} z+Wjny0V+>iK8J_h#_l*w;ma?aV!|17GviK&#;Qr_^%657YjU8JMD%m(ENR%0KQ$i= zfSe;DG&ljVJ{t@Z&24{deyhJ!UDS7Me|#NQPT_DUXsKE>SLi~vy?x?~&PG7~BQepaMm?wGl+Sj{br>yDXNv5+(7|J;F)QY~9=(9jiX z^oFq%Bh<&gj1_29@|cY+(JN08iSO5Mvm6ay@Xz;CPuiDf#q{I+*oP1$YXKu4_bs}) zmK_F+m*=yzK%%GzT=cH|v{ls0`zud^LFZ{2BohxV)CTv%o(i-_+%MIYp`_Y}L@aF- zopG=f$p*Kb6dYX^Vq!j*NUUBcQ|}3_z^m>1o)-VnS$5lv1$C4Gvf5LutAHG!i7&;Nz;a)&j_g<1t%g7$6p*CS-dtIEo{7HqZ}p8 zIrZ9bxFE5-4*YdI#Hf+T?IaO@+Hex8qz+{Y>nPrJ;2qJ^ssx!8*`DQb%mc=Cj-xyP zIPC6 zF5L^$%lK06{f8)+7b5Damioin6z~daeFdmCudo!-KQ#ezWbK-JoG3tS#@WfP;GxXj zeM=5``pV%C_4)b?MWsdYf2*5UClj7co;7mCj!@7&k}FRe52YfZ~wdM*#Z zPDOQ>>hA!GzfhT}v_QK~gbL4}e42sF5|SdT3YDo#+qV7`1^b)S>qzze_ZjwYBn5g?M2?p&V}NGx=xuB! 
z23Fn9Grs!G@Qip&8$Ts5jH^$x%@g?E$$7nXVAtiM+VM~DK=M7wWf(xsJa!N~_3{PA z^Wghve<+LOJ@g`Px_YNq!B?zgC)Op~S@jo^_$PLa?mDJD%&RowzUQi8yhf(u_pfmI z+aG=nap*kyQO_F@{OFs#Uy2aP|A?3Ul;3zY4JN1^G2As!Vj$HkA^)awpaQ&s9p3Ph z?za_iozDqc0rL^tG5;nSS_n_|=(1lAtUoQ+IeVzpfbl#jYU`vIdL_8Hx)$lu$bM6U z+aCE1iBAq)T)%?#FQ-fw1-{PeB&*|9+g3&EDpcJ03+VDHRQx`+w1fHg>RZHrgYmlx zy1$(p2!(~uZ{Y$>I{!ZeZN&_yy0}-I9#b~i8qUtH^rxLN=V`YB^8Y{wG$zPK#Qs_8 z0q*M?T{^9o5D|$RX!vjB%}Q(~_32!m&bM8mU$~JOc$e_M-tf4}S;*Xhya<%_6hz~W zkeI(fJpB*nzK#NR^I`@D09rKzes^z*P#Z@jXb0Jx#5YvjzC@b=9h}9Jb1TH}W4%IE z_yDRZ+oGuV`2+v~)=QQb8R}lF;sIaB@gRWw%15J!E&+$d%l|%lyJl1s=b*hp@cY`W zo>h$u@wVRk`_4^ycuE%ln2&k${ckAh&2ZSQ^yX{EsH7p}_+_0l{#!nV=>u@C&wunQ zCE}69tINd-KT5bn8<%iKaJ8ql=-y0(s?@2 zUO5r)&_yp;S5eT%{l`T+{xf3!pa08WF8)nB{KeRDS=#=2Tm;>64iH^tNPah^ z@d!UsC0^45-sM5!ROI#fMuB3S7#0QU{zg@c=jb(i*0zyW?}4lo@eL@5mvJ!}T=vHk=;g*!mvQEZ9* zGr{;p_S?`igVljNH1UmCvbj?fT<0bIh-Y|Dhq^ux+T-fBpv37tS_M{5pzj6HSlqp_ zpRCXK^sAWW@4R*Ob)YYSECidC$;A~52m0{RaU;J2+O->sFM-GdHP0aU{h2xZA{z;{ zW41NHH%@v!TmryVI=ZhbAq>2x6pKnOJo?-{BUX&(dKdW({>KstpFi=h+?N%nc|-K) zTU>@A1hTkutQw$2>3{N#PbQqI;$OCf;sSw)J;4C=>ND<#SEh9g8B7{|*dq(9*KmCm zdlyIv)X9Hl$ zK$`ERlojc(ui23`&>ej>_p8rPg08D)rlrf4l0W(X$N86!Gx=>(W*kd#$`98#c3lU; zWm^0m_}XW9^YUy-T;W3*$BGi&DBANt^@bF$mGjsa10B@B6~=K-!h0$s^o@KkAoIz+ zY^t9+|AYa-4Fe#}Ws2jg|GC}Ch5#tdCFv=<)S?2J&-q4$jJPCNk6-L^ zuCR5=N+QcM3O#xSly9=mm0u7LPU-BueYDxH?`bRBIUg@;k);i+<;K*EvTO zpHaj?6D&`GQuc<{vV7m$-ov1Cbhlb za&F$r5!upBm82iDdqO=eHl@(^$fRy>C>O|>VD8y!!IWMNdK985dHFVEd&WooTz~swZ9o`sh-7@`v<`H-0zTDgs zly$*gdkLY?nm?D|vbfFh0Wb};B~j^N2o=!mjAJYQQw*Zycmt>=XI6^t_h-ISzg>$c z+cOyQOxy3mws5_XjR|sL)(O*Dv*maYx3#Q#WV~w8aSZI59bNz2xyTb-^6rLoZnQO^ zDME346~TpZ-RUQ5QrPP>#{Yy0?y!eIv4t?tR76h`2l6bHwDIry;F^2RLm%=>2+vanmpBAvHh<~M1n>)`CjELx#D^aQ%GQL=q3Fa6Gj-|kWq{c$~RWi{f!EdR@7ePQ2l+#)po`DYj+FriDs{7yz$ zAYwm;jFDaNjayV{JX}_Litpv_az*%r?yxDaJ8CyhlQ|zZvgCYJhWM1I~#6IHzJ8 zZsi8B{$k5M{QtIX6mXpz4yK&?lLc=s&z1tZ>-$&@BYgrxH_ja@=8YFmcZXEC7^WR6 z9A`Q5H43a7r4G3!g3VnYE?=gi@V^X!vRHtiXx(LNJc)p_1HXqttZ~I|y1J_Z>87Yc ztn+0;wE5e=MH}s5o^oMQ9w1Wc@nCwh7u;Q!u9~-cZgDsP3@Eqkr}U(D_-v@xzwbY* zIk%{=J!A1A6&+=E&w$GQWgJ6U47kyAJ>DVH51sIV{94yIH_{57#Q~`UlVixN>!E{1 zQ0hA1&+AYAvOnWKJ2W0C`Z}1eUP*}qZurDJUToX4OTRyGYI=Mmr`vp_#EC>zdUUjm z88TmO0rVvzm{_wdxeZ~?jB1uH#UsWYlQ-mHH4c0r}-O;ygSUhx*7lhfg0M%V##bExh-U=uS8I({lMbA|(oLQcmk zsbxoPCiFh(wH!&(d586&BI@+tmq3jqz2ITJgf1119VOqeW5rx#cZQG$GTUr+(voDVoxR zt`x9kVnD6dDSYs_O>SPdXA(zZm~~tnki1I%vPN?6C*YY2_(kL~VmN}ka!O@R`Q`&W}uyp1#&w%v*WH7p;j&XSS;1B z8ka}zt|$yV<0;VTxR(EzZa6pkz{B<2Asbku#^(U` z%LQl)tNPj@ns!ryY83@iJgQQ^0`rAso#Wc`|XU4c*%o3T`dB znB2i7IshyL|5I4_I?I?^ zS1qJhMI*JW3D}0tCa&*x7@4Kdg~9=OXlLX3Spb`1SrTd0^wgp?g`)X0wotDZV;;R2 z;bxKQ8C?-W3^HLdWF4R9#`Xq!>nL5AXfIBGS~H~TmU`(YxyUhAZHGzi1I|XbRevEj z<810H$xBHrNrd1PH3Y!=vsAPnLv;F|!xhje52a9b0y*FRg3q>1S7Z%~M{l}hbceFG z%OamsK@BX&J;yb@%g0tFEoQCmXPr6sh9NTBE0A4;wICRn)N2-f=BpN45NB{2bpWYZ z{$r#Dt@eJvZ=2GU;1Ud|G>i7#89L;T2`C~b38DB7RT8g_`m)ed1 zrz?>bKyJ@==u%GPkS+qroqB@?*lJ)Bcnp-ASPWGUhh$5jYkrc(SeSfZgW2WpYV3}?#3-6Rm&cg!i_c5X_TOWhR|DgV4XFBlQi|BBVu8Wv@549AI$=x%63zV${O#y zESFrGmSf$sY8NR|Ov&QmH)P`w9@S$XJrOL9V9Ust)UY4rHa?rCq|@wHLPI>$^eQye zl{_ubo~MU~TzAw~rf4Lz%sh4*K-rgWq&_aEYn85Bii}$E|2S%C8dcfd zVAjukF<1>uCR?5i7RUN4x-T}1O!E{+K0SW_!~Yl&QNMp!tJ@9%cSWy3q@yyE&Sa^i zaj-#ewYe)rGcn(%q)WPmj&D{;nkI17Yd8?xPP9Ii>(6VqD6yQerZpAlxZi2D`#rq3 zBVAc*S>jIZiu2J)0dG|94lv;#?&`^CWw0^M7Os~`s?4bU<1>TI25k+iA+QQazb^Is z6b`I8bB8R1LM}EcBoF9TZJ{?$lE~1n54S(jc7+l<@Qq;B%Ovu!oJI9**09D-xtlf? 
zsB4;)A_!S_cLfCp3?Eb>O&uj zlE$aCDupI{2>gXUH~z>`HFefg=H%rueSG^#^MQNS@J%#!)y-HNNz}7f%p8j7nL;Jp zb^^FX&ZQdK)?J3vc9RZH!x#(SZf*yS z9&{|+FHNwY+O-bYUl|hw2Jx6Ab8v|qDe~v?c(=mE2R3PjgQgDfg9YC3p!8<**wA7M zn3$3~jJWrwb{eD{>?EC#KjT!oC?4-&N$wqW|H!JAcDyvc)o!Ch9pRK=P)jz(5k9Qt zHa-cVY0@v|+i>(X!oFg+=_D^1&!XnJAO3EDf?aUiC|*})T@`psiuzPxN1yA^L*zmQ zU~_sSqYDcV4~-w^Bt>>d66rz;i9kW>AdP8(UcF9$1?2drEMRiYka_sc*`LytUFaxL z5XoqnOg+Z^QIfAewKvBEG}nkp?hu zwrbaxe!nSos!`Dsq;IsmgJx=HtAL&*#BwKi(oLP4UyPXps&3E=NAtz(w)8R*)u89y zs?y95(_n0%w2aT^up>8NZ52Y>80R1$xbSYV>TP_oj6+N))02?PbsXk@qJ%q5sBX5j z?(|`A#P|Nm>`j7GnT8{^=yh#y(7fHU%WIjH)#3W%5`S8B@Y$)A$H`Zr6`)ROYj8Si z>7}*P$eJnDtu5G9oxwW{@jQP<1nlJWZ}=Lm?efxbujc1dP2{SJ@o76t$d4hoi9Rpm z-sr+G`1z4iwQh%$K=wrOmMXOOixowA5-1lDo5--j>ahJutE69|yn~>)I(xph@79{W zaK$1fDmASbg$?wlc}e_1~P8-GQ zuiwsSE1`WRu01bqYo4f~c>St=ASR!Y=k}w-(RQD6s12dLoQG z&W*{wgkx^_uqu5ABe$>4N!2$Lr!q3=H&ujIes|KBrCpwVgQ`Wl*Kd9?@$3zgc?6;* zSU_R`nE861^KyKt1hQG%mJx5dQeo4m+p)0=sDNvC8?H$eo_wYv)2MM2uFrClMULf> z+jG}F+_uen2m35n@-7vz*d%)Xw1tUcqo0Jr?sJ);McBBI#*s{{t54lwe=dt_7Xh%% zoS@Rq4Jmy#ql8V^M3b>h2_=4=>5bV5U#N(+s~)g&N_YsNu!z05atl5izz8@6U9UYT@rA{!Itc)%xF zF=oknWYK;%mPg07_2uHs_2mxtlPw)++KA(luKb;B;)B5*1$`xj}Xp9 z#UyZ!ORiNc>OQ^SWDB325uh7n-TaESvFvwGoyl0hd9yNlYxIK+v~)S^8$8{+1&5z< zyI^e0cf9cwW`va0_H8V)PjKi;arZV@Y^4UJ@8D-}tH+y^gzjPT<(VP}-GPNi3mF)! zEK`x?I4-?wu!Q~U<|Ya^Q^T>Scr#jtxFkwo;aYwd_@erDy|corG}o4u&JW|?{EJY@ z1QKfxI_;f?^t-RSJ8G@Q=5H;)PzTReK-+pE=nXAiz4sYeN}`qV138^lpTyzh;vR<9Y9yJC! zafbi66Q=+n*#rh)!t_jPxbWsD#;FyyF*B7-n`sH~vSV0D86Q|^QZ_hEJYBOLq3z%E zWHRi}Z&buh6Z(n8L_X{1C1M_z)stxmrQQ0iU zEQknoe3a8vi|RELlnd$U^xmhd46Jt~SHSsPLr~}Vs%-l>eyi;kz$X~xD^7=LoN7|8 z8@n13QODfIl8W@P?8AMU!TN4<-)&qC7{=f1(Bxgx>)S|VCsw!Hg@9>-q6FsR9xMQZ z7K8*GNtXT0QR{ORnF#ao&$*Q^5DPG$>g#m)0#mFfQQzRp;P-E|6$p`c0S+vFwS1&T zkaZ+j=_mkfQQw_@F__b|C5a6AZ^o++Q>GSC5)HNM`*Jdc?ofm6%f<=SgWXOxI1Q#M z{PA0|4|k(>l%w%cjbhzB3nSbF5PA5clWhAE(zG>GneJSLHa-H&U&pXY<*x6LBt)~jUg#i2QkQjc-A9&_9$KhhW|OU!4h6pd z)a?04-7Ul26X^3E<^@DshzrNj!F=+IE?yd3#IYd%Y6Vw$u9NGbR>Ya38fe<(bY=d? 
z61J*K?yC5fjL>pTV@u%B(qUPPR~ zSL9n9+(Y&N9M!#$ouwf%NW!KGKT94SnV)d)VKr6Rz8{@w7Cj#)dMS8V#|*#hu;e{= zJ3pNU9*~$PRHYTW?SFTG4`;l8(`%p^Ah*^_<|4XvA?sfAG$*qx7Coh-tUxI!@lRRc ztRVITOfto7`@vbXc2sxKs;m&nk4L~(kOg1}vcmm<-~QOvSuWbtw?JK@@uCURKikFf zZV=e-R*i0sBBy@o#=jcjO<$jrM2yqQDdmGhrbGxHW@si`EdS@xFbwmHnlf?&@0vC)q1Z zq=PM~Q+9W7rdeAsa{cp{ogz1E_G5ca46Ew5tKD44#>*or@@24zEPAiwg9M)2<-n&N zFtXW?KzB#*WyQf-nj-uKOPGM)x%8zvitz7ey`+s9EOu<`>(+GaoN36?pLoBf_?$cx z_h58kI+!7xaP@t{#bYaY zsFS_|?qDuwzn)2tCDnMP2`y0XSV7LMWPT)6%8=l5bF5bbcd&KwegjpH5#bdS&iw<0 zJy5Y+ISZ5IJcI?h1(XJ6;pS>w0@Dsvu7lMX2I%9ol7V1TM9Q48iEt#t6NY$xK-m-c z&sk)Z7R082RMJJS&%FrP6Rjze-3XiKsmDSH4ZyVlagS+ce9un-A{hC1T| z=2nE&Zuf8vplBFn9huT-xznVyJ$_5Q*~ills_+@ns=ftKWOWSiB^Mfi)_RvQ^723` zdW$WI@6f4hnqll z>m4Xi=mjiqNOeDOIHIiNuP28r&S5<&#Q;GIx{3H*Pu<7a!(xH$GkrSLH9Cld`}?q* z`@lx9(5z_TI~8Qk#fW1~dsV%)&2~=B#}CW~+eW()nIUU%R4=)y_tC%v6|iA9(nxQp z%%a5Bv)qzh^=H)P(mRzqh;?A5+a7p2$Cy!lKbO?j7%RF!eHd(O2%kBRa-a%Qm*u8E z)U)v7U=sujF72@Nl(ujBU{;B0$?fx@bDD3R1-B*ClG9X9Rzn9$ye32&og>iR8X+;Y zsS~1Tn`L~W%<=e?d-wCuW?8#9M53~1W3m`jI9OZcy=~S0^n8apen!4@ep|ufBn()- zB&{D*iQy33^TGVJ?v?h5=ZbfI>*a+-#M7j&Ag7(YLSZzz?Vu2+-6~K~(QGgar->hh z5yqP$nk^Kxn|0?BmP_<+0IK(Y%vwN=s(=k0O%4u&xwTG13;-Z>Gwt5cwqX3ac*%^` z3^FCbM=CxH3DGu?a4ks4?R-TgkWREe4Mje|f}F=yFY&iRq-r!hlBognzR+^m$hTBC zLj5@(7X800+~D9vuHuVaFhY)l*g6I7^l-Qf2}r zgu+Rs+Ew*D5I?gsVzu-;iglD}G1F8Wu>C@Ha-6*GBDr(1Jrhi1qI1kJtV%$%d^eK7 zNdAtI6RT$?~8>^dPn-Ni)o12UVDC>u}&uyvw*gS6BdJ1z( zs!p((s;>pUWbJ_^Q^G8Sx0Y4Er;Cr5m)B`|)aoAhA&&?)wptM62Cb9B{F~iJ`<9-k zduHb;`UXoAPiwy5&%)~u1TthM(LVYY6s?mm9TsmJgo!t3X`N)On~p_w-NPdQO*DsE#Q!{WEHrjwa@ zdSwoUQ?xZsaUXpx46f0xy6DZ1%WQAJ&+JKGq#J&$u1?&Y@GKl;d*Xg#8lvkNl{;O< zZ!+G%+NzeOu9>iz>9&Iob~cIUEO-gM_mEd>`{m*1AZofnsFGT)65~q-l40qLA<_$H zSrmqY1d>KLQNEZG+TB}-4GCpU)o&eeUd|o9PaZ`@YXp`bdIis%s8u_q_IND$Z95b9 zk8Acxq_&KYUQjnyz4MK$4QqX@E$|^`$gaAMqFF!FwmZwZU9?I}YF*^;bFwxc@X)u= z79Cx)Oq0xWyRgu*2$4_2&*XLns>AOY7KyZ<4bPa=u9cWF z#%dCbvsBR9Ewe7u7f&b>RX043jY6p(dnX^cojn@-@T{hHYPQ3g5-^5gCuT+PA1p{2W41iS%rPmN8}}4(Ykb#Ogo}Kz$%#){Yk@_Py)sSGm0qRgXN#c5@o7QL9fKET?1AO#etBl~G z@4AJ@Ag;VbMpabM-MS^J2J$K} z2aROA7AZw3Eu#;cz;Iv0be-)(-Oebef<75*?6}PBKs&P!vKn{RR{ai89>q!9JZSh@Rek&My?R}5Gv0;ZOIQ%x&t(CN6IGLauGRpz{X z?u1Ub-?hDF)+j&PamPALJcv#AF88@8Ugs@rGFhgEU2nz;%RaV_*tPdA>(C%!n0XlL zXm$V$68c!QJRV-}bYR3{+?j#MlK9bI(*_jS;%B#l`=SWY5UWBAV^8rkiF(LokC-}* zvUT53(kFS4>f=!DbjTcjUe9xoG?;3BLif3d5tFE!!sV2k|9JZ*@G|9tS{?{o2NFz2 zmt?|jagUH^xtB!0Jzu5Vok}Y9F2p*q8H=g$Ye5T_N4lbl^{MR|jgSD!k2U0d4ftGyN-a!1Xyf6t-J_yc#fiw6cXV0O~SWfA!mS{Co@JyN^AiJd-G!OibfAjJ{UnVI`Up zS|!3H_jqI!5%fxg&UkO1?zyMfQOkFJF#RyHtYbB0@kpl@4-`z!|LSSyaG#B)o%F*v z7FV$n$IuY;S^Qm7`$w`uW4}XA=2ggfiwD|B6F%WqI1(@I9O+HmI}By6z3Ww%-OFd& zZ?c5O9h4c45tqlie&^fq=VZti3DR5FlU8*3yaeSsF#n{mD>wQ?Y3_&{VplWZSvyFJ zF6{eh0wS04tmfYnq8J%UidG95&naC!qUXfg;k?co9ykoKo=XiDNVQEo>1XfIJudSV zAA-L}2?(bxk29D{j$f?MIuq?*yKoYXCjNP1P}8{L7TDk8BN|MW3bZsIs@780o43=( zft*4ax!jgiXw4n&RzKI`O)%O8^So{2l4kRIut!m`@pI^QCY|v}ex3MRPxX1k1MU9L zZHri$-xZn}bFefOL-b_s3E7(DBq?EWRd^oW`kDW7}=Go)p)$88Ux~8K- zUcbCKuCuo>RVrI6p%>8AB{O+E;6&k^!r;)%LW5Qn;In2=?LvPuY_)dFMOcgQOl2sb zUQAcedY4_0EH}F6=LhwIt%df+drj82@|`wC50^<^Bn^o;B)^)l7}&L2;F<8Hz3Jr+eZpt94er zzS%HNk?-fC>CYYd*R{hl4duEG7j+ga=0pxZOehQ(V|S^F5-VgHn?l~b4g&ZvECVK`jFtS|S|GU`UOr|>dWDOrpxbs(8>wVYkH z5H+RQ220=u4ynXbvft0(H$ z33+*Qj&2dY-RSid-YJXd5R4PPN`B#2o|udQwiQY_BQ-ot8`WK3mT^37i>Yf*h8GnP71p7Yh(fz9xPdKcqDrRzmr?aQb#^-8wkC~Qbn!$i@-sG6?$3)Q^T=0 zA%nVfx+LHGP(h!d1hEHTH%0aQJk8RS65?12s_B*7!pb)XpqJ&0D$#815j$gF>2P*R zkA5f{Y;U{e6Y*5@>2vmL1SWp=-UIH#NB;z_TklFoJP-Q8Zb@kepwrK<0^?0*KmST< zqY^Yhct4}qJN4c~(W{2t9R%nQX*KJig7F7KxiU||G_Rr;Iz)=zS1(?GlBaIIG8hHu 
zxfmY1k6(iYxfrzbzk;U9Y_IF~MoCkQyQ$f&h=X0v*_(JiHfqTl^_wqs#g@kq1vmLC z5DHV%CpOPL6I#Sf&(g~8hp}A1$#qi@8@31s+iAWD>sTO-c77QGg><~B50P0GAe9p8~R+?Ll)PkSvj4e`}X0^2E*1siT?02 zdfvYDYNkHRLkzW^@ArqZBr~I|^Gh1+rJ#s&QE-0@25aQgA)}9P!96vMrEY@gV7^UA z;Ci9h*s$sbC(iRt*HeqkNR(#oJkTS)^R|}?;&=vlEFZeWys$Lyw>)LR@N#jqeLX~X zA^Fh??4Fu$#HXklB}~zcTb7XB;!66#G>%!HnL(xQO|A!l6s`wWHb}3l$|F1L_bz}C zcWiP#;rKxfpEVY#!1#ci3(~vwK^c*mp9J|h^&`sNyiRf_s8KY+X;Tz=FZBPL_oCYS zX5Vh#8)&{aXP&YrjOP8*8}v)1OCG_GsKI92BM zhfOJiwW3ZSXt8*e&<%}=7HIwb5u55*J@OVFiaf-_Dm;*K!Km0gRd>>a7DQp@gn z=J=^>EJxKv{>kg=#*6cK?|_&Oc&G30CGj+|$FZ73P{qWZNk(jA2-yWXG9dVCWU4iK zBUZKVepM|raPOD0Xu~QPj_|3!t};C;@w4UqOth!CcH*qjmk3Pl+`>SaghP3k zz9`Rv2AE$&&QjUhqVKuX1KDhLnK!#PHYY2==UkSbce4s1J-k~J5qY#Bre#}aI0ETF zYv=WT-e{KjEZu5St!ePlSbO1|fNavBwuca?H@*6OL(I#uQA>f8!n-|uC?8HUx{dsz zN9#|pS6XLrr1yznWuw3!+9nngt>}o4(Ac5|2@DP4OiJ?_JTCd^)i+qmDuRMlE6s3* zVb5&EgHbJgsHK zR84-qs+24A>d(1HUF*nXaosa|G?MA>6$H0C?#-fQ=?`-#k!XaKC1Gg-R4GaSkant~gqx0#@uewGbSM&st}^rKW@ z&}#4=GPQqrUlki+vo_e#OzQYT428MaX<2Cu_NYI>qu!PJz6@PzUUhRnrH8nqlMURG zu+QRkB7i_cZUx5>4+sQ=0?GZ2FT_vvyB?KuN{O%q^SF|-D&E;yu)xPfSRnI8BYZzT zyWa9y(nU!f^Xd|yZf=wu&}Wd~$PhfuRm!GUq)~1Tr6)-v-SoL{75y{LCSFI(>mv@m zA%~Gdyya4z7$?pUN2jt$R!HNKX<8lr8fE^s1{q$F4~J}>FbQ$IQ(i<`QroV>z19sunl7$WN*otM$c zg%{q}m`*qJG=BK)y9$#|Z|lR#=|RK;$(6Oa>I*dqY(IUe`Nb#Ql;74OvJMRa`@shs3Rl%&ju_XR}y({Ib} z>5tj(l2ETM3e95CJNxR!)dyP!_%tM8ZJJvT-qx~-VMih_n6G4 zWls$H!W1LqT=JD`U!itH{x7!PGOo&Q3-?w*0cnvC5Rh)9djSF>(%mH`-Cfe%-OZxA zySqCU-Q8X1=GpG^?DKoyulMJB%z2IL`j0Ug^&1HzQh&ns-iVQ;p%r#Nc=oo>t#d!0 zM5gUkh^v-zInmw%<-*}q9CLAD{B1?l1uRlzq!+#K(aM`PpOY+%_!eupf_^kyKSCoPT9*+TZw@3bSh=BZ zGy(!#QXr?L%IF$CD$%}VrRK9bhcwYKu*83~lc)9@ow)<(a1BJV@4dhCx#23GyK1l> z-N9YwXnxhQL11pa9*#79WGM&0c#{(<_cE^0sG?~2nDb+@JAB<{1Q>gNJ)7)L;~V4< z;#i(oA|uC_a%f4yO@mg8Kd)1SKjz!lrnzjT{F?Bol1m*JJ6$KMM4!^mA0GJF$7q%U zK?%m;4O(c@Pt`T1)L0U+c3QXK^N8uNSuE48>AqI@a!Xlo}n|Hf8(dy3+!bwNo?unm} zq#fJuJ!^To!`YPdTBD+Ep_U8ccE_rh8Deb7byx`)IF}K>q&~s%SlLym>HYt)DO(<| z5|vZB-qrX|Q!D~qzy8pb^*#GW(m15QM+L(PH4pW5 z;vaGZ>Uau)CtQ1PzX)AzXw2L1K0JmJS@SAI@UuNXSh+OKKGeEkP8mVO-NLIZ`>gxp zzIf8&Z(_~7hk97a&Qo5*n4WRC!6u&}VTaR*z1o3r{OO*JW&Gm&&Lix5XyW&{5mZCD z5?-JiLCE>>Q!u1;?u^SHxAvnvIdRv{D-7z)4u&G507sYTgvTy?Q=a=ZDvn&Pw;##P z<8Aq)wx!+NZp4Ocf2w)#qCgK%+SL;%LQ@a{QZ!dk<~wZa^I9DCH9d8><^=i|kr?W; zwuzRLf$;zRzui%O1T*7ePiN`sAHG|t^ux++Gss10{Pp>U-C%jptYUb`0%;L&l3cqc znsBLgZFkxIRt_y6a$gre&ONLZFxlU9>fHNZz0vNL08j0w&h)|S1uH;n+6}}~q)#lR zeBCBpoiLD~4?4O~#USoyh{oBW<*G4EvE5HexhFEggobuEhdtpSp>)NrAuR+=6InPz z|AI|b7Z3zSh|e9* zV(b{7xcw;(FFPj!_*3@p5$rqP zHg)?i=dY+AMUJ#&Rqefn)!_ef68p>5W(B@vc+dL)cyDrCP_w&PED*egp^-CKD3JgP z?T>Zvn6EO_=@Vk_rpboGb54WQ+U?9s#8SSri<$F1UX##rN^7U;!^1K6V{eFC+bWgd z4aUZ&>NjL;3MlNLqM?3=6H>VU30jWgWo{fUCGJAi&KZ4zYWaOlY`1Kqa`P@*8&RuF zv_}nj>SC(jWn@8RNnw8q=R#p8J!6!69KYB&`JTs+q zGIlh%N_C-LIWGL0T_ktL%VgRrS}zok4E*lKCG6_ChN$#9sQaF92?zCLb?ZTC{mJAH z?*I0J1hB+GyoQCVS5(@Ba)!(kokh$8`chFbuh5Rnz23%kCiwLmJz1fPC9+Y66YeU< zz$KW{Hq}{alMWI*B%k(E(Sq40gsd#fv6j$JkbG9Q5>nu^y~L+K3*wqd&3@HOGfxx4Ui)A)p39cU;rq#=>oA>+(gfIje^;p9$=V*a z8xrU0qtQsv{qNTh7pDx$h`t^tc21R}9-UU= zus?Ly>W-UY(g!Mzg*T5BN2bK?Imwq946YMfD6k^Z&gI|KI4n2Fxfh$+qba4hOl%pV zmC-QYAlif})j zBy&5l>_gG(v`)z_#30gqVn=}!=@9uOWf`T_@>YOq+uasL>8* zV_yAP3R(|p1;2tx@1VAB8G9&MCt~Yty?bDxHDR-i^MrOIRO_g3`F-88>)>N1@~aZ9 zm4gj8^LAyb3Z228QKP%E5~G#&P62$EIoFm@K0%q78V>wd)Z_QwQCIM9P`~`uB!z!9 z$*7kEYet4l{W6p6q0qE&w{f*fvkFISaO{YsA6hu`Kzxsu^;bZ=djsNKLl&Al^5nKZ zM>l+>%SFtet%@aK1j7SK;zBAI996r;XYKQu%&hsG77>?YDIYrv&r1L{_1<+}Vrs9l z)Ow{Ac@>SW4&8`pdXuF@EpDq&{I64*xlr?a7`mQX1J4C_i)?9N4$vh&ZIb;tYX1@K zXg*mtHRVgHjRJCu^k1Amcms+(BxKTHYJ&TnA^eKea~p`HM}PKGZjT^Q7T}#-giScE 