From cca562d538b1f166fb5e91f1cbc3b3753a744e5b Mon Sep 17 00:00:00 2001 From: root Date: Wed, 26 Feb 2025 04:53:23 +0000 Subject: [PATCH 01/57] migrate from speech llm --- .../local/compute_whisper_fbank.py | 185 ++++ egs/speech_llm/SPEECH2SPEECH/prepare.sh | 45 + .../SPEECH2SPEECH/slam_omni/data_module.py | 437 +++++++++ .../SPEECH2SPEECH/slam_omni/decode.py | 653 +++++++++++++ .../slam_omni/label_smoothing.py | 1 + .../SPEECH2SPEECH/slam_omni/model.py | 285 ++++++ .../SPEECH2SPEECH/slam_omni/speech_dataset.py | 176 ++++ .../SPEECH2SPEECH/slam_omni/train.py | 872 ++++++++++++++++++ .../whisper_encoder_forward_monkey_patch.py | 1 + 9 files changed, 2655 insertions(+) create mode 100755 egs/speech_llm/SPEECH2SPEECH/local/compute_whisper_fbank.py create mode 100644 egs/speech_llm/SPEECH2SPEECH/prepare.sh create mode 100644 egs/speech_llm/SPEECH2SPEECH/slam_omni/data_module.py create mode 100755 egs/speech_llm/SPEECH2SPEECH/slam_omni/decode.py create mode 120000 egs/speech_llm/SPEECH2SPEECH/slam_omni/label_smoothing.py create mode 100644 egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py create mode 100644 egs/speech_llm/SPEECH2SPEECH/slam_omni/speech_dataset.py create mode 100755 egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py create mode 120000 egs/speech_llm/SPEECH2SPEECH/slam_omni/whisper_encoder_forward_monkey_patch.py diff --git a/egs/speech_llm/SPEECH2SPEECH/local/compute_whisper_fbank.py b/egs/speech_llm/SPEECH2SPEECH/local/compute_whisper_fbank.py new file mode 100755 index 000000000..1c3a3d1e0 --- /dev/null +++ b/egs/speech_llm/SPEECH2SPEECH/local/compute_whisper_fbank.py @@ -0,0 +1,185 @@ +#!/usr/bin/env python3 +# Copyright 2021 Johns Hopkins University (Piotr Żelasko) +# Copyright 2021 Xiaomi Corp. (Fangjun Kuang) +# Copyright 2023 Xiaomi Corp. (Zengrui Jin) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import logging +from pathlib import Path + +import torch +from datasets import load_dataset +from lhotse import ( + CutSet, + LilcomChunkyWriter, + WhisperFbank, + WhisperFbankConfig, +) + +from icefall.utils import str2bool + +# Torch's multithreaded behavior needs to be disabled or +# it wastes a lot of CPU and slow things down. +# Do this outside of main() in case it needs to take effect +# even when we are not invoking the main (e.g. when spawning subprocesses). +torch.set_num_threads(1) +torch.set_num_interop_threads(1) + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--num-mel-bins", + type=int, + default=80, + help="""The number of mel bins for Fbank""", + ) + parser.add_argument( + "--whisper-fbank", + type=str2bool, + default=True, + help="Use WhisperFbank instead of Fbank. Default: False.", + ) + parser.add_argument( + "--resample-to-16kHz", + type=str2bool, + default=True, + help="Resample audio to 16kHz. 
Default: True.",
+    )
+    parser.add_argument(
+        "--speed-perturb",
+        type=str2bool,
+        default=False,
+        help="Enable 0.9 and 1.1 speed perturbation for data augmentation. Default: False.",
+    )
+    parser.add_argument(
+        "--out-dir",
+        type=str,
+        default="data/fbank",
+        help="Output directory for the computed features",
+    )
+    parser.add_argument(
+        "--huggingface-dataset-path-or-name",
+        type=str,
+        default="/workspace/Belle_1.4M-SLAM-Omni",
+        help="The path or name of the Huggingface dataset",
+    )
+    parser.add_argument(
+        "--audio-key",
+        type=str,
+        default="question_audio",
+        help="The key in the Huggingface dataset containing the audio data",
+    )
+    parser.add_argument(
+        "--text-key",
+        type=str,
+        default="answer",
+        help="The key in the Huggingface dataset containing the text data",
+    )
+
+    return parser
+
+
+def compute_fbank(args):
+    in_out_dir = Path(args.out_dir)
+    in_out_dir.mkdir(parents=True, exist_ok=True)
+    # number of workers in dataloader
+    num_workers = 4
+
+    # number of seconds in a batch
+    batch_duration = 10
+
+    device = torch.device("cpu")
+    if torch.cuda.is_available():
+        device = torch.device("cuda", 0)
+    if args.whisper_fbank:
+        extractor = WhisperFbank(
+            WhisperFbankConfig(num_filters=args.num_mel_bins, device=device)
+        )
+    else:
+        # KaldifeatFbank is only needed when --whisper-fbank is False,
+        # so import it here instead of at module level.
+        from lhotse import KaldifeatFbank, KaldifeatFbankConfig
+
+        extractor = KaldifeatFbank(KaldifeatFbankConfig(device=device))
+
+    logging.info(f"device: {device}")
+
+    start = 0
+    stop = 1601
+    num_digits = 5
+    for i in range(start, stop):
+        idx = f"{i}".zfill(num_digits)
+        # dataset = load_dataset(args.huggingface_dataset_path_or_name, streaming=True, split=partition)
+        parquet_files = [
+            f"data/train-{idx}-of-01601.parquet",
+        ]
+        parquet_files = [
+            f"{args.huggingface_dataset_path_or_name}/{f}" for f in parquet_files
+        ]
+        file_name = parquet_files[0]
+        logging.info(f"Loading dataset from {file_name}")
+        dataset = load_dataset(
+            "parquet", data_files=parquet_files, streaming=True, split="train"
+        )
+
+        cut_set = CutSet.from_huggingface_dataset(
+            dataset, audio_key=args.audio_key, text_key=args.text_key
+        )
+
+        logging.info("Splitting cuts into smaller chunks")
+        cut_set = cut_set.trim_to_supervisions(
+            keep_overlapping=False, min_duration=None
+        )
+
+        if args.resample_to_16kHz:
+            cut_set = cut_set.resample(16000)
+        if args.speed_perturb:
+            cut_set = cut_set + cut_set.perturb_speed(0.9) + cut_set.perturb_speed(1.1)
+
+        logging.info("Computing features")
+        cut_set = cut_set.compute_and_store_features_batch(
+            extractor=extractor,
+            storage_path=f"{in_out_dir}/feats_{idx}",
+            num_workers=num_workers,
+            batch_duration=batch_duration,
+            storage_type=LilcomChunkyWriter,
+            overwrite=True,
+        )
+        cuts_path = f"{in_out_dir}/cuts_belle.{idx}.jsonl.gz"
+        logging.info(f"Saving to {cuts_path}")
+        # cut_set.to_file(cuts_path)
+        remove_recording_item(cut_set, cuts_path)
+
+
+def remove_recording_item(
+    cuts,
+    output_cuts,
+):
+    """
+    Drop the recording sources so that they are not stored in the output manifest.
+    """
+    with CutSet.open_writer(output_cuts) as writer:
+        for cut in cuts:
+            cut.recording.sources = None
+            writer.write(cut)
+
+
+def main():
+    formatter = "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s"
+    logging.basicConfig(format=formatter, level=logging.INFO)
+
+    parser = get_parser()
+    args = parser.parse_args()
+    logging.info(vars(args))
+
+    compute_fbank(args)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/egs/speech_llm/SPEECH2SPEECH/prepare.sh b/egs/speech_llm/SPEECH2SPEECH/prepare.sh
new file mode 100644
index 000000000..87e7cd254
--- /dev/null
+++ b/egs/speech_llm/SPEECH2SPEECH/prepare.sh
@@ -0,0 +1,45 @@
+#!/usr/bin/env bash
+
+# fix 
segmentation fault reported in https://github.com/k2-fsa/icefall/issues/674
+export PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python
+export PYTHONPATH=$PYTHONPATH:/workspace/slam/icefall_omni
+set -eou pipefail

+stage=2
+stop_stage=2
+# All files generated by this script are saved in "data".
+# You can safely remove "data" and rerun this script to regenerate it.
+mkdir -p data
+
+log() {
+  # This function is from espnet
+  local fname=${BASH_SOURCE[1]##*/}
+  echo -e "$(date '+%Y-%m-%d %H:%M:%S') (${fname}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*"
+}
+
+
+if [ $stage -le 0 ] && [ $stop_stage -ge 0 ]; then
+  log "stage 0: "
+
+
+fi
+
+if [ $stage -le 1 ] && [ $stop_stage -ge 1 ]; then
+  log "stage 1: Compute whisper fbank features for the Belle_1.4M-SLAM-Omni dataset"
+
+  python3 local/compute_whisper_fbank.py
+fi
+
+if [ $stage -le 2 ] && [ $stop_stage -ge 2 ]; then
+  log "stage 2: Decode with the fine-tuned whisper + Qwen2 speech LLM"
+  python3 ./slam_omni/decode.py \
+    --max-duration 80 \
+    --exp-dir slam_omni/exp_test_whisper_qwen2_1.5B \
+    --speech-encoder-path-or-name models/whisper/v1.1/whisper-large-v2-multi-hans-zh-epoch-3-avg-10.pt \
+    --llm-path-or-name models/qwen \
+    --epoch 999 --avg 1 \
+    --manifest-dir data/fbank \
+    --use-flash-attn True \
+    --use-lora True
+
+fi
diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/data_module.py b/egs/speech_llm/SPEECH2SPEECH/slam_omni/data_module.py
new file mode 100644
index 000000000..35d1e3494
--- /dev/null
+++ b/egs/speech_llm/SPEECH2SPEECH/slam_omni/data_module.py
@@ -0,0 +1,437 @@
+# Copyright      2021  Piotr Żelasko
+# Copyright      2022  Xiaomi Corporation     (Author: Mingshuang Luo)
+#
+# See ../../../../LICENSE for clarification regarding multiple authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import argparse
+import inspect
+import logging
+from functools import lru_cache
+from pathlib import Path
+from typing import Any, Dict, Optional
+
+import torch
+from lhotse import (
+    CutSet,
+    WhisperFbank,
+    WhisperFbankConfig,
+    load_manifest,
+    load_manifest_lazy,
+)
+from lhotse.dataset import (  # noqa F401 for PrecomputedFeatures
+    CutConcatenate,
+    CutMix,
+    DynamicBucketingSampler,
+    PrecomputedFeatures,
+    SimpleCutSampler,
+    SpecAugment,
+)
+from lhotse.dataset.input_strategies import (  # noqa F401 For AudioSamples
+    AudioSamples,
+    OnTheFlyFeatures,
+)
+from lhotse.utils import fix_random_seed
+from torch.utils.data import DataLoader
+
+from icefall.utils import str2bool
+
+from speech_dataset import K2SpeechRecognitionDataset
+
+
+class _SeedWorkers:
+    def __init__(self, seed: int):
+        self.seed = seed
+
+    def __call__(self, worker_id: int):
+        fix_random_seed(self.seed + worker_id)
+
+
+class AsrDataModule:
+    """
+    DataModule for k2 ASR experiments.
+    It assumes there is always one train and valid dataloader,
+    but there can be multiple test dataloaders (e.g. LibriSpeech test-clean
+    and test-other). 
+ + It contains all the common data pipeline modules used in ASR + experiments, e.g.: + - dynamic batch size, + - bucketing samplers, + - cut concatenation, + - augmentation, + - on-the-fly feature extraction + + This class should be derived for specific corpora used in ASR tasks. + """ + + def __init__(self, args: argparse.Namespace): + self.args = args + + @classmethod + def add_arguments(cls, parser: argparse.ArgumentParser): + group = parser.add_argument_group( + title="ASR data related options", + description="These options are used for the preparation of " + "PyTorch DataLoaders from Lhotse CutSet's -- they control the " + "effective batch sizes, sampling strategies, applied data " + "augmentations, etc.", + ) + group.add_argument( + "--manifest-dir", + type=Path, + default=Path("data/fbank"), + help="Path to directory with train/valid/test cuts.", + ) + group.add_argument( + "--max-duration", + type=int, + default=300.0, + help="Maximum pooled recordings duration (seconds) in a " + "single batch. You can reduce it if it causes CUDA OOM.", + ) + group.add_argument( + "--bucketing-sampler", + type=str2bool, + default=True, + help="When enabled, the batches will come from buckets of " + "similar duration (saves padding frames).", + ) + group.add_argument( + "--num-buckets", + type=int, + default=30, + help="The number of buckets for the DynamicBucketingSampler" + "(you might want to increase it for larger datasets).", + ) + # group.add_argument( + # "--concatenate-cuts", + # type=str2bool, + # default=False, + # help="When enabled, utterances (cuts) will be concatenated " + # "to minimize the amount of padding.", + # ) + # group.add_argument( + # "--duration-factor", + # type=float, + # default=1.0, + # help="Determines the maximum duration of a concatenated cut " + # "relative to the duration of the longest cut in a batch.", + # ) + # group.add_argument( + # "--gap", + # type=float, + # default=1.0, + # help="The amount of padding (in seconds) inserted between " + # "concatenated cuts. This padding is filled with noise when " + # "noise augmentation is used.", + # ) + group.add_argument( + "--on-the-fly-feats", + type=str2bool, + default=False, + help="When enabled, use on-the-fly cut mixing and feature " + "extraction. Will drop existing precomputed feature manifests " + "if available.", + ) + group.add_argument( + "--shuffle", + type=str2bool, + default=True, + help="When enabled (=default), the examples will be " + "shuffled for each epoch.", + ) + group.add_argument( + "--drop-last", + type=str2bool, + default=True, + help="Whether to drop last batch. Used by sampler.", + ) + group.add_argument( + "--return-cuts", + type=str2bool, + default=True, + help="When enabled, each batch will have the " + "field: batch['supervisions']['cut'] with the cuts that " + "were used to construct it.", + ) + + group.add_argument( + "--num-workers", + type=int, + default=2, + help="The number of training dataloader workers that " + "collect the batches.", + ) + + group.add_argument( + "--enable-spec-aug", + type=str2bool, + default=True, + help="When enabled, use SpecAugment for training dataset.", + ) + + group.add_argument( + "--spec-aug-time-warp-factor", + type=int, + default=80, + help="Used only when --enable-spec-aug is True. " + "It specifies the factor for time warping in SpecAugment. " + "Larger values mean more warping. 
" + "A value less than 1 means to disable time warp.", + ) + + group.add_argument( + "--enable-musan", + type=str2bool, + default=True, + help="When enabled, select noise from MUSAN and mix it" + "with training dataset. ", + ) + + group.add_argument( + "--input-strategy", + type=str, + default="PrecomputedFeatures", + help="AudioSamples or PrecomputedFeatures", + ) + + group.add_argument( + "--huggingface-dataset-path-or-name", + type=str, + default="/workspace/Belle_1.4M-SLAM-Omni", + help="The path or name of the Huggingface dataset", + ) + group.add_argument( + "--audio-key", + type=str, + default="question_audio", + help="The key in the Huggingface dataset containing the audio data", + ) + group.add_argument( + "--text-key", + type=str, + default="answer", + help="The key in the Huggingface dataset containing the text data", + ) + group.add_argument( + "--resample-to-16kHz", + type=str2bool, + default=True, + help="Resample audio to 16kHz. Default: False.", + ) + + def train_dataloaders( + self, + cuts_train: CutSet, + sampler_state_dict: Optional[Dict[str, Any]] = None, + ) -> DataLoader: + """ + Args: + cuts_train: + CutSet for training. + sampler_state_dict: + The state dict for the training sampler. + """ + transforms = [] + if self.args.enable_musan: + logging.info("Enable MUSAN") + logging.info("About to get Musan cuts") + cuts_musan = load_manifest(self.args.manifest_dir / "musan_cuts.jsonl.gz") + transforms.append( + CutMix(cuts=cuts_musan, p=0.5, snr=(10, 20), preserve_id=True) + ) + else: + logging.info("Disable MUSAN") + + # if self.args.concatenate_cuts: + # logging.info( + # f"Using cut concatenation with duration factor " + # f"{self.args.duration_factor} and gap {self.args.gap}." + # ) + # # Cut concatenation should be the first transform in the list, + # # so that if we e.g. mix noise in, it will fill the gaps between + # # different utterances. + # transforms = [ + # CutConcatenate( + # duration_factor=self.args.duration_factor, gap=self.args.gap + # ) + # ] + transforms + + input_transforms = [] + if self.args.enable_spec_aug: + logging.info("Enable SpecAugment") + logging.info(f"Time warp factor: {self.args.spec_aug_time_warp_factor}") + # Set the value of num_frame_masks according to Lhotse's version. + # In different Lhotse's versions, the default of num_frame_masks is + # different. + num_frame_masks = 10 + num_frame_masks_parameter = inspect.signature( + SpecAugment.__init__ + ).parameters["num_frame_masks"] + if num_frame_masks_parameter.default == 1: + num_frame_masks = 2 + logging.info(f"Num frame mask: {num_frame_masks}") + input_transforms.append( + SpecAugment( + time_warp_factor=self.args.spec_aug_time_warp_factor, + num_frame_masks=num_frame_masks, + features_mask_size=27, + num_feature_masks=2, + frames_mask_size=100, + ) + ) + else: + logging.info("Disable SpecAugment") + + logging.info("About to create train dataset") + train = K2SpeechRecognitionDataset( + input_strategy=eval(self.args.input_strategy)(), + cut_transforms=transforms, + input_transforms=input_transforms, + return_cuts=self.args.return_cuts, + ) + + if self.args.on_the_fly_feats: + # NOTE: the PerturbSpeed transform should be added only if we + # remove it from data prep stage. + # Add on-the-fly speed perturbation; since originally it would + # have increased epoch size by 3, we will apply prob 2/3 and use + # 3x more epochs. + # Speed perturbation probably should come first before + # concatenation, but in principle the transforms order doesn't have + # to be strict (e.g. 
could be randomized) + # transforms = [PerturbSpeed(factors=[0.9, 1.1], p=2/3)] + transforms # noqa + # Drop feats to be on the safe side. + train = K2SpeechRecognitionDataset( + cut_transforms=transforms, + input_strategy=OnTheFlyFeatures(WhisperFbank(WhisperFbankConfig(num_filters=80, device='cuda'))), + input_transforms=input_transforms, + return_cuts=self.args.return_cuts, + ) + + if self.args.bucketing_sampler: + logging.info("Using DynamicBucketingSampler.") + train_sampler = DynamicBucketingSampler( + cuts_train, + max_duration=self.args.max_duration, + shuffle=self.args.shuffle, + num_buckets=self.args.num_buckets, + buffer_size=self.args.num_buckets * 2000, + shuffle_buffer_size=self.args.num_buckets * 5000, + drop_last=self.args.drop_last, + ) + else: + logging.info("Using SimpleCutSampler.") + train_sampler = SimpleCutSampler( + cuts_train, + max_duration=self.args.max_duration, + shuffle=self.args.shuffle, + ) + logging.info("About to create train dataloader") + + if sampler_state_dict is not None: + logging.info("Loading sampler state dict") + train_sampler.load_state_dict(sampler_state_dict) + + # 'seed' is derived from the current random state, which will have + # previously been set in the main process. + seed = torch.randint(0, 100000, ()).item() + worker_init_fn = _SeedWorkers(seed) + + train_dl = DataLoader( + train, + sampler=train_sampler, + batch_size=None, + num_workers=self.args.num_workers, + persistent_workers=True, + pin_memory=True, + worker_init_fn=worker_init_fn, + ) + + return train_dl + + def valid_dataloaders(self, cuts_valid: CutSet) -> DataLoader: + transforms = [] + # if self.args.concatenate_cuts: + # transforms = [ + # CutConcatenate( + # duration_factor=self.args.duration_factor, gap=self.args.gap + # ) + # ] + transforms + + logging.info("About to create dev dataset") + if self.args.on_the_fly_feats: + validate = K2SpeechRecognitionDataset( + cut_transforms=transforms, + input_strategy=OnTheFlyFeatures(WhisperFbank(WhisperFbankConfig(num_filters=80, device='cuda'))), + return_cuts=self.args.return_cuts, + ) + else: + validate = K2SpeechRecognitionDataset( + cut_transforms=transforms, + return_cuts=self.args.return_cuts, + ) + valid_sampler = DynamicBucketingSampler( + cuts_valid, + max_duration=self.args.max_duration, + shuffle=False, + ) + logging.info("About to create dev dataloader") + valid_dl = DataLoader( + validate, + sampler=valid_sampler, + batch_size=None, + num_workers=2, + persistent_workers=False, + ) + + return valid_dl + + def test_dataloaders(self, cuts: CutSet) -> DataLoader: + logging.debug("About to create test dataset") + test = K2SpeechRecognitionDataset( + input_strategy=OnTheFlyFeatures(WhisperFbank(WhisperFbankConfig(num_filters=80, device='cuda'))) + if self.args.on_the_fly_feats + else eval(self.args.input_strategy)(), + return_cuts=self.args.return_cuts, + ) + sampler = DynamicBucketingSampler( + cuts, + max_duration=self.args.max_duration, + shuffle=False, + ) + logging.debug("About to create test dataloader") + test_dl = DataLoader( + test, + batch_size=None, + sampler=sampler, + num_workers=self.args.num_workers, + ) + return test_dl + + @lru_cache() + def test_cuts(self) -> CutSet: + logging.info("About to get test cuts") + if self.args.on_the_fly_feats: + # dataset = load_dataset(args.huggingface_dataset_path_or_name, streaming=True, split=partition) + i, num_digits = 0, 5 + idx = f"{i}".zfill(num_digits) + parquet_files = [ + f"data/train-{idx}-of-01601.parquet", + ] + parquet_files = 
[
+                f"{self.args.huggingface_dataset_path_or_name}/{f}"
+                for f in parquet_files
+            ]
+            file_name = parquet_files[0]
+            logging.info(f"Loading dataset from {file_name}")
+            # `datasets` is only needed for this on-the-fly path,
+            # so import it lazily here.
+            from datasets import load_dataset
+
+            dataset = load_dataset(
+                "parquet", data_files=parquet_files, streaming=True, split="train"
+            )
+            cut_set = CutSet.from_huggingface_dataset(
+                dataset,
+                audio_key=self.args.audio_key,
+                text_key=self.args.text_key,
+            )
+            if self.args.resample_to_16kHz:
+                cut_set = cut_set.resample(16000)
+            return cut_set
+        else:
+            return load_manifest_lazy(self.args.manifest_dir / "cuts_belle.00000.jsonl.gz")
\ No newline at end of file
diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/decode.py b/egs/speech_llm/SPEECH2SPEECH/slam_omni/decode.py
new file mode 100755
index 000000000..5f5334142
--- /dev/null
+++ b/egs/speech_llm/SPEECH2SPEECH/slam_omni/decode.py
@@ -0,0 +1,653 @@
+#!/usr/bin/env python3
+# Copyright    2021  Xiaomi Corporation (Author: Liyong Guo,
+#                                                Fangjun Kuang,
+#                                                Wei Kang)
+#              2024  Yuekai Zhang
+#
+# See ../../../../LICENSE for clarification regarding multiple authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Usage:
+# Command for decoding using fine-tuned models:
+
+pip install huggingface_hub['cli']
+mkdir -p models/whisper models/qwen models/checkpoint
+huggingface-cli download --local-dir models/checkpoint yuekai/icefall_asr_aishell_whisper_qwen2_1.5B
+
+# For aishell fine-tuned whisper model
+huggingface-cli download --local-dir models/whisper yuekai/icefall_asr_aishell_whisper exp_large_v2/whisper-large-v2-aishell1-epoch-10-avg-6.pt
+# For multi-hans fine-tuned whisper model
+# huggingface-cli download --local-dir models/whisper yuekai/icefall_asr_multi-hans-zh_whisper v1.1/whisper-large-v2-multi-hans-zh-epoch-3-avg-10.pt
+
+huggingface-cli download --local-dir models/qwen Qwen/Qwen2-7B-Instruct
+
+mkdir -p whisper_llm_zh/exp_aishell_whisper_qwen2_1.5B
+ln -s models/checkpoint/epoch-10-avg-5.pt whisper_llm_zh/exp_aishell_whisper_qwen2_1.5B/epoch-999.pt
+
+python3 ./whisper_llm_zh/decode.py \
+  --max-duration 80 \
+  --exp-dir whisper_llm_zh/exp_aishell_whisper_qwen2_1.5B \
+  --speech-encoder-path-or-name models/whisper/exp_large_v2/whisper-large-v2-aishell1-epoch-10-avg-6.pt \
+  --llm-path-or-name models/qwen \
+  --epoch 999 --avg 1 \
+  --manifest-dir data/fbank \
+  --use-flash-attn True \
+  --use-lora True
+"""
+
+import argparse
+import logging
+from collections import defaultdict
+from pathlib import Path
+from typing import Dict, List, Optional, Tuple
+
+import k2
+import torch
+import torch.nn as nn
+import transformers
+import whisper
+from data_module import AsrDataModule
+from lhotse.cut import Cut
+from model import SPEECH_LLM, EncoderProjector
+# from data_module import MultiDataset
+from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training
+from train import DEFAULT_SPEECH_TOKEN
+from transformers import AutoModelForCausalLM, AutoTokenizer
+from whisper_encoder_forward_monkey_patch import replace_whisper_encoder_forward
+
+from icefall.checkpoint import average_checkpoints_with_averaged_model, 
load_checkpoint +from icefall.env import get_env_info +from icefall.utils import ( + AttributeDict, + setup_logger, + store_transcripts, + str2bool, + write_error_stats, +) + + +def average_checkpoints( + filenames: List[Path], device: torch.device = torch.device("cpu") +) -> dict: + """Average a list of checkpoints. + The function is mainly used for deepspeed converted checkpoint averaging, which only include model state_dict. + + Args: + filenames: + Filenames of the checkpoints to be averaged. We assume all + checkpoints are saved by :func:`save_checkpoint`. + device: + Move checkpoints to this device before averaging. + Returns: + Return a dict (i.e., state_dict) which is the average of all + model state dicts contained in the checkpoints. + """ + n = len(filenames) + + if "model" in torch.load(filenames[0], map_location=device): + avg = torch.load(filenames[0], map_location=device)["model"] + else: + avg = torch.load(filenames[0], map_location=device) + + # Identify shared parameters. Two parameters are said to be shared + # if they have the same data_ptr + uniqued: Dict[int, str] = dict() + + for k, v in avg.items(): + v_data_ptr = v.data_ptr() + if v_data_ptr in uniqued: + continue + uniqued[v_data_ptr] = k + + uniqued_names = list(uniqued.values()) + + for i in range(1, n): + if "model" in torch.load(filenames[i], map_location=device): + state_dict = torch.load(filenames[i], map_location=device)["model"] + else: + state_dict = torch.load(filenames[i], map_location=device) + for k in uniqued_names: + avg[k] += state_dict[k] + + for k in uniqued_names: + if avg[k].is_floating_point(): + avg[k] /= n + else: + avg[k] //= n + + return avg + + +def add_model_arguments(parser: argparse.ArgumentParser): + parser.add_argument( + "--llm-path-or-name", + type=str, + default="/workspace/asr/Qwen1.5-0.5B-Chat", + help="Path or name of the large language model.", + ) + + parser.add_argument( + "--speech-encoder-path-or-name", + type=str, + default="whisper-large-v2", + help="Path or name of the speech encoder.", + ) + + parser.add_argument( + "--encoder-projector-ds-rate", + type=int, + default=8, + help="Downsample rate for the encoder projector.", + ) + + parser.add_argument( + "--use-flash-attn", + type=str2bool, + default=True, + help="Whether to use flash attention.", + ) + + parser.add_argument( + "--use-lora", + type=str2bool, + default=True, + help="Whether to use lora fine-tuned llm checkpoint.", + ) + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--epoch", + type=int, + default=-1, + help="It specifies the checkpoint to use for decoding." + "Note: Epoch counts from 0.", + ) + parser.add_argument( + "--avg", + type=int, + default=1, + help="Number of checkpoints to average. Automatically select " + "consecutive checkpoints before the checkpoint specified by " + "'--epoch'. ", + ) + + parser.add_argument( + "--method", + type=str, + default="beam-search", + help="""Decoding method. 
+ Supported values are: + - beam-search + """, + ) + + parser.add_argument( + "--beam-size", + type=int, + default=1, + help="beam size for beam search decoding", + ) + + parser.add_argument( + "--exp-dir", + type=str, + default="whisper/exp", + help="The experiment dir", + ) + + parser.add_argument( + "--remove-whisper-encoder-input-length-restriction", + type=str2bool, + default=True, + help="replace whisper encoder forward method to remove input length restriction", + ) + + # parser.add_argument( + # "--dataset", + # type=str, + # default="aishell", + # choices=["aishell", "speechio", "wenetspeech_test_meeting", "multi_hans_zh"], + # help="The dataset to decode", + # ) + + add_model_arguments(parser) + return parser + + +def get_params() -> AttributeDict: + params = AttributeDict( + { + "env_info": get_env_info(), + } + ) + return params + + +def decode_one_batch( + params: AttributeDict, + model: nn.Module, + tokenizer: AutoTokenizer, + batch: dict, +) -> Dict[str, List[List[int]]]: + """Decode one batch and return the result in a dict. The dict has the + following format: + + - key: "beam-search" + - value: A list of lists. Each sublist is a list of token IDs. + Args: + params: + It is returned by :func:`get_params`. + model: + The neural model. + batch: + It is returned by :meth:`torch.utils.data.DataLoader.__iter__`. + Returns: + Return a dict, whose key may be "beam-search". + """ + + def preprocess( + messages, + tokenizer: transformers.PreTrainedTokenizer, + max_len: int = 128, + ) -> Dict: + """Preprocesses the data for supervised fine-tuning.""" + texts = [] + TEMPLATE = "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content']}}{% if loop.last %}{{''}}{% else %}{{ '<|im_end|>\n' }}{% endif %}{% endfor %}" + for i, msg in enumerate(messages): + texts.append( + tokenizer.apply_chat_template( + msg, + tokenize=True, + add_generation_prompt=False, + chat_template=TEMPLATE, + padding="longest", + max_length=max_len, + truncation=True, + ) + ) + max_len_texts = max([len(text) for text in texts]) + if tokenizer.padding_side == "right": + texts = [ + text + [tokenizer.pad_token_id] * (max_len_texts - len(text)) + for text in texts + ] + else: + texts = [ + [tokenizer.pad_token_id] * (max_len_texts - len(text)) + text + for text in texts + ] + + input_ids = torch.tensor(texts, dtype=torch.int) + + attention_mask = input_ids.ne(tokenizer.pad_token_id) + + return input_ids, attention_mask + + dtype = torch.float32 + device = model.llm.device + + feature = batch["inputs"] + assert feature.ndim == 3 + feature = feature.to(device, dtype=dtype).transpose(1, 2) + if not params.remove_whisper_encoder_input_length_restriction: + T = 3000 + if feature.shape[2] < T: + feature = torch.cat( + [ + feature, + torch.zeros( + feature.shape[0], feature.shape[1], T - feature.shape[2] + ).to(device, dtype=dtype), + ], + 2, + ) + + supervisions = batch["supervisions"] + feature_len = supervisions["num_frames"] + feature_len = feature_len.to(device, dtype=dtype) + + messages = [ + [ + {"role": "user", "content": f"{DEFAULT_SPEECH_TOKEN}请转写音频为文字"}, + {"role": "assistant", "content": ""}, + ] + ] * len(feature) + + input_ids, attention_mask = preprocess(messages, tokenizer, max_len=128) + + generated_ids = model.decode( + feature, input_ids.to(device, dtype=torch.long), attention_mask.to(device) + ) + hyps = tokenizer.batch_decode(generated_ids, skip_special_tokens=True) + + print(hyps) + print(supervisions) + + return {"beam-search": hyps} + + +def decode_dataset( + dl: 
torch.utils.data.DataLoader, + params: AttributeDict, + model: nn.Module, + tokenizer: AutoTokenizer, +) -> Dict[str, List[Tuple[str, List[str], List[str]]]]: + """Decode dataset. + + Args: + dl: + The dataloader. + params: + It is returned by :func:`get_params`. + model: + The neural model. + Returns: + Return a dict, whose key may be "beam-search". + """ + + def normalize_text_alimeeting(text: str, normalize: str = "m2met") -> str: + """ + Text normalization similar to M2MeT challenge baseline. + See: https://github.com/yufan-aslp/AliMeeting/blob/main/asr/local/text_normalize.pl + """ + if normalize == "none": + return text + elif normalize == "m2met": + import re + + text = text.replace(" ", "") + text = text.replace("", "") + text = text.replace("<%>", "") + text = text.replace("<->", "") + text = text.replace("<$>", "") + text = text.replace("<#>", "") + text = text.replace("<_>", "") + text = text.replace("", "") + text = text.replace("`", "") + text = text.replace("&", "") + text = text.replace(",", "") + if re.search("[a-zA-Z]", text): + text = text.upper() + text = text.replace("A", "A") + text = text.replace("a", "A") + text = text.replace("b", "B") + text = text.replace("c", "C") + text = text.replace("k", "K") + text = text.replace("t", "T") + text = text.replace(",", "") + text = text.replace("丶", "") + text = text.replace("。", "") + text = text.replace("、", "") + text = text.replace("?", "") + return text + + results = [] + + num_cuts = 0 + + try: + num_batches = len(dl) + except TypeError: + num_batches = "?" + + results = defaultdict(list) + for batch_idx, batch in enumerate(dl): + texts = batch["supervisions"]["text"] + cut_ids = [cut.id for cut in batch["supervisions"]["cut"]] + + hyps_dict = decode_one_batch( + params=params, + model=model, + batch=batch, + tokenizer=tokenizer, + ) + + for lm_scale, hyps in hyps_dict.items(): + this_batch = [] + assert len(hyps) == len(texts) + for cut_id, hyp_words, ref_text in zip(cut_ids, hyps, texts): + ref_text = normalize_text_alimeeting(ref_text) + ref_words = ref_text.split() + print(f"ref: {ref_text}") + print(f"hyp: {''.join(hyp_words)}") + this_batch.append((cut_id, ref_words, hyp_words)) + + results[lm_scale].extend(this_batch) + + num_cuts += len(batch["supervisions"]["text"]) + + if batch_idx % 100 == 0: + batch_str = f"{batch_idx}/{num_batches}" + + logging.info(f"batch {batch_str}, cuts processed until now is {num_cuts}") + return results + + +def save_results( + params: AttributeDict, + test_set_name: str, + results_dict: Dict[str, List[Tuple[str, List[str], List[str]]]], +): + + enable_log = True + test_set_wers = dict() + for key, results in results_dict.items(): + recog_path = ( + params.exp_dir / f"recogs-{test_set_name}-{key}-{params.suffix}.txt" + ) + results = sorted(results) + store_transcripts(filename=recog_path, texts=results) + if enable_log: + logging.info(f"The transcripts are stored in {recog_path}") + + # The following prints out WERs, per-word error statistics and aligned + # ref/hyp pairs. + errs_filename = ( + params.exp_dir / f"errs-{test_set_name}-{key}-{params.suffix}.txt" + ) + # we compute CER for aishell dataset. 
+ results_char = [] + for res in results: + results_char.append((res[0], list("".join(res[1])), list("".join(res[2])))) + with open(errs_filename, "w") as f: + wer = write_error_stats( + f, f"{test_set_name}-{key}", results_char, enable_log=enable_log + ) + test_set_wers[key] = wer + + if enable_log: + logging.info("Wrote detailed error stats to {}".format(errs_filename)) + + test_set_wers = sorted(test_set_wers.items(), key=lambda x: x[1]) + errs_info = params.exp_dir / f"cer-summary-{test_set_name}-{params.suffix}.txt" + with open(errs_info, "w") as f: + print("settings\tCER", file=f) + for key, val in test_set_wers: + print("{}\t{}".format(key, val), file=f) + + s = "\nFor {}, CER of different settings are:\n".format(test_set_name) + note = "\tbest for {}".format(test_set_name) + for key, val in test_set_wers: + s += "{}\t{}{}\n".format(key, val, note) + note = "" + logging.info(s) + + +@torch.no_grad() +def main(): + parser = get_parser() + AsrDataModule.add_arguments(parser) + args = parser.parse_args() + args.exp_dir = Path(args.exp_dir) + + params = get_params() + params.update(vars(args)) + params.suffix = f"epoch-{params.epoch}-avg-{params.avg}" + setup_logger( + f"{params.exp_dir}/log-{params.method}-beam{params.beam_size}/log-decode-{params.suffix}" + ) + + logging.info("Decoding started") + logging.info(params) + + device = torch.device("cpu") + if torch.cuda.is_available(): + device = torch.device("cuda") + + logging.info(f"device: {device}") + + if params.remove_whisper_encoder_input_length_restriction: + replace_whisper_encoder_forward() + + whisper_model = whisper.load_model(params.speech_encoder_path_or_name, "cpu") + speech_encoder = whisper_model.encoder + speech_encoder_dim = whisper_model.dims.n_audio_state + tokenizer = AutoTokenizer.from_pretrained(params.llm_path_or_name) + + if params.use_flash_attn: + attn_implementation = "flash_attention_2" + # torch_dtype=torch.bfloat16 FIX ME + torch_dtype = torch.float16 + tokenizer.padding_side = "left" + + else: + attn_implementation = "eager" + torch_dtype = torch.float16 + tokenizer.padding_side = "right" + + llm = AutoModelForCausalLM.from_pretrained( + params.llm_path_or_name, + attn_implementation=attn_implementation, + torch_dtype=torch_dtype, + ) + if params.use_lora: + lora_config = LoraConfig( + r=64, + lora_alpha=16, + target_modules=[ + "q_proj", + "k_proj", + "v_proj", + "o_proj", + "up_proj", + "gate_proj", + "down_proj", + ], + task_type="CAUSAL_LM", + ) + llm = get_peft_model(llm, lora_config) + llm.print_trainable_parameters() + + special_tokens_dict = {"additional_special_tokens": [DEFAULT_SPEECH_TOKEN]} + tokenizer.add_special_tokens(special_tokens_dict) + llm.config.pad_token_id = tokenizer.convert_tokens_to_ids("<|endoftext|>") + llm.config.bos_token_id = tokenizer.convert_tokens_to_ids("<|im_start|>") + llm.config.eos_token_id = tokenizer.convert_tokens_to_ids("<|im_end|>") + + llm.config.default_speech_token_id = tokenizer.convert_tokens_to_ids( + DEFAULT_SPEECH_TOKEN + ) + + encoder_projector = EncoderProjector( + speech_encoder_dim, llm.config.hidden_size, params.encoder_projector_ds_rate + ) + + model = SPEECH_LLM( + speech_encoder, + llm, + encoder_projector, + ) + + if params.avg > 1: + start = params.epoch - params.avg + 1 + assert start >= 1, start + checkpoint = torch.load( + f"{params.exp_dir}/epoch-{params.epoch}.pt", map_location="cpu" + ) + assert "model" not in checkpoint + # deepspeed converted checkpoint only contains model state_dict + filenames = [ + 
f"{params.exp_dir}/epoch-{epoch}.pt" + for epoch in range(start, params.epoch + 1) + ] + avg_checkpoint = average_checkpoints(filenames) + model.load_state_dict(avg_checkpoint, strict=False) + + filename = f"{params.exp_dir}/epoch-{params.epoch}-avg-{params.avg}.pt" + torch.save(avg_checkpoint, filename) + else: + checkpoint = torch.load( + f"{params.exp_dir}/epoch-{params.epoch}.pt", map_location="cpu" + ) + model.load_state_dict(checkpoint, strict=False) + + model.to(device) + model.eval() + num_param = sum([p.numel() for p in model.parameters()]) + logging.info(f"Number of model parameters: {num_param}") + + # we need cut ids to display recognition results. + args.return_cuts = True + + data_module = AsrDataModule(args) + # data_module = MultiDataset(args.manifest_dir) + + def remove_long_utt(c: Cut): + # Keep only utterances with duration in 30 seconds + # + if c.duration > 30.0: + logging.warning( + f"Exclude cut with ID {c.id} from training. Duration: {c.duration}" + ) + return False + return True + + # if params.dataset == "aishell": + # test_sets_cuts = data_module.aishell_test_cuts() + # elif params.dataset == "speechio": + # test_sets_cuts = data_module.speechio_test_cuts() + # elif params.dataset == "wenetspeech_test_meeting": + # test_sets_cuts = data_module.wenetspeech_test_meeting_cuts() + # else: + test_sets_cuts = data_module.test_cuts() + + test_sets = test_sets_cuts.keys() + test_dls = [ + data_module.test_dataloaders(test_sets_cuts[cuts_name].filter(remove_long_utt)) + for cuts_name in test_sets + ] + + for test_set, test_dl in zip(test_sets, test_dls): + results_dict = decode_dataset( + dl=test_dl, + params=params, + model=model, + tokenizer=tokenizer, + ) + + save_results(params=params, test_set_name=test_set, results_dict=results_dict) + + logging.info("Done!") + + +torch.set_num_threads(1) +torch.set_num_interop_threads(1) + +if __name__ == "__main__": + main() diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/label_smoothing.py b/egs/speech_llm/SPEECH2SPEECH/slam_omni/label_smoothing.py new file mode 120000 index 000000000..e9d239fff --- /dev/null +++ b/egs/speech_llm/SPEECH2SPEECH/slam_omni/label_smoothing.py @@ -0,0 +1 @@ +../../../librispeech/ASR/conformer_ctc/label_smoothing.py \ No newline at end of file diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py b/egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py new file mode 100644 index 000000000..829ef4e2d --- /dev/null +++ b/egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py @@ -0,0 +1,285 @@ +import torch +from torch import nn +from transformers.trainer_pt_utils import LabelSmoother + +IGNORE_TOKEN_ID = LabelSmoother.ignore_index + + +class EncoderProjector(nn.Module): + """ + The encoder projector module. It is used to project the encoder outputs to the same dimension as the language model. + Modified from https://github.com/X-LANCE/SLAM-LLM/blob/main/src/slam_llm/models/projector.py. + Args: + encoder_dim (:obj:`int`): The dimension of the encoder outputs. + llm_dim (:obj:`int`): The dimension of the language model. + downsample_rate (:obj:`int`, `optional`, defaults to 5): The downsample rate to use. 
+ """ + + def __init__(self, encoder_dim, llm_dim, downsample_rate=5): + super().__init__() + self.downsample_rate = downsample_rate + self.linear1 = nn.Linear(encoder_dim * self.downsample_rate, llm_dim) + self.relu = nn.ReLU() + self.linear2 = nn.Linear(llm_dim, llm_dim) + + def forward(self, x): + + batch_size, seq_len, feat_dim = x.size() + num_frames_to_discard = seq_len % self.downsample_rate + if num_frames_to_discard > 0: + x = x[:, :-num_frames_to_discard, :] + seq_len = x.size(1) + + x = x.contiguous() + x = x.view( + batch_size, seq_len // self.downsample_rate, feat_dim * self.downsample_rate + ) + + x = self.linear1(x) + x = self.relu(x) + x = self.linear2(x) + return x + + +class SPEECH_LLM(nn.Module): + """ + The Speech-to-Text model. It consists of an encoder, a language model and an encoder projector. + The encoder is used to extract speech features from the input speech signal. + The encoder projector is used to project the encoder outputs to the same dimension as the language model. + The language model is used to generate the text from the speech features. + Args: + encoder (:obj:`nn.Module`): The encoder module. + llm (:obj:`nn.Module`): The language model module. + encoder_projector (:obj:`nn.Module`): The encoder projector module. + """ + + def __init__( + self, + encoder: nn.Module, + llm: nn.Module, + encoder_projector: nn.Module, + ): + super().__init__() + self.encoder = encoder + self.llm = llm + self.encoder_projector = encoder_projector + + def _merge_input_ids_with_speech_features( + self, speech_features, inputs_embeds, input_ids, attention_mask, labels=None + ): + """ + Merge the speech features with the input_ids and attention_mask. This is done by replacing the speech tokens + with the speech features and padding the input_ids to the maximum length of the speech features. + Modified from https://github.com/huggingface/transformers/blob/main/src/transformers/models/llava/modeling_llava.py#L277. + Args: + speech_features (:obj:`torch.Tensor`): The speech features to merge with the input_ids. + inputs_embeds (:obj:`torch.Tensor`): The embeddings of the input_ids. + input_ids (:obj:`torch.Tensor`): The input ids to merge. + attention_mask (:obj:`torch.Tensor`): The attention mask to merge. + labels (:obj:`torch.Tensor`, `optional`): The labels to merge. + Returns: + :obj:`Tuple(torch.Tensor)`: The merged embeddings, attention mask, labels and position ids. + """ + num_speechs, speech_len, embed_dim = speech_features.shape + batch_size, sequence_length = input_ids.shape + left_padding = not torch.sum( + input_ids[:, -1] == torch.tensor(self.llm.config.pad_token_id) + ) + # 1. Create a mask to know where special speech tokens are + special_speech_token_mask = input_ids == self.llm.config.default_speech_token_id + num_special_speech_tokens = torch.sum(special_speech_token_mask, dim=-1) + # Compute the maximum embed dimension + max_embed_dim = ( + num_special_speech_tokens.max() * (speech_len - 1) + ) + sequence_length + batch_indices, non_speech_indices = torch.where( + input_ids != self.llm.config.default_speech_token_id + ) + + # 2. Compute the positions where text should be written + # Calculate new positions for text tokens in merged speech-text sequence. + # `special_speech_token_mask` identifies speech tokens. Each speech token will be replaced by `nb_text_tokens_per_speechs - 1` text tokens. + # `torch.cumsum` computes how each speech token shifts subsequent text token positions. 
+ # - 1 to adjust for zero-based indexing, as `cumsum` inherently increases indices by one. + new_token_positions = ( + torch.cumsum((special_speech_token_mask * (speech_len - 1) + 1), -1) - 1 + ) + nb_speech_pad = max_embed_dim - 1 - new_token_positions[:, -1] + if left_padding: + new_token_positions += nb_speech_pad[:, None] # offset for left padding + text_to_overwrite = new_token_positions[batch_indices, non_speech_indices] + + # 3. Create the full embedding, already padded to the maximum position + final_embedding = torch.zeros( + batch_size, + max_embed_dim, + embed_dim, + dtype=inputs_embeds.dtype, + device=inputs_embeds.device, + ) + final_attention_mask = torch.zeros( + batch_size, + max_embed_dim, + dtype=attention_mask.dtype, + device=inputs_embeds.device, + ) + if labels is not None: + final_labels = torch.full( + (batch_size, max_embed_dim), + IGNORE_TOKEN_ID, + dtype=input_ids.dtype, + device=input_ids.device, + ) + # In case the Vision model or the Language model has been offloaded to CPU, we need to manually + # set the corresponding tensors into their correct target device. + target_device = inputs_embeds.device + batch_indices, non_speech_indices, text_to_overwrite = ( + batch_indices.to(target_device), + non_speech_indices.to(target_device), + text_to_overwrite.to(target_device), + ) + attention_mask = attention_mask.to(target_device) + + # 4. Fill the embeddings based on the mask. If we have ["hey" "", "how", "are"] + # we need to index copy on [0, 577, 578, 579] for the text and [1:576] for the speech features + final_embedding[batch_indices, text_to_overwrite] = inputs_embeds[ + batch_indices, non_speech_indices + ] + final_attention_mask[batch_indices, text_to_overwrite] = attention_mask[ + batch_indices, non_speech_indices + ] + if labels is not None: + final_labels[batch_indices, text_to_overwrite] = labels[ + batch_indices, non_speech_indices + ] + + # 5. Fill the embeddings corresponding to the speechs. Anything that is not `text_positions` needs filling (#29835) + speech_to_overwrite = torch.full( + (batch_size, max_embed_dim), + True, + dtype=torch.bool, + device=inputs_embeds.device, + ) + speech_to_overwrite[batch_indices, text_to_overwrite] = False + speech_to_overwrite &= speech_to_overwrite.cumsum(-1) - 1 >= nb_speech_pad[ + :, None + ].to(target_device) + + if speech_to_overwrite.sum() != speech_features.shape[:-1].numel(): + raise ValueError( + f"The input provided to the model are wrong. The number of speech tokens is {torch.sum(special_speech_token_mask)} while" + f" the number of speech given to the model is {num_speechs}. This prevents correct indexing and breaks batch generation." + ) + + final_embedding[speech_to_overwrite] = ( + speech_features.contiguous().reshape(-1, embed_dim).to(target_device) + ) + final_attention_mask |= speech_to_overwrite + position_ids = (final_attention_mask.cumsum(-1) - 1).masked_fill_( + (final_attention_mask == 0), 1 + ) + + # 6. Mask out the embedding at padding positions, as we later use the past_key_value value to determine the non-attended tokens. 
+ batch_indices, pad_indices = torch.where( + input_ids == self.llm.config.pad_token_id + ) + indices_to_mask = new_token_positions[batch_indices, pad_indices] + + final_embedding[batch_indices, indices_to_mask] = 0 + + if labels is None: + final_labels = None + + return final_embedding, final_attention_mask, final_labels, position_ids + + def forward( + self, + fbank: torch.Tensor = None, + input_ids: torch.LongTensor = None, + attention_mask: torch.Tensor = None, + labels: torch.LongTensor = None, + ): + encoder_outs = self.encoder(fbank) + + speech_features = self.encoder_projector(encoder_outs) + + inputs_embeds = self.llm.get_input_embeddings()(input_ids) + + ( + inputs_embeds, + attention_mask, + labels, + _, + ) = self._merge_input_ids_with_speech_features( + speech_features, inputs_embeds, input_ids, attention_mask, labels + ) + + model_outputs = self.llm( + inputs_embeds=inputs_embeds, attention_mask=attention_mask, labels=labels + ) + + with torch.no_grad(): + preds = torch.argmax(model_outputs.logits, -1) + acc = compute_accuracy( + preds.detach()[:, :-1], + labels.detach()[:, 1:], + ignore_label=IGNORE_TOKEN_ID, + ) + return model_outputs, acc + + def decode( + self, + fbank: torch.Tensor = None, + input_ids: torch.LongTensor = None, + attention_mask: torch.Tensor = None, + **kwargs, + ): + + encoder_outs = self.encoder(fbank) + speech_features = self.encoder_projector(encoder_outs) + speech_features = speech_features.to(torch.float16) + inputs_embeds = self.llm.get_input_embeddings()(input_ids) + ( + inputs_embeds, + attention_mask, + _, + position_ids, + ) = self._merge_input_ids_with_speech_features( + speech_features, inputs_embeds, input_ids, attention_mask + ) + generated_ids = self.llm.generate( + inputs_embeds=inputs_embeds, + max_new_tokens=kwargs.get("max_new_tokens", 200), + num_beams=kwargs.get("num_beams", 1), + do_sample=kwargs.get("do_sample", False), + min_length=kwargs.get("min_length", 1), + top_p=kwargs.get("top_p", 1.0), + repetition_penalty=kwargs.get("repetition_penalty", 1.0), + length_penalty=kwargs.get("length_penalty", 1.0), + temperature=kwargs.get("temperature", 1.0), + bos_token_id=self.llm.config.bos_token_id, + eos_token_id=self.llm.config.eos_token_id, + pad_token_id=self.llm.config.pad_token_id, + ) + + return generated_ids + + +def compute_accuracy(pad_outputs, pad_targets, ignore_label): + """Calculate accuracy. + Copied from https://github.com/X-LANCE/SLAM-LLM/blob/main/src/slam_llm/utils/metric.py + Args: + pad_outputs (LongTensor): Prediction tensors (B, Lmax). + pad_targets (LongTensor): Target label tensors (B, Lmax). + ignore_label (int): Ignore label id. + + Returns: + float: Accuracy value (0.0 - 1.0). 
+ + """ + mask = pad_targets != ignore_label + numerator = torch.sum( + pad_outputs.masked_select(mask) == pad_targets.masked_select(mask) + ) + denominator = torch.sum(mask) + return numerator.float() / denominator.float() diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/speech_dataset.py b/egs/speech_llm/SPEECH2SPEECH/slam_omni/speech_dataset.py new file mode 100644 index 000000000..d0a77fd0e --- /dev/null +++ b/egs/speech_llm/SPEECH2SPEECH/slam_omni/speech_dataset.py @@ -0,0 +1,176 @@ +from typing import Callable, Dict, List, Union + +import torch +from torch.utils.data.dataloader import DataLoader, default_collate + +from lhotse import validate +from lhotse.cut import CutSet +from lhotse.dataset.input_strategies import BatchIO, PrecomputedFeatures +from lhotse.utils import compute_num_frames, ifnone +from lhotse.workarounds import Hdf5MemoryIssueFix + + +class K2SpeechRecognitionDataset(torch.utils.data.Dataset): + """ + The PyTorch Dataset for the speech recognition task using k2 library. + + This dataset expects to be queried with lists of cut IDs, + for which it loads features and automatically collates/batches them. + + To use it with a PyTorch DataLoader, set ``batch_size=None`` + and provide a :class:`SimpleCutSampler` sampler. + + Each item in this dataset is a dict of: + + .. code-block:: + + { + 'inputs': float tensor with shape determined by :attr:`input_strategy`: + - single-channel: + - features: (B, T, F) + - audio: (B, T) + - multi-channel: currently not supported + 'supervisions': [ + { + 'sequence_idx': Tensor[int] of shape (S,) + 'text': List[str] of len S + + # For feature input strategies + 'start_frame': Tensor[int] of shape (S,) + 'num_frames': Tensor[int] of shape (S,) + + # For audio input strategies + 'start_sample': Tensor[int] of shape (S,) + 'num_samples': Tensor[int] of shape (S,) + + # Optionally, when return_cuts=True + 'cut': List[AnyCut] of len S + } + ] + } + + Dimension symbols legend: + * ``B`` - batch size (number of Cuts) + * ``S`` - number of supervision segments (greater or equal to B, as each Cut may have multiple supervisions) + * ``T`` - number of frames of the longest Cut + * ``F`` - number of features + + The 'sequence_idx' field is the index of the Cut used to create the example in the Dataset. + """ + + def __init__( + self, + return_cuts: bool = False, + cut_transforms: List[Callable[[CutSet], CutSet]] = None, + input_transforms: List[Callable[[torch.Tensor], torch.Tensor]] = None, + input_strategy: BatchIO = PrecomputedFeatures(), + ): + """ + k2 ASR IterableDataset constructor. + + :param return_cuts: When ``True``, will additionally return a "cut" field in each batch with the Cut + objects used to create that batch. + :param cut_transforms: A list of transforms to be applied on each sampled batch, + before converting cuts to an input representation (audio/features). + Examples: cut concatenation, noise cuts mixing, etc. + :param input_transforms: A list of transforms to be applied on each sampled batch, + after the cuts are converted to audio/features. + Examples: normalization, SpecAugment, etc. + :param input_strategy: Converts cuts into a collated batch of audio/features. + By default, reads pre-computed features from disk. 
+ """ + super().__init__() + # Initialize the fields + self.return_cuts = return_cuts + self.cut_transforms = ifnone(cut_transforms, []) + self.input_transforms = ifnone(input_transforms, []) + self.input_strategy = input_strategy + + # This attribute is a workaround to constantly growing HDF5 memory + # throughout the epoch. It regularly closes open file handles to + # reset the internal HDF5 caches. + self.hdf5_fix = Hdf5MemoryIssueFix(reset_interval=100) + + def __getitem__(self, cuts: CutSet) -> Dict[str, Union[torch.Tensor, List[str]]]: + """ + Return a new batch, with the batch size automatically determined using the constraints + of max_duration and max_cuts. + """ + validate_for_asr(cuts) + + self.hdf5_fix.update() + + # Sort the cuts by duration so that the first one determines the batch time dimensions. + cuts = cuts.sort_by_duration(ascending=False) + + # Optional CutSet transforms - e.g. padding, or speed perturbation that adjusts + # the supervision boundaries. + for tnfm in self.cut_transforms: + cuts = tnfm(cuts) + + # Sort the cuts again after transforms + cuts = cuts.sort_by_duration(ascending=False) + + # Get a tensor with batched feature matrices, shape (B, T, F) + # Collation performs auto-padding, if necessary. + input_tpl = self.input_strategy(cuts) + if len(input_tpl) == 3: + # An input strategy with fault tolerant audio reading mode. + # "cuts" may be a subset of the original "cuts" variable, + # that only has cuts for which we succesfully read the audio. + inputs, _, cuts = input_tpl + else: + inputs, _ = input_tpl + + # Get a dict of tensors that encode the positional information about supervisions + # in the batch of feature matrices. The tensors are named "sequence_idx", + # "start_frame/sample" and "num_frames/samples". + supervision_intervals = self.input_strategy.supervision_intervals(cuts) + + # Apply all available transforms on the inputs, i.e. either audio or features. + # This could be feature extraction, global MVN, SpecAugment, etc. + segments = torch.stack(list(supervision_intervals.values()), dim=1) + for tnfm in self.input_transforms: + inputs = tnfm(inputs, supervision_segments=segments) + + batch = { + "inputs": inputs, + "supervisions": default_collate( + [ + { + "text": supervision.text, + } + for sequence_idx, cut in enumerate(cuts) + for supervision in cut.supervisions + ] + ), + } + # Update the 'supervisions' field with sequence_idx and start/num frames/samples + batch["supervisions"].update(supervision_intervals) + if self.return_cuts: + batch["supervisions"]["cut"] = [ + cut for cut in cuts for sup in cut.supervisions + ] + + return batch + + +def validate_for_asr(cuts: CutSet) -> None: + validate(cuts) + tol = 2e-3 # 1ms + for cut in cuts: + for supervision in cut.supervisions: + assert supervision.start >= -tol, ( + f"Supervisions starting before the cut are not supported for ASR" + f" (sup id: {supervision.id}, cut id: {cut.id})" + ) + + # Supervision start time is relative to Cut ... 
+ # https://lhotse.readthedocs.io/en/v0.10_e/cuts.html + # + # 'supervision.end' is end of supervision inside the Cut + assert supervision.end <= cut.duration + tol, ( + f"Supervisions ending after the cut " + f"are not supported for ASR" + f" (sup id: {supervision.id}, cut id: {cut.id})" + ) diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py b/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py new file mode 100755 index 000000000..d9489b1ae --- /dev/null +++ b/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py @@ -0,0 +1,872 @@ +#!/usr/bin/env python3 +# Copyright 2023 Xiaomi Corp. (authors: Xiaoyu Yang) +# 2024 Yuekai Zhang +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Usage: +# fine-tuning with whisper and Qwen2 +pip install huggingface_hub['cli'] +mkdir -p models/whisper models/qwen + +# For aishell fine-tuned whisper model +huggingface-cli download --local-dir models/whisper yuekai/icefall_asr_aishell_whisper exp_large_v2/whisper-large-v2-aishell1-epoch-10-avg-6.pt +# For multi-hans fine-tuned whisper model +# huggingface-cli download --local-dir models/whisper yuekai/icefall_asr_multi-hans-zh_whisper v1.1/whisper-large-v2-multi-hans-zh-epoch-3-avg-10.pt + +# huggingface-clie download --local-dir models/qwen Qwen/Qwen2-7B-Instruct +huggingface-clie download --local-dir models/qwen Qwen/Qwen2-1.5B-Instruct + +torchrun --nproc_per_node 8 ./whisper_llm_zh/train.py \ + --max-duration 200 \ + --exp-dir ./whisper_llm_zh/exp_test \ + --speech-encoder-path-or-name models/whisper/exp_large_v2/whisper-large-v2-aishell1-epoch-10-avg-6.pt \ + --llm-path-or-name Qwen/Qwen2-1.5B-Instruct \ + --manifest-dir data/fbank \ + --deepspeed \ + --deepspeed_config ./whisper_llm_zh/ds_config_zero1.json \ + --use-flash-attn True \ + --use-lora True --unfreeze-llm True +""" + +import argparse +import copy +import logging +import os +import random +import warnings +from pathlib import Path +from shutil import copyfile +from typing import Any, Dict, List, Optional, Tuple, Union + +import deepspeed +import k2 +import torch +import torch.multiprocessing as mp +import torch.nn as nn +import transformers +import whisper +from data_module import AsrDataModule +from deepspeed.utils.zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict +from label_smoothing import LabelSmoothingLoss +from lhotse import CutSet, load_manifest +from lhotse.cut import Cut +from lhotse.dataset.sampling.base import CutSampler +from lhotse.utils import fix_random_seed +from model import IGNORE_TOKEN_ID, SPEECH_LLM, EncoderProjector +from multi_dataset import MultiDataset +from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training +from torch import Tensor +from torch.utils.tensorboard import SummaryWriter +from transformers import AutoModelForCausalLM, AutoTokenizer +from whisper_encoder_forward_monkey_patch import replace_whisper_encoder_forward + +from icefall import diagnostics +from icefall.dist import get_rank, get_world_size 
+from icefall.env import get_env_info +from icefall.utils import ( + AttributeDict, + MetricsTracker, + filter_uneven_sized_batch, + setup_logger, + str2bool, +) + +DEFAULT_SPEECH_TOKEN = "" + + +def set_batch_count(model: nn.Module, batch_count: float) -> None: + for module in model.modules(): + if hasattr(module, "batch_count"): + module.batch_count = batch_count + + +def add_model_arguments(parser: argparse.ArgumentParser): + parser.add_argument( + "--llm-path-or-name", + type=str, + default="/workspace/asr/Qwen1.5-0.5B-Chat", + help="Path or name of the large language model.", + ) + + parser.add_argument( + "--speech-encoder-path-or-name", + type=str, + default="whisper-large-v2", + help="Path or name of the speech encoder.", + ) + + parser.add_argument( + "--encoder-projector-ds-rate", + type=int, + default=8, + help="Downsample rate for the encoder projector.", + ) + parser.add_argument( + "--use-flash-attn", + type=str2bool, + default=True, + help="Whether to use flash attention.", + ) + + parser.add_argument( + "--use-lora", + type=str2bool, + default=False, + help="Whether to use lora to fine-tune llm.", + ) + + parser.add_argument( + "--unfreeze-llm", + type=str2bool, + default=False, + help="Whether to unfreeze llm during training.", + ) + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--tensorboard", + type=str2bool, + default=True, + help="Should various information be logged in tensorboard.", + ) + + parser.add_argument( + "--num-epochs", + type=int, + default=10, + help="Number of epochs to train.", + ) + + parser.add_argument( + "--start-epoch", + type=int, + default=1, + help="""Resume training from this epoch. It should be positive. + If larger than 1, it will load checkpoint from + exp-dir/epoch-{start_epoch-1}.pt + """, + ) + + parser.add_argument( + "--exp-dir", + type=str, + default="whisper_qwen/exp", + help="""The experiment dir. + It specifies the directory where all training related + files, e.g., checkpoints, log, etc, are saved + """, + ) + + parser.add_argument( + "--pretrained-model-path", + type=str, + default=None, + help="""The path to the pretrained model if it is not None. Training will + start from this model. e.g. ./wenetspeech/ASR/whisper/exp_large_v2/epoch-4-avg-3.pt + """, + ) + + parser.add_argument( + "--sampler-state-dict-path", + type=str, + default=None, + help="""The path to the sampler state dict if it is not None. Training will start from this sampler state dict. + """, + ) + + parser.add_argument( + "--seed", + type=int, + default=42, + help="The seed for random generators intended for reproducibility", + ) + + parser.add_argument( + "--use-fp16", + type=str2bool, + default=True, + help="Whether to use half precision training.", + ) + + parser.add_argument( + "--use-aishell", + type=str2bool, + default=True, + help="Whether to only use aishell1 dataset for training.", + ) + + parser = deepspeed.add_config_arguments(parser) + add_model_arguments(parser) + + return parser + + +def get_params() -> AttributeDict: + """Return a dict containing training parameters. + + All training related parameters that are not passed from the commandline + are saved in the variable `params`. + + Commandline options are merged into `params` after they are parsed, so + you can also access them via `params`. + + Explanation of options saved in `params`: + + - frame_shift_ms: The frame shift in milliseconds. 
+ - allowed_excess_duration_ratio: The allowed excess duration ratio. + - best_train_loss: The best training loss so far. + - best_valid_loss: The best validation loss so far. + - best_train_epoch: The epoch where the best training loss is achieved. + - best_valid_epoch: The epoch where the best validation loss is achieved. + - batch_idx_train: The batch index of the current batch. + - log_interval: Log training stats every `log_interval` batches. + - reset_interval: Reset the stats every `reset_interval` batches. + - valid_interval: Run validation every `valid_interval` batches. + - env_info: The environment information. + """ + params = AttributeDict( + { + "allowed_excess_duration_ratio": 0.1, + "subsampling_factor": 2, + "frame_shift_ms": 10, + "best_train_loss": float("inf"), + "best_valid_loss": float("inf"), + "best_train_epoch": -1, + "best_valid_epoch": -1, + "batch_idx_train": 0, + "log_interval": 50, + "reset_interval": 200, + "valid_interval": 5000, + "env_info": get_env_info(), + } + ) + + return params + + +def compute_loss( + params: AttributeDict, + tokenizer: AutoTokenizer, + model: nn.Module, + batch: dict, + is_training: bool, +) -> Tuple[Tensor, MetricsTracker]: + """ + Compute the loss for the given batch. + Args: + params: + It is returned by :func:`get_params`. + tokenizer: + The tokenizer used to encode the text. + model: + The model for training. + batch: + A batch of data. See `lhotse.dataset.K2SpeechRecognitionDataset()` + for the content in it. + is_training: + Whether it is training. + Returns: + Return a tuple of two elements. The first element is the loss tensor. + """ + # For the uneven-sized batch, the total duration after padding would possibly + # cause OOM. Hence, for each batch, which is sorted descendingly by length, + # we simply drop the last few shortest samples, so that the retained total frames + # (after padding) would not exceed `allowed_max_frames`: + # `allowed_max_frames = int(max_frames * (1.0 + allowed_excess_duration_ratio))`, + # where `max_frames = max_duration * 1000 // frame_shift_ms`. + # We set allowed_excess_duration_ratio=0.1. 
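
As a quick sanity check on the frame budget described in the comment above, the arithmetic works out as follows for one concrete configuration (a worked illustration only, assuming `--max-duration 200` as in the usage example at the top of this file, with the 10 ms frame shift and 0.1 excess ratio set in `get_params()`):

    max_duration = 200            # seconds per batch, from --max-duration (assumed value)
    frame_shift_ms = 10
    allowed_excess_duration_ratio = 0.1

    max_frames = max_duration * 1000 // frame_shift_ms                            # 20000 frames
    allowed_max_frames = int(max_frames * (1.0 + allowed_excess_duration_ratio))  # 22000 frames
    print(max_frames, allowed_max_frames)

So with these settings, a duration-sorted batch is trimmed until its padded size stays within roughly 22000 feature frames.
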
+ + def preprocess( + messages, + tokenizer: transformers.PreTrainedTokenizer, + max_len: int, + ) -> Dict: + """Preprocesses the data for supervised fine-tuning.""" + texts = [] + TEMPLATE = "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content']}}{% if loop.last %}{{ '<|im_end|>'}}{% else %}{{ '<|im_end|>\n' }}{% endif %}{% endfor %}" + for i, msg in enumerate(messages): + texts.append( + tokenizer.apply_chat_template( + msg, + tokenize=True, + chat_template=TEMPLATE, + add_generation_prompt=False, + padding="longest", # FIX me change padding to longest + max_length=max_len, + truncation=True, + ) + ) + # padding texts to the same length, texts is a list of list, padding with tokenzier.pad_token_id + max_len_texts = max([len(text) for text in texts]) + if tokenizer.padding_side == "right": + texts = [ + text + [tokenizer.pad_token_id] * (max_len_texts - len(text)) + for text in texts + ] + else: + texts = [ + [tokenizer.pad_token_id] * (max_len_texts - len(text)) + text + for text in texts + ] + input_ids = torch.tensor(texts, dtype=torch.int) + # response = tokenizer.batch_decode(input_ids, skip_special_tokens=True)[0] + target_ids = input_ids.clone() + target_ids[target_ids == tokenizer.pad_token_id] = IGNORE_TOKEN_ID + # mask all tokens before token_id 151646 with IGNORE_TOKEN_ID + # first get the indices of the tokens + mask_prompt = True + if mask_prompt: + mask_indices = torch.where( + input_ids == tokenizer.convert_tokens_to_ids("assistant") + ) + for i in range(mask_indices[0].size(0)): + row = mask_indices[0][i] + col = mask_indices[1][i] + # + 2 to skip: 'assistant', '\n' + target_ids[row, : col + 2] = IGNORE_TOKEN_ID + + attention_mask = input_ids.ne(tokenizer.pad_token_id) + + return input_ids, attention_mask, target_ids + + def normalize_text_alimeeting(text: str, normalize: str = "m2met") -> str: + """ + Text normalization similar to M2MeT challenge baseline. 
+ See: https://github.com/yufan-aslp/AliMeeting/blob/main/asr/local/text_normalize.pl + """ + if normalize == "none": + return text + elif normalize == "m2met": + import re + + text = text.replace(" ", "") + text = text.replace("", "") + text = text.replace("<%>", "") + text = text.replace("<->", "") + text = text.replace("<$>", "") + text = text.replace("<#>", "") + text = text.replace("<_>", "") + text = text.replace("", "") + text = text.replace("`", "") + text = text.replace("&", "") + text = text.replace(",", "") + if re.search("[a-zA-Z]", text): + text = text.upper() + text = text.replace("A", "A") + text = text.replace("a", "A") + text = text.replace("b", "B") + text = text.replace("c", "C") + text = text.replace("k", "K") + text = text.replace("t", "T") + text = text.replace(",", "") + text = text.replace("丶", "") + text = text.replace("。", "") + text = text.replace("、", "") + text = text.replace("?", "") + return text + + max_frames = params.max_duration * 1000 // params.frame_shift_ms + allowed_max_frames = int(max_frames * (1.0 + params.allowed_excess_duration_ratio)) + batch = filter_uneven_sized_batch(batch, allowed_max_frames) + + device = next(model.parameters()).device + feature = batch["inputs"] + + assert feature.ndim == 3 + feature = feature.to(device) + feature = feature.transpose(1, 2) # (N, C, T) + + batch_idx_train = params.batch_idx_train + supervisions = batch["supervisions"] + texts = batch["supervisions"]["text"] + # remove spaces in texts + texts = [normalize_text_alimeeting(text) for text in texts] + + messages = [] + for i, text in enumerate(texts): + message = [ + {"role": "user", "content": f"{DEFAULT_SPEECH_TOKEN}请转写音频为文字"}, + {"role": "assistant", "content": text}, + ] + messages.append(message) + + input_ids, attention_mask, target_ids = preprocess(messages, tokenizer, max_len=128) + + target_ids = target_ids.type(torch.LongTensor) + input_ids = input_ids.type(torch.LongTensor) + + with torch.set_grad_enabled(is_training): + model_outputs, acc = model( + fbank=feature, + input_ids=input_ids.to(device), + attention_mask=attention_mask.to(device), + labels=target_ids.to(device), + ) + loss = model_outputs.loss + assert loss.requires_grad == is_training + + info = MetricsTracker() + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + feature_lens = supervisions["num_frames"] + info["frames"] = (feature_lens // params.subsampling_factor).sum().item() + + # Note: We use reduction=sum while computing the loss. 
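
To make the prompt masking in the `preprocess` helper above easier to follow, here is a minimal sketch of what a single (user, assistant) pair renders to before tokenization. It assumes that rendering the custom template with plain jinja2 matches what `tokenizer.apply_chat_template` produces; the example strings are illustrative, not taken from the dataset:

    import jinja2  # assumption: direct jinja2 rendering mirrors apply_chat_template with this template

    TEMPLATE = (
        "{% for message in messages %}"
        "{{'<|im_start|>' + message['role'] + '\n' + message['content']}}"
        "{% if loop.last %}{{ '<|im_end|>'}}{% else %}{{ '<|im_end|>\n' }}{% endif %}"
        "{% endfor %}"
    )
    messages = [
        {"role": "user", "content": "请转写音频为文字"},
        {"role": "assistant", "content": "今天天气很好"},
    ]
    print(jinja2.Template(TEMPLATE).render(messages=messages))
    # <|im_start|>user
    # 请转写音频为文字<|im_end|>
    # <|im_start|>assistant
    # 今天天气很好<|im_end|>

Masking everything up to and including the "assistant" role marker and the newline after it therefore leaves the loss defined only on the answer tokens (plus the closing `<|im_end|>`).
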
+ info["loss"] = loss.detach().cpu().item() + info["acc"] = ( + acc * info["frames"] + ) # WAR: to avoid normalization by the number of frames + + return loss, info + + +def compute_validation_loss( + params: AttributeDict, + tokenizer: whisper.tokenizer.Tokenizer, + model: nn.Module, + valid_dl: torch.utils.data.DataLoader, + world_size: int = 1, +) -> MetricsTracker: + """Run the validation process.""" + model.eval() + + tot_loss = MetricsTracker() + + for batch_idx, batch in enumerate(valid_dl): + with torch.cuda.amp.autocast(enabled=params.use_fp16): + loss, loss_info = compute_loss( + params=params, + tokenizer=tokenizer, + model=model, + batch=batch, + is_training=False, + ) + assert loss.requires_grad is False + tot_loss = tot_loss + loss_info + + if world_size > 1: + tot_loss.reduce(loss.device) + + loss_value = tot_loss["loss"] / tot_loss["frames"] + if loss_value < params.best_valid_loss: + params.best_valid_epoch = params.cur_epoch + params.best_valid_loss = loss_value + + return tot_loss + + +def train_one_epoch( + params: AttributeDict, + tokenizer: AutoTokenizer, + model: nn.Module, + optimizer: torch.optim.Optimizer, + scheduler: torch.optim.lr_scheduler, + train_dl: torch.utils.data.DataLoader, + valid_dl: torch.utils.data.DataLoader, + tb_writer: Optional[SummaryWriter] = None, + world_size: int = 1, + rank: int = 0, +) -> None: + """Train the model for one epoch. + + The training loss from the mean of all frames is saved in + `params.train_loss`. It runs the validation process every + `params.valid_interval` batches. + + Args: + params: + It is returned by :func:`get_params`. + model: + The model for training. + optimizer: + The optimizer we are using. + scheduler: + The learning rate scheduler, we call step() every step. + train_dl: + Dataloader for the training dataset. + valid_dl: + Dataloader for the validation dataset. + scaler: + The scaler used for mix precision training. + model_avg: + The stored model averaged from the start of training. + tb_writer: + Writer to write log messages to tensorboard. + world_size: + Number of nodes in DDP training. If it is 1, DDP is disabled. + rank: + The rank of the node in DDP training. If no DDP is used, it should + be set to 0. 
+ """ + model.encoder_projector.train() + + tot_loss = MetricsTracker() + + for batch_idx, batch in enumerate(train_dl): + params.batch_idx_train += 1 + batch_size = len(batch["supervisions"]["text"]) + if batch_idx % params.valid_interval == 0 and not params.print_diagnostics: + logging.info("Computing validation loss") + valid_info = compute_validation_loss( + params=params, + tokenizer=tokenizer, + model=model, + valid_dl=valid_dl, + world_size=world_size, + ) + model.train() + logging.info(f"Epoch {params.cur_epoch}, validation: {valid_info}") + logging.info( + f"Maximum memory allocated so far is {torch.cuda.max_memory_allocated()//1000000}MB" + ) + if tb_writer is not None: + valid_info.write_summary( + tb_writer, "train/valid_", params.batch_idx_train + ) + if batch_idx != 0: + model.save_checkpoint( + save_dir=params.exp_dir, + tag=f"epoch-{params.cur_epoch}-checkpoint-{batch_idx}", + client_state={}, + exclude_frozen_parameters=True, + ) + + if rank == 0: + convert_zero_checkpoint_to_fp32_state_dict( + params.exp_dir, + f"{params.exp_dir}/epoch-{params.cur_epoch}-checkpoint-{batch_idx}.pt", + tag=f"epoch-{params.cur_epoch}-checkpoint-{batch_idx}", + exclude_frozen_parameters=True, + ) + # save sampler state dict into checkpoint + sampler_state_dict = train_dl.sampler.state_dict() + torch.save( + sampler_state_dict, + f"{params.exp_dir}/epoch-{params.cur_epoch}-checkpoint-{batch_idx}-sampler.pt", + ) + os.system( + f"rm -rf {params.exp_dir}/epoch-{params.cur_epoch}-checkpoint-{batch_idx}" + ) + try: + with torch.cuda.amp.autocast(enabled=params.use_fp16): + loss, loss_info = compute_loss( + params=params, + tokenizer=tokenizer, + model=model, + batch=batch, + is_training=True, + ) + # summary stats + tot_loss = (tot_loss * (1 - 1 / params.reset_interval)) + loss_info + + # NOTE: We use reduction==sum and loss is computed over utterances + # in the batch and there is no normalization to it so far. + + # deepspeed's backward() is different from torch's backward() + # in that it does not accept a loss tensor as input. + # It computes the loss internally. + model.backward(loss) + model.step() + + except: # noqa + display_and_save_batch(batch, params=params) + raise + + if batch_idx % params.log_interval == 0: + try: + cur_lr = scheduler.get_last_lr()[0] + except: # noqa + cur_lr = 0.0 + + logging.info( + f"Epoch {params.cur_epoch}, " + f"batch {batch_idx}, loss[{loss_info}], " + f"tot_loss[{tot_loss}], batch size: {batch_size}, " + f"lr: {cur_lr:.2e}, " + ) + + if tb_writer is not None: + tb_writer.add_scalar( + "train/learning_rate", cur_lr, params.batch_idx_train + ) + + loss_info.write_summary( + tb_writer, "train/current_", params.batch_idx_train + ) + tot_loss.write_summary(tb_writer, "train/tot_", params.batch_idx_train) + + loss_value = tot_loss["loss"] / tot_loss["frames"] + params.train_loss = loss_value + if params.train_loss < params.best_train_loss: + params.best_train_epoch = params.cur_epoch + params.best_train_loss = params.train_loss + + +def run(rank, world_size, args): + """ + Args: + rank: + It is a value between 0 and `world_size-1`, which is + passed automatically by `mp.spawn()` in :func:`main`. + The node with rank 0 is responsible for saving checkpoint. + world_size: + Number of GPUs for DDP training. 
+ args: + The return value of get_parser().parse_args() + """ + params = get_params() + params.update(vars(args)) + + fix_random_seed(params.seed) + + setup_logger(f"{params.exp_dir}/log/log-train") + logging.info(params) + + logging.info("About to create model") + + replace_whisper_encoder_forward() + whisper_model = whisper.load_model(params.speech_encoder_path_or_name, "cpu") + speech_encoder = whisper_model.encoder + speech_encoder_dim = whisper_model.dims.n_audio_state + for name, param in speech_encoder.named_parameters(): + param.requires_grad = False + speech_encoder.eval() + + tokenizer = AutoTokenizer.from_pretrained(params.llm_path_or_name) + if params.use_flash_attn: + attn_implementation = "flash_attention_2" + # torch_dtype=torch.bfloat16 FIX ME + torch_dtype = torch.float16 + tokenizer.padding_side = "left" + + else: + attn_implementation = "eager" + torch_dtype = torch.float16 + tokenizer.padding_side = "right" + + llm = AutoModelForCausalLM.from_pretrained( + params.llm_path_or_name, + attn_implementation=attn_implementation, + torch_dtype=torch_dtype, + ) + + if not params.unfreeze_llm: + for name, param in llm.named_parameters(): + param.requires_grad = False + llm.eval() + else: + if params.use_lora: + lora_config = LoraConfig( + r=64, + lora_alpha=16, + target_modules=[ + "q_proj", + "k_proj", + "v_proj", + "o_proj", + "up_proj", + "gate_proj", + "down_proj", + ], + lora_dropout=0.05, + task_type="CAUSAL_LM", + ) + llm = get_peft_model(llm, lora_config) + llm.print_trainable_parameters() + + special_tokens_dict = {"additional_special_tokens": [DEFAULT_SPEECH_TOKEN]} + tokenizer.add_special_tokens(special_tokens_dict) + llm.config.pad_token_id = tokenizer.pad_token_id + llm.config.default_speech_token_id = tokenizer.convert_tokens_to_ids( + DEFAULT_SPEECH_TOKEN + ) + + encoder_projector = EncoderProjector( + speech_encoder_dim, llm.config.hidden_size, params.encoder_projector_ds_rate + ) + + model = SPEECH_LLM( + speech_encoder, + llm, + encoder_projector, + ) + + if params.pretrained_model_path: + checkpoint = torch.load(params.pretrained_model_path, map_location="cpu") + missing_keys, unexpected_keys = model.load_state_dict(checkpoint, strict=False) + + num_param = sum([p.numel() for p in model.parameters()]) + logging.info(f"Number of model parameters: {num_param}") + + logging.info("Trainable parameters (excluding model.eval modules):") + for name, param in model.named_parameters(): + if param.requires_grad: + logging.info(f"{name}: {param.shape}") + + if torch.cuda.is_available(): + device = torch.device("cuda", rank) + else: + device = torch.device("cpu") + logging.info(f"Device: {device}") + model.to(device) + + assert params.deepspeed and world_size > 1 + logging.info("Using DeepSpeed") + model, optimizer, _, scheduler = deepspeed.initialize( + args=params, model=model, model_parameters=model.parameters() + ) + + data_module = AsrDataModule(args) + multi_dataset = MultiDataset(args.manifest_dir) + + def remove_short_and_long_utt(c: Cut): + # Keep only utterances with duration between 1 second and 20 seconds + # + # Caution: There is a reason to select 20.0 here. Please see + # ../local/display_manifest_statistics.py + # + # You should use ../local/display_manifest_statistics.py to get + # an utterance duration distribution for your dataset to select + # the threshold + if c.duration < 1.0 or c.duration > 20.0: + # logging.warning( + # f"Exclude cut with ID {c.id} from training. 
Duration: {c.duration}" + # ) + return False + return True + + if params.use_aishell: + train_cuts = multi_dataset.aishell_train_cuts() + else: + train_cuts = multi_dataset.train_cuts() + + train_cuts = train_cuts.filter(remove_short_and_long_utt) + + sampler_state_dict = None + if params.sampler_state_dict_path: + sampler_state_dict = torch.load(params.sampler_state_dict_path) + sampler_state_dict["max_duration"] = params.max_duration + # TODO: load sampler state dict + train_dl = data_module.train_dataloaders( + train_cuts, sampler_state_dict=sampler_state_dict + ) + + if params.use_aishell: + valid_cuts = multi_dataset.aishell_dev_cuts() + else: + valid_cuts = multi_dataset.dev_cuts() + valid_dl = data_module.valid_dataloaders(valid_cuts) + + if args.tensorboard and rank == 0: + tb_writer = SummaryWriter(log_dir=f"{params.exp_dir}/tensorboard") + else: + tb_writer = None + + logging.info(f"start training from epoch {params.start_epoch}") + for epoch in range(params.start_epoch, params.num_epochs + 1): + + fix_random_seed(params.seed + epoch - 1) + train_dl.sampler.set_epoch(epoch - 1) + + if tb_writer is not None: + tb_writer.add_scalar("train/epoch", epoch, params.batch_idx_train) + + params.cur_epoch = epoch + + train_one_epoch( + params=params, + tokenizer=tokenizer, + model=model, + optimizer=optimizer, + scheduler=scheduler, + train_dl=train_dl, + valid_dl=valid_dl, + tb_writer=tb_writer, + world_size=world_size, + rank=rank, + ) + + model.save_checkpoint( + save_dir=params.exp_dir, + tag=f"epoch-{params.cur_epoch}", + client_state={}, + exclude_frozen_parameters=True, + ) + if rank == 0: + convert_zero_checkpoint_to_fp32_state_dict( + params.exp_dir, + f"{params.exp_dir}/epoch-{params.cur_epoch}.pt", + tag=f"epoch-{params.cur_epoch}", + exclude_frozen_parameters=True, + ) + # save sampler state dict into checkpoint + sampler_state_dict = train_dl.sampler.state_dict() + torch.save( + sampler_state_dict, + f"{params.exp_dir}/epoch-{params.cur_epoch}-sampler.pt", + ) + + os.system(f"rm -rf {params.exp_dir}/epoch-{params.cur_epoch}") + + logging.info("Done!") + + +def display_and_save_batch( + batch: dict, + params: AttributeDict, +) -> None: + """Display the batch statistics and save the batch into disk. + + Args: + batch: + A batch of data. See `lhotse.dataset.K2SpeechRecognitionDataset()` + for the content in it. + params: + Parameters for training. See :func:`get_params`. 
+ """ + from lhotse.utils import uuid4 + + filename = f"{params.exp_dir}/batch-{uuid4()}.pt" + logging.info(f"Saving batch to {filename}") + torch.save(batch, filename) + + supervisions = batch["supervisions"] + features = batch["inputs"] + + logging.info(f"features shape: {features.shape}") + + +def main(): + parser = get_parser() + AsrDataModule.add_arguments(parser) + args = parser.parse_args() + args.exp_dir = Path(args.exp_dir) + + world_size = get_world_size() + rank = get_rank() + + torch.set_num_threads(1) + torch.set_num_interop_threads(1) + run(rank=rank, world_size=world_size, args=args) + + +if __name__ == "__main__": + main() diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/whisper_encoder_forward_monkey_patch.py b/egs/speech_llm/SPEECH2SPEECH/slam_omni/whisper_encoder_forward_monkey_patch.py new file mode 120000 index 000000000..2a7808921 --- /dev/null +++ b/egs/speech_llm/SPEECH2SPEECH/slam_omni/whisper_encoder_forward_monkey_patch.py @@ -0,0 +1 @@ +../../../aishell/ASR/whisper/whisper_encoder_forward_monkey_patch.py \ No newline at end of file From e6897b10fa5b79c9312515702cf2766f7cfc54eb Mon Sep 17 00:00:00 2001 From: root Date: Wed, 26 Feb 2025 07:08:34 +0000 Subject: [PATCH 02/57] make asr decode results align --- egs/speech_llm/SPEECH2SPEECH/prepare.sh | 2 +- .../SPEECH2SPEECH/slam_omni/data_module.py | 16 ++++++++-------- egs/speech_llm/SPEECH2SPEECH/slam_omni/decode.py | 14 +++++++------- egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py | 2 +- 4 files changed, 17 insertions(+), 17 deletions(-) diff --git a/egs/speech_llm/SPEECH2SPEECH/prepare.sh b/egs/speech_llm/SPEECH2SPEECH/prepare.sh index 87e7cd254..b61241974 100644 --- a/egs/speech_llm/SPEECH2SPEECH/prepare.sh +++ b/egs/speech_llm/SPEECH2SPEECH/prepare.sh @@ -40,6 +40,6 @@ if [ $stage -le 2 ] && [ $stop_stage -ge 2 ]; then --epoch 999 --avg 1 \ --manifest-dir data/fbank \ --use-flash-attn True \ - --use-lora True + --use-lora False # --on-the-fly-feats True fi diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/data_module.py b/egs/speech_llm/SPEECH2SPEECH/slam_omni/data_module.py index 35d1e3494..a8b1a4746 100644 --- a/egs/speech_llm/SPEECH2SPEECH/slam_omni/data_module.py +++ b/egs/speech_llm/SPEECH2SPEECH/slam_omni/data_module.py @@ -39,9 +39,9 @@ from lhotse.dataset.input_strategies import ( # noqa F401 For AudioSamples ) from lhotse.utils import fix_random_seed from torch.utils.data import DataLoader +from datasets import load_dataset from icefall.utils import str2bool - from speech_dataset import K2SpeechRecognitionDataset class _SeedWorkers: @@ -396,7 +396,7 @@ class AsrDataModule: def test_dataloaders(self, cuts: CutSet) -> DataLoader: logging.debug("About to create test dataset") test = K2SpeechRecognitionDataset( - input_strategy=OnTheFlyFeatures(WhisperFbank(WhisperFbankConfig(num_filters=80, device='cuda'))) + input_strategy=OnTheFlyFeatures(WhisperFbank(WhisperFbankConfig(num_filters=80, device='cpu'))) if self.args.on_the_fly_feats else eval(self.args.input_strategy)(), return_cuts=self.args.return_cuts, @@ -419,19 +419,19 @@ class AsrDataModule: def test_cuts(self) -> CutSet: logging.info("About to get test cuts") if self.args.on_the_fly_feats: - # dataset = load_dataset(args.huggingface_dataset_path_or_name, streaming=True, split=partition) + # dataset = load_dataset(self.args.huggingface_dataset_path_or_name, streaming=True, split=partition) i, num_digits = 0, 5 idx = f"{i}".zfill(num_digits) parquet_files = [ f"data/train-{idx}-of-01601.parquet", ] - parquet_files = 
[f"{args.huggingface_dataset_path_or_name}/{f}" for f in parquet_files] + parquet_files = [f"{self.args.huggingface_dataset_path_or_name}/{f}" for f in parquet_files] file_name = parquet_files[0] logging.info(f"Loading dataset from {file_name}") dataset = load_dataset('parquet', data_files=parquet_files, streaming=True, split='train') - cut_set = CutSet.from_huggingface_dataset(dataset, audio_key=args.audio_key, text_key=args.text_key) - if args.resample_to_16kHz: + cut_set = CutSet.from_huggingface_dataset(dataset, audio_key=self.args.audio_key, text_key=self.args.text_key) + if self.args.resample_to_16kHz: cut_set = cut_set.resample(16000) - return cut_set + return {'test':cut_set} else: - return load_manifest_lazy(self.args.manifest_dir / "cuts_belle.00000.jsonl.gz") \ No newline at end of file + return {'test':load_manifest_lazy(self.args.manifest_dir / "cuts_belle.00000.jsonl.gz")} \ No newline at end of file diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/decode.py b/egs/speech_llm/SPEECH2SPEECH/slam_omni/decode.py index 5f5334142..f878d32e7 100755 --- a/egs/speech_llm/SPEECH2SPEECH/slam_omni/decode.py +++ b/egs/speech_llm/SPEECH2SPEECH/slam_omni/decode.py @@ -318,9 +318,9 @@ def decode_one_batch( 2, ) - supervisions = batch["supervisions"] - feature_len = supervisions["num_frames"] - feature_len = feature_len.to(device, dtype=dtype) + # supervisions = batch["supervisions"] + # feature_len = supervisions["num_frames"] + # feature_len = feature_len.to(device, dtype=dtype) messages = [ [ @@ -336,9 +336,6 @@ def decode_one_batch( ) hyps = tokenizer.batch_decode(generated_ids, skip_special_tokens=True) - print(hyps) - print(supervisions) - return {"beam-search": hyps} @@ -408,7 +405,10 @@ def decode_dataset( results = defaultdict(list) for batch_idx, batch in enumerate(dl): - texts = batch["supervisions"]["text"] + answers = batch["supervisions"]["text"] + questions_with_history = [cut.custom["question"] for cut in batch["supervisions"]["cut"]] + answer_cosyvoice_speech_token = [cut.custom["answer_cosyvoice_speech_token"] for cut in batch["supervisions"]["cut"]] + texts = [question.split(': ')[-1].strip() for question in questions_with_history] cut_ids = [cut.id for cut in batch["supervisions"]["cut"]] hyps_dict = decode_one_batch( diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py b/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py index d9489b1ae..1c3ccd2c6 100755 --- a/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py +++ b/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py @@ -66,7 +66,7 @@ from lhotse.cut import Cut from lhotse.dataset.sampling.base import CutSampler from lhotse.utils import fix_random_seed from model import IGNORE_TOKEN_ID, SPEECH_LLM, EncoderProjector -from multi_dataset import MultiDataset +# from multi_dataset import MultiDataset from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training from torch import Tensor from torch.utils.tensorboard import SummaryWriter From 6b69276b19ce348dfc6f9e83b08515fa8548ef8f Mon Sep 17 00:00:00 2001 From: root Date: Fri, 11 Apr 2025 06:50:25 +0000 Subject: [PATCH 03/57] add training stage --- egs/speech_llm/SPEECH2SPEECH/prepare.sh | 25 ++++++++++++++++--- .../slam_omni/ds_config_zero1.json | 1 + .../SPEECH2SPEECH/slam_omni/train.py | 10 +++++--- 3 files changed, 29 insertions(+), 7 deletions(-) create mode 120000 egs/speech_llm/SPEECH2SPEECH/slam_omni/ds_config_zero1.json diff --git a/egs/speech_llm/SPEECH2SPEECH/prepare.sh b/egs/speech_llm/SPEECH2SPEECH/prepare.sh index b61241974..6c7393379 100644 --- 
a/egs/speech_llm/SPEECH2SPEECH/prepare.sh +++ b/egs/speech_llm/SPEECH2SPEECH/prepare.sh @@ -5,8 +5,8 @@ export PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python export PYTHONPATH=$PYTHONPATH:/workspace/slam/icefall_omni set -eou pipefail -stage=2 -stop_stage=2 +stage=1 +stop_stage=1 # All files generated by this script are saved in "data". # You can safely remove "data" and rerun this script to regenerate it. mkdir -p data @@ -20,8 +20,10 @@ log() { if [ $stage -le 0 ] && [ $stop_stage -ge 0 ]; then log "stage 0: " - - + cd /workspace/slam/lhotse + git config --global --add safe.directory /workspace/slam/lhotse + pip install -e '.[dev]' + cd - fi if [ $stage -le 1 ] && [ $stop_stage -ge 1 ]; then @@ -43,3 +45,18 @@ if [ $stage -le 2 ] && [ $stop_stage -ge 2 ]; then --use-lora False # --on-the-fly-feats True fi + +ngpu=2 +if [ $stage -le 3 ] && [ $stop_stage -ge 3 ]; then + log "stage 3: " +torchrun --nproc_per_node $ngpu ./slam_omni/train.py \ + --max-duration 200 \ + --exp-dir ./slam_omni/exp_test \ + --speech-encoder-path-or-name models/whisper/v1.1/whisper-large-v2-multi-hans-zh-epoch-3-avg-10.pt \ + --llm-path-or-name models/Qwen2.5-0.5B-Instruct \ + --manifest-dir data/fbank \ + --deepspeed \ + --deepspeed_config ./slam_omni/ds_config_zero1.json \ + --use-flash-attn True \ + --use-lora True --unfreeze-llm True +fi \ No newline at end of file diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/ds_config_zero1.json b/egs/speech_llm/SPEECH2SPEECH/slam_omni/ds_config_zero1.json new file mode 120000 index 000000000..4fbacea32 --- /dev/null +++ b/egs/speech_llm/SPEECH2SPEECH/slam_omni/ds_config_zero1.json @@ -0,0 +1 @@ +../../ASR_LLM/whisper_llm_zh/ds_config_zero1.json \ No newline at end of file diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py b/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py index 1c3ccd2c6..f05e5c1ac 100755 --- a/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py +++ b/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py @@ -395,7 +395,12 @@ def compute_loss( feature = feature.transpose(1, 2) # (N, C, T) batch_idx_train = params.batch_idx_train - supervisions = batch["supervisions"] + + answers = batch["supervisions"]["text"] + questions_with_history = [cut.custom["question"] for cut in batch["supervisions"]["cut"]] + answer_cosyvoice_speech_token = [cut.custom["answer_cosyvoice_speech_token"] for cut in batch["supervisions"]["cut"]] + last_questions = [question.split(': ')[-1].strip() for question in questions_with_history] + texts = batch["supervisions"]["text"] # remove spaces in texts texts = [normalize_text_alimeeting(text) for text in texts] @@ -426,7 +431,7 @@ def compute_loss( info = MetricsTracker() with warnings.catch_warnings(): warnings.simplefilter("ignore") - feature_lens = supervisions["num_frames"] + feature_lens = batch["supervisions"]["num_frames"] info["frames"] = (feature_lens // params.subsampling_factor).sum().item() # Note: We use reduction=sum while computing the loss. 
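
The `questions_with_history` field introduced in this hunk packs the whole dialogue into a single string, which later commits in this series split back into turns. A minimal sketch of that parsing, using the "USER: ... ASSISTANT: ..." history format quoted in those commits (the placeholder token that precedes the current spoken question is omitted here); illustration only:

    history = "USER: 给我列举一些新闻头条。 ASSISTANT: 当今社会的新闻永远不会停。"
    rounds = [r for r in history.split("USER:") if r]   # one entry per past round
    for r in rounds:
        question, answer = r.split("ASSISTANT:")
        print(question.strip(), "->", answer.strip())
    # 给我列举一些新闻头条。 -> 当今社会的新闻永远不会停。
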
@@ -848,7 +853,6 @@ def display_and_save_batch( logging.info(f"Saving batch to {filename}") torch.save(batch, filename) - supervisions = batch["supervisions"] features = batch["inputs"] logging.info(f"features shape: {features.shape}") From 202d764cfba2ad0b773dc0c134ec728a5fd315de Mon Sep 17 00:00:00 2001 From: root Date: Mon, 14 Apr 2025 05:35:07 +0000 Subject: [PATCH 04/57] remove text norm --- .../SPEECH2SPEECH/slam_omni/train.py | 46 ++----------------- 1 file changed, 5 insertions(+), 41 deletions(-) diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py b/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py index f05e5c1ac..f79e5b64c 100755 --- a/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py +++ b/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py @@ -347,42 +347,6 @@ def compute_loss( return input_ids, attention_mask, target_ids - def normalize_text_alimeeting(text: str, normalize: str = "m2met") -> str: - """ - Text normalization similar to M2MeT challenge baseline. - See: https://github.com/yufan-aslp/AliMeeting/blob/main/asr/local/text_normalize.pl - """ - if normalize == "none": - return text - elif normalize == "m2met": - import re - - text = text.replace(" ", "") - text = text.replace("", "") - text = text.replace("<%>", "") - text = text.replace("<->", "") - text = text.replace("<$>", "") - text = text.replace("<#>", "") - text = text.replace("<_>", "") - text = text.replace("", "") - text = text.replace("`", "") - text = text.replace("&", "") - text = text.replace(",", "") - if re.search("[a-zA-Z]", text): - text = text.upper() - text = text.replace("A", "A") - text = text.replace("a", "A") - text = text.replace("b", "B") - text = text.replace("c", "C") - text = text.replace("k", "K") - text = text.replace("t", "T") - text = text.replace(",", "") - text = text.replace("丶", "") - text = text.replace("。", "") - text = text.replace("、", "") - text = text.replace("?", "") - return text - max_frames = params.max_duration * 1000 // params.frame_shift_ms allowed_max_frames = int(max_frames * (1.0 + params.allowed_excess_duration_ratio)) batch = filter_uneven_sized_batch(batch, allowed_max_frames) @@ -400,15 +364,15 @@ def compute_loss( questions_with_history = [cut.custom["question"] for cut in batch["supervisions"]["cut"]] answer_cosyvoice_speech_token = [cut.custom["answer_cosyvoice_speech_token"] for cut in batch["supervisions"]["cut"]] last_questions = [question.split(': ')[-1].strip() for question in questions_with_history] - - texts = batch["supervisions"]["text"] - # remove spaces in texts - texts = [normalize_text_alimeeting(text) for text in texts] + history_contexts = [question.rsplit(':', 1)[0].strip() for question in questions_with_history] + # USER: 生成一个关于夏天的诗歌。 ASSISTANT: 夏日炎炎,万物生长,阳光明媚,享受着夏日的美好时光。 USER: 给我列举一些新闻头条。 ASSISTANT: 当今社会的新闻永远不会停。: 告诉我如何烹饪鸡肉 + # : 对以下句子进行鉴赏:他心地善良。输出结果为“他是一个有善心的人。 messages = [] for i, text in enumerate(texts): + history_context = history_contexts[i] message = [ - {"role": "user", "content": f"{DEFAULT_SPEECH_TOKEN}请转写音频为文字"}, + {"role": "user", "content": f"{history_context}{DEFAULT_SPEECH_TOKEN}请转写音频为文字"}, {"role": "assistant", "content": text}, ] messages.append(message) From 1d11662016f418bae1d58b1a1a067b19ed4637c2 Mon Sep 17 00:00:00 2001 From: Yuekai Zhang Date: Mon, 14 Apr 2025 14:32:42 +0800 Subject: [PATCH 05/57] fix multi rounds data --- .../SPEECH2SPEECH/slam_omni/train.py | 26 +++++++++++++++---- 1 file changed, 21 insertions(+), 5 deletions(-) diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py 
b/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py index f79e5b64c..7fc207455 100755 --- a/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py +++ b/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py @@ -303,6 +303,7 @@ def compute_loss( texts = [] TEMPLATE = "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content']}}{% if loop.last %}{{ '<|im_end|>'}}{% else %}{{ '<|im_end|>\n' }}{% endif %}{% endfor %}" for i, msg in enumerate(messages): + print(msg,23333333333333) texts.append( tokenizer.apply_chat_template( msg, @@ -334,9 +335,14 @@ def compute_loss( # first get the indices of the tokens mask_prompt = True if mask_prompt: + default_speech_token_id = tokenizer.convert_tokens_to_ids(DEFAULT_SPEECH_TOKEN) + default_speech_token_indices = torch.where( + input_ids == default_speech_token_id + ) mask_indices = torch.where( input_ids == tokenizer.convert_tokens_to_ids("assistant") ) + print(mask_indices, default_speech_token_indices, default_speech_token_id) for i in range(mask_indices[0].size(0)): row = mask_indices[0][i] col = mask_indices[1][i] @@ -362,6 +368,7 @@ def compute_loss( answers = batch["supervisions"]["text"] questions_with_history = [cut.custom["question"] for cut in batch["supervisions"]["cut"]] + chat_rounds = [cut.custom["round"] for cut in batch["supervisions"]["cut"]] answer_cosyvoice_speech_token = [cut.custom["answer_cosyvoice_speech_token"] for cut in batch["supervisions"]["cut"]] last_questions = [question.split(': ')[-1].strip() for question in questions_with_history] history_contexts = [question.rsplit(':', 1)[0].strip() for question in questions_with_history] @@ -369,11 +376,20 @@ def compute_loss( # : 对以下句子进行鉴赏:他心地善良。输出结果为“他是一个有善心的人。 messages = [] - for i, text in enumerate(texts): - history_context = history_contexts[i] - message = [ - {"role": "user", "content": f"{history_context}{DEFAULT_SPEECH_TOKEN}请转写音频为文字"}, - {"role": "assistant", "content": text}, + for i, total_round in enumerate(chat_rounds): + message = [] + if total_round > 1: + history_question_answer = history_contexts[i].split('USER:') + for j in range(total_round - 1): + # USER: 生成一个关于夏天的诗歌。 ASSISTANT: 夏日炎炎,万物生长,阳光明媚,享受着夏日的美好时光。 USER: 给我列举一些新闻头条。 ASSISTANT: 当今社会的新闻永远不会停。 + question_answer = history_question_answer[j].split('ASSISTANT:') + message += [ + {"role": "user", "content": question_answer[0].strip()}, + {"role": "assistant", "content": question_answer[1].strip()} + ] + message += [ + {"role": "user", "content": f"{DEFAULT_SPEECH_TOKEN}"}, + {"role": "assistant", "content": answers[i]} ] messages.append(message) From 3ad075af6098256c1ceb5f9edf664c3ffa1d4ffb Mon Sep 17 00:00:00 2001 From: root Date: Tue, 15 Apr 2025 02:16:03 +0000 Subject: [PATCH 06/57] s2t training --- egs/speech_llm/SPEECH2SPEECH/prepare.sh | 32 ++++++++++--- .../SPEECH2SPEECH/slam_omni/data_module.py | 48 +++++++++++-------- .../SPEECH2SPEECH/slam_omni/train.py | 45 +++++++---------- 3 files changed, 70 insertions(+), 55 deletions(-) diff --git a/egs/speech_llm/SPEECH2SPEECH/prepare.sh b/egs/speech_llm/SPEECH2SPEECH/prepare.sh index 6c7393379..e23f26684 100644 --- a/egs/speech_llm/SPEECH2SPEECH/prepare.sh +++ b/egs/speech_llm/SPEECH2SPEECH/prepare.sh @@ -5,8 +5,8 @@ export PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python export PYTHONPATH=$PYTHONPATH:/workspace/slam/icefall_omni set -eou pipefail -stage=1 -stop_stage=1 +stage=$1 +stop_stage=$2 # All files generated by this script are saved in "data". # You can safely remove "data" and rerun this script to regenerate it. 
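
Since this hunk switches `stage` and `stop_stage` to positional arguments, the script is presumably now invoked with explicit stage numbers, e.g. `bash prepare.sh 1 1` to run only the feature-extraction stage or `bash prepare.sh 3 3` to run only training; the exact calling convention is an assumption, as the patch does not document it.
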
mkdir -p data @@ -20,10 +20,12 @@ log() { if [ $stage -le 0 ] && [ $stop_stage -ge 0 ]; then log "stage 0: " + pip uninstall lhotse cd /workspace/slam/lhotse git config --global --add safe.directory /workspace/slam/lhotse pip install -e '.[dev]' cd - + pip install -r slam_omni/requirements.txt fi if [ $stage -le 1 ] && [ $stop_stage -ge 1 ]; then @@ -32,6 +34,20 @@ if [ $stage -le 1 ] && [ $stop_stage -ge 1 ]; then python3 local/compute_whisper_fbank.py fi + +if [ $stage -le 10 ] && [ $stop_stage -ge 10 ]; then + log "Stage 3: Combine features" + manifest_dir=data/fbank + if [ ! -f $manifest_dir/cuts_belle_00001-01600.jsonl.gz ]; then + pieces=$(find $manifest_dir -name "cuts_belle.*.jsonl.gz" | sort) + # # remove cust_belle_00000.jsonl.gz from pieces + # pieces=$(echo $pieces | sed 's/cuts_belle.00000.jsonl.gz//g') + echo $pieces | wc + lhotse combine $pieces data/fbank/cuts_belle_00001-01600.jsonl.gz + cd $manifest_dir && ln -s cuts_belle_00001-01600.jsonl.gz cuts_belle_train.jsonl.gz && cd - + fi +fi + if [ $stage -le 2 ] && [ $stop_stage -ge 2 ]; then log "stage 2: " python3 ./slam_omni/decode.py \ @@ -46,17 +62,21 @@ if [ $stage -le 2 ] && [ $stop_stage -ge 2 ]; then fi -ngpu=2 +ngpu=8 if [ $stage -le 3 ] && [ $stop_stage -ge 3 ]; then log "stage 3: " torchrun --nproc_per_node $ngpu ./slam_omni/train.py \ - --max-duration 200 \ - --exp-dir ./slam_omni/exp_test \ + --max-duration 80 \ + --enable-musan False \ + --exp-dir ./slam_omni/exp_speech2text \ --speech-encoder-path-or-name models/whisper/v1.1/whisper-large-v2-multi-hans-zh-epoch-3-avg-10.pt \ --llm-path-or-name models/Qwen2.5-0.5B-Instruct \ --manifest-dir data/fbank \ --deepspeed \ --deepspeed_config ./slam_omni/ds_config_zero1.json \ --use-flash-attn True \ + --pretrained-model-path slam_omni/exp_speech2text/epoch-1-checkpoint-5000.pt/pytorch_model.bin \ + --sampler-state-dict-path slam_omni/exp_speech2text/epoch-1-checkpoint-5000-sampler.pt \ --use-lora True --unfreeze-llm True -fi \ No newline at end of file +fi + diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/data_module.py b/egs/speech_llm/SPEECH2SPEECH/slam_omni/data_module.py index a8b1a4746..11e3bc779 100644 --- a/egs/speech_llm/SPEECH2SPEECH/slam_omni/data_module.py +++ b/egs/speech_llm/SPEECH2SPEECH/slam_omni/data_module.py @@ -357,26 +357,20 @@ class AsrDataModule: return train_dl def valid_dataloaders(self, cuts_valid: CutSet) -> DataLoader: - transforms = [] - # if self.args.concatenate_cuts: - # transforms = [ - # CutConcatenate( - # duration_factor=self.args.duration_factor, gap=self.args.gap - # ) - # ] + transforms - + """ + Args: + cuts_valid: + CutSet for validation. 
+ """ logging.info("About to create dev dataset") - if self.args.on_the_fly_feats: - validate = K2SpeechRecognitionDataset( - cut_transforms=transforms, - input_strategy=OnTheFlyFeatures(WhisperFbank(WhisperFbankConfig(num_filters=80, device='cuda'))), - return_cuts=self.args.return_cuts, - ) - else: - validate = K2SpeechRecognitionDataset( - cut_transforms=transforms, - return_cuts=self.args.return_cuts, - ) + + validate = K2SpeechRecognitionDataset( + input_strategy=OnTheFlyFeatures(WhisperFbank(WhisperFbankConfig(num_filters=80, device='cuda'))) + if self.args.on_the_fly_feats + else eval(self.args.input_strategy)(), + return_cuts=self.args.return_cuts, + ) + valid_sampler = DynamicBucketingSampler( cuts_valid, max_duration=self.args.max_duration, @@ -434,4 +428,18 @@ class AsrDataModule: cut_set = cut_set.resample(16000) return {'test':cut_set} else: - return {'test':load_manifest_lazy(self.args.manifest_dir / "cuts_belle.00000.jsonl.gz")} \ No newline at end of file + return {'test':load_manifest_lazy(self.args.manifest_dir / "cuts_belle.00000.jsonl.gz")} + + @lru_cache() + def dev_cuts(self) -> CutSet: + logging.info("About to get test cuts") + if self.args.on_the_fly_feats: + pass + else: + return load_manifest_lazy(self.args.manifest_dir / "cuts_belle.00000.jsonl.gz") + + + @lru_cache() + def train_cuts(self) -> CutSet: + logging.info("About to get train cuts") + return load_manifest_lazy(self.args.manifest_dir / "cuts_belle_train.jsonl.gz") \ No newline at end of file diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py b/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py index 7fc207455..f0df303e4 100755 --- a/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py +++ b/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py @@ -206,13 +206,6 @@ def get_parser(): help="Whether to use half precision training.", ) - parser.add_argument( - "--use-aishell", - type=str2bool, - default=True, - help="Whether to only use aishell1 dataset for training.", - ) - parser = deepspeed.add_config_arguments(parser) add_model_arguments(parser) @@ -297,13 +290,11 @@ def compute_loss( def preprocess( messages, tokenizer: transformers.PreTrainedTokenizer, - max_len: int, ) -> Dict: """Preprocesses the data for supervised fine-tuning.""" texts = [] TEMPLATE = "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content']}}{% if loop.last %}{{ '<|im_end|>'}}{% else %}{{ '<|im_end|>\n' }}{% endif %}{% endfor %}" for i, msg in enumerate(messages): - print(msg,23333333333333) texts.append( tokenizer.apply_chat_template( msg, @@ -311,11 +302,16 @@ def compute_loss( chat_template=TEMPLATE, add_generation_prompt=False, padding="longest", # FIX me change padding to longest - max_length=max_len, - truncation=True, + truncation=False, ) ) # padding texts to the same length, texts is a list of list, padding with tokenzier.pad_token_id + # remove too long text + texts = [ text for text in texts if len(text) < 1024 ] + if len(texts) != len(messages): + logging.warning( + f"Remove too long text, {messages} " + ) max_len_texts = max([len(text) for text in texts]) if tokenizer.padding_side == "right": texts = [ @@ -336,18 +332,14 @@ def compute_loss( mask_prompt = True if mask_prompt: default_speech_token_id = tokenizer.convert_tokens_to_ids(DEFAULT_SPEECH_TOKEN) - default_speech_token_indices = torch.where( + mask_indices = torch.where( input_ids == default_speech_token_id ) - mask_indices = torch.where( - input_ids == tokenizer.convert_tokens_to_ids("assistant") - ) - print(mask_indices, 
default_speech_token_indices, default_speech_token_id) for i in range(mask_indices[0].size(0)): row = mask_indices[0][i] col = mask_indices[1][i] - # + 2 to skip: 'assistant', '\n' - target_ids[row, : col + 2] = IGNORE_TOKEN_ID + # + 2 to skip: 'assistant', '\n' 151665, 151645, 198, 151644, 77091, 198 + target_ids[row, : col + 6] = IGNORE_TOKEN_ID attention_mask = input_ids.ne(tokenizer.pad_token_id) @@ -380,6 +372,7 @@ def compute_loss( message = [] if total_round > 1: history_question_answer = history_contexts[i].split('USER:') + history_question_answer = [item for item in history_question_answer if item] for j in range(total_round - 1): # USER: 生成一个关于夏天的诗歌。 ASSISTANT: 夏日炎炎,万物生长,阳光明媚,享受着夏日的美好时光。 USER: 给我列举一些新闻头条。 ASSISTANT: 当今社会的新闻永远不会停。 question_answer = history_question_answer[j].split('ASSISTANT:') @@ -393,7 +386,7 @@ def compute_loss( ] messages.append(message) - input_ids, attention_mask, target_ids = preprocess(messages, tokenizer, max_len=128) + input_ids, attention_mask, target_ids = preprocess(messages, tokenizer) target_ids = target_ids.type(torch.LongTensor) input_ids = input_ids.type(torch.LongTensor) @@ -508,7 +501,7 @@ def train_one_epoch( for batch_idx, batch in enumerate(train_dl): params.batch_idx_train += 1 batch_size = len(batch["supervisions"]["text"]) - if batch_idx % params.valid_interval == 0 and not params.print_diagnostics: + if batch_idx % params.valid_interval == 0: logging.info("Computing validation loss") valid_info = compute_validation_loss( params=params, @@ -720,7 +713,6 @@ def run(rank, world_size, args): ) data_module = AsrDataModule(args) - multi_dataset = MultiDataset(args.manifest_dir) def remove_short_and_long_utt(c: Cut): # Keep only utterances with duration between 1 second and 20 seconds @@ -738,10 +730,8 @@ def run(rank, world_size, args): return False return True - if params.use_aishell: - train_cuts = multi_dataset.aishell_train_cuts() - else: - train_cuts = multi_dataset.train_cuts() + + train_cuts = data_module.train_cuts() train_cuts = train_cuts.filter(remove_short_and_long_utt) @@ -754,10 +744,7 @@ def run(rank, world_size, args): train_cuts, sampler_state_dict=sampler_state_dict ) - if params.use_aishell: - valid_cuts = multi_dataset.aishell_dev_cuts() - else: - valid_cuts = multi_dataset.dev_cuts() + valid_cuts = data_module.dev_cuts() valid_dl = data_module.valid_dataloaders(valid_cuts) if args.tensorboard and rank == 0: From 0c02da82acfea0c7db0349524d6c83af708ea914 Mon Sep 17 00:00:00 2001 From: root Date: Tue, 15 Apr 2025 06:53:20 +0000 Subject: [PATCH 07/57] refine decoding method --- egs/speech_llm/SPEECH2SPEECH/prepare.sh | 20 +++++- .../SPEECH2SPEECH/slam_omni/decode.py | 61 ++++++++++++------- .../SPEECH2SPEECH/slam_omni/model.py | 30 ++++++--- 3 files changed, 79 insertions(+), 32 deletions(-) diff --git a/egs/speech_llm/SPEECH2SPEECH/prepare.sh b/egs/speech_llm/SPEECH2SPEECH/prepare.sh index e23f26684..75bd9c576 100644 --- a/egs/speech_llm/SPEECH2SPEECH/prepare.sh +++ b/egs/speech_llm/SPEECH2SPEECH/prepare.sh @@ -52,12 +52,28 @@ if [ $stage -le 2 ] && [ $stop_stage -ge 2 ]; then log "stage 2: " python3 ./slam_omni/decode.py \ --max-duration 80 \ - --exp-dir slam_omni/exp_test_whisper_qwen2_1.5B \ + --exp-dir slam_omni/exp_speech2text \ --speech-encoder-path-or-name models/whisper/v1.1/whisper-large-v2-multi-hans-zh-epoch-3-avg-10.pt \ - --llm-path-or-name models/qwen \ + --llm-path-or-name models/Qwen2.5-0.5B-Instruct \ --epoch 999 --avg 1 \ --manifest-dir data/fbank \ --use-flash-attn True \ + --method pure_text_sampling \ + 
--use-lora True # --on-the-fly-feats True + +fi + +if [ $stage -le 20 ] && [ $stop_stage -ge 20 ]; then + log "stage 2: " + python3 ./slam_omni/decode.py \ + --max-duration 80 \ + --exp-dir slam_omni/exp_speech2text \ + --speech-encoder-path-or-name models/whisper/v1.1/whisper-large-v2-multi-hans-zh-epoch-3-avg-10.pt \ + --llm-path-or-name models/Qwen2.5-0.5B-Instruct \ + --epoch 999 --avg 1 \ + --manifest-dir data/fbank \ + --use-flash-attn True \ + --method pure_text_sampling_original_0.5B \ --use-lora False # --on-the-fly-feats True fi diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/decode.py b/egs/speech_llm/SPEECH2SPEECH/slam_omni/decode.py index f878d32e7..3feef8f1c 100755 --- a/egs/speech_llm/SPEECH2SPEECH/slam_omni/decode.py +++ b/egs/speech_llm/SPEECH2SPEECH/slam_omni/decode.py @@ -52,7 +52,6 @@ from collections import defaultdict from pathlib import Path from typing import Dict, List, Optional, Tuple -import k2 import torch import torch.nn as nn import transformers @@ -60,13 +59,12 @@ import whisper from data_module import AsrDataModule from lhotse.cut import Cut from model import SPEECH_LLM, EncoderProjector -# from data_module import MultiDataset + from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training from train import DEFAULT_SPEECH_TOKEN from transformers import AutoModelForCausalLM, AutoTokenizer from whisper_encoder_forward_monkey_patch import replace_whisper_encoder_forward -from icefall.checkpoint import average_checkpoints_with_averaged_model, load_checkpoint from icefall.env import get_env_info from icefall.utils import ( AttributeDict, @@ -76,7 +74,6 @@ from icefall.utils import ( write_error_stats, ) - def average_checkpoints( filenames: List[Path], device: torch.device = torch.device("cpu") ) -> dict: @@ -133,7 +130,7 @@ def add_model_arguments(parser: argparse.ArgumentParser): parser.add_argument( "--llm-path-or-name", type=str, - default="/workspace/asr/Qwen1.5-0.5B-Chat", + default="", help="Path or name of the large language model.", ) @@ -264,7 +261,6 @@ def decode_one_batch( def preprocess( messages, tokenizer: transformers.PreTrainedTokenizer, - max_len: int = 128, ) -> Dict: """Preprocesses the data for supervised fine-tuning.""" texts = [] @@ -277,8 +273,7 @@ def decode_one_batch( add_generation_prompt=False, chat_template=TEMPLATE, padding="longest", - max_length=max_len, - truncation=True, + truncation=False, ) ) max_len_texts = max([len(text) for text in texts]) @@ -318,18 +313,38 @@ def decode_one_batch( 2, ) - # supervisions = batch["supervisions"] - # feature_len = supervisions["num_frames"] - # feature_len = feature_len.to(device, dtype=dtype) + chat_rounds = [cut.custom["round"] for cut in batch["supervisions"]["cut"]] - messages = [ - [ - {"role": "user", "content": f"{DEFAULT_SPEECH_TOKEN}请转写音频为文字"}, - {"role": "assistant", "content": ""}, + # messages = [ + # [ + # {"role": "user", "content": f"{DEFAULT_SPEECH_TOKEN}请转写音频为文字"}, + # {"role": "assistant", "content": ""}, + # ] + # ] * len(feature) + questions_with_history = [cut.custom["question"] for cut in batch["supervisions"]["cut"]] + history_contexts = [question.rsplit(':', 1)[0].strip() for question in questions_with_history] + last_questions = [question.split(': ')[-1].strip() for question in questions_with_history] + messages = [] + for i, total_round in enumerate(chat_rounds): + message = [] + if total_round > 1: + history_question_answer = history_contexts[i].split('USER:') + history_question_answer = [item for item in history_question_answer if item] + for j in 
range(total_round - 1): + # USER: 生成一个关于夏天的诗歌。 ASSISTANT: 夏日炎炎,万物生长,阳光明媚,享受着夏日的美好时光。 USER: 给我列举一些新闻头条。 ASSISTANT: 当今社会的新闻永远不会停。 + question_answer = history_question_answer[j].split('ASSISTANT:') + message += [ + {"role": "user", "content": question_answer[0].strip()}, + {"role": "assistant", "content": question_answer[1].strip()} + ] + message += [ + {"role": "user", "content": f"{DEFAULT_SPEECH_TOKEN}"}, + # {"role": "user", "content": f"{last_questions[i]}"}, + {"role": "assistant", "content": ""} ] - ] * len(feature) + messages.append(message) - input_ids, attention_mask = preprocess(messages, tokenizer, max_len=128) + input_ids, attention_mask = preprocess(messages, tokenizer) generated_ids = model.decode( feature, input_ids.to(device, dtype=torch.long), attention_mask.to(device) @@ -422,7 +437,7 @@ def decode_dataset( this_batch = [] assert len(hyps) == len(texts) for cut_id, hyp_words, ref_text in zip(cut_ids, hyps, texts): - ref_text = normalize_text_alimeeting(ref_text) + # ref_text = normalize_text_alimeeting(ref_text) ref_words = ref_text.split() print(f"ref: {ref_text}") print(f"hyp: {''.join(hyp_words)}") @@ -449,7 +464,7 @@ def save_results( test_set_wers = dict() for key, results in results_dict.items(): recog_path = ( - params.exp_dir / f"recogs-{test_set_name}-{key}-{params.suffix}.txt" + params.log_dir / f"recogs-{test_set_name}-{key}-{params.suffix}.txt" ) results = sorted(results) store_transcripts(filename=recog_path, texts=results) @@ -459,7 +474,7 @@ def save_results( # The following prints out WERs, per-word error statistics and aligned # ref/hyp pairs. errs_filename = ( - params.exp_dir / f"errs-{test_set_name}-{key}-{params.suffix}.txt" + params.log_dir / f"errs-{test_set_name}-{key}-{params.suffix}.txt" ) # we compute CER for aishell dataset. 
results_char = [] @@ -475,7 +490,7 @@ def save_results( logging.info("Wrote detailed error stats to {}".format(errs_filename)) test_set_wers = sorted(test_set_wers.items(), key=lambda x: x[1]) - errs_info = params.exp_dir / f"cer-summary-{test_set_name}-{params.suffix}.txt" + errs_info = params.log_dir / f"cer-summary-{test_set_name}-{params.suffix}.txt" with open(errs_info, "w") as f: print("settings\tCER", file=f) for key, val in test_set_wers: @@ -499,8 +514,10 @@ def main(): params = get_params() params.update(vars(args)) params.suffix = f"epoch-{params.epoch}-avg-{params.avg}" + params.log_dir = Path(params.exp_dir) / f"log-{params.method}" + params.log_dir.mkdir(parents=True, exist_ok=True) setup_logger( - f"{params.exp_dir}/log-{params.method}-beam{params.beam_size}/log-decode-{params.suffix}" + f"{params.exp_dir}/log-{params.method}/log-decode-{params.suffix}" ) logging.info("Decoding started") diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py b/egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py index 829ef4e2d..5126a5d34 100644 --- a/egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py +++ b/egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py @@ -241,27 +241,41 @@ class SPEECH_LLM(nn.Module): inputs_embeds = self.llm.get_input_embeddings()(input_ids) ( inputs_embeds, - attention_mask, _, - position_ids, + _, + _, ) = self._merge_input_ids_with_speech_features( speech_features, inputs_embeds, input_ids, attention_mask ) generated_ids = self.llm.generate( inputs_embeds=inputs_embeds, - max_new_tokens=kwargs.get("max_new_tokens", 200), + max_new_tokens=kwargs.get("max_new_tokens", 1024), num_beams=kwargs.get("num_beams", 1), - do_sample=kwargs.get("do_sample", False), + do_sample=kwargs.get("do_sample", True), min_length=kwargs.get("min_length", 1), - top_p=kwargs.get("top_p", 1.0), - repetition_penalty=kwargs.get("repetition_penalty", 1.0), - length_penalty=kwargs.get("length_penalty", 1.0), - temperature=kwargs.get("temperature", 1.0), + top_p=kwargs.get("top_p", 0.5), + top_k=kwargs.get("top_k", 20), + repetition_penalty=kwargs.get("repetition_penalty", 1.1), + temperature=kwargs.get("temperature", 0.7), bos_token_id=self.llm.config.bos_token_id, eos_token_id=self.llm.config.eos_token_id, pad_token_id=self.llm.config.pad_token_id, ) + # generated_ids = self.llm.generate( + # inputs_embeds=inputs_embeds, + # max_new_tokens=kwargs.get("max_new_tokens", 200), + # num_beams=kwargs.get("num_beams", 1), + # do_sample=kwargs.get("do_sample", False), + # min_length=kwargs.get("min_length", 1), + # top_p=kwargs.get("top_p", 1.0), + # repetition_penalty=kwargs.get("repetition_penalty", 1.0), + # temperature=kwargs.get("temperature", 1.0), + # length_penalty=kwargs.get("length_penalty", 1.0), + # bos_token_id=self.llm.config.bos_token_id, + # eos_token_id=self.llm.config.eos_token_id, + # pad_token_id=self.llm.config.pad_token_id, + # ) return generated_ids From 458d697accdceea6bc86bc3978000cb88c69044a Mon Sep 17 00:00:00 2001 From: root Date: Tue, 15 Apr 2025 13:41:33 +0000 Subject: [PATCH 08/57] fix batch_size>1 decoding bug --- egs/speech_llm/SPEECH2SPEECH/slam_omni/decode.py | 1 + egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/decode.py b/egs/speech_llm/SPEECH2SPEECH/slam_omni/decode.py index 3feef8f1c..66ccd9974 100755 --- a/egs/speech_llm/SPEECH2SPEECH/slam_omni/decode.py +++ b/egs/speech_llm/SPEECH2SPEECH/slam_omni/decode.py @@ -342,6 +342,7 @@ def decode_one_batch( # {"role": 
"user", "content": f"{last_questions[i]}"}, {"role": "assistant", "content": ""} ] + print(f"message: {message}, batch_size {len(chat_rounds)}") messages.append(message) input_ids, attention_mask = preprocess(messages, tokenizer) diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py b/egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py index 5126a5d34..55541f03e 100644 --- a/egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py +++ b/egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py @@ -241,7 +241,7 @@ class SPEECH_LLM(nn.Module): inputs_embeds = self.llm.get_input_embeddings()(input_ids) ( inputs_embeds, - _, + attention_mask, _, _, ) = self._merge_input_ids_with_speech_features( @@ -249,6 +249,7 @@ class SPEECH_LLM(nn.Module): ) generated_ids = self.llm.generate( inputs_embeds=inputs_embeds, + attention_mask=attention_mask, max_new_tokens=kwargs.get("max_new_tokens", 1024), num_beams=kwargs.get("num_beams", 1), do_sample=kwargs.get("do_sample", True), From bdb60f6ddc10cc04d50061d0b13f3c87bc73c9bb Mon Sep 17 00:00:00 2001 From: root Date: Mon, 21 Apr 2025 01:00:06 +0000 Subject: [PATCH 09/57] add codec lm --- egs/speech_llm/SPEECH2SPEECH/prepare.sh | 19 +++ .../SPEECH2SPEECH/slam_omni/model.py | 116 +++++++++++++++- .../SPEECH2SPEECH/slam_omni/train.py | 129 ++++++++++++++++-- 3 files changed, 251 insertions(+), 13 deletions(-) diff --git a/egs/speech_llm/SPEECH2SPEECH/prepare.sh b/egs/speech_llm/SPEECH2SPEECH/prepare.sh index 75bd9c576..ef0e87465 100644 --- a/egs/speech_llm/SPEECH2SPEECH/prepare.sh +++ b/egs/speech_llm/SPEECH2SPEECH/prepare.sh @@ -96,3 +96,22 @@ torchrun --nproc_per_node $ngpu ./slam_omni/train.py \ --use-lora True --unfreeze-llm True fi + +if [ $stage -le 4 ] && [ $stop_stage -ge 4 ]; then + log "stage 4: " + ngpu=2 +torchrun --nproc_per_node $ngpu ./slam_omni/train.py \ + --max-duration 40 \ + --enable-musan False \ + --exp-dir ./slam_omni/exp_speech2text \ + --speech-encoder-path-or-name models/whisper/v1.1/whisper-large-v2-multi-hans-zh-epoch-3-avg-10.pt \ + --llm-path-or-name models/Qwen2.5-0.5B-Instruct \ + --manifest-dir data/fbank \ + --deepspeed \ + --deepspeed_config ./slam_omni/ds_config_zero1.json \ + --use-flash-attn False \ + --use-lora True --unfreeze-llm False --enable-speech-output True + # --pretrained-model-path slam_omni/exp_speech2text/epoch-1-checkpoint-5000.pt/pytorch_model.bin \ + # --sampler-state-dict-path slam_omni/exp_speech2text/epoch-1-checkpoint-5000-sampler.pt \ + +fi \ No newline at end of file diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py b/egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py index 55541f03e..f7e436806 100644 --- a/egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py +++ b/egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py @@ -58,11 +58,21 @@ class SPEECH_LLM(nn.Module): encoder: nn.Module, llm: nn.Module, encoder_projector: nn.Module, + codec_lm: nn.Module = None, ): super().__init__() self.encoder = encoder self.llm = llm self.encoder_projector = encoder_projector + self.codec_lm = codec_lm + if self.codec_lm: + self.speech_token_projector = nn.Linear( + self.llm.config.hidden_size, self.codec_lm.config.hidden_size + ) + self.codec_lm_head = nn.Linear( + self.codec_lm.config.hidden_size, self.codec_lm.config.vocab_size + ) + self.loss_fct = torch.nn.CrossEntropyLoss() def _merge_input_ids_with_speech_features( self, speech_features, inputs_embeds, input_ids, attention_mask, labels=None @@ -225,8 +235,112 @@ class SPEECH_LLM(nn.Module): labels.detach()[:, 1:], ignore_label=IGNORE_TOKEN_ID, ) - return model_outputs, acc + 
return model_outputs.loss, acc + def forward_with_speech_output( + self, + fbank: torch.Tensor = None, + input_ids: torch.LongTensor = None, + attention_mask: torch.Tensor = None, + labels: torch.LongTensor = None, + speech_codec_ids: torch.LongTensor = None, + ): + encoder_outs = self.encoder(fbank) + + speech_features = self.encoder_projector(encoder_outs) + + inputs_embeds = self.llm.get_input_embeddings()(input_ids) + + ( + inputs_embeds, + attention_mask, + labels, + _, + ) = self._merge_input_ids_with_speech_features( + speech_features, inputs_embeds, input_ids, attention_mask, labels + ) + + # get the label start_index in inputs_embeds from labels + text_label_start_index_list = [] + for i in range(labels.shape[0]): + text_label_start_index = torch.where(labels[i] != IGNORE_TOKEN_ID)[0][0] + text_label_start_index_list.append(text_label_start_index) + + model_outputs = self.llm( + inputs_embeds=inputs_embeds, attention_mask=attention_mask, labels=labels, output_hidden_states=True + ) + text_loss = model_outputs.loss + + # prepare codec lm inputs + audio_codes_lens = torch.tensor( + [len(x) for x in speech_codec_ids], dtype=torch.int64, device=input_ids.device + ) + # print(audio_codes_lens, "audio_codes_lens") + max_len_speech_codec = max(audio_codes_lens) + delay_step = 2 + audio_codes = torch.full( + (inputs_embeds.shape[0], max_len_speech_codec + inputs_embeds.shape[1] + 1), + self.codec_lm.config.pad_token_id, + dtype=torch.int64, + device=input_ids.device + ) + audio_labels = audio_codes.clone() + + for i, speech_codec in enumerate(speech_codec_ids): + text_label_start_index = text_label_start_index_list[i] + speech_codec = torch.tensor( + speech_codec, dtype=torch.int64, device=input_ids.device + ) + # print(inputs_embeds[i, text_label_start_index], "2333 test") + audio_codes[i, :text_label_start_index + delay_step + 1] = self.codec_lm.config.bos_token_id # mask token_id + audio_codes[i, text_label_start_index + delay_step + 1 : text_label_start_index + delay_step + 1 + len(speech_codec)] = speech_codec + audio_labels[i, text_label_start_index + delay_step : text_label_start_index + delay_step + len(speech_codec)] = speech_codec + audio_labels[i, text_label_start_index + delay_step + len(speech_codec)] = self.codec_lm.config.eos_token_id + + audio_attention_mask = audio_codes.ne(self.codec_lm.config.pad_token_id) + audio_embeddings = self.codec_lm.get_input_embeddings()(audio_codes) + + # input_ids: seq_len T1, audio_codec seq_len T2 + text_last_hidden_outputs = model_outputs.hidden_states[-1] + text_input_embeds = inputs_embeds + text_last_hidden_outputs + text_input_embeds = self.speech_token_projector(text_input_embeds) + + audio_embeddings[:, : text_input_embeds.shape[1]] += text_input_embeds + + speech_outputs = self.codec_lm( + attention_mask=audio_attention_mask, + inputs_embeds=audio_embeddings, + return_dict=True, + output_hidden_states=True, + ) + last_hidden_state = speech_outputs.hidden_states[-1].clone() + + audio_logits = self.codec_lm_head(last_hidden_state) # shape, B, T, vocab_size + audio_logits = audio_logits.contiguous().view(-1, self.codec_lm.config.vocab_size) + audio_labels = audio_labels.contiguous().view(-1) + audio_labels = audio_labels.masked_fill( + audio_labels == self.codec_lm.config.pad_token_id, IGNORE_TOKEN_ID + ) + codec_loss = self.loss_fct(audio_logits, audio_labels) + audio_preds = torch.argmax(audio_logits, -1) + + + with torch.no_grad(): + preds = torch.argmax(model_outputs.logits, -1) + acc = compute_accuracy( + preds.detach()[:, :-1], + 
labels.detach()[:, 1:], + ignore_label=IGNORE_TOKEN_ID, + ) + audio_acc = compute_accuracy( + audio_preds.detach(), + audio_labels.detach(), + ignore_label=IGNORE_TOKEN_ID, + ) + + + return text_loss, acc, codec_loss, audio_acc + def decode( self, fbank: torch.Tensor = None, diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py b/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py index f0df303e4..9823492bf 100755 --- a/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py +++ b/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py @@ -70,7 +70,12 @@ from model import IGNORE_TOKEN_ID, SPEECH_LLM, EncoderProjector from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training from torch import Tensor from torch.utils.tensorboard import SummaryWriter -from transformers import AutoModelForCausalLM, AutoTokenizer +from transformers import ( + AutoModelForCausalLM, + AutoTokenizer, + Qwen2Config, + Qwen2ForCausalLM, +) from whisper_encoder_forward_monkey_patch import replace_whisper_encoder_forward from icefall import diagnostics @@ -135,6 +140,19 @@ def add_model_arguments(parser: argparse.ArgumentParser): help="Whether to unfreeze llm during training.", ) + parser.add_argument( + "--unfreeze-speech-projector", + type=str2bool, + default=False, + help="Whether to unfreeze speech adaptor during training.", + ) + + parser.add_argument( + "--enable-speech-output", + type=str2bool, + default=False, + help="Whether to enable speech codec output.", + ) def get_parser(): parser = argparse.ArgumentParser( @@ -307,7 +325,7 @@ def compute_loss( ) # padding texts to the same length, texts is a list of list, padding with tokenzier.pad_token_id # remove too long text - texts = [ text for text in texts if len(text) < 1024 ] + # texts = [ text for text in texts if len(text) < 1024 ] if len(texts) != len(messages): logging.warning( f"Remove too long text, {messages} " @@ -392,13 +410,22 @@ def compute_loss( input_ids = input_ids.type(torch.LongTensor) with torch.set_grad_enabled(is_training): - model_outputs, acc = model( - fbank=feature, - input_ids=input_ids.to(device), - attention_mask=attention_mask.to(device), - labels=target_ids.to(device), - ) - loss = model_outputs.loss + if not params.enable_speech_output: + loss, acc = model( + fbank=feature, + input_ids=input_ids.to(device), + attention_mask=attention_mask.to(device), + labels=target_ids.to(device), + ) + else: + text_loss, acc, codec_loss, codec_acc = model.forward_with_speech_output( + fbank=feature, + input_ids=input_ids.to(device), + attention_mask=attention_mask.to(device), + labels=target_ids.to(device), + speech_codec_ids=answer_cosyvoice_speech_token, + ) + loss = text_loss + codec_loss assert loss.requires_grad == is_training info = MetricsTracker() @@ -412,7 +439,12 @@ def compute_loss( info["acc"] = ( acc * info["frames"] ) # WAR: to avoid normalization by the number of frames - + if params.enable_speech_output: + info["codec_acc"] = ( + codec_acc * info["frames"] + ) + info["codec_loss"] = codec_loss.detach().cpu().item() + info["text_loss"] = text_loss.detach().cpu().item() return loss, info @@ -429,7 +461,7 @@ def compute_validation_loss( tot_loss = MetricsTracker() for batch_idx, batch in enumerate(valid_dl): - with torch.cuda.amp.autocast(enabled=params.use_fp16): + with torch.amp.autocast('cuda', enabled=params.use_fp16): loss, loss_info = compute_loss( params=params, tokenizer=tokenizer, @@ -544,7 +576,7 @@ def train_one_epoch( f"rm -rf {params.exp_dir}/epoch-{params.cur_epoch}-checkpoint-{batch_idx}" ) try: - with 
torch.cuda.amp.autocast(enabled=params.use_fp16): + with torch.amp.autocast('cuda', enabled=params.use_fp16): loss, loss_info = compute_loss( params=params, tokenizer=tokenizer, @@ -629,6 +661,7 @@ def run(rank, world_size, args): speech_encoder.eval() tokenizer = AutoTokenizer.from_pretrained(params.llm_path_or_name) + if params.use_flash_attn: attn_implementation = "flash_attention_2" # torch_dtype=torch.bfloat16 FIX ME @@ -672,6 +705,16 @@ def run(rank, world_size, args): special_tokens_dict = {"additional_special_tokens": [DEFAULT_SPEECH_TOKEN]} tokenizer.add_special_tokens(special_tokens_dict) + # original_tokenizer_vocab_size = len(tokenizer) + # cosyvoice2_token_size = 6561 + # new_tokens = [f"<|s_{i}|>" for i in range(cosyvoice2_token_size)] + [ + # "<|SPEECH_GENERATION_START|>" + # ] + # num_added_tokens = tokenizer.add_tokens(new_tokens) + # model.resize_token_embeddings(len(tokenizer)) + # model.vocab_size = len(tokenizer) + + llm.config.pad_token_id = tokenizer.pad_token_id llm.config.default_speech_token_id = tokenizer.convert_tokens_to_ids( DEFAULT_SPEECH_TOKEN @@ -680,11 +723,66 @@ def run(rank, world_size, args): encoder_projector = EncoderProjector( speech_encoder_dim, llm.config.hidden_size, params.encoder_projector_ds_rate ) + if not params.unfreeze_speech_projector: + for name, param in encoder_projector.named_parameters(): + param.requires_grad = False + encoder_projector.eval() + + + if params.enable_speech_output: + if params.use_flash_attn: + attn_implementation = "flash_attention_2" + else: + attn_implementation = "eager" + torch_dtype = torch.float16 + # codec_lm = AutoModelForCausalLM.from_pretrained( + # params.llm_path_or_name, + # attn_implementation=attn_implementation, + # torch_dtype=torch_dtype, + # ) + codec_vocab_size = 8192 + config = Qwen2Config( + vocab_size=codec_vocab_size, + hidden_size=1024, + num_hidden_layers=12, + num_attention_heads=16, + num_key_value_heads=16, + intermediate_size=2048, + max_position_embeddings=4096, + ) + codec_lm = Qwen2ForCausalLM(config=config) + # cosyvoice2_token_size = 6561 + codec_lm.resize_token_embeddings(codec_vocab_size) + codec_lm.vocab_size = codec_vocab_size + codec_lm.config.pad_token_id = codec_vocab_size - 1 + codec_lm.config.eos_token_id = codec_vocab_size - 2 + codec_lm.config.bos_token_id = codec_vocab_size - 3 + if params.use_lora: + lora_config = LoraConfig( + r=64, + lora_alpha=16, + target_modules=[ + "q_proj", + "k_proj", + "v_proj", + "o_proj", + "up_proj", + "gate_proj", + "down_proj", + ], + lora_dropout=0.05, + task_type="CAUSAL_LM", + ) + codec_lm = get_peft_model(codec_lm, lora_config) + codec_lm.print_trainable_parameters() + else: + codec_lm = None model = SPEECH_LLM( speech_encoder, llm, encoder_projector, + codec_lm, ) if params.pretrained_model_path: @@ -728,6 +826,13 @@ def run(rank, world_size, args): # f"Exclude cut with ID {c.id} from training. Duration: {c.duration}" # ) return False + # cut.custom["answer_cosyvoice_speech_token"] for cut in batch["supervisions"]["cut"] + codec_len = len(c.custom["answer_cosyvoice_speech_token"]) + if codec_len > 2048: + logging.warning( + f"Exclude cut with ID {c.id} from training. 
Duration: {c.duration}, lenth: {codec_len}" + ) + return False return True From b305cdacc01c806eba28f4fc18ccf58272259e6c Mon Sep 17 00:00:00 2001 From: root Date: Mon, 21 Apr 2025 06:23:10 +0000 Subject: [PATCH 10/57] fix padding side --- egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py | 7 +++++-- egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py | 1 + 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py b/egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py index f7e436806..3a539c6ab 100644 --- a/egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py +++ b/egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py @@ -59,6 +59,7 @@ class SPEECH_LLM(nn.Module): llm: nn.Module, encoder_projector: nn.Module, codec_lm: nn.Module = None, + use_flash_attention: bool = False, ): super().__init__() self.encoder = encoder @@ -73,6 +74,7 @@ class SPEECH_LLM(nn.Module): self.codec_lm.config.hidden_size, self.codec_lm.config.vocab_size ) self.loss_fct = torch.nn.CrossEntropyLoss() + self.codec_lm_padding_side = "left" if use_flash_attention else "right" def _merge_input_ids_with_speech_features( self, speech_features, inputs_embeds, input_ids, attention_mask, labels=None @@ -291,12 +293,13 @@ class SPEECH_LLM(nn.Module): speech_codec = torch.tensor( speech_codec, dtype=torch.int64, device=input_ids.device ) - # print(inputs_embeds[i, text_label_start_index], "2333 test") audio_codes[i, :text_label_start_index + delay_step + 1] = self.codec_lm.config.bos_token_id # mask token_id audio_codes[i, text_label_start_index + delay_step + 1 : text_label_start_index + delay_step + 1 + len(speech_codec)] = speech_codec audio_labels[i, text_label_start_index + delay_step : text_label_start_index + delay_step + len(speech_codec)] = speech_codec audio_labels[i, text_label_start_index + delay_step + len(speech_codec)] = self.codec_lm.config.eos_token_id - + + if self.codec_lm_padding_side == "left": + pass audio_attention_mask = audio_codes.ne(self.codec_lm.config.pad_token_id) audio_embeddings = self.codec_lm.get_input_embeddings()(audio_codes) diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py b/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py index 9823492bf..0d217cf36 100755 --- a/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py +++ b/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py @@ -783,6 +783,7 @@ def run(rank, world_size, args): llm, encoder_projector, codec_lm, + params.use_flash_attn, ) if params.pretrained_model_path: From 7db40052d6a682b8f0743ca3289da13f98137f92 Mon Sep 17 00:00:00 2001 From: Yuekai Zhang Date: Mon, 21 Apr 2025 14:54:28 +0800 Subject: [PATCH 11/57] add flash attn support --- .../SPEECH2SPEECH/slam_omni/model.py | 62 ++++++++++++++++--- .../SPEECH2SPEECH/slam_omni/train.py | 18 ++++-- 2 files changed, 69 insertions(+), 11 deletions(-) diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py b/egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py index 3a539c6ab..22f627ecc 100644 --- a/egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py +++ b/egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py @@ -287,19 +287,50 @@ class SPEECH_LLM(nn.Module): device=input_ids.device ) audio_labels = audio_codes.clone() + total_len = audio_codes.shape[1] for i, speech_codec in enumerate(speech_codec_ids): text_label_start_index = text_label_start_index_list[i] speech_codec = torch.tensor( speech_codec, dtype=torch.int64, device=input_ids.device ) - audio_codes[i, :text_label_start_index + delay_step + 1] = self.codec_lm.config.bos_token_id # mask token_id - audio_codes[i, 
text_label_start_index + delay_step + 1 : text_label_start_index + delay_step + 1 + len(speech_codec)] = speech_codec - audio_labels[i, text_label_start_index + delay_step : text_label_start_index + delay_step + len(speech_codec)] = speech_codec - audio_labels[i, text_label_start_index + delay_step + len(speech_codec)] = self.codec_lm.config.eos_token_id + speech_codec_len = len(speech_codec) + + # Calculate lengths of non-padding content + codes_len = text_label_start_index + delay_step + 1 + speech_codec_len + # Actual label content length (speech codec tokens + eos token) + labels_actual_content_len = speech_codec_len + 1 + + if self.codec_lm_padding_side == "right": + # Fill audio_codes (right padding) + codes_end_idx = text_label_start_index + delay_step + 1 + speech_codec_len + audio_codes[i, :text_label_start_index + delay_step + 1] = self.codec_lm.config.bos_token_id # mask token_id + audio_codes[i, text_label_start_index + delay_step + 1 : codes_end_idx] = speech_codec + + # Fill audio_labels (right padding) + labels_start_idx = text_label_start_index + delay_step + labels_speech_end_idx = labels_start_idx + speech_codec_len + audio_labels[i, labels_start_idx : labels_speech_end_idx] = speech_codec + audio_labels[i, labels_speech_end_idx] = self.codec_lm.config.eos_token_id + + elif self.codec_lm_padding_side == "left": + # Calculate start indices for left padding (shifting content to the right) + codes_start_idx = total_len - codes_len + labels_start_idx = total_len - labels_actual_content_len # Start index for the actual label content + + # Fill audio_codes (left padding) + codes_speech_start_idx = codes_start_idx + text_label_start_index + delay_step + 1 + audio_codes[i, codes_start_idx : codes_speech_start_idx] = self.codec_lm.config.bos_token_id # mask token_id + audio_codes[i, codes_speech_start_idx : total_len] = speech_codec + + # Fill audio_labels (left padding) + labels_speech_end_idx = labels_start_idx + speech_codec_len + # Note: The beginning part remains pad_token_id + audio_labels[i, labels_start_idx : labels_speech_end_idx] = speech_codec + audio_labels[i, labels_speech_end_idx] = self.codec_lm.config.eos_token_id + else: + raise ValueError(f"Unsupported padding side: {self.codec_lm_padding_side}") - if self.codec_lm_padding_side == "left": - pass audio_attention_mask = audio_codes.ne(self.codec_lm.config.pad_token_id) audio_embeddings = self.codec_lm.get_input_embeddings()(audio_codes) @@ -308,7 +339,24 @@ class SPEECH_LLM(nn.Module): text_input_embeds = inputs_embeds + text_last_hidden_outputs text_input_embeds = self.speech_token_projector(text_input_embeds) - audio_embeddings[:, : text_input_embeds.shape[1]] += text_input_embeds + T_merged = text_input_embeds.shape[1] + T_audio = audio_embeddings.shape[1] + + if self.codec_lm_padding_side == "right": + # Add to the beginning for right padding + audio_embeddings[:, :T_merged] += text_input_embeds + elif self.codec_lm_padding_side == "left": + # Need to add to the shifted position for left padding + # Calculate the length of the non-padded sequence for each item + seq_lens = audio_attention_mask.sum(dim=1) # Shape (B) + for i in range(audio_embeddings.shape[0]): + item_len = seq_lens[i].item() # Get the non-padded length for item i + start_idx_content = T_audio - item_len # Start index of the content for item i + end_idx_target = start_idx_content + T_merged # End index of the target slice within the content + # Add the text_input_embeds to the calculated slice + audio_embeddings[i, 
start_idx_content:end_idx_target] += text_input_embeds[i] + else: + raise ValueError(f"Unsupported padding side: {self.codec_lm_padding_side}") speech_outputs = self.codec_lm( attention_mask=audio_attention_mask, diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py b/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py index 0d217cf36..143c10c68 100755 --- a/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py +++ b/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py @@ -383,7 +383,7 @@ def compute_loss( last_questions = [question.split(': ')[-1].strip() for question in questions_with_history] history_contexts = [question.rsplit(':', 1)[0].strip() for question in questions_with_history] # USER: 生成一个关于夏天的诗歌。 ASSISTANT: 夏日炎炎,万物生长,阳光明媚,享受着夏日的美好时光。 USER: 给我列举一些新闻头条。 ASSISTANT: 当今社会的新闻永远不会停。: 告诉我如何烹饪鸡肉 - # : 对以下句子进行鉴赏:他心地善良。输出结果为“他是一个有善心的人。 + # : 对以下句子进行鉴赏:他心地善良。输出结果为"他是一个有善心的人。 messages = [] for i, total_round in enumerate(chat_rounds): @@ -730,11 +730,14 @@ def run(rank, world_size, args): if params.enable_speech_output: + # Determine attn_implementation and torch_dtype based on use_flash_attn if params.use_flash_attn: attn_implementation = "flash_attention_2" + torch_dtype = torch.float16 # Or torch.bfloat16 if needed/supported else: attn_implementation = "eager" - torch_dtype = torch.float16 + torch_dtype = torch.float16 + # codec_lm = AutoModelForCausalLM.from_pretrained( # params.llm_path_or_name, # attn_implementation=attn_implementation, @@ -750,7 +753,14 @@ def run(rank, world_size, args): intermediate_size=2048, max_position_embeddings=4096, ) - codec_lm = Qwen2ForCausalLM(config=config) + # codec_lm = Qwen2ForCausalLM(config=config) + # Pass attn_implementation and torch_dtype to the constructor + # Use AutoModelForCausalLM.from_config for more generality + codec_lm = AutoModelForCausalLM.from_config( + config=config, + attn_implementation=attn_implementation, + torch_dtype=torch_dtype + ) # cosyvoice2_token_size = 6561 codec_lm.resize_token_embeddings(codec_vocab_size) codec_lm.vocab_size = codec_vocab_size @@ -829,7 +839,7 @@ def run(rank, world_size, args): return False # cut.custom["answer_cosyvoice_speech_token"] for cut in batch["supervisions"]["cut"] codec_len = len(c.custom["answer_cosyvoice_speech_token"]) - if codec_len > 2048: + if codec_len > 2200: logging.warning( f"Exclude cut with ID {c.id} from training. 
Duration: {c.duration}, lenth: {codec_len}" ) From 09d81b44a788b5336672c352023cbcd7c5130639 Mon Sep 17 00:00:00 2001 From: Yuekai Zhang Date: Mon, 21 Apr 2025 17:10:25 +0800 Subject: [PATCH 12/57] change padding side name --- egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py | 4 ++-- egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py b/egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py index 22f627ecc..fb3921ba3 100644 --- a/egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py +++ b/egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py @@ -59,7 +59,7 @@ class SPEECH_LLM(nn.Module): llm: nn.Module, encoder_projector: nn.Module, codec_lm: nn.Module = None, - use_flash_attention: bool = False, + codec_lm_padding_side: str = "left", ): super().__init__() self.encoder = encoder @@ -74,7 +74,7 @@ class SPEECH_LLM(nn.Module): self.codec_lm.config.hidden_size, self.codec_lm.config.vocab_size ) self.loss_fct = torch.nn.CrossEntropyLoss() - self.codec_lm_padding_side = "left" if use_flash_attention else "right" + self.codec_lm_padding_side = codec_lm_padding_side def _merge_input_ids_with_speech_features( self, speech_features, inputs_embeds, input_ids, attention_mask, labels=None diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py b/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py index 143c10c68..ef7e7a464 100755 --- a/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py +++ b/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py @@ -793,7 +793,7 @@ def run(rank, world_size, args): llm, encoder_projector, codec_lm, - params.use_flash_attn, + codec_lm_padding_side= "left" if params.use_flash_attn else "right", ) if params.pretrained_model_path: From 23fdef2fd327e9624b3e93695a7f3d5fefd8fc9f Mon Sep 17 00:00:00 2001 From: Yuekai Zhang Date: Mon, 21 Apr 2025 17:57:57 +0800 Subject: [PATCH 13/57] add codec decode --- .../SPEECH2SPEECH/slam_omni/decode.py | 78 ++++++++- .../SPEECH2SPEECH/slam_omni/model.py | 163 ++++++++++++++++++ .../SPEECH2SPEECH/slam_omni/train.py | 27 ++- 3 files changed, 250 insertions(+), 18 deletions(-) diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/decode.py b/egs/speech_llm/SPEECH2SPEECH/slam_omni/decode.py index 66ccd9974..7de6f7b5d 100755 --- a/egs/speech_llm/SPEECH2SPEECH/slam_omni/decode.py +++ b/egs/speech_llm/SPEECH2SPEECH/slam_omni/decode.py @@ -346,10 +346,19 @@ def decode_one_batch( messages.append(message) input_ids, attention_mask = preprocess(messages, tokenizer) - - generated_ids = model.decode( - feature, input_ids.to(device, dtype=torch.long), attention_mask.to(device) - ) + if params.enable_speech_output: + generated_ids, generated_speech_output = model.decode_with_speech_output( + feature, input_ids.to(device, dtype=torch.long), attention_mask.to(device) + ) + cut_ids = [cut.id for cut in batch["supervisions"]["cut"]] + for cut_id, speech_output in zip(cut_ids, generated_speech_output): + # save_path = params.exp_dir / f"speech_output/{cut_id}.wav" + #torchaudio.save(save_path, speech_output.cpu(), 16000) + print(f"speech_output: {speech_output}, cut_id: {cut_id}") + else: + generated_ids = model.decode( + feature, input_ids.to(device, dtype=torch.long), attention_mask.to(device) + ) hyps = tokenizer.batch_decode(generated_ids, skip_special_tokens=True) return {"beam-search": hyps} @@ -586,10 +595,71 @@ def main(): speech_encoder_dim, llm.config.hidden_size, params.encoder_projector_ds_rate ) + if params.enable_speech_output: + # Determine attn_implementation and 
torch_dtype based on use_flash_attn + if params.use_flash_attn: + attn_implementation = "flash_attention_2" + torch_dtype = torch.float16 # Or torch.bfloat16 if needed/supported + else: + attn_implementation = "eager" + torch_dtype = torch.float16 + + # codec_lm = AutoModelForCausalLM.from_pretrained( + # params.llm_path_or_name, + # attn_implementation=attn_implementation, + # torch_dtype=torch_dtype, + # ) + codec_vocab_size = 8192 + config = Qwen2Config( + vocab_size=codec_vocab_size, + hidden_size=1024, + num_hidden_layers=12, + num_attention_heads=16, + num_key_value_heads=16, + intermediate_size=2048, + max_position_embeddings=4096, + ) + # codec_lm = Qwen2ForCausalLM(config=config) + # Pass attn_implementation and torch_dtype to the constructor + # Use AutoModelForCausalLM.from_config for more generality + codec_lm = AutoModelForCausalLM.from_config( + config=config, + attn_implementation=attn_implementation, + torch_dtype=torch_dtype + ) + # cosyvoice2_token_size = 6561 + codec_lm.resize_token_embeddings(codec_vocab_size) + codec_lm.vocab_size = codec_vocab_size + codec_lm.config.pad_token_id = codec_vocab_size - 1 + codec_lm.config.eos_token_id = codec_vocab_size - 2 + codec_lm.config.bos_token_id = codec_vocab_size - 3 + if params.use_lora: + lora_config = LoraConfig( + r=64, + lora_alpha=16, + target_modules=[ + "q_proj", + "k_proj", + "v_proj", + "o_proj", + "up_proj", + "gate_proj", + "down_proj", + ], + lora_dropout=0.05, + task_type="CAUSAL_LM", + ) + codec_lm = get_peft_model(codec_lm, lora_config) + codec_lm.print_trainable_parameters() + else: + codec_lm = None + model = SPEECH_LLM( speech_encoder, llm, encoder_projector, + codec_lm, + codec_lm_padding_side= "left" if params.use_flash_attn else "right", ) if params.avg > 1: diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py b/egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py index fb3921ba3..8ec707583 100644 --- a/egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py +++ b/egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py @@ -1,6 +1,7 @@ import torch from torch import nn from transformers.trainer_pt_utils import LabelSmoother +from typing import List, Tuple # Added for type hints IGNORE_TOKEN_ID = LabelSmoother.ignore_index @@ -444,6 +445,168 @@ class SPEECH_LLM(nn.Module): # ) return generated_ids + def decode_with_speech_output( + self, + fbank: torch.Tensor = None, + input_ids: torch.LongTensor = None, # Prompt input_ids + attention_mask: torch.Tensor = None, # Prompt attention_mask + max_text_new_tokens: int = 1024, + max_speech_new_tokens: int = 1024, # Max length for speech tokens + llm_kwargs: dict = None, # Kwargs for text LLM generate + codec_lm_kwargs: dict = None # Kwargs for codec LM (e.g., temperature for sampling) - NOT IMPLEMENTED YET + ) -> Tuple[torch.LongTensor, List[List[int]]]: + """ + Generates text and corresponding speech tokens using the revised logic. + + Args: + fbank: Input audio features. + input_ids: Input token IDs for the text prompt. + attention_mask: Attention mask for the text prompt. + max_text_new_tokens: Max new tokens for text generation. + max_speech_new_tokens: Max new tokens for speech generation. + llm_kwargs: Additional arguments for self.llm.generate. + codec_lm_kwargs: Additional arguments for self.codec_lm.generate. + + Returns: + Tuple[torch.LongTensor, List[List[int]]]: + - generated_text_ids: Tensor of generated text token IDs (including prompt). + - generated_speech_tokens: List of lists, where each inner list contains + the generated speech codec tokens for a batch item. 
+ """ + if not self.codec_lm or not self.speech_token_projector or not self.codec_lm_head: + raise ValueError("codec_lm and associated layers must be initialized to generate speech output.") + + device = next(self.parameters()).device # Use model's device + batch_size = fbank.shape[0] + + # --- 1. Prepare Prompt Embeddings --- + encoder_outs = self.encoder(fbank) + speech_features = self.encoder_projector(encoder_outs) + speech_features = speech_features.to(self.llm.dtype) # Ensure matching dtype + + prompt_embeds = self.llm.get_input_embeddings()(input_ids) + + # Merge speech features with prompt embeddings + ( + merged_prompt_inputs_embeds, + merged_prompt_attention_mask, + _, + _, + ) = self._merge_input_ids_with_speech_features( + speech_features, prompt_embeds, input_ids, attention_mask + ) + + # --- 2. Generate Text using LLM --- + # Use merged embeds/mask as input to generate + # Ensure kwargs passed are suitable for llm.generate + # Note: Using default generation params from `decode` if not provided in kwargs + final_llm_kwargs = { + "bos_token_id": self.llm.config.bos_token_id, + "eos_token_id": self.llm.config.eos_token_id, + "pad_token_id": self.llm.config.pad_token_id, + "num_beams": 1, + "do_sample": True, # Typically false for S2ST/S2TT tasks unless exploration needed + "top_p": 0.5, + "top_k": 20, + "repetition_penalty": 1.1, + "temperature": 0.7, + **(llm_kwargs or {}) # User-provided kwargs override defaults + } + + text_outputs = self.llm.generate( + inputs_embeds=merged_prompt_inputs_embeds, + attention_mask=merged_prompt_attention_mask, + max_new_tokens=max_text_new_tokens, + return_dict_in_generate=True, + **final_llm_kwargs + ) + for key in text_outputs: + print(key, text_outputs[key].shape) + # Assume text_outputs is the tensor of generated IDs [B, S_full] + generated_text_ids = text_outputs + exit(0) + + # --- 3. Get LLM Hidden States for the *Full* Generated Text Sequence --- + # Run a separate forward pass to reliably get hidden states for the complete sequence. + # This is simpler than parsing the complex output of generate with output_hidden_states=True. + full_text_embeds = self.llm.get_input_embeddings()(generated_text_ids) # [B, S_full, D_llm] + full_text_attention_mask = (generated_text_ids != self.llm.config.pad_token_id).long() # [B, S_full] + + # --- 4. Project Hidden States --- + projected_text_embeds = self.speech_token_projector(full_text_embeds) # Shape [B, S_full, D_codec] + + # --- 5. 
Generate Speech Tokens (Autoregressive Loop with Text Context) --- + self.codec_lm.to(device) + self.codec_lm_head.to(device) + + # Initial input for the codec LM is the BOS token + current_speech_input_ids = torch.full( + (batch_size, 1), self.codec_lm.config.bos_token_id, dtype=torch.long, device=device + ) + + past_key_values = None + generated_speech_tokens_list = [[] for _ in range(batch_size)] + unfinished_sequences = torch.ones(batch_size, dtype=torch.long, device=device) + + text_context_len = projected_text_embeds.shape[1] # S_full + + for t in range(max_speech_new_tokens): + # Get embedding for the *current* input token ID (initially BOS, then generated tokens) + current_speech_embeds = self.codec_lm.get_input_embeddings()(current_speech_input_ids) # [B, 1, D_codec] + + # Add the projected text embedding corresponding to the current timestep `t` + if t < text_context_len: + # Text context from the full generated text sequence + current_text_context_embed = projected_text_embeds[:, t:t+1, :] # [B, 1, D_codec] + inputs_embeds = current_speech_embeds + current_text_context_embed + else: + # No more text context to add + inputs_embeds = current_speech_embeds + + # Ensure inputs_embeds has the correct dtype for the codec_lm + inputs_embeds = inputs_embeds.to(next(self.codec_lm.parameters()).dtype) + + # Forward pass through codec LM for one step + # We provide inputs_embeds directly, bypassing prepare_inputs_for_generation + codec_outputs = self.codec_lm( + inputs_embeds=inputs_embeds, # Combined embedding for this step + past_key_values=past_key_values, + use_cache=True, + return_dict=True, + # No attention mask needed here when using past_key_values and single token input + ) + + # Get logits for the *last* token generated in this step + next_token_logits = self.codec_lm_head(codec_outputs.last_hidden_state[:, -1:, :]) # Use -1 index + + # --- Process Output & Update State --- + # Greedy decoding (can be replaced with sampling based on codec_lm_kwargs) + # TODO: Implement sampling/beam search for codec LM if needed + next_token_ids = torch.argmax(next_token_logits, dim=-1) # Greedy [B, 1] + + # Mask out finished sequences + next_token_ids = next_token_ids * unfinished_sequences.unsqueeze(-1) + \ + self.codec_lm.config.pad_token_id * (1 - unfinished_sequences.unsqueeze(-1)) + + # Store generated tokens for unfinished sequences + for i in range(batch_size): + if unfinished_sequences[i]: + token_id = next_token_ids[i].item() + if token_id == self.codec_lm.config.eos_token_id: + unfinished_sequences[i] = 0 # Mark as finished + elif token_id != self.codec_lm.config.pad_token_id: + generated_speech_tokens_list[i].append(token_id) + + # Prepare for next iteration + current_speech_input_ids = next_token_ids # Use the newly generated token ID as input for next step + past_key_values = codec_outputs.past_key_values # Update KV cache + + # Stop if all sequences are finished + if unfinished_sequences.max() == 0: + break + + # --- 6. Return Results --- + return generated_text_ids, generated_speech_tokens_list def compute_accuracy(pad_outputs, pad_targets, ignore_label): """Calculate accuracy. 
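The generation loop above drives the codec LM one token at a time: a single embedding per step, with past_key_values carrying the KV cache between steps. Below is a minimal, self-contained sketch of that pattern using a tiny randomly initialized Qwen2 model; every size and special-token id in it is an illustrative placeholder, not a value used by this recipe.

import torch
from transformers import Qwen2Config, Qwen2ForCausalLM

# Tiny stand-in for the codec LM (all sizes are placeholders).
config = Qwen2Config(
    vocab_size=256,
    hidden_size=64,
    num_hidden_layers=2,
    num_attention_heads=4,
    num_key_value_heads=4,
    intermediate_size=128,
)
lm = Qwen2ForCausalLM(config).eval()

bos_id, eos_id = 1, 2  # illustrative special-token ids
next_ids = torch.full((1, 1), bos_id, dtype=torch.long)
inputs_embeds = lm.get_input_embeddings()(next_ids)
past_key_values = None
generated = []

with torch.no_grad():
    for _ in range(16):
        out = lm(
            inputs_embeds=inputs_embeds,      # exactly one step of embeddings
            past_key_values=past_key_values,  # cache from all previous steps
            use_cache=True,
            return_dict=True,
        )
        next_ids = out.logits[:, -1, :].argmax(dim=-1, keepdim=True)  # greedy for brevity
        if next_ids.item() == eos_id:
            break
        generated.append(next_ids.item())
        past_key_values = out.past_key_values
        inputs_embeds = lm.get_input_embeddings()(next_ids)

print(generated)

In the recipe, the per-step speech embedding is additionally summed with the projected text hidden state for that step before the forward call, and later patches replace the greedy argmax with top-k/top-p sampling.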
diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py b/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py index ef7e7a464..7b2e0e1f4 100755 --- a/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py +++ b/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py @@ -133,20 +133,6 @@ def add_model_arguments(parser: argparse.ArgumentParser): help="Whether to use lora to fine-tune llm.", ) - parser.add_argument( - "--unfreeze-llm", - type=str2bool, - default=False, - help="Whether to unfreeze llm during training.", - ) - - parser.add_argument( - "--unfreeze-speech-projector", - type=str2bool, - default=False, - help="Whether to unfreeze speech adaptor during training.", - ) - parser.add_argument( "--enable-speech-output", type=str2bool, @@ -224,6 +210,19 @@ def get_parser(): help="Whether to use half precision training.", ) + parser.add_argument( + "--unfreeze-llm", + type=str2bool, + default=False, + help="Whether to unfreeze llm during training.", + ) + + parser.add_argument( + "--unfreeze-speech-projector", + type=str2bool, + default=False, + help="Whether to unfreeze speech adaptor during training.", + ) parser = deepspeed.add_config_arguments(parser) add_model_arguments(parser) From 478d56efd8088c1c24cc82f275e2fbd18bf158ee Mon Sep 17 00:00:00 2001 From: root Date: Wed, 23 Apr 2025 07:33:27 +0000 Subject: [PATCH 14/57] fix bugs when padding right --- egs/speech_llm/SPEECH2SPEECH/prepare.sh | 52 ++--- .../SPEECH2SPEECH/slam_omni/decode.py | 58 ++--- .../SPEECH2SPEECH/slam_omni/model.py | 219 ++++++++++++------ .../SPEECH2SPEECH/slam_omni/train.py | 6 +- 4 files changed, 187 insertions(+), 148 deletions(-) diff --git a/egs/speech_llm/SPEECH2SPEECH/prepare.sh b/egs/speech_llm/SPEECH2SPEECH/prepare.sh index ef0e87465..7e145865e 100644 --- a/egs/speech_llm/SPEECH2SPEECH/prepare.sh +++ b/egs/speech_llm/SPEECH2SPEECH/prepare.sh @@ -35,8 +35,8 @@ if [ $stage -le 1 ] && [ $stop_stage -ge 1 ]; then fi -if [ $stage -le 10 ] && [ $stop_stage -ge 10 ]; then - log "Stage 3: Combine features" +if [ $stage -le 2 ] && [ $stop_stage -ge 2 ]; then + log "Stage 2: Combine features" manifest_dir=data/fbank if [ ! 
-f $manifest_dir/cuts_belle_00001-01600.jsonl.gz ]; then pieces=$(find $manifest_dir -name "cuts_belle.*.jsonl.gz" | sort) @@ -48,39 +48,27 @@ if [ $stage -le 10 ] && [ $stop_stage -ge 10 ]; then fi fi -if [ $stage -le 2 ] && [ $stop_stage -ge 2 ]; then - log "stage 2: " + +if [ $stage -le 3 ] && [ $stop_stage -ge 3 ]; then + log "stage 3: " python3 ./slam_omni/decode.py \ - --max-duration 80 \ - --exp-dir slam_omni/exp_speech2text \ + --max-duration 1 \ + --exp-dir slam_omni/exp_speech2speech_test_flash_attn \ --speech-encoder-path-or-name models/whisper/v1.1/whisper-large-v2-multi-hans-zh-epoch-3-avg-10.pt \ --llm-path-or-name models/Qwen2.5-0.5B-Instruct \ - --epoch 999 --avg 1 \ + --epoch 997 --avg 1 \ --manifest-dir data/fbank \ --use-flash-attn True \ - --method pure_text_sampling \ + --method small_test_speech2speech \ + --enable-speech-output True \ --use-lora True # --on-the-fly-feats True fi -if [ $stage -le 20 ] && [ $stop_stage -ge 20 ]; then - log "stage 2: " - python3 ./slam_omni/decode.py \ - --max-duration 80 \ - --exp-dir slam_omni/exp_speech2text \ - --speech-encoder-path-or-name models/whisper/v1.1/whisper-large-v2-multi-hans-zh-epoch-3-avg-10.pt \ - --llm-path-or-name models/Qwen2.5-0.5B-Instruct \ - --epoch 999 --avg 1 \ - --manifest-dir data/fbank \ - --use-flash-attn True \ - --method pure_text_sampling_original_0.5B \ - --use-lora False # --on-the-fly-feats True -fi - -ngpu=8 -if [ $stage -le 3 ] && [ $stop_stage -ge 3 ]; then - log "stage 3: " +if [ $stage -le 4 ] && [ $stop_stage -ge 4 ]; then + log "stage 4: " + ngpu=8 torchrun --nproc_per_node $ngpu ./slam_omni/train.py \ --max-duration 80 \ --enable-musan False \ @@ -97,21 +85,23 @@ torchrun --nproc_per_node $ngpu ./slam_omni/train.py \ fi -if [ $stage -le 4 ] && [ $stop_stage -ge 4 ]; then - log "stage 4: " +if [ $stage -le 5 ] && [ $stop_stage -ge 5 ]; then + log "stage 5: " ngpu=2 + exp_dir=./slam_omni/exp_speech2speech_test_flash_attn torchrun --nproc_per_node $ngpu ./slam_omni/train.py \ --max-duration 40 \ --enable-musan False \ - --exp-dir ./slam_omni/exp_speech2text \ + --exp-dir $exp_dir \ --speech-encoder-path-or-name models/whisper/v1.1/whisper-large-v2-multi-hans-zh-epoch-3-avg-10.pt \ --llm-path-or-name models/Qwen2.5-0.5B-Instruct \ --manifest-dir data/fbank \ --deepspeed \ --deepspeed_config ./slam_omni/ds_config_zero1.json \ - --use-flash-attn False \ - --use-lora True --unfreeze-llm False --enable-speech-output True + --use-flash-attn True \ + --pretrained-model-path $exp_dir/epoch-1-checkpoint-35000.pt/pytorch_model.bin \ + --use-lora True --unfreeze-llm True --unfreeze-speech-projector True --enable-speech-output True # --pretrained-model-path slam_omni/exp_speech2text/epoch-1-checkpoint-5000.pt/pytorch_model.bin \ - # --sampler-state-dict-path slam_omni/exp_speech2text/epoch-1-checkpoint-5000-sampler.pt \ + # --sampler-state-dict-path $exp_dir/epoch-1-checkpoint-35000-sampler.pt \ fi \ No newline at end of file diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/decode.py b/egs/speech_llm/SPEECH2SPEECH/slam_omni/decode.py index 7de6f7b5d..2727b330b 100755 --- a/egs/speech_llm/SPEECH2SPEECH/slam_omni/decode.py +++ b/egs/speech_llm/SPEECH2SPEECH/slam_omni/decode.py @@ -62,9 +62,9 @@ from model import SPEECH_LLM, EncoderProjector from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training from train import DEFAULT_SPEECH_TOKEN -from transformers import AutoModelForCausalLM, AutoTokenizer +from transformers import AutoModelForCausalLM, AutoTokenizer, Qwen2Config from 
whisper_encoder_forward_monkey_patch import replace_whisper_encoder_forward - +from train import add_model_arguments from icefall.env import get_env_info from icefall.utils import ( AttributeDict, @@ -126,43 +126,6 @@ def average_checkpoints( return avg -def add_model_arguments(parser: argparse.ArgumentParser): - parser.add_argument( - "--llm-path-or-name", - type=str, - default="", - help="Path or name of the large language model.", - ) - - parser.add_argument( - "--speech-encoder-path-or-name", - type=str, - default="whisper-large-v2", - help="Path or name of the speech encoder.", - ) - - parser.add_argument( - "--encoder-projector-ds-rate", - type=int, - default=8, - help="Downsample rate for the encoder projector.", - ) - - parser.add_argument( - "--use-flash-attn", - type=str2bool, - default=True, - help="Whether to use flash attention.", - ) - - parser.add_argument( - "--use-lora", - type=str2bool, - default=True, - help="Whether to use lora fine-tuned llm checkpoint.", - ) - - def get_parser(): parser = argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter @@ -351,16 +314,21 @@ def decode_one_batch( feature, input_ids.to(device, dtype=torch.long), attention_mask.to(device) ) cut_ids = [cut.id for cut in batch["supervisions"]["cut"]] - for cut_id, speech_output in zip(cut_ids, generated_speech_output): - # save_path = params.exp_dir / f"speech_output/{cut_id}.wav" - #torchaudio.save(save_path, speech_output.cpu(), 16000) - print(f"speech_output: {speech_output}, cut_id: {cut_id}") + with open("test.txt", 'w') as f: + for cut_id in cut_ids: + # save_path = params.exp_dir / f"speech_output/{cut_id}.wav" + #torchaudio.save(save_path, speech_output.cpu(), 16000) + print(f"speech_output: {generated_speech_output}, cut_id: {cut_id}") + save_str = " ".join([str(i) for i in generated_speech_output]) + f.write(f"{cut_id}|{save_str}\n") + else: generated_ids = model.decode( feature, input_ids.to(device, dtype=torch.long), attention_mask.to(device) ) - hyps = tokenizer.batch_decode(generated_ids, skip_special_tokens=True) - + hyps = tokenizer.batch_decode(generated_ids, skip_special_tokens=False) + print(f"hyps: {hyps}") + exit(0) return {"beam-search": hyps} diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py b/egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py index 8ec707583..3f93db154 100644 --- a/egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py +++ b/egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py @@ -2,7 +2,7 @@ import torch from torch import nn from transformers.trainer_pt_utils import LabelSmoother from typing import List, Tuple # Added for type hints - +from torchmetrics.classification import MulticlassAccuracy IGNORE_TOKEN_ID = LabelSmoother.ignore_index @@ -74,9 +74,21 @@ class SPEECH_LLM(nn.Module): self.codec_lm_head = nn.Linear( self.codec_lm.config.hidden_size, self.codec_lm.config.vocab_size ) + # to torch.float16 + self.speech_token_projector = self.speech_token_projector.to( + dtype=torch.float16 + ) + self.codec_lm_head = self.codec_lm_head.to(dtype=torch.float16) self.loss_fct = torch.nn.CrossEntropyLoss() self.codec_lm_padding_side = codec_lm_padding_side + self.audio_accuracy_metric = MulticlassAccuracy( + self.codec_lm.vocab_size, + top_k=10, + average="micro", + multidim_average="global", + ignore_index=IGNORE_TOKEN_ID, + ) def _merge_input_ids_with_speech_features( self, speech_features, inputs_embeds, input_ids, attention_mask, labels=None ): @@ -332,7 +344,7 @@ class SPEECH_LLM(nn.Module): else: raise ValueError(f"Unsupported padding 
side: {self.codec_lm_padding_side}") - audio_attention_mask = audio_codes.ne(self.codec_lm.config.pad_token_id) + audio_attention_mask = audio_codes.ne(self.codec_lm.config.pad_token_id) # TODO: do we need to change bos tokens to pad token or mask token? audio_embeddings = self.codec_lm.get_input_embeddings()(audio_codes) # input_ids: seq_len T1, audio_codec seq_len T2 @@ -355,7 +367,12 @@ class SPEECH_LLM(nn.Module): start_idx_content = T_audio - item_len # Start index of the content for item i end_idx_target = start_idx_content + T_merged # End index of the target slice within the content # Add the text_input_embeds to the calculated slice - audio_embeddings[i, start_idx_content:end_idx_target] += text_input_embeds[i] + if end_idx_target > T_audio: + # If the text input is longer than the audio input, we need to pad the audio input + cut_off_len = T_audio - start_idx_content + audio_embeddings[i, start_idx_content:end_idx_target] = text_input_embeds[i, :cut_off_len] + else: + audio_embeddings[i, start_idx_content:end_idx_target] += text_input_embeds[i] else: raise ValueError(f"Unsupported padding side: {self.codec_lm_padding_side}") @@ -389,9 +406,12 @@ class SPEECH_LLM(nn.Module): audio_labels.detach(), ignore_label=IGNORE_TOKEN_ID, ) + audio_topk_acc = self.audio_accuracy_metric( + audio_logits.detach(), + audio_labels.detach()).item() - return text_loss, acc, codec_loss, audio_acc + return text_loss, acc, codec_loss, audio_acc, audio_topk_acc def decode( self, @@ -473,6 +493,7 @@ class SPEECH_LLM(nn.Module): - generated_speech_tokens: List of lists, where each inner list contains the generated speech codec tokens for a batch item. """ + assert fbank.shape[0] == 1, "Batch size must be 1 for speech generation." if not self.codec_lm or not self.speech_token_projector or not self.codec_lm_head: raise ValueError("codec_lm and associated layers must be initialized to generate speech output.") @@ -518,93 +539,88 @@ class SPEECH_LLM(nn.Module): attention_mask=merged_prompt_attention_mask, max_new_tokens=max_text_new_tokens, return_dict_in_generate=True, + output_hidden_states=True, **final_llm_kwargs ) - for key in text_outputs: - print(key, text_outputs[key].shape) - # Assume text_outputs is the tensor of generated IDs [B, S_full] - generated_text_ids = text_outputs - exit(0) - # --- 3. Get LLM Hidden States for the *Full* Generated Text Sequence --- - # Run a separate forward pass to reliably get hidden states for the complete sequence. - # This is simpler than parsing the complex output of generate with output_hidden_states=True. - full_text_embeds = self.llm.get_input_embeddings()(generated_text_ids) # [B, S_full, D_llm] - full_text_attention_mask = (generated_text_ids != self.llm.config.pad_token_id).long() # [B, S_full] + generated_text_ids = text_outputs.sequences # [B, S_full] + thinker_token_embeds = [ + token_hidden_states[0].to(self.llm.device) for token_hidden_states in text_outputs.hidden_states + ] + thinker_hidden_states = [ + token_hidden_states[-1].to(self.llm.device) for token_hidden_states in text_outputs.hidden_states + ] + thinker_reply_part = torch.cat(thinker_hidden_states[1:], dim=1) + torch.cat(thinker_token_embeds[1:], dim=1) + thinker_prompt_part = thinker_hidden_states[0] + thinker_token_embeds[0] - # --- 4. Project Hidden States --- - projected_text_embeds = self.speech_token_projector(full_text_embeds) # Shape [B, S_full, D_codec] - - # --- 5. 
Generate Speech Tokens (Autoregressive Loop with Text Context) --- - self.codec_lm.to(device) - self.codec_lm_head.to(device) - - # Initial input for the codec LM is the BOS token - current_speech_input_ids = torch.full( - (batch_size, 1), self.codec_lm.config.bos_token_id, dtype=torch.long, device=device + thinker_prompt_part = self.speech_token_projector(thinker_prompt_part) # [B, S_full, D_codec] + thinker_reply_part = self.speech_token_projector(thinker_reply_part) # [B, S_full, D_codec] + + + delay_step = 2 + thinker_prompt_part_seq_len = thinker_prompt_part.shape[1] + talker_input_ids = torch.full( + (batch_size, thinker_prompt_part_seq_len + delay_step), self.codec_lm.config.bos_token_id, dtype=torch.long, device=self.llm.device ) + talker_inputs_embeds = self.codec_lm.get_input_embeddings()(talker_input_ids) # [B, S_full, D_codec] + thinker_input_embeds = torch.cat( + [ + thinker_prompt_part, + thinker_reply_part[:, :delay_step, :], + ], + dim=1, + ) + talker_inputs_embeds += thinker_input_embeds + thinker_reply_part = thinker_reply_part[:, delay_step:, :] # [B, S_full, D_codec] + past_key_values = None - generated_speech_tokens_list = [[] for _ in range(batch_size)] - unfinished_sequences = torch.ones(batch_size, dtype=torch.long, device=device) - - text_context_len = projected_text_embeds.shape[1] # S_full - + # generated_speech_tokens_list = [[] for _ in range(batch_size)] + # unfinished_sequences = torch.ones(batch_size, dtype=torch.long, device=device) + generated_speech_tokens_list = [] + next_token_ids = None + # text_context_len = projected_text_embeds.shape[1] # S_full for t in range(max_speech_new_tokens): # Get embedding for the *current* input token ID (initially BOS, then generated tokens) - current_speech_embeds = self.codec_lm.get_input_embeddings()(current_speech_input_ids) # [B, 1, D_codec] - - # Add the projected text embedding corresponding to the current timestep `t` - if t < text_context_len: - # Text context from the full generated text sequence - current_text_context_embed = projected_text_embeds[:, t:t+1, :] # [B, 1, D_codec] - inputs_embeds = current_speech_embeds + current_text_context_embed - else: - # No more text context to add - inputs_embeds = current_speech_embeds + # current_speech_embeds = self.codec_lm.get_input_embeddings()(current_speech_input_ids) # [B, 1, D_codec] + if next_token_ids is not None: + talker_inputs_embeds = self.codec_lm.get_input_embeddings()(next_token_ids) # [B, 1, D_codec] + if thinker_reply_part.shape[1] > 0: + talker_inputs_embeds += thinker_reply_part[:, :1, :] + thinker_reply_part = thinker_reply_part[:, 1:, :] # Remove the first token for next step + # # Add the projected text embedding corresponding to the current timestep `t` + # if t < text_context_len: + # # Text context from the full generated text sequence + # current_text_context_embed = projected_text_embeds[:, t:t+1, :] # [B, 1, D_codec] + # inputs_embeds = current_speech_embeds + current_text_context_embed + # else: + # # No more text context to add + # inputs_embeds = current_speech_embeds - # Ensure inputs_embeds has the correct dtype for the codec_lm - inputs_embeds = inputs_embeds.to(next(self.codec_lm.parameters()).dtype) - # Forward pass through codec LM for one step # We provide inputs_embeds directly, bypassing prepare_inputs_for_generation codec_outputs = self.codec_lm( - inputs_embeds=inputs_embeds, # Combined embedding for this step + inputs_embeds=talker_inputs_embeds, # Combined embedding for this step past_key_values=past_key_values, 
use_cache=True, return_dict=True, + output_hidden_states=True, # No attention mask needed here when using past_key_values and single token input ) - + last_token_hidden_state = codec_outputs.hidden_states[-1][:, -1, :] # [B, D_codec] # Get logits for the *last* token generated in this step - next_token_logits = self.codec_lm_head(codec_outputs.last_hidden_state[:, -1:, :]) # Use -1 index - - # --- Process Output & Update State --- - # Greedy decoding (can be replaced with sampling based on codec_lm_kwargs) - # TODO: Implement sampling/beam search for codec LM if needed - next_token_ids = torch.argmax(next_token_logits, dim=-1) # Greedy [B, 1] - - # Mask out finished sequences - next_token_ids = next_token_ids * unfinished_sequences.unsqueeze(-1) + \ - self.codec_lm.config.pad_token_id * (1 - unfinished_sequences.unsqueeze(-1)) - - # Store generated tokens for unfinished sequences - for i in range(batch_size): - if unfinished_sequences[i]: - token_id = next_token_ids[i].item() - if token_id == self.codec_lm.config.eos_token_id: - unfinished_sequences[i] = 0 # Mark as finished - elif token_id != self.codec_lm.config.pad_token_id: - generated_speech_tokens_list[i].append(token_id) - - # Prepare for next iteration - current_speech_input_ids = next_token_ids # Use the newly generated token ID as input for next step - past_key_values = codec_outputs.past_key_values # Update KV cache - - # Stop if all sequences are finished - if unfinished_sequences.max() == 0: + next_token_logits = self.codec_lm_head(last_token_hidden_state) # Use -1 index + # suppress tokens between 4096:len(vocab)-3 + next_token_logits[:, 4096:-3] = -float("Inf") + next_token_ids = topk_sampling( + next_token_logits, + ) + print(next_token_ids, "next_token_ids", t, next_token_ids.shape) + if next_token_ids[0, 0] == self.codec_lm.config.eos_token_id: break - + # current_speech_input_ids = next_token_ids # Use the newly generated token ID as input for next step + past_key_values = codec_outputs.past_key_values # Update KV cache + generated_speech_tokens_list.append(next_token_ids.squeeze(1).cpu().tolist()[0]) # --- 6. Return Results --- return generated_text_ids, generated_speech_tokens_list @@ -626,3 +642,64 @@ def compute_accuracy(pad_outputs, pad_targets, ignore_label): ) denominator = torch.sum(mask) return numerator.float() / denominator.float() + + +def topk_sampling( + logits, + top_k=50, + top_p=0.95, + temperature=0.8, +): + if temperature != 1.0: + logits = logits / temperature + # Top-p/top-k filtering + logits_filtered = top_k_top_p_filtering( + logits.clone(), top_k=top_k, top_p=top_p, min_tokens_to_keep=2 + ) + # Sample + probs = torch.nn.functional.softmax(logits_filtered, dim=-1) + tokens = torch.multinomial(probs, num_samples=1) + + return tokens + + +# https://github.com/microsoft/unilm/blob/master/xtune/src/transformers/modeling_utils.py +def top_k_top_p_filtering( + logits, top_k=20, top_p=0.5, filter_value=-float("Inf"), min_tokens_to_keep=1 +): + """Filter a distribution of logits using top-k and/or nucleus (top-p) filtering + Args: + logits: logits distribution shape (batch size, vocabulary size) + if top_k > 0: keep only top k tokens with highest probability (top-k filtering). + if top_p < 1.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering). + Nucleus filtering is described in Holtzman et al. 
(http://arxiv.org/abs/1904.09751) + Make sure we keep at least min_tokens_to_keep per batch example in the output + From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317 + """ + if top_k > 0: + top_k = min(max(top_k, min_tokens_to_keep), logits.size(-1)) # Safety check + # Remove all tokens with a probability less than the last token of the top-k + indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None] + logits[indices_to_remove] = filter_value + + if top_p < 1.0: + sorted_logits, sorted_indices = torch.sort(logits, descending=True) + cumulative_probs = torch.cumsum( + torch.nn.functional.softmax(sorted_logits, dim=-1), dim=-1 + ) + + # Remove tokens with cumulative probability above the threshold (token with 0 are kept) + sorted_indices_to_remove = cumulative_probs > top_p + if min_tokens_to_keep > 1: + # Keep at least min_tokens_to_keep (set to min_tokens_to_keep-1 because we add the first one below) + sorted_indices_to_remove[..., :min_tokens_to_keep] = 0 + # Shift the indices to the right to keep also the first token above the threshold + sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone() + sorted_indices_to_remove[..., 0] = 0 + + # scatter sorted tensors to original indexing + indices_to_remove = sorted_indices_to_remove.scatter( + 1, sorted_indices, sorted_indices_to_remove + ) + logits[indices_to_remove] = filter_value + return logits \ No newline at end of file diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py b/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py index 7b2e0e1f4..c9ecf9400 100755 --- a/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py +++ b/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py @@ -417,7 +417,7 @@ def compute_loss( labels=target_ids.to(device), ) else: - text_loss, acc, codec_loss, codec_acc = model.forward_with_speech_output( + text_loss, acc, codec_loss, codec_acc, codec_topk_acc = model.forward_with_speech_output( fbank=feature, input_ids=input_ids.to(device), attention_mask=attention_mask.to(device), @@ -442,6 +442,9 @@ def compute_loss( info["codec_acc"] = ( codec_acc * info["frames"] ) + info["codec_topk_acc"] = ( + codec_topk_acc * info["frames"] + ) info["codec_loss"] = codec_loss.detach().cpu().item() info["text_loss"] = text_loss.detach().cpu().item() return loss, info @@ -743,6 +746,7 @@ def run(rank, world_size, args): # torch_dtype=torch_dtype, # ) codec_vocab_size = 8192 + # TODO: modify above vocab size or supress_tokens when decoding config = Qwen2Config( vocab_size=codec_vocab_size, hidden_size=1024, From 2e9be4670310fee9aacbaad6b4f06d1e58ddc1d3 Mon Sep 17 00:00:00 2001 From: root Date: Thu, 24 Apr 2025 08:24:11 +0000 Subject: [PATCH 15/57] debug --- egs/speech_llm/SPEECH2SPEECH/prepare.sh | 2 +- .../SPEECH2SPEECH/slam_omni/decode.py | 12 +++++++----- .../SPEECH2SPEECH/slam_omni/model.py | 19 +++++++++++-------- .../SPEECH2SPEECH/slam_omni/train.py | 2 +- 4 files changed, 20 insertions(+), 15 deletions(-) diff --git a/egs/speech_llm/SPEECH2SPEECH/prepare.sh b/egs/speech_llm/SPEECH2SPEECH/prepare.sh index 7e145865e..a1c31a252 100644 --- a/egs/speech_llm/SPEECH2SPEECH/prepare.sh +++ b/egs/speech_llm/SPEECH2SPEECH/prepare.sh @@ -59,7 +59,7 @@ if [ $stage -le 3 ] && [ $stop_stage -ge 3 ]; then --epoch 997 --avg 1 \ --manifest-dir data/fbank \ --use-flash-attn True \ - --method small_test_speech2speech \ + --method small_test_speech2speech_rerun \ --enable-speech-output True \ --use-lora True # --on-the-fly-feats True diff --git 
a/egs/speech_llm/SPEECH2SPEECH/slam_omni/decode.py b/egs/speech_llm/SPEECH2SPEECH/slam_omni/decode.py index 2727b330b..f23cd5f5d 100755 --- a/egs/speech_llm/SPEECH2SPEECH/slam_omni/decode.py +++ b/egs/speech_llm/SPEECH2SPEECH/slam_omni/decode.py @@ -314,10 +314,13 @@ def decode_one_batch( feature, input_ids.to(device, dtype=torch.long), attention_mask.to(device) ) cut_ids = [cut.id for cut in batch["supervisions"]["cut"]] - with open("test.txt", 'w') as f: - for cut_id in cut_ids: - # save_path = params.exp_dir / f"speech_output/{cut_id}.wav" - #torchaudio.save(save_path, speech_output.cpu(), 16000) + for cut_id in cut_ids: + speech_token_file_name = ( + params.log_dir / f"{cut_id}.txt" + ) + with open(speech_token_file_name, 'w') as f: + # save_path = params.exp_dir / f"speech_output/{cut_id}.wav" + #torchaudio.save(save_path, speech_output.cpu(), 16000) print(f"speech_output: {generated_speech_output}, cut_id: {cut_id}") save_str = " ".join([str(i) for i in generated_speech_output]) f.write(f"{cut_id}|{save_str}\n") @@ -328,7 +331,6 @@ def decode_one_batch( ) hyps = tokenizer.batch_decode(generated_ids, skip_special_tokens=False) print(f"hyps: {hyps}") - exit(0) return {"beam-search": hyps} diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py b/egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py index 3f93db154..1c110470e 100644 --- a/egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py +++ b/egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py @@ -280,6 +280,8 @@ class SPEECH_LLM(nn.Module): for i in range(labels.shape[0]): text_label_start_index = torch.where(labels[i] != IGNORE_TOKEN_ID)[0][0] text_label_start_index_list.append(text_label_start_index) + # TODO1: check text_label_start_index position + print(i, input_ids[i], input_ids[i].shape, labels[i], labels[i].shape, text_label_start_index, labels[i][text_label_start_index]) model_outputs = self.llm( inputs_embeds=inputs_embeds, attention_mask=attention_mask, labels=labels, output_hidden_states=True @@ -316,7 +318,7 @@ class SPEECH_LLM(nn.Module): if self.codec_lm_padding_side == "right": # Fill audio_codes (right padding) - codes_end_idx = text_label_start_index + delay_step + 1 + speech_codec_len + codes_end_idx = codes_len audio_codes[i, :text_label_start_index + delay_step + 1] = self.codec_lm.config.bos_token_id # mask token_id audio_codes[i, text_label_start_index + delay_step + 1 : codes_end_idx] = speech_codec @@ -349,7 +351,7 @@ class SPEECH_LLM(nn.Module): # input_ids: seq_len T1, audio_codec seq_len T2 text_last_hidden_outputs = model_outputs.hidden_states[-1] - text_input_embeds = inputs_embeds + text_last_hidden_outputs + text_input_embeds = inputs_embeds + text_last_hidden_outputs # TODO: 计算不对,output tokens' embedding? 
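# Illustrative sketch (not part of this patch; fuse_thinker_states and projector are
# hypothetical names). The TODO above (Chinese: "the computation is off -- should this use
# the output tokens' embeddings?") flags an alignment problem: hidden_states[-1][:, t, :]
# is the state that predicts token t+1, while inputs_embeds[:, t, :] embeds token t, so
# summing them pairs mismatched positions. The refactor later in this series shifts the
# embeddings by one step and concatenates along the feature dimension before projecting
# into the codec-LM space, roughly:
import torch

def fuse_thinker_states(last_hidden, inputs_embeds, projector):
    # last_hidden, inputs_embeds: [B, T, D_llm]
    fused = torch.cat([last_hidden[:, :-1, :], inputs_embeds[:, 1:, :]], dim=-1)  # [B, T-1, 2*D_llm]
    return projector(fused)  # [B, T-1, D_codec]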
text_input_embeds = self.speech_token_projector(text_input_embeds) T_merged = text_input_embeds.shape[1] @@ -362,6 +364,7 @@ class SPEECH_LLM(nn.Module): # Need to add to the shifted position for left padding # Calculate the length of the non-padded sequence for each item seq_lens = audio_attention_mask.sum(dim=1) # Shape (B) + print(seq_lens[0], audio_codes[0], "======================") for i in range(audio_embeddings.shape[0]): item_len = seq_lens[i].item() # Get the non-padded length for item i start_idx_content = T_audio - item_len # Start index of the content for item i @@ -560,18 +563,18 @@ class SPEECH_LLM(nn.Module): delay_step = 2 thinker_prompt_part_seq_len = thinker_prompt_part.shape[1] talker_input_ids = torch.full( - (batch_size, thinker_prompt_part_seq_len + delay_step), self.codec_lm.config.bos_token_id, dtype=torch.long, device=self.llm.device + (batch_size, thinker_prompt_part_seq_len + delay_step + 1), self.codec_lm.config.bos_token_id, dtype=torch.long, device=self.llm.device ) talker_inputs_embeds = self.codec_lm.get_input_embeddings()(talker_input_ids) # [B, S_full, D_codec] thinker_input_embeds = torch.cat( [ thinker_prompt_part, - thinker_reply_part[:, :delay_step, :], + thinker_reply_part[:, :delay_step + 1, :], ], dim=1, ) talker_inputs_embeds += thinker_input_embeds - thinker_reply_part = thinker_reply_part[:, delay_step:, :] # [B, S_full, D_codec] + thinker_reply_part = thinker_reply_part[:, delay_step + 1:, :] # [B, S_full, D_codec] past_key_values = None @@ -583,7 +586,7 @@ class SPEECH_LLM(nn.Module): for t in range(max_speech_new_tokens): # Get embedding for the *current* input token ID (initially BOS, then generated tokens) # current_speech_embeds = self.codec_lm.get_input_embeddings()(current_speech_input_ids) # [B, 1, D_codec] - if next_token_ids is not None: + if t > 0: talker_inputs_embeds = self.codec_lm.get_input_embeddings()(next_token_ids) # [B, 1, D_codec] if thinker_reply_part.shape[1] > 0: talker_inputs_embeds += thinker_reply_part[:, :1, :] @@ -607,11 +610,11 @@ class SPEECH_LLM(nn.Module): output_hidden_states=True, # No attention mask needed here when using past_key_values and single token input ) - last_token_hidden_state = codec_outputs.hidden_states[-1][:, -1, :] # [B, D_codec] + last_token_hidden_state = codec_outputs.hidden_states[-1][:, -1, :] # [B, D_codec] #TODO: check shape here # Get logits for the *last* token generated in this step next_token_logits = self.codec_lm_head(last_token_hidden_state) # Use -1 index # suppress tokens between 4096:len(vocab)-3 - next_token_logits[:, 4096:-3] = -float("Inf") + next_token_logits[:, 4096:-3] = -float("Inf") # TODO: where we should supress tokens? 
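# Illustrative sketch (sample_codec_token is a hypothetical helper, not part of this patch;
# topk_sampling is the function defined earlier in this file). At this point the codec
# vocabulary has 8192 entries: ids 0..4095 carry real codec tokens and the top three ids are
# reserved for bos/eos/pad, so the line above masks the unused range 4096..vocab-4 before
# sampling. Once the vocabulary is shrunk to 4096 + 4 later in this series, the mask becomes
# unnecessary and is commented out.
def sample_codec_token(next_token_logits, codec_size=4096, num_specials=3):
    # next_token_logits: [B, vocab]; suppress ids in [codec_size, vocab - num_specials)
    logits = next_token_logits.clone()
    logits[:, codec_size:-num_specials] = float("-inf")
    return topk_sampling(logits)  # top-k/top-p sampling over the remaining ids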
next_token_ids = topk_sampling( next_token_logits, ) diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py b/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py index c9ecf9400..add33b52a 100755 --- a/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py +++ b/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py @@ -355,7 +355,7 @@ def compute_loss( for i in range(mask_indices[0].size(0)): row = mask_indices[0][i] col = mask_indices[1][i] - # + 2 to skip: 'assistant', '\n' 151665, 151645, 198, 151644, 77091, 198 + # + 6 to skip: 'assistant', '\n' 151665, 151645, 198, 151644, 77091, 198 target_ids[row, : col + 6] = IGNORE_TOKEN_ID attention_mask = input_ids.ne(tokenizer.pad_token_id) From 3642dfd8c350fe31141f4862db0a1d85cca4fcd5 Mon Sep 17 00:00:00 2001 From: root Date: Fri, 25 Apr 2025 05:36:18 +0000 Subject: [PATCH 16/57] refactor code --- egs/speech_llm/SPEECH2SPEECH/prepare.sh | 37 +-- .../SPEECH2SPEECH/slam_omni/decode.py | 39 ++-- .../SPEECH2SPEECH/slam_omni/model.py | 210 +++++++++--------- .../SPEECH2SPEECH/slam_omni/train.py | 39 ++-- 4 files changed, 171 insertions(+), 154 deletions(-) diff --git a/egs/speech_llm/SPEECH2SPEECH/prepare.sh b/egs/speech_llm/SPEECH2SPEECH/prepare.sh index a1c31a252..e0a2fa507 100644 --- a/egs/speech_llm/SPEECH2SPEECH/prepare.sh +++ b/egs/speech_llm/SPEECH2SPEECH/prepare.sh @@ -51,9 +51,10 @@ fi if [ $stage -le 3 ] && [ $stop_stage -ge 3 ]; then log "stage 3: " + exp_dir=./slam_omni/exp_speech2speech_rerun python3 ./slam_omni/decode.py \ --max-duration 1 \ - --exp-dir slam_omni/exp_speech2speech_test_flash_attn \ + --exp-dir $exp_dir \ --speech-encoder-path-or-name models/whisper/v1.1/whisper-large-v2-multi-hans-zh-epoch-3-avg-10.pt \ --llm-path-or-name models/Qwen2.5-0.5B-Instruct \ --epoch 997 --avg 1 \ @@ -87,21 +88,23 @@ fi if [ $stage -le 5 ] && [ $stop_stage -ge 5 ]; then log "stage 5: " - ngpu=2 - exp_dir=./slam_omni/exp_speech2speech_test_flash_attn -torchrun --nproc_per_node $ngpu ./slam_omni/train.py \ - --max-duration 40 \ - --enable-musan False \ - --exp-dir $exp_dir \ - --speech-encoder-path-or-name models/whisper/v1.1/whisper-large-v2-multi-hans-zh-epoch-3-avg-10.pt \ - --llm-path-or-name models/Qwen2.5-0.5B-Instruct \ - --manifest-dir data/fbank \ - --deepspeed \ - --deepspeed_config ./slam_omni/ds_config_zero1.json \ - --use-flash-attn True \ - --pretrained-model-path $exp_dir/epoch-1-checkpoint-35000.pt/pytorch_model.bin \ - --use-lora True --unfreeze-llm True --unfreeze-speech-projector True --enable-speech-output True - # --pretrained-model-path slam_omni/exp_speech2text/epoch-1-checkpoint-5000.pt/pytorch_model.bin \ - # --sampler-state-dict-path $exp_dir/epoch-1-checkpoint-35000-sampler.pt \ + ngpu=8 + exp_dir=./slam_omni/exp_speech2speech_rerun + # exp_dir_new=./slam_omni/exp_s2s + torchrun --nproc_per_node $ngpu ./slam_omni/train.py \ + --max-duration 50 \ + --enable-musan False \ + --exp-dir $exp_dir \ + --speech-encoder-path-or-name models/whisper/v1.1/whisper-large-v2-multi-hans-zh-epoch-3-avg-10.pt \ + --llm-path-or-name models/Qwen2.5-0.5B-Instruct \ + --manifest-dir data/fbank \ + --deepspeed \ + --deepspeed_config ./slam_omni/ds_config_zero1.json \ + --use-flash-attn True \ + --pretrained-model-path $exp_dir/epoch-1-checkpoint-15000.pt/pytorch_model.bin \ + --sampler-state-dict-path $exp_dir/epoch-1-checkpoint-15000-sampler.pt \ + --use-lora True --unfreeze-llm True --unfreeze-speech-projector True --enable-speech-output True + # --pretrained-model-path slam_omni/exp_speech2text/epoch-1-checkpoint-5000.pt/pytorch_model.bin \ + # 
--sampler-state-dict-path $exp_dir/epoch-1-checkpoint-35000-sampler.pt \ fi \ No newline at end of file diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/decode.py b/egs/speech_llm/SPEECH2SPEECH/slam_omni/decode.py index f23cd5f5d..54a8983df 100755 --- a/egs/speech_llm/SPEECH2SPEECH/slam_omni/decode.py +++ b/egs/speech_llm/SPEECH2SPEECH/slam_omni/decode.py @@ -579,7 +579,7 @@ def main(): # attn_implementation=attn_implementation, # torch_dtype=torch_dtype, # ) - codec_vocab_size = 8192 + codec_vocab_size = 4096 + 4 config = Qwen2Config( vocab_size=codec_vocab_size, hidden_size=1024, @@ -603,24 +603,25 @@ def main(): codec_lm.config.pad_token_id = codec_vocab_size - 1 codec_lm.config.eos_token_id = codec_vocab_size - 2 codec_lm.config.bos_token_id = codec_vocab_size - 3 - if params.use_lora: - lora_config = LoraConfig( - r=64, - lora_alpha=16, - target_modules=[ - "q_proj", - "k_proj", - "v_proj", - "o_proj", - "up_proj", - "gate_proj", - "down_proj", - ], - lora_dropout=0.05, - task_type="CAUSAL_LM", - ) - codec_lm = get_peft_model(codec_lm, lora_config) - codec_lm.print_trainable_parameters() + codec_lm.config.mask_token_id = codec_vocab_size - 4 + # if params.use_lora: + # lora_config = LoraConfig( + # r=64, + # lora_alpha=16, + # target_modules=[ + # "q_proj", + # "k_proj", + # "v_proj", + # "o_proj", + # "up_proj", + # "gate_proj", + # "down_proj", + # ], + # lora_dropout=0.05, + # task_type="CAUSAL_LM", + # ) + # codec_lm = get_peft_model(codec_lm, lora_config) + # codec_lm.print_trainable_parameters() else: codec_lm = None diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py b/egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py index 1c110470e..c5f31226d 100644 --- a/egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py +++ b/egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py @@ -4,7 +4,7 @@ from transformers.trainer_pt_utils import LabelSmoother from typing import List, Tuple # Added for type hints from torchmetrics.classification import MulticlassAccuracy IGNORE_TOKEN_ID = LabelSmoother.ignore_index - +import logging class EncoderProjector(nn.Module): """ @@ -69,7 +69,7 @@ class SPEECH_LLM(nn.Module): self.codec_lm = codec_lm if self.codec_lm: self.speech_token_projector = nn.Linear( - self.llm.config.hidden_size, self.codec_lm.config.hidden_size + self.llm.config.hidden_size + self.llm.config.hidden_size, self.codec_lm.config.hidden_size ) self.codec_lm_head = nn.Linear( self.codec_lm.config.hidden_size, self.codec_lm.config.vocab_size @@ -274,110 +274,92 @@ class SPEECH_LLM(nn.Module): ) = self._merge_input_ids_with_speech_features( speech_features, inputs_embeds, input_ids, attention_mask, labels ) - - # get the label start_index in inputs_embeds from labels - text_label_start_index_list = [] + input_seq_len = attention_mask.sum(dim=1) # shape, B + text_label_start_index_list, text_input_start_index_list, input_question_len_list = [], [], [] for i in range(labels.shape[0]): - text_label_start_index = torch.where(labels[i] != IGNORE_TOKEN_ID)[0][0] - text_label_start_index_list.append(text_label_start_index) - # TODO1: check text_label_start_index position - print(i, input_ids[i], input_ids[i].shape, labels[i], labels[i].shape, text_label_start_index, labels[i][text_label_start_index]) + input_embeds_valid_index = torch.where(attention_mask[i] != 0)[0] + input_embeds_start_index = input_embeds_valid_index[0] + text_labels_valid_index = torch.where(labels[i] != IGNORE_TOKEN_ID)[0] + text_labels_start_index = text_labels_valid_index[0] + + assert input_seq_len[i] == 
input_embeds_valid_index[-1] - input_embeds_start_index + 1, f"input_seq_len: {input_seq_len[i]}, input_embeds_valid_index: {input_embeds_valid_index}, input_embeds_start_index: {input_embeds_start_index}" + assert input_embeds_valid_index[-1] == text_labels_valid_index[-1], f"input_embeds_valid_index: {input_embeds_valid_index}, text_labels_valid_index: {text_labels_valid_index}" + input_question_len = text_labels_start_index - input_embeds_start_index + assert input_question_len + text_labels_valid_index[-1] - text_labels_start_index + 1 == input_seq_len[i] + text_label_start_index_list.append(text_labels_start_index) + text_input_start_index_list.append(input_embeds_start_index) + input_question_len_list.append(input_question_len) model_outputs = self.llm( inputs_embeds=inputs_embeds, attention_mask=attention_mask, labels=labels, output_hidden_states=True ) text_loss = model_outputs.loss - + delay_step = 1 # prepare codec lm inputs - audio_codes_lens = torch.tensor( - [len(x) for x in speech_codec_ids], dtype=torch.int64, device=input_ids.device - ) - # print(audio_codes_lens, "audio_codes_lens") + audio_codes_lens = [len(x) + input_question_len_list[i] + delay_step + 1 for i, x in enumerate(speech_codec_ids)] max_len_speech_codec = max(audio_codes_lens) - delay_step = 2 - audio_codes = torch.full( - (inputs_embeds.shape[0], max_len_speech_codec + inputs_embeds.shape[1] + 1), - self.codec_lm.config.pad_token_id, + + if self.codec_lm_padding_side == "right": + audio_codes = [ + [self.codec_lm.config.mask_token_id] * (input_question_len_list[i] + delay_step) + [self.codec_lm.config.bos_token_id] + x + [self.codec_lm.config.pad_token_id] * (max_len_speech_codec - audio_codes_lens[i]) + for i, x in enumerate(speech_codec_ids) + ] + audio_labels = [ + [self.codec_lm.config.pad_token_id] * (input_question_len_list[i] + delay_step) + x + [self.codec_lm.config.eos_token_id] + [self.codec_lm.config.pad_token_id] * (max_len_speech_codec - audio_codes_lens[i]) + for i, x in enumerate(speech_codec_ids) + ] + elif self.codec_lm_padding_side == "left": + audio_codes = [ + [self.codec_lm.config.pad_token_id] * (max_len_speech_codec - audio_codes_lens[i]) + [self.codec_lm.config.mask_token_id] * (input_question_len_list[i] + delay_step) + [self.codec_lm.config.bos_token_id] + x + for i, x in enumerate(speech_codec_ids) + ] + audio_labels = [ + [self.codec_lm.config.pad_token_id] * (max_len_speech_codec - audio_codes_lens[i]) + [self.codec_lm.config.pad_token_id] * (input_question_len_list[i] + delay_step) + x + [self.codec_lm.config.eos_token_id] + for i, x in enumerate(speech_codec_ids) + ] + audio_codes = torch.tensor( + audio_codes, + dtype=torch.int64, + device=input_ids.device + ) + audio_labels = torch.tensor( + audio_labels, dtype=torch.int64, device=input_ids.device ) - audio_labels = audio_codes.clone() - total_len = audio_codes.shape[1] - for i, speech_codec in enumerate(speech_codec_ids): - text_label_start_index = text_label_start_index_list[i] - speech_codec = torch.tensor( - speech_codec, dtype=torch.int64, device=input_ids.device - ) - speech_codec_len = len(speech_codec) - - # Calculate lengths of non-padding content - codes_len = text_label_start_index + delay_step + 1 + speech_codec_len - # Actual label content length (speech codec tokens + eos token) - labels_actual_content_len = speech_codec_len + 1 - - if self.codec_lm_padding_side == "right": - # Fill audio_codes (right padding) - codes_end_idx = codes_len - audio_codes[i, :text_label_start_index + delay_step + 1] = 
self.codec_lm.config.bos_token_id # mask token_id - audio_codes[i, text_label_start_index + delay_step + 1 : codes_end_idx] = speech_codec - - # Fill audio_labels (right padding) - labels_start_idx = text_label_start_index + delay_step - labels_speech_end_idx = labels_start_idx + speech_codec_len - audio_labels[i, labels_start_idx : labels_speech_end_idx] = speech_codec - audio_labels[i, labels_speech_end_idx] = self.codec_lm.config.eos_token_id - - elif self.codec_lm_padding_side == "left": - # Calculate start indices for left padding (shifting content to the right) - codes_start_idx = total_len - codes_len - labels_start_idx = total_len - labels_actual_content_len # Start index for the actual label content - - # Fill audio_codes (left padding) - codes_speech_start_idx = codes_start_idx + text_label_start_index + delay_step + 1 - audio_codes[i, codes_start_idx : codes_speech_start_idx] = self.codec_lm.config.bos_token_id # mask token_id - audio_codes[i, codes_speech_start_idx : total_len] = speech_codec - - # Fill audio_labels (left padding) - labels_speech_end_idx = labels_start_idx + speech_codec_len - # Note: The beginning part remains pad_token_id - audio_labels[i, labels_start_idx : labels_speech_end_idx] = speech_codec - audio_labels[i, labels_speech_end_idx] = self.codec_lm.config.eos_token_id - else: - raise ValueError(f"Unsupported padding side: {self.codec_lm_padding_side}") - - audio_attention_mask = audio_codes.ne(self.codec_lm.config.pad_token_id) # TODO: do we need to change bos tokens to pad token or mask token? + audio_attention_mask = audio_codes.ne(self.codec_lm.config.pad_token_id) audio_embeddings = self.codec_lm.get_input_embeddings()(audio_codes) - # input_ids: seq_len T1, audio_codec seq_len T2 - text_last_hidden_outputs = model_outputs.hidden_states[-1] - text_input_embeds = inputs_embeds + text_last_hidden_outputs # TODO: 计算不对,output tokens' embedding? 
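# Illustrative sketch (layout_one_sample and the MASK/BOS/EOS/PAD arguments are hypothetical
# stand-ins for codec_lm.config.{mask,bos,eos,pad}_token_id). The index arithmetic removed
# here is replaced above by two list comprehensions. For one sample with question length Q,
# delay step D, and codec tokens c1..cN, the right-padded layout they build is:
#   audio_codes : [MASK]*(Q+D) + [BOS] + c1..cN + [PAD]*rest
#   audio_labels: [PAD]*(Q+D)  + c1..cN + [EOS] + [PAD]*rest
# i.e. the labels sit one position ahead of the codes, so each step targets the next codec
# token and finally EOS.
def layout_one_sample(codec_ids, question_len, delay_step, max_len, MASK, BOS, EOS, PAD):
    rest = max_len - (question_len + delay_step + 1 + len(codec_ids))
    codes = [MASK] * (question_len + delay_step) + [BOS] + codec_ids + [PAD] * rest
    labels = [PAD] * (question_len + delay_step) + codec_ids + [EOS] + [PAD] * rest
    return codes, labels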
- text_input_embeds = self.speech_token_projector(text_input_embeds) + text_last_hidden_lists, text_embeds_list, text_input_embeds_list = [], [], [] + for i in range(len(text_label_start_index_list)): + text_last_hidden = model_outputs.hidden_states[-1][i, text_input_start_index_list[i]:text_input_start_index_list[i] + input_seq_len[i] - 1] + text_last_hidden_lists.append(text_last_hidden) + text_embed = inputs_embeds[i, text_input_start_index_list[i] + 1:text_input_start_index_list[i] + input_seq_len[i]] # exclude bos + text_embeds_list.append(text_embed) - T_merged = text_input_embeds.shape[1] - T_audio = audio_embeddings.shape[1] - - if self.codec_lm_padding_side == "right": - # Add to the beginning for right padding - audio_embeddings[:, :T_merged] += text_input_embeds - elif self.codec_lm_padding_side == "left": - # Need to add to the shifted position for left padding - # Calculate the length of the non-padded sequence for each item - seq_lens = audio_attention_mask.sum(dim=1) # Shape (B) - print(seq_lens[0], audio_codes[0], "======================") - for i in range(audio_embeddings.shape[0]): - item_len = seq_lens[i].item() # Get the non-padded length for item i - start_idx_content = T_audio - item_len # Start index of the content for item i - end_idx_target = start_idx_content + T_merged # End index of the target slice within the content - # Add the text_input_embeds to the calculated slice - if end_idx_target > T_audio: - # If the text input is longer than the audio input, we need to pad the audio input - cut_off_len = T_audio - start_idx_content - audio_embeddings[i, start_idx_content:end_idx_target] = text_input_embeds[i, :cut_off_len] - else: - audio_embeddings[i, start_idx_content:end_idx_target] += text_input_embeds[i] - else: - raise ValueError(f"Unsupported padding side: {self.codec_lm_padding_side}") + text_input_embeds = torch.cat( + [ + text_last_hidden, + text_embed, + ], + dim=-1, + )# shape, T, D1 + D2 + text_input_embeds = self.speech_token_projector(text_input_embeds) # shape, T, D_codec + text_input_embeds_list.append(text_input_embeds) + + for i in range(audio_embeddings.shape[0]): + text_input_embeds = text_input_embeds_list[i] + if self.codec_lm_padding_side == "right": + audio_embeddings[i, :text_input_embeds.shape[0]] += text_input_embeds + elif self.codec_lm_padding_side == "left": + start_idx = torch.where(audio_codes[i] == self.codec_lm.config.mask_token_id)[0][0] + start_idx_re_compute = torch.where(audio_attention_mask[i] != 0)[0][0] + assert start_idx == start_idx_re_compute, f"start_idx: {start_idx}, start_idx_re_compute: {start_idx_re_compute}" + if text_input_embeds.shape[0] > audio_embeddings.shape[1] - start_idx: + text_input_embeds = text_input_embeds[:audio_embeddings.shape[1] - start_idx] + logging.warning(f"Truncate text_input_embeds: {text_input_embeds.shape} to {audio_embeddings.shape[1] - start_idx}") + audio_embeddings[i, start_idx:start_idx + text_input_embeds.shape[0]] += text_input_embeds speech_outputs = self.codec_lm( attention_mask=audio_attention_mask, @@ -545,26 +527,56 @@ class SPEECH_LLM(nn.Module): output_hidden_states=True, **final_llm_kwargs ) - + delay_step = 1 generated_text_ids = text_outputs.sequences # [B, S_full] - thinker_token_embeds = [ + eos_token_id = self.llm.config.eos_token_id + eos_token_embedding = self.llm.get_input_embeddings()(torch.tensor([[eos_token_id]], device=device)) # 1,D + assert generated_text_ids[0, -1] == eos_token_id, f"Last token is not EOS: {generated_text_ids[0, -1]} != {eos_token_id}" + 
thinker_token_embeds_org = [ token_hidden_states[0].to(self.llm.device) for token_hidden_states in text_outputs.hidden_states ] + # shift one for thinker token_embeds, drop the first embeds, and add the eos token + first_thinker_token_embed = torch.cat( + [ + thinker_token_embeds_org[0][:, 1:], + thinker_token_embeds_org[1], + ], + dim=1, + ) + + thinker_token_embeds = [first_thinker_token_embed] + thinker_token_embeds_org[2:] + [eos_token_embedding] thinker_hidden_states = [ token_hidden_states[-1].to(self.llm.device) for token_hidden_states in text_outputs.hidden_states ] - thinker_reply_part = torch.cat(thinker_hidden_states[1:], dim=1) + torch.cat(thinker_token_embeds[1:], dim=1) - thinker_prompt_part = thinker_hidden_states[0] + thinker_token_embeds[0] + # thinker_reply_part = torch.cat(thinker_hidden_states[1:], dim=1) + torch.cat(thinker_token_embeds[1:], dim=1) + thinker_reply_part = [torch.cat( + [ + thinker_hidden_state, + thinker_token_embed, + ], + dim=-1, + ) + for thinker_hidden_state, thinker_token_embed in zip(thinker_hidden_states[1:], thinker_token_embeds[1:]) + ] + thinker_reply_part = torch.cat(thinker_reply_part, dim=1) + # thinker_prompt_part = thinker_hidden_states[0] + thinker_token_embeds[0] + thinker_prompt_part = torch.cat( + [ + thinker_hidden_states[0], + thinker_token_embeds[0], + ], + dim=-1, + ) thinker_prompt_part = self.speech_token_projector(thinker_prompt_part) # [B, S_full, D_codec] thinker_reply_part = self.speech_token_projector(thinker_reply_part) # [B, S_full, D_codec] - - delay_step = 2 + thinker_prompt_part_seq_len = thinker_prompt_part.shape[1] talker_input_ids = torch.full( - (batch_size, thinker_prompt_part_seq_len + delay_step + 1), self.codec_lm.config.bos_token_id, dtype=torch.long, device=self.llm.device + (batch_size, thinker_prompt_part_seq_len + delay_step + 1), self.codec_lm.config.mask_token_id, dtype=torch.long, device=self.llm.device ) + talker_input_ids[:,-1] = self.codec_lm.config.bos_token_id talker_inputs_embeds = self.codec_lm.get_input_embeddings()(talker_input_ids) # [B, S_full, D_codec] thinker_input_embeds = torch.cat( [ @@ -614,7 +626,7 @@ class SPEECH_LLM(nn.Module): # Get logits for the *last* token generated in this step next_token_logits = self.codec_lm_head(last_token_hidden_state) # Use -1 index # suppress tokens between 4096:len(vocab)-3 - next_token_logits[:, 4096:-3] = -float("Inf") # TODO: where we should supress tokens? + # next_token_logits[:, 4096:-3] = -float("Inf") # TODO: where we should supress tokens? 
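# Illustrative sketch (talker_generate and its arguments are hypothetical; this condenses
# the loop above, assumes batch size 1 as the EOS check above does, and reuses topk_sampling
# from this file). Each step feeds the embedding of the previously sampled codec token, plus
# one remaining thinker hidden-state slice while any are left, through the codec LM with a
# KV cache, projects the last hidden state with codec_lm_head, samples, and stops at EOS.
def talker_generate(codec_lm, codec_lm_head, prompt_embeds, reply_embeds, eos_id, max_new):
    past, tokens, step_embeds = None, [], prompt_embeds          # prompt_embeds: [1, S, D_codec]
    for _ in range(max_new):
        out = codec_lm(inputs_embeds=step_embeds, past_key_values=past,
                       use_cache=True, return_dict=True, output_hidden_states=True)
        past = out.past_key_values
        logits = codec_lm_head(out.hidden_states[-1][:, -1, :])  # [1, codec_vocab]
        next_id = topk_sampling(logits)                          # [1, 1]
        if next_id[0, 0].item() == eos_id:
            break
        tokens.append(next_id[0, 0].item())
        step_embeds = codec_lm.get_input_embeddings()(next_id)   # [1, 1, D_codec]
        if reply_embeds.shape[1] > 0:                            # consume thinker states one per step
            step_embeds = step_embeds + reply_embeds[:, :1, :]
            reply_embeds = reply_embeds[:, 1:, :]
    return tokens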
next_token_ids = topk_sampling( next_token_logits, ) diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py b/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py index add33b52a..f5356dc43 100755 --- a/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py +++ b/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py @@ -745,7 +745,7 @@ def run(rank, world_size, args): # attn_implementation=attn_implementation, # torch_dtype=torch_dtype, # ) - codec_vocab_size = 8192 + codec_vocab_size = 4096 + 4 # TODO: modify above vocab size or supress_tokens when decoding config = Qwen2Config( vocab_size=codec_vocab_size, @@ -770,24 +770,25 @@ def run(rank, world_size, args): codec_lm.config.pad_token_id = codec_vocab_size - 1 codec_lm.config.eos_token_id = codec_vocab_size - 2 codec_lm.config.bos_token_id = codec_vocab_size - 3 - if params.use_lora: - lora_config = LoraConfig( - r=64, - lora_alpha=16, - target_modules=[ - "q_proj", - "k_proj", - "v_proj", - "o_proj", - "up_proj", - "gate_proj", - "down_proj", - ], - lora_dropout=0.05, - task_type="CAUSAL_LM", - ) - codec_lm = get_peft_model(codec_lm, lora_config) - codec_lm.print_trainable_parameters() + codec_lm.config.mask_token_id = codec_vocab_size - 4 + # if params.use_lora: + # lora_config = LoraConfig( + # r=64, + # lora_alpha=16, + # target_modules=[ + # "q_proj", + # "k_proj", + # "v_proj", + # "o_proj", + # "up_proj", + # "gate_proj", + # "down_proj", + # ], + # lora_dropout=0.05, + # task_type="CAUSAL_LM", + # ) + # codec_lm = get_peft_model(codec_lm, lora_config) + # codec_lm.print_trainable_parameters() else: codec_lm = None From 6955639d22c9c8569f3d2104e5e5e76503f9bc9a Mon Sep 17 00:00:00 2001 From: Yuekai Zhang Date: Fri, 25 Apr 2025 14:08:25 +0800 Subject: [PATCH 17/57] add qwen omni web demo --- egs/speech_llm/SPEECH2SPEECH/web_demo.py | 413 +++++++++++++++++++++++ 1 file changed, 413 insertions(+) create mode 100644 egs/speech_llm/SPEECH2SPEECH/web_demo.py diff --git a/egs/speech_llm/SPEECH2SPEECH/web_demo.py b/egs/speech_llm/SPEECH2SPEECH/web_demo.py new file mode 100644 index 000000000..ba1aca157 --- /dev/null +++ b/egs/speech_llm/SPEECH2SPEECH/web_demo.py @@ -0,0 +1,413 @@ +# Modified from https://github.com/QwenLM/Qwen2.5-Omni/blob/main/web_demo.py +import io +import os +import ffmpeg + +import numpy as np +import gradio as gr +import soundfile as sf + +#import modelscope_studio.components.base as ms +#import modelscope_studio.components.antd as antd +import gradio.processing_utils as processing_utils + +#from transformers import Qwen2_5OmniForConditionalGeneration, Qwen2_5OmniProcessor +from gradio_client import utils as client_utils +#from qwen_omni_utils import process_mm_info +from argparse import ArgumentParser + +def _load_model_processor(args): + if args.cpu_only: + device_map = 'cpu' + else: + device_map = 'auto' + + # Check if flash-attn2 flag is enabled and load model accordingly + if args.flash_attn2: + # model = Qwen2_5OmniForConditionalGeneration.from_pretrained(args.checkpoint_path, + # torch_dtype='auto', + # attn_implementation='flash_attention_2', + # device_map=device_map) + # else: + # model = Qwen2_5OmniForConditionalGeneration.from_pretrained(args.checkpoint_path, device_map=device_map, torch_dtype='auto') + + # processor = Qwen2_5OmniProcessor.from_pretrained(args.checkpoint_path) + return model, processor + +def _launch_demo(args, model, processor): + # Voice settings + VOICE_LIST = ['Chelsie', 'Ethan'] + DEFAULT_VOICE = 'Chelsie' + + default_system_prompt = 'You are Qwen, a virtual human developed by the Qwen Team, 
Alibaba Group, capable of perceiving auditory and visual inputs, as well as generating text and speech.' + + language = args.ui_language + + # def get_text(text: str, cn_text: str): + # if language == 'en': + # return text + # if language == 'zh': + # return cn_text + # return text + + # def convert_webm_to_mp4(input_file, output_file): + # try: + # ( + # ffmpeg + # .input(input_file) + # .output(output_file, acodec='aac', ar='16000', audio_bitrate='192k') + # .run(quiet=True, overwrite_output=True) + # ) + # print(f"Conversion successful: {output_file}") + # except ffmpeg.Error as e: + # print("An error occurred during conversion.") + # print(e.stderr.decode('utf-8')) + + def format_history(history: list, system_prompt: str): + messages = [] + # messages.append({"role": "system", "content": [{"type": "text", "text": system_prompt}]}) + for item in history: + if isinstance(item["content"], str): + messages.append({"role": item['role'], "content": item['content']}) + elif item["role"] == "user" and (isinstance(item["content"], list) or + isinstance(item["content"], tuple)): + file_path = item["content"][0] + + mime_type = client_utils.get_mimetype(file_path) + if mime_type.startswith("image"): + messages.append({ + "role": + item['role'], + "content": [{ + "type": "image", + "image": file_path + }] + }) + elif mime_type.startswith("video"): + messages.append({ + "role": + item['role'], + "content": [{ + "type": "video", + "video": file_path + }] + }) + elif mime_type.startswith("audio"): + messages.append({ + "role": + item['role'], + "content": [{ + "type": "audio", + "audio": file_path, + }] + }) + return messages + + def predict(messages, voice=DEFAULT_VOICE): + print('predict history: ', messages) + + text = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=False) + + audios, images, videos = process_mm_info(messages, use_audio_in_video=True) + + inputs = processor(text=text, audio=audios, images=images, videos=videos, return_tensors="pt", padding=True, use_audio_in_video=True) + inputs = inputs.to(model.device).to(model.dtype) + + text_ids, audio = model.generate(**inputs, speaker=voice, use_audio_in_video=True) + + response = processor.batch_decode(text_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False) + response = response[0].split("\n")[-1] + yield {"type": "text", "data": response} + + audio = np.array(audio * 32767).astype(np.int16) + wav_io = io.BytesIO() + sf.write(wav_io, audio, samplerate=24000, format="WAV") + wav_io.seek(0) + wav_bytes = wav_io.getvalue() + audio_path = processing_utils.save_bytes_to_cache( + wav_bytes, "audio.wav", cache_dir=demo.GRADIO_CACHE) + yield {"type": "audio", "data": audio_path} + + def media_predict(audio, video, history, system_prompt, voice_choice): + # First yield + yield ( + None, # microphone + None, # webcam + history, # media_chatbot + gr.update(visible=False), # submit_btn + gr.update(visible=True), # stop_btn + ) + + if video is not None: + convert_webm_to_mp4(video, video.replace('.webm', '.mp4')) + video = video.replace(".webm", ".mp4") + files = [audio, video] + + for f in files: + if f: + history.append({"role": "user", "content": (f, )}) + + formatted_history = format_history(history=history, + system_prompt=system_prompt,) + + + history.append({"role": "assistant", "content": ""}) + + for chunk in predict(formatted_history, voice_choice): + if chunk["type"] == "text": + history[-1]["content"] = chunk["data"] + yield ( + None, # microphone + None, # webcam + history, # media_chatbot + 
gr.update(visible=False), # submit_btn + gr.update(visible=True), # stop_btn + ) + if chunk["type"] == "audio": + history.append({ + "role": "assistant", + "content": gr.Audio(chunk["data"]) + }) + + # Final yield + yield ( + None, # microphone + None, # webcam + history, # media_chatbot + gr.update(visible=True), # submit_btn + gr.update(visible=False), # stop_btn + ) + + def chat_predict(text, audio, image, video, history, system_prompt, voice_choice): + # Process text input + if text: + history.append({"role": "user", "content": text}) + + # Process audio input + if audio: + history.append({"role": "user", "content": (audio, )}) + + # Process image input + if image: + history.append({"role": "user", "content": (image, )}) + + # Process video input + if video: + history.append({"role": "user", "content": (video, )}) + + formatted_history = format_history(history=history, + system_prompt=system_prompt) + + yield None, None, None, None, history + + history.append({"role": "assistant", "content": ""}) + for chunk in predict(formatted_history, voice_choice): + if chunk["type"] == "text": + history[-1]["content"] = chunk["data"] + yield gr.skip(), gr.skip(), gr.skip(), gr.skip( + ), history + if chunk["type"] == "audio": + history.append({ + "role": "assistant", + "content": gr.Audio(chunk["data"]) + }) + yield gr.skip(), gr.skip(), gr.skip(), gr.skip(), history + + with gr.Blocks() as demo, ms.Application(), antd.ConfigProvider(): + with gr.Sidebar(open=False): + system_prompt_textbox = gr.Textbox(label="System Prompt", + value=default_system_prompt) + with antd.Flex(gap="small", justify="center", align="center"): + with antd.Flex(vertical=True, gap="small", align="center"): + antd.Typography.Title("Qwen2.5-Omni Demo", + level=1, + elem_style=dict(margin=0, fontSize=28)) + with antd.Flex(vertical=True, gap="small"): + antd.Typography.Text(get_text("🎯 Instructions for use:", + "🎯 使用说明:"), + strong=True) + antd.Typography.Text( + get_text( + "1️⃣ Click the Audio Record button or the Camera Record button.", + "1️⃣ 点击音频录制按钮,或摄像头-录制按钮")) + antd.Typography.Text( + get_text("2️⃣ Input audio or video.", "2️⃣ 输入音频或者视频")) + antd.Typography.Text( + get_text( + "3️⃣ Click the submit button and wait for the model's response.", + "3️⃣ 点击提交并等待模型的回答")) + voice_choice = gr.Dropdown(label="Voice Choice", + choices=VOICE_LIST, + value=DEFAULT_VOICE) + with gr.Tabs(): + with gr.Tab("Online"): + with gr.Row(): + with gr.Column(scale=1): + microphone = gr.Audio(sources=['microphone'], + type="filepath") + webcam = gr.Video(sources=['webcam'], + height=400, + include_audio=True) + submit_btn = gr.Button(get_text("Submit", "提交"), + variant="primary") + stop_btn = gr.Button(get_text("Stop", "停止"), visible=False) + clear_btn = gr.Button(get_text("Clear History", "清除历史")) + with gr.Column(scale=2): + media_chatbot = gr.Chatbot(height=650, type="messages") + + def clear_history(): + return [], gr.update(value=None), gr.update(value=None) + + submit_event = submit_btn.click(fn=media_predict, + inputs=[ + microphone, webcam, + media_chatbot, + system_prompt_textbox, + voice_choice + ], + outputs=[ + microphone, webcam, + media_chatbot, submit_btn, + stop_btn + ]) + stop_btn.click( + fn=lambda: + (gr.update(visible=True), gr.update(visible=False)), + inputs=None, + outputs=[submit_btn, stop_btn], + cancels=[submit_event], + queue=False) + clear_btn.click(fn=clear_history, + inputs=None, + outputs=[media_chatbot, microphone, webcam]) + + with gr.Tab("Offline"): + chatbot = gr.Chatbot(type="messages", height=650) + + # 
Media upload section in one row + with gr.Row(equal_height=True): + audio_input = gr.Audio(sources=["upload"], + type="filepath", + label="Upload Audio", + elem_classes="media-upload", + scale=1) + image_input = gr.Image(sources=["upload"], + type="filepath", + label="Upload Image", + elem_classes="media-upload", + scale=1) + video_input = gr.Video(sources=["upload"], + label="Upload Video", + elem_classes="media-upload", + scale=1) + + # Text input section + text_input = gr.Textbox(show_label=False, + placeholder="Enter text here...") + + # Control buttons + with gr.Row(): + submit_btn = gr.Button(get_text("Submit", "提交"), + variant="primary", + size="lg") + stop_btn = gr.Button(get_text("Stop", "停止"), + visible=False, + size="lg") + clear_btn = gr.Button(get_text("Clear History", "清除历史"), + size="lg") + + def clear_chat_history(): + return [], gr.update(value=None), gr.update( + value=None), gr.update(value=None), gr.update(value=None) + + submit_event = gr.on( + triggers=[submit_btn.click, text_input.submit], + fn=chat_predict, + inputs=[ + text_input, audio_input, image_input, video_input, chatbot, + system_prompt_textbox, voice_choice + ], + outputs=[ + text_input, audio_input, image_input, video_input, chatbot + ]) + + stop_btn.click(fn=lambda: + (gr.update(visible=True), gr.update(visible=False)), + inputs=None, + outputs=[submit_btn, stop_btn], + cancels=[submit_event], + queue=False) + + clear_btn.click(fn=clear_chat_history, + inputs=None, + outputs=[ + chatbot, text_input, audio_input, image_input, + video_input + ]) + + # Add some custom CSS to improve the layout + gr.HTML(""" + + """) + + demo.queue(default_concurrency_limit=100, max_size=100).launch(max_threads=100, + ssr_mode=False, + share=args.share, + inbrowser=args.inbrowser, + server_port=args.server_port, + server_name=args.server_name,) + + +DEFAULT_CKPT_PATH = "Qwen/Qwen2.5-Omni-7B" +def _get_args(): + parser = ArgumentParser() + + parser.add_argument('-c', + '--checkpoint-path', + type=str, + default=DEFAULT_CKPT_PATH, + help='Checkpoint name or path, default to %(default)r') + parser.add_argument('--cpu-only', action='store_true', help='Run demo with CPU only') + + parser.add_argument('--flash-attn2', + action='store_true', + default=False, + help='Enable flash_attention_2 when loading the model.') + parser.add_argument('--share', + action='store_true', + default=False, + help='Create a publicly shareable link for the interface.') + parser.add_argument('--inbrowser', + action='store_true', + default=False, + help='Automatically launch the interface in a new tab on the default browser.') + parser.add_argument('--server-port', type=int, default=7860, help='Demo server port.') + parser.add_argument('--server-name', type=str, default='127.0.0.1', help='Demo server name.') + parser.add_argument('--ui-language', type=str, choices=['en', 'zh'], default='en', help='Display language for the UI.') + + args = parser.parse_args() + return args + +if __name__ == "__main__": + args = _get_args() + model, processor = _load_model_processor(args) + _launch_demo(args, model, processor) \ No newline at end of file From 6ea7ec8543c5f799a753d17549f08177580785f5 Mon Sep 17 00:00:00 2001 From: Yuekai Zhang Date: Fri, 25 Apr 2025 14:10:30 +0800 Subject: [PATCH 18/57] remove offline tab --- egs/speech_llm/SPEECH2SPEECH/web_demo.py | 123 ----------------------- 1 file changed, 123 deletions(-) diff --git a/egs/speech_llm/SPEECH2SPEECH/web_demo.py b/egs/speech_llm/SPEECH2SPEECH/web_demo.py index ba1aca157..6e2cfb18f 100644 --- 
a/egs/speech_llm/SPEECH2SPEECH/web_demo.py +++ b/egs/speech_llm/SPEECH2SPEECH/web_demo.py @@ -178,41 +178,6 @@ def _launch_demo(args, model, processor): gr.update(visible=False), # stop_btn ) - def chat_predict(text, audio, image, video, history, system_prompt, voice_choice): - # Process text input - if text: - history.append({"role": "user", "content": text}) - - # Process audio input - if audio: - history.append({"role": "user", "content": (audio, )}) - - # Process image input - if image: - history.append({"role": "user", "content": (image, )}) - - # Process video input - if video: - history.append({"role": "user", "content": (video, )}) - - formatted_history = format_history(history=history, - system_prompt=system_prompt) - - yield None, None, None, None, history - - history.append({"role": "assistant", "content": ""}) - for chunk in predict(formatted_history, voice_choice): - if chunk["type"] == "text": - history[-1]["content"] = chunk["data"] - yield gr.skip(), gr.skip(), gr.skip(), gr.skip( - ), history - if chunk["type"] == "audio": - history.append({ - "role": "assistant", - "content": gr.Audio(chunk["data"]) - }) - yield gr.skip(), gr.skip(), gr.skip(), gr.skip(), history - with gr.Blocks() as demo, ms.Application(), antd.ConfigProvider(): with gr.Sidebar(open=False): system_prompt_textbox = gr.Textbox(label="System Prompt", @@ -281,94 +246,6 @@ def _launch_demo(args, model, processor): inputs=None, outputs=[media_chatbot, microphone, webcam]) - with gr.Tab("Offline"): - chatbot = gr.Chatbot(type="messages", height=650) - - # Media upload section in one row - with gr.Row(equal_height=True): - audio_input = gr.Audio(sources=["upload"], - type="filepath", - label="Upload Audio", - elem_classes="media-upload", - scale=1) - image_input = gr.Image(sources=["upload"], - type="filepath", - label="Upload Image", - elem_classes="media-upload", - scale=1) - video_input = gr.Video(sources=["upload"], - label="Upload Video", - elem_classes="media-upload", - scale=1) - - # Text input section - text_input = gr.Textbox(show_label=False, - placeholder="Enter text here...") - - # Control buttons - with gr.Row(): - submit_btn = gr.Button(get_text("Submit", "提交"), - variant="primary", - size="lg") - stop_btn = gr.Button(get_text("Stop", "停止"), - visible=False, - size="lg") - clear_btn = gr.Button(get_text("Clear History", "清除历史"), - size="lg") - - def clear_chat_history(): - return [], gr.update(value=None), gr.update( - value=None), gr.update(value=None), gr.update(value=None) - - submit_event = gr.on( - triggers=[submit_btn.click, text_input.submit], - fn=chat_predict, - inputs=[ - text_input, audio_input, image_input, video_input, chatbot, - system_prompt_textbox, voice_choice - ], - outputs=[ - text_input, audio_input, image_input, video_input, chatbot - ]) - - stop_btn.click(fn=lambda: - (gr.update(visible=True), gr.update(visible=False)), - inputs=None, - outputs=[submit_btn, stop_btn], - cancels=[submit_event], - queue=False) - - clear_btn.click(fn=clear_chat_history, - inputs=None, - outputs=[ - chatbot, text_input, audio_input, image_input, - video_input - ]) - - # Add some custom CSS to improve the layout - gr.HTML(""" - - """) - demo.queue(default_concurrency_limit=100, max_size=100).launch(max_threads=100, ssr_mode=False, share=args.share, From 9a07363a8dff6bc3b2658dbda3d7e113cba8fff0 Mon Sep 17 00:00:00 2001 From: Yuekai Zhang Date: Fri, 25 Apr 2025 14:21:50 +0800 Subject: [PATCH 19/57] remove unsed --- egs/speech_llm/SPEECH2SPEECH/web_demo.py | 192 ++++++----------------- 1 file 
changed, 48 insertions(+), 144 deletions(-) diff --git a/egs/speech_llm/SPEECH2SPEECH/web_demo.py b/egs/speech_llm/SPEECH2SPEECH/web_demo.py index 6e2cfb18f..f856bf26f 100644 --- a/egs/speech_llm/SPEECH2SPEECH/web_demo.py +++ b/egs/speech_llm/SPEECH2SPEECH/web_demo.py @@ -1,26 +1,18 @@ # Modified from https://github.com/QwenLM/Qwen2.5-Omni/blob/main/web_demo.py import io -import os -import ffmpeg import numpy as np import gradio as gr import soundfile as sf -#import modelscope_studio.components.base as ms -#import modelscope_studio.components.antd as antd import gradio.processing_utils as processing_utils -#from transformers import Qwen2_5OmniForConditionalGeneration, Qwen2_5OmniProcessor +from transformers import AutoModelForCausalLM from gradio_client import utils as client_utils -#from qwen_omni_utils import process_mm_info + from argparse import ArgumentParser def _load_model_processor(args): - if args.cpu_only: - device_map = 'cpu' - else: - device_map = 'auto' # Check if flash-attn2 flag is enabled and load model accordingly if args.flash_attn2: @@ -35,37 +27,9 @@ def _load_model_processor(args): return model, processor def _launch_demo(args, model, processor): - # Voice settings - VOICE_LIST = ['Chelsie', 'Ethan'] - DEFAULT_VOICE = 'Chelsie' - default_system_prompt = 'You are Qwen, a virtual human developed by the Qwen Team, Alibaba Group, capable of perceiving auditory and visual inputs, as well as generating text and speech.' - - language = args.ui_language - - # def get_text(text: str, cn_text: str): - # if language == 'en': - # return text - # if language == 'zh': - # return cn_text - # return text - - # def convert_webm_to_mp4(input_file, output_file): - # try: - # ( - # ffmpeg - # .input(input_file) - # .output(output_file, acodec='aac', ar='16000', audio_bitrate='192k') - # .run(quiet=True, overwrite_output=True) - # ) - # print(f"Conversion successful: {output_file}") - # except ffmpeg.Error as e: - # print("An error occurred during conversion.") - # print(e.stderr.decode('utf-8')) - - def format_history(history: list, system_prompt: str): + def format_history(history: list): messages = [] - # messages.append({"role": "system", "content": [{"type": "text", "text": system_prompt}]}) for item in history: if isinstance(item["content"], str): messages.append({"role": item['role'], "content": item['content']}) @@ -74,25 +38,7 @@ def _launch_demo(args, model, processor): file_path = item["content"][0] mime_type = client_utils.get_mimetype(file_path) - if mime_type.startswith("image"): - messages.append({ - "role": - item['role'], - "content": [{ - "type": "image", - "image": file_path - }] - }) - elif mime_type.startswith("video"): - messages.append({ - "role": - item['role'], - "content": [{ - "type": "video", - "video": file_path - }] - }) - elif mime_type.startswith("audio"): + if mime_type.startswith("audio"): messages.append({ "role": item['role'], @@ -103,17 +49,17 @@ def _launch_demo(args, model, processor): }) return messages - def predict(messages, voice=DEFAULT_VOICE): + def predict(messages): print('predict history: ', messages) text = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=False) - audios, images, videos = process_mm_info(messages, use_audio_in_video=True) + audios = [msg['content'][0]['audio'] for msg in messages if msg['role'] == 'user' and isinstance(msg['content'], list) and msg['content'][0]['type'] == 'audio'] - inputs = processor(text=text, audio=audios, images=images, videos=videos, return_tensors="pt", padding=True, 
use_audio_in_video=True) + inputs = processor(text=text, audio=audios, return_tensors="pt", padding=True) inputs = inputs.to(model.device).to(model.dtype) - text_ids, audio = model.generate(**inputs, speaker=voice, use_audio_in_video=True) + text_ids, audio = model.generate(**inputs) response = processor.batch_decode(text_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False) response = response[0].split("\n")[-1] @@ -128,37 +74,31 @@ def _launch_demo(args, model, processor): wav_bytes, "audio.wav", cache_dir=demo.GRADIO_CACHE) yield {"type": "audio", "data": audio_path} - def media_predict(audio, video, history, system_prompt, voice_choice): + def media_predict(audio, history): # First yield yield ( None, # microphone - None, # webcam history, # media_chatbot gr.update(visible=False), # submit_btn gr.update(visible=True), # stop_btn ) - if video is not None: - convert_webm_to_mp4(video, video.replace('.webm', '.mp4')) - video = video.replace(".webm", ".mp4") - files = [audio, video] + files = [audio] for f in files: if f: history.append({"role": "user", "content": (f, )}) - formatted_history = format_history(history=history, - system_prompt=system_prompt,) + formatted_history = format_history(history=history) history.append({"role": "assistant", "content": ""}) - for chunk in predict(formatted_history, voice_choice): + for chunk in predict(formatted_history): if chunk["type"] == "text": history[-1]["content"] = chunk["data"] yield ( None, # microphone - None, # webcam history, # media_chatbot gr.update(visible=False), # submit_btn gr.update(visible=True), # stop_btn @@ -172,79 +112,47 @@ def _launch_demo(args, model, processor): # Final yield yield ( None, # microphone - None, # webcam history, # media_chatbot gr.update(visible=True), # submit_btn gr.update(visible=False), # stop_btn ) - with gr.Blocks() as demo, ms.Application(), antd.ConfigProvider(): - with gr.Sidebar(open=False): - system_prompt_textbox = gr.Textbox(label="System Prompt", - value=default_system_prompt) - with antd.Flex(gap="small", justify="center", align="center"): - with antd.Flex(vertical=True, gap="small", align="center"): - antd.Typography.Title("Qwen2.5-Omni Demo", - level=1, - elem_style=dict(margin=0, fontSize=28)) - with antd.Flex(vertical=True, gap="small"): - antd.Typography.Text(get_text("🎯 Instructions for use:", - "🎯 使用说明:"), - strong=True) - antd.Typography.Text( - get_text( - "1️⃣ Click the Audio Record button or the Camera Record button.", - "1️⃣ 点击音频录制按钮,或摄像头-录制按钮")) - antd.Typography.Text( - get_text("2️⃣ Input audio or video.", "2️⃣ 输入音频或者视频")) - antd.Typography.Text( - get_text( - "3️⃣ Click the submit button and wait for the model's response.", - "3️⃣ 点击提交并等待模型的回答")) - voice_choice = gr.Dropdown(label="Voice Choice", - choices=VOICE_LIST, - value=DEFAULT_VOICE) - with gr.Tabs(): - with gr.Tab("Online"): - with gr.Row(): - with gr.Column(scale=1): - microphone = gr.Audio(sources=['microphone'], - type="filepath") - webcam = gr.Video(sources=['webcam'], - height=400, - include_audio=True) - submit_btn = gr.Button(get_text("Submit", "提交"), - variant="primary") - stop_btn = gr.Button(get_text("Stop", "停止"), visible=False) - clear_btn = gr.Button(get_text("Clear History", "清除历史")) - with gr.Column(scale=2): - media_chatbot = gr.Chatbot(height=650, type="messages") + with gr.Blocks() as demo: + with gr.Tab("Online"): + with gr.Row(): + with gr.Column(scale=1): + microphone = gr.Audio(sources=['microphone'], + type="filepath") + submit_btn = gr.Button(get_text("Submit", "提交"), + 
variant="primary") + stop_btn = gr.Button(get_text("Stop", "停止"), visible=False) + clear_btn = gr.Button(get_text("Clear History", "清除历史")) + with gr.Column(scale=2): + media_chatbot = gr.Chatbot(height=650, type="messages") - def clear_history(): - return [], gr.update(value=None), gr.update(value=None) + def clear_history(): + return [], gr.update(value=None) - submit_event = submit_btn.click(fn=media_predict, - inputs=[ - microphone, webcam, - media_chatbot, - system_prompt_textbox, - voice_choice - ], - outputs=[ - microphone, webcam, - media_chatbot, submit_btn, - stop_btn - ]) - stop_btn.click( - fn=lambda: - (gr.update(visible=True), gr.update(visible=False)), - inputs=None, - outputs=[submit_btn, stop_btn], - cancels=[submit_event], - queue=False) - clear_btn.click(fn=clear_history, - inputs=None, - outputs=[media_chatbot, microphone, webcam]) + submit_event = submit_btn.click(fn=media_predict, + inputs=[ + microphone, + media_chatbot, + ], + outputs=[ + microphone, + media_chatbot, submit_btn, + stop_btn + ]) + stop_btn.click( + fn=lambda: + (gr.update(visible=True), gr.update(visible=False)), + inputs=None, + outputs=[submit_btn, stop_btn], + cancels=[submit_event], + queue=False) + clear_btn.click(fn=clear_history, + inputs=None, + outputs=[media_chatbot, microphone]) demo.queue(default_concurrency_limit=100, max_size=100).launch(max_threads=100, ssr_mode=False, @@ -254,16 +162,13 @@ def _launch_demo(args, model, processor): server_name=args.server_name,) -DEFAULT_CKPT_PATH = "Qwen/Qwen2.5-Omni-7B" def _get_args(): parser = ArgumentParser() - parser.add_argument('-c', - '--checkpoint-path', + parser.add_argument('--checkpoint-path', type=str, - default=DEFAULT_CKPT_PATH, + default=None, help='Checkpoint name or path, default to %(default)r') - parser.add_argument('--cpu-only', action='store_true', help='Run demo with CPU only') parser.add_argument('--flash-attn2', action='store_true', @@ -279,7 +184,6 @@ def _get_args(): help='Automatically launch the interface in a new tab on the default browser.') parser.add_argument('--server-port', type=int, default=7860, help='Demo server port.') parser.add_argument('--server-name', type=str, default='127.0.0.1', help='Demo server name.') - parser.add_argument('--ui-language', type=str, choices=['en', 'zh'], default='en', help='Display language for the UI.') args = parser.parse_args() return args From 72addd40f506330659ff9bc5b8bb98584b260828 Mon Sep 17 00:00:00 2001 From: Yuekai Zhang Date: Fri, 25 Apr 2025 14:22:16 +0800 Subject: [PATCH 20/57] change place --- egs/speech_llm/SPEECH2SPEECH/{ => slam_omni}/web_demo.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename egs/speech_llm/SPEECH2SPEECH/{ => slam_omni}/web_demo.py (100%) diff --git a/egs/speech_llm/SPEECH2SPEECH/web_demo.py b/egs/speech_llm/SPEECH2SPEECH/slam_omni/web_demo.py similarity index 100% rename from egs/speech_llm/SPEECH2SPEECH/web_demo.py rename to egs/speech_llm/SPEECH2SPEECH/slam_omni/web_demo.py From 47920c233698d001eb1ca6f7436d1d792ea358c7 Mon Sep 17 00:00:00 2001 From: Yuekai Zhang Date: Fri, 25 Apr 2025 16:05:37 +0800 Subject: [PATCH 21/57] add gradio demo --- .../SPEECH2SPEECH/slam_omni/train.py | 6 + .../SPEECH2SPEECH/slam_omni/web_demo.py | 267 +++++++++++++++--- 2 files changed, 226 insertions(+), 47 deletions(-) diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py b/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py index f5356dc43..3b971dd89 100755 --- a/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py +++ 
b/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py @@ -99,6 +99,12 @@ def set_batch_count(model: nn.Module, batch_count: float) -> None: def add_model_arguments(parser: argparse.ArgumentParser): + parser.add_argument( + "--remove-whisper-encoder-input-length-restriction", + type=str2bool, + default=True, + help="replace whisper encoder forward method to remove input length restriction", + ) parser.add_argument( "--llm-path-or-name", type=str, diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/web_demo.py b/egs/speech_llm/SPEECH2SPEECH/slam_omni/web_demo.py index f856bf26f..ebcbc36ed 100644 --- a/egs/speech_llm/SPEECH2SPEECH/slam_omni/web_demo.py +++ b/egs/speech_llm/SPEECH2SPEECH/slam_omni/web_demo.py @@ -7,26 +7,177 @@ import soundfile as sf import gradio.processing_utils as processing_utils -from transformers import AutoModelForCausalLM +from transformers import AutoModelForCausalLM, AutoTokenizer, Qwen2Config from gradio_client import utils as client_utils from argparse import ArgumentParser +import whisper +import torch +from peft import LoraConfig, get_peft_model +from whisper_encoder_forward_monkey_patch import replace_whisper_encoder_forward +from model import SPEECH_LLM, EncoderProjector +from train import DEFAULT_SPEECH_TOKEN, add_model_arguments -def _load_model_processor(args): +from cosyvoice.cli.cosyvoice import CosyVoice - # Check if flash-attn2 flag is enabled and load model accordingly - if args.flash_attn2: - # model = Qwen2_5OmniForConditionalGeneration.from_pretrained(args.checkpoint_path, - # torch_dtype='auto', - # attn_implementation='flash_attention_2', - # device_map=device_map) - # else: - # model = Qwen2_5OmniForConditionalGeneration.from_pretrained(args.checkpoint_path, device_map=device_map, torch_dtype='auto') +def get_model(params, device="cuda"): + """Load and prepare the speech-to-speech model.""" + if params.remove_whisper_encoder_input_length_restriction: + replace_whisper_encoder_forward() - # processor = Qwen2_5OmniProcessor.from_pretrained(args.checkpoint_path) - return model, processor + whisper_model = whisper.load_model(params.speech_encoder_path_or_name, "cpu") + speech_encoder = whisper_model.encoder + speech_encoder_dim = whisper_model.dims.n_audio_state + tokenizer = AutoTokenizer.from_pretrained(params.llm_path_or_name) -def _launch_demo(args, model, processor): + if params.use_flash_attn: + attn_implementation = "flash_attention_2" + else: + attn_implementation = "eager" + + llm = AutoModelForCausalLM.from_pretrained( + params.llm_path_or_name, + attn_implementation=attn_implementation, + torch_dtype=torch.float16, + ) + if params.use_lora: + lora_config = LoraConfig( + r=64, + lora_alpha=16, + target_modules=[ + "q_proj", + "k_proj", + "v_proj", + "o_proj", + "up_proj", + "gate_proj", + "down_proj", + ], + task_type="CAUSAL_LM", + ) + llm = get_peft_model(llm, lora_config) + llm.print_trainable_parameters() + + special_tokens_dict = {"additional_special_tokens": [DEFAULT_SPEECH_TOKEN]} + tokenizer.add_special_tokens(special_tokens_dict) + llm.config.pad_token_id = tokenizer.convert_tokens_to_ids("<|endoftext|>") + llm.config.bos_token_id = tokenizer.convert_tokens_to_ids("<|im_start|>") + llm.config.eos_token_id = tokenizer.convert_tokens_to_ids("<|im_end|>") + + llm.config.default_speech_token_id = tokenizer.convert_tokens_to_ids( + DEFAULT_SPEECH_TOKEN + ) + + encoder_projector = EncoderProjector( + speech_encoder_dim, llm.config.hidden_size, params.encoder_projector_ds_rate + ) + + codec_vocab_size = 4096 + 4 + config = Qwen2Config( + 
vocab_size=codec_vocab_size, + hidden_size=1024, + num_hidden_layers=12, + num_attention_heads=16, + num_key_value_heads=16, + intermediate_size=2048, + max_position_embeddings=4096, + ) + codec_lm = AutoModelForCausalLM.from_config( + config=config, + attn_implementation=attn_implementation, + torch_dtype=torch.float16 + ) + codec_lm.resize_token_embeddings(codec_vocab_size) + codec_lm.vocab_size = codec_vocab_size + codec_lm.config.pad_token_id = codec_vocab_size - 1 + codec_lm.config.eos_token_id = codec_vocab_size - 2 + codec_lm.config.bos_token_id = codec_vocab_size - 3 + codec_lm.config.mask_token_id = codec_vocab_size - 4 + + model = SPEECH_LLM( + speech_encoder, + llm, + encoder_projector, + codec_lm, + codec_lm_padding_side= "left" if params.use_flash_attn else "right", + ) + + checkpoint = torch.load( + f"{params.checkpoint_path}", map_location="cpu" + ) + model.load_state_dict(checkpoint, strict=False) + + model.to(device) + model.eval() + return model, tokenizer + + +def audio_decode_cosyvoice(audio_tokens, codec_decoder): + """ + Generate audio from tokens with optional tone and prompt embedding. + + Args: + audio_tokens (list): List of audio tokens to be processed. + codec_decoder: Codec decoder for generating audio. + + Returns: + torch.Tensor: Generated audio waveform. + """ + flow_embedding = codec_decoder.frontend.spk2info['中文女']['embedding'] + flow_prompt_speech_token = torch.zeros(1, 0, dtype=torch.int32) + prompt_speech_feat = torch.zeros(1, 0, 80) + tts_mel, _ = codec_decoder.model.flow.inference(token=audio_tokens.to(codec_decoder.model.device), + token_len=torch.tensor([audio_tokens.shape[1]], dtype=torch.int32).to(codec_decoder.model.device), + prompt_token=flow_prompt_speech_token.to(codec_decoder.model.device), + prompt_token_len=torch.tensor([flow_prompt_speech_token.shape[1]], dtype=torch.int32).to(codec_decoder.model.device), + prompt_feat=prompt_speech_feat.to(codec_decoder.model.device), + prompt_feat_len=torch.tensor([prompt_speech_feat.shape[1]], dtype=torch.int32).to(codec_decoder.model.device), + embedding=flow_embedding.to(codec_decoder.model.device), + flow_cache=torch.zeros(1, 80, 0, 2).to(codec_decoder.model.device),) + + + audio_hat, _ = codec_decoder.model.hift.inference(speech_feat=tts_mel, cache_source=torch.zeros(1, 1, 0)) + + return audio_hat + +def preprocess( + messages, + tokenizer, +): + """Preprocesses the data for supervised fine-tuning.""" + texts = [] + TEMPLATE = "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content']}}{% if loop.last %}{{''}}{% else %}{{ '<|im_end|>\n' }}{% endif %}{% endfor %}" + for i, msg in enumerate(messages): + texts.append( + tokenizer.apply_chat_template( + msg, + tokenize=True, + add_generation_prompt=False, + chat_template=TEMPLATE, + padding="longest", + truncation=False, + ) + ) + max_len_texts = max([len(text) for text in texts]) + if tokenizer.padding_side == "right": + texts = [ + text + [tokenizer.pad_token_id] * (max_len_texts - len(text)) + for text in texts + ] + else: + texts = [ + [tokenizer.pad_token_id] * (max_len_texts - len(text)) + text + for text in texts + ] + + input_ids = torch.tensor(texts, dtype=torch.int) + + attention_mask = input_ids.ne(tokenizer.pad_token_id) + + return input_ids, attention_mask + + +def _launch_demo(args, model, tokenizer, token2wav_model): def format_history(history: list): messages = [] @@ -36,42 +187,58 @@ def _launch_demo(args, model, processor): elif item["role"] == "user" and (isinstance(item["content"], list) or 
isinstance(item["content"], tuple)): file_path = item["content"][0] - + # TODO: check if the file_path's transcript is already in the history mime_type = client_utils.get_mimetype(file_path) if mime_type.startswith("audio"): messages.append({ "role": item['role'], - "content": [{ - "type": "audio", - "audio": file_path, - }] + "content": item["content"][1] # append audio transcript here }) + print('predict history: ', messages) + messages = messages[-2:] # TODO: WAR: add history later return messages - def predict(messages): - print('predict history: ', messages) + def decode( + model, + token2wav_model, + tokenizer, + feature, + messages, + ): + """Decode one + Returns: + pass + """ - text = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=False) + dtype = torch.float32 + device = model.llm.device - audios = [msg['content'][0]['audio'] for msg in messages if msg['role'] == 'user' and isinstance(msg['content'], list) and msg['content'][0]['type'] == 'audio'] + feature = feature.to(device, dtype=dtype).transpose(1, 2) + assert feature.shape[2] == 80 + + input_ids, attention_mask = preprocess([messages], tokenizer) - inputs = processor(text=text, audio=audios, return_tensors="pt", padding=True) - inputs = inputs.to(model.device).to(model.dtype) + generated_ids, audio_tokens = model.decode_with_speech_output( + feature, input_ids.to(device, dtype=torch.long), attention_mask.to(device) + ) - text_ids, audio = model.generate(**inputs) - - response = processor.batch_decode(text_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False) - response = response[0].split("\n")[-1] - yield {"type": "text", "data": response} + hyps = tokenizer.batch_decode(generated_ids, skip_special_tokens=True) + yield {"type": "text", "data": hyps} + audio_tokens = [token for token in audio_tokens if token < 4096] + audio_tokens = torch.tensor(audio_tokens, dtype=torch.int32).unsqueeze(0) + audio_hat = audio_decode_cosyvoice(audio_tokens, token2wav_model) + audio = audio_hat.squeeze(0).cpu().numpy() + # sf.write(f'{wav_name}.wav', audio_hat.squeeze(0).cpu().numpy(), 22050) audio = np.array(audio * 32767).astype(np.int16) wav_io = io.BytesIO() - sf.write(wav_io, audio, samplerate=24000, format="WAV") + sf.write(wav_io, audio, samplerate=22050, format="WAV") wav_io.seek(0) wav_bytes = wav_io.getvalue() audio_path = processing_utils.save_bytes_to_cache( wav_bytes, "audio.wav", cache_dir=demo.GRADIO_CACHE) + yield {"type": "audio", "data": audio_path} def media_predict(audio, history): @@ -83,18 +250,21 @@ def _launch_demo(args, model, processor): gr.update(visible=True), # stop_btn ) - files = [audio] - - for f in files: - if f: - history.append({"role": "user", "content": (f, )}) - + assert audio is not None + # get audio transcript here + history.append({"role": "user", "content": f"{DEFAULT_SPEECH_TOKEN}"}) + history.append({"role": "assistant", "content": ""}) formatted_history = format_history(history=history) + # audio_transcript = get_audio_transcript(audio) + audio_transcript = "audio transcript" + history[-2]["content"] = (audio, audio_transcript) + fbank = whisper.log_mel_spectrogram(audio, model.llm.device) + print('fbank: ', fbank.shape) + assert fbank.ndim == 3 - history.append({"role": "assistant", "content": ""}) - - for chunk in predict(formatted_history): + # history.append({"role": "assistant", "content": ""}) + for chunk in decode(model, token2wav_model, tokenizer, fbank, formatted_history): if chunk["type"] == "text": history[-1]["content"] = chunk["data"] yield ( 
@@ -123,10 +293,9 @@ def _launch_demo(args, model, processor): with gr.Column(scale=1): microphone = gr.Audio(sources=['microphone'], type="filepath") - submit_btn = gr.Button(get_text("Submit", "提交"), - variant="primary") - stop_btn = gr.Button(get_text("Stop", "停止"), visible=False) - clear_btn = gr.Button(get_text("Clear History", "清除历史")) + submit_btn = gr.Button("Submit", variant="primary") + stop_btn = gr.Button("Stop", visible=False) + clear_btn = gr.Button("Clear History") with gr.Column(scale=2): media_chatbot = gr.Chatbot(height=650, type="messages") @@ -169,7 +338,10 @@ def _get_args(): type=str, default=None, help='Checkpoint name or path, default to %(default)r') - + parser.add_argument('--token2wav-path', + type=str, + default=None, + help='Token2Wav path, default to %(default)r') parser.add_argument('--flash-attn2', action='store_true', default=False, @@ -184,11 +356,12 @@ def _get_args(): help='Automatically launch the interface in a new tab on the default browser.') parser.add_argument('--server-port', type=int, default=7860, help='Demo server port.') parser.add_argument('--server-name', type=str, default='127.0.0.1', help='Demo server name.') - + add_model_arguments(parser) args = parser.parse_args() return args if __name__ == "__main__": args = _get_args() - model, processor = _load_model_processor(args) - _launch_demo(args, model, processor) \ No newline at end of file + model, tokenizer = get_model(args) + cosyvoice = CosyVoice(args.token2wav_path, load_jit=False, load_trt=False, fp16=False) + _launch_demo(args, model, tokenizer, cosyvoice) \ No newline at end of file From 71a0a442a6366edbdbc567448255aec63ef688cd Mon Sep 17 00:00:00 2001 From: root Date: Fri, 25 Apr 2025 10:05:07 +0000 Subject: [PATCH 22/57] add history cache --- egs/speech_llm/SPEECH2SPEECH/prepare.sh | 26 ++++ .../SPEECH2SPEECH/slam_omni/model.py | 2 +- .../SPEECH2SPEECH/slam_omni/web_demo.py | 119 +++++++++++++----- 3 files changed, 116 insertions(+), 31 deletions(-) diff --git a/egs/speech_llm/SPEECH2SPEECH/prepare.sh b/egs/speech_llm/SPEECH2SPEECH/prepare.sh index e0a2fa507..1b49daa65 100644 --- a/egs/speech_llm/SPEECH2SPEECH/prepare.sh +++ b/egs/speech_llm/SPEECH2SPEECH/prepare.sh @@ -107,4 +107,30 @@ if [ $stage -le 5 ] && [ $stop_stage -ge 5 ]; then # --pretrained-model-path slam_omni/exp_speech2text/epoch-1-checkpoint-5000.pt/pytorch_model.bin \ # --sampler-state-dict-path $exp_dir/epoch-1-checkpoint-35000-sampler.pt \ +fi + +if [ $stage -le 6 ] && [ $stop_stage -ge 6 ]; then + log "stage 6: " + export PYTHONPATH=$PYTHONPATH:/workspace/CosyVoice + exp_dir=./slam_omni/exp_speech2speech_rerun + python3 ./slam_omni/web_demo.py \ + --speech-encoder-path-or-name models/whisper/v1.1/whisper-large-v2-multi-hans-zh-epoch-3-avg-10.pt \ + --llm-path-or-name models/Qwen2.5-0.5B-Instruct \ + --checkpoint-path $exp_dir/epoch-998.pt \ + --use-flash-attn True \ + --enable-speech-output True \ + --asr-model-dir local/sherpa-onnx-paraformer-zh-2023-09-14 \ + --use-lora True --token2wav-path /workspace/CosyVoice-300M-SFT --share + +fi + +if [ $stage -le 7 ] && [ $stop_stage -ge 7 ]; then + log "stage 7: " + model_path=local/sherpa-onnx-paraformer-zh-2023-09-14 + + if [ ! 
-d $model_path ]; then + pip install sherpa-onnx + wget -nc https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-paraformer-zh-2023-09-14.tar.bz2 + tar xvf sherpa-onnx-paraformer-zh-2023-09-14.tar.bz2 -C local + fi fi \ No newline at end of file diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py b/egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py index c5f31226d..0cc93c237 100644 --- a/egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py +++ b/egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py @@ -630,7 +630,7 @@ class SPEECH_LLM(nn.Module): next_token_ids = topk_sampling( next_token_logits, ) - print(next_token_ids, "next_token_ids", t, next_token_ids.shape) + # print(next_token_ids, "next_token_ids", t, next_token_ids.shape) if next_token_ids[0, 0] == self.codec_lm.config.eos_token_id: break # current_speech_input_ids = next_token_ids # Use the newly generated token ID as input for next step diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/web_demo.py b/egs/speech_llm/SPEECH2SPEECH/slam_omni/web_demo.py index ebcbc36ed..3155174fb 100644 --- a/egs/speech_llm/SPEECH2SPEECH/slam_omni/web_demo.py +++ b/egs/speech_llm/SPEECH2SPEECH/slam_omni/web_demo.py @@ -6,7 +6,7 @@ import gradio as gr import soundfile as sf import gradio.processing_utils as processing_utils - +import tempfile from transformers import AutoModelForCausalLM, AutoTokenizer, Qwen2Config from gradio_client import utils as client_utils @@ -17,8 +17,11 @@ from peft import LoraConfig, get_peft_model from whisper_encoder_forward_monkey_patch import replace_whisper_encoder_forward from model import SPEECH_LLM, EncoderProjector from train import DEFAULT_SPEECH_TOKEN, add_model_arguments - +import sherpa_onnx from cosyvoice.cli.cosyvoice import CosyVoice +import sys +sys.path.append('/workspace/CosyVoice/third_party/Matcha-TTS') + def get_model(params, device="cuda"): """Load and prepare the speech-to-speech model.""" @@ -177,26 +180,26 @@ def preprocess( return input_ids, attention_mask -def _launch_demo(args, model, tokenizer, token2wav_model): +def _launch_demo(args, model, tokenizer, token2wav_model, asr_model): def format_history(history: list): messages = [] for item in history: if isinstance(item["content"], str): messages.append({"role": item['role'], "content": item['content']}) - elif item["role"] == "user" and (isinstance(item["content"], list) or - isinstance(item["content"], tuple)): - file_path = item["content"][0] - # TODO: check if the file_path's transcript is already in the history - mime_type = client_utils.get_mimetype(file_path) - if mime_type.startswith("audio"): - messages.append({ - "role": - item['role'], - "content": item["content"][1] # append audio transcript here - }) + # elif item["role"] == "user" and (isinstance(item["content"], list) or + # isinstance(item["content"], tuple)): + # file_path = item["content"][0] + # # TODO: check if the file_path's transcript is already in the history + # mime_type = client_utils.get_mimetype(file_path) + # if mime_type.startswith("audio"): + # messages.append({ + # "role": + # item['role'], + # "content": item["content"][1] # append audio transcript here + # }) print('predict history: ', messages) - messages = messages[-2:] # TODO: WAR: add history later + # messages = messages[-2:] # TODO: WAR: add history later return messages def decode( @@ -214,8 +217,8 @@ def _launch_demo(args, model, tokenizer, token2wav_model): dtype = torch.float32 device = model.llm.device - feature = feature.to(device, dtype=dtype).transpose(1, 2) - assert 
feature.shape[2] == 80 + feature = feature.to(device, dtype=dtype)#.transpose(1, 2) + # assert feature.shape[2] == 80 input_ids, attention_mask = preprocess([messages], tokenizer) @@ -224,7 +227,9 @@ def _launch_demo(args, model, tokenizer, token2wav_model): ) hyps = tokenizer.batch_decode(generated_ids, skip_special_tokens=True) - yield {"type": "text", "data": hyps} + # print('hyps: ', hyps, 23333333333333333333333333) + yield {"type": "text", "data": hyps[0]} + # yield {"type": "text", "data": hyps} audio_tokens = [token for token in audio_tokens if token < 4096] audio_tokens = torch.tensor(audio_tokens, dtype=torch.int32).unsqueeze(0) @@ -232,6 +237,10 @@ def _launch_demo(args, model, tokenizer, token2wav_model): audio = audio_hat.squeeze(0).cpu().numpy() # sf.write(f'{wav_name}.wav', audio_hat.squeeze(0).cpu().numpy(), 22050) audio = np.array(audio * 32767).astype(np.int16) + # yield {"type": "audio", "data": (22050, audio)} + # with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmpfile: + # sf.write(tmpfile.name, audio, 22050, format="WAV") + # audio_path = tmpfile.name wav_io = io.BytesIO() sf.write(wav_io, audio, samplerate=22050, format="WAV") wav_io.seek(0) @@ -249,18 +258,22 @@ def _launch_demo(args, model, tokenizer, token2wav_model): gr.update(visible=False), # submit_btn gr.update(visible=True), # stop_btn ) - - assert audio is not None - # get audio transcript here + print(2333, history, audio) + history.append({"role": "user", "content": (audio,)}) history.append({"role": "user", "content": f"{DEFAULT_SPEECH_TOKEN}"}) history.append({"role": "assistant", "content": ""}) - formatted_history = format_history(history=history) + formatted_history = format_history(history=history) # only keep string text format - # audio_transcript = get_audio_transcript(audio) - audio_transcript = "audio transcript" - history[-2]["content"] = (audio, audio_transcript) - fbank = whisper.log_mel_spectrogram(audio, model.llm.device) - print('fbank: ', fbank.shape) + assert audio is not None + audio_transcript = get_transcript( + audio, + asr_model, + ) + print('audio_transcript: ', audio_transcript) + history[-2]["content"] = audio_transcript + + fbank = whisper.log_mel_spectrogram(audio, device=model.llm.device) + fbank = fbank.unsqueeze(0) assert fbank.ndim == 3 # history.append({"role": "assistant", "content": ""}) @@ -342,6 +355,10 @@ def _get_args(): type=str, default=None, help='Token2Wav path, default to %(default)r') + parser.add_argument('--asr-model-dir', + type=str, + default=None, + help='ASR model dir, default to %(default)r') parser.add_argument('--flash-attn2', action='store_true', default=False, @@ -354,14 +371,56 @@ def _get_args(): action='store_true', default=False, help='Automatically launch the interface in a new tab on the default browser.') - parser.add_argument('--server-port', type=int, default=7860, help='Demo server port.') + parser.add_argument('--server-port', type=int, default=8001, help='Demo server port.') parser.add_argument('--server-name', type=str, default='127.0.0.1', help='Demo server name.') add_model_arguments(parser) args = parser.parse_args() return args + +def read_wave(wave_filename: str): + """ + Args: + wave_filename: + Path to a wave file. It should be single channel and can be of type + 32-bit floating point PCM. Its sample rate does not need to be 24kHz. + + Returns: + Return a tuple containing: + - A 1-D array of dtype np.float32 containing the samples, + which are normalized to the range [-1, 1]. + - Sample rate of the wave file. 
+ """ + + samples, sample_rate = sf.read(wave_filename, dtype="float32") + assert ( + samples.ndim == 1 + ), f"Expected single channel, but got {samples.ndim} channels." + + samples_float32 = samples.astype(np.float32) + + return samples_float32, sample_rate + +def get_transcript(audio_path, recognizer): + samples, sample_rate = read_wave(audio_path) + s = recognizer.create_stream() + s.accept_waveform(sample_rate, samples) + recognizer.decode_streams([s]) + return s.result.text + if __name__ == "__main__": args = _get_args() model, tokenizer = get_model(args) - cosyvoice = CosyVoice(args.token2wav_path, load_jit=False, load_trt=False, fp16=False) - _launch_demo(args, model, tokenizer, cosyvoice) \ No newline at end of file + token2wav = CosyVoice(args.token2wav_path, load_jit=False, load_trt=False, fp16=False) + + asr_model = sherpa_onnx.OfflineRecognizer.from_paraformer( + paraformer=f"{args.asr_model_dir}/model.int8.onnx", + tokens=f"{args.asr_model_dir}/tokens.txt", + num_threads=2, + sample_rate=16000, + feature_dim=80, + decoding_method="greedy_search", + debug=False, + ) + + _launch_demo(args, model, tokenizer, token2wav, asr_model) \ No newline at end of file From d742043e756155c7ae980e2ca51b87302fe5981d Mon Sep 17 00:00:00 2001 From: Yuekai Zhang Date: Fri, 25 Apr 2025 18:31:43 +0800 Subject: [PATCH 23/57] refactor decode part --- .../SPEECH2SPEECH/slam_omni/decode.py | 316 +++++++++--------- 1 file changed, 158 insertions(+), 158 deletions(-) diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/decode.py b/egs/speech_llm/SPEECH2SPEECH/slam_omni/decode.py index 54a8983df..5cda487e3 100755 --- a/egs/speech_llm/SPEECH2SPEECH/slam_omni/decode.py +++ b/egs/speech_llm/SPEECH2SPEECH/slam_omni/decode.py @@ -60,7 +60,7 @@ from data_module import AsrDataModule from lhotse.cut import Cut from model import SPEECH_LLM, EncoderProjector -from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training +from peft import LoraConfig, get_peft_model from train import DEFAULT_SPEECH_TOKEN from transformers import AutoModelForCausalLM, AutoTokenizer, Qwen2Config from whisper_encoder_forward_monkey_patch import replace_whisper_encoder_forward @@ -70,10 +70,164 @@ from icefall.utils import ( AttributeDict, setup_logger, store_transcripts, - str2bool, write_error_stats, + average_checkpoints, ) +def get_model(params, device): + """Load and prepare the speech-to-speech model.""" + if params.remove_whisper_encoder_input_length_restriction: + replace_whisper_encoder_forward() + + whisper_model = whisper.load_model(params.speech_encoder_path_or_name, "cpu") + speech_encoder = whisper_model.encoder + speech_encoder_dim = whisper_model.dims.n_audio_state + tokenizer = AutoTokenizer.from_pretrained(params.llm_path_or_name) + + if params.use_flash_attn: + attn_implementation = "flash_attention_2" + # torch_dtype=torch.bfloat16 FIX ME + torch_dtype = torch.float16 + tokenizer.padding_side = "left" + + else: + attn_implementation = "eager" + torch_dtype = torch.float16 + tokenizer.padding_side = "right" + + llm = AutoModelForCausalLM.from_pretrained( + params.llm_path_or_name, + attn_implementation=attn_implementation, + torch_dtype=torch_dtype, + ) + if params.use_lora: + lora_config = LoraConfig( + r=64, + lora_alpha=16, + target_modules=[ + "q_proj", + "k_proj", + "v_proj", + "o_proj", + "up_proj", + "gate_proj", + "down_proj", + ], + task_type="CAUSAL_LM", + ) + llm = get_peft_model(llm, lora_config) + llm.print_trainable_parameters() + + special_tokens_dict = {"additional_special_tokens": 
[DEFAULT_SPEECH_TOKEN]} + tokenizer.add_special_tokens(special_tokens_dict) + llm.config.pad_token_id = tokenizer.convert_tokens_to_ids("<|endoftext|>") + llm.config.bos_token_id = tokenizer.convert_tokens_to_ids("<|im_start|>") + llm.config.eos_token_id = tokenizer.convert_tokens_to_ids("<|im_end|>") + + llm.config.default_speech_token_id = tokenizer.convert_tokens_to_ids( + DEFAULT_SPEECH_TOKEN + ) + + encoder_projector = EncoderProjector( + speech_encoder_dim, llm.config.hidden_size, params.encoder_projector_ds_rate + ) + + if params.enable_speech_output: + # Determine attn_implementation and torch_dtype based on use_flash_attn + if params.use_flash_attn: + attn_implementation = "flash_attention_2" + torch_dtype = torch.float16 # Or torch.bfloat16 if needed/supported + else: + attn_implementation = "eager" + torch_dtype = torch.float16 + + # codec_lm = AutoModelForCausalLM.from_pretrained( + # params.llm_path_or_name, + # attn_implementation=attn_implementation, + # torch_dtype=torch_dtype, + # ) + codec_vocab_size = 4096 + 4 + config = Qwen2Config( + vocab_size=codec_vocab_size, + hidden_size=1024, + num_hidden_layers=12, + num_attention_heads=16, + num_key_value_heads=16, + intermediate_size=2048, + max_position_embeddings=4096, + ) + # codec_lm = Qwen2ForCausalLM(config=config) + # Pass attn_implementation and torch_dtype to the constructor + # Use AutoModelForCausalLM.from_config for more generality + codec_lm = AutoModelForCausalLM.from_config( + config=config, + attn_implementation=attn_implementation, + torch_dtype=torch_dtype + ) + # cosyvoice2_token_size = 6561 + codec_lm.resize_token_embeddings(codec_vocab_size) + codec_lm.vocab_size = codec_vocab_size + codec_lm.config.pad_token_id = codec_vocab_size - 1 + codec_lm.config.eos_token_id = codec_vocab_size - 2 + codec_lm.config.bos_token_id = codec_vocab_size - 3 + codec_lm.config.mask_token_id = codec_vocab_size - 4 + # if params.use_lora: + # lora_config = LoraConfig( + # r=64, + # lora_alpha=16, + # target_modules=[ + # "q_proj", + # "k_proj", + # "v_proj", + # "o_proj", + # "up_proj", + # "gate_proj", + # "down_proj", + # ], + # lora_dropout=0.05, + # task_type="CAUSAL_LM", + # ) + # codec_lm = get_peft_model(codec_lm, lora_config) + # codec_lm.print_trainable_parameters() + else: + codec_lm = None + + model = SPEECH_LLM( + speech_encoder, + llm, + encoder_projector, + codec_lm, + codec_lm_padding_side= "left" if params.use_flash_attn else "right", + ) + + if params.avg > 1: + start = params.epoch - params.avg + 1 + assert start >= 1, start + checkpoint = torch.load( + f"{params.exp_dir}/epoch-{params.epoch}.pt", map_location="cpu" + ) + assert "model" not in checkpoint + # deepspeed converted checkpoint only contains model state_dict + filenames = [ + f"{params.exp_dir}/epoch-{epoch}.pt" + for epoch in range(start, params.epoch + 1) + ] + avg_checkpoint = average_checkpoints(filenames) + model.load_state_dict(avg_checkpoint, strict=False) + + filename = f"{params.exp_dir}/epoch-{params.epoch}-avg-{params.avg}.pt" + torch.save(avg_checkpoint, filename) + else: + checkpoint = torch.load( + f"{params.exp_dir}/epoch-{params.epoch}.pt", map_location="cpu" + ) + model.load_state_dict(checkpoint, strict=False) + + model.to(device) + model.eval() + return model, tokenizer + + def average_checkpoints( filenames: List[Path], device: torch.device = torch.device("cpu") ) -> dict: @@ -171,13 +325,6 @@ def get_parser(): help="The experiment dir", ) - parser.add_argument( - "--remove-whisper-encoder-input-length-restriction", - 
type=str2bool, - default=True, - help="replace whisper encoder forward method to remove input length restriction", - ) - # parser.add_argument( # "--dataset", # type=str, @@ -321,7 +468,7 @@ def decode_one_batch( with open(speech_token_file_name, 'w') as f: # save_path = params.exp_dir / f"speech_output/{cut_id}.wav" #torchaudio.save(save_path, speech_output.cpu(), 16000) - print(f"speech_output: {generated_speech_output}, cut_id: {cut_id}") + # print(f"speech_output: {generated_speech_output}, cut_id: {cut_id}") save_str = " ".join([str(i) for i in generated_speech_output]) f.write(f"{cut_id}|{save_str}\n") @@ -509,155 +656,8 @@ def main(): logging.info(f"device: {device}") - if params.remove_whisper_encoder_input_length_restriction: - replace_whisper_encoder_forward() + model, tokenizer = get_model(params, device) - whisper_model = whisper.load_model(params.speech_encoder_path_or_name, "cpu") - speech_encoder = whisper_model.encoder - speech_encoder_dim = whisper_model.dims.n_audio_state - tokenizer = AutoTokenizer.from_pretrained(params.llm_path_or_name) - - if params.use_flash_attn: - attn_implementation = "flash_attention_2" - # torch_dtype=torch.bfloat16 FIX ME - torch_dtype = torch.float16 - tokenizer.padding_side = "left" - - else: - attn_implementation = "eager" - torch_dtype = torch.float16 - tokenizer.padding_side = "right" - - llm = AutoModelForCausalLM.from_pretrained( - params.llm_path_or_name, - attn_implementation=attn_implementation, - torch_dtype=torch_dtype, - ) - if params.use_lora: - lora_config = LoraConfig( - r=64, - lora_alpha=16, - target_modules=[ - "q_proj", - "k_proj", - "v_proj", - "o_proj", - "up_proj", - "gate_proj", - "down_proj", - ], - task_type="CAUSAL_LM", - ) - llm = get_peft_model(llm, lora_config) - llm.print_trainable_parameters() - - special_tokens_dict = {"additional_special_tokens": [DEFAULT_SPEECH_TOKEN]} - tokenizer.add_special_tokens(special_tokens_dict) - llm.config.pad_token_id = tokenizer.convert_tokens_to_ids("<|endoftext|>") - llm.config.bos_token_id = tokenizer.convert_tokens_to_ids("<|im_start|>") - llm.config.eos_token_id = tokenizer.convert_tokens_to_ids("<|im_end|>") - - llm.config.default_speech_token_id = tokenizer.convert_tokens_to_ids( - DEFAULT_SPEECH_TOKEN - ) - - encoder_projector = EncoderProjector( - speech_encoder_dim, llm.config.hidden_size, params.encoder_projector_ds_rate - ) - - if params.enable_speech_output: - # Determine attn_implementation and torch_dtype based on use_flash_attn - if params.use_flash_attn: - attn_implementation = "flash_attention_2" - torch_dtype = torch.float16 # Or torch.bfloat16 if needed/supported - else: - attn_implementation = "eager" - torch_dtype = torch.float16 - - # codec_lm = AutoModelForCausalLM.from_pretrained( - # params.llm_path_or_name, - # attn_implementation=attn_implementation, - # torch_dtype=torch_dtype, - # ) - codec_vocab_size = 4096 + 4 - config = Qwen2Config( - vocab_size=codec_vocab_size, - hidden_size=1024, - num_hidden_layers=12, - num_attention_heads=16, - num_key_value_heads=16, - intermediate_size=2048, - max_position_embeddings=4096, - ) - # codec_lm = Qwen2ForCausalLM(config=config) - # Pass attn_implementation and torch_dtype to the constructor - # Use AutoModelForCausalLM.from_config for more generality - codec_lm = AutoModelForCausalLM.from_config( - config=config, - attn_implementation=attn_implementation, - torch_dtype=torch_dtype - ) - # cosyvoice2_token_size = 6561 - codec_lm.resize_token_embeddings(codec_vocab_size) - codec_lm.vocab_size = codec_vocab_size - 
codec_lm.config.pad_token_id = codec_vocab_size - 1 - codec_lm.config.eos_token_id = codec_vocab_size - 2 - codec_lm.config.bos_token_id = codec_vocab_size - 3 - codec_lm.config.mask_token_id = codec_vocab_size - 4 - # if params.use_lora: - # lora_config = LoraConfig( - # r=64, - # lora_alpha=16, - # target_modules=[ - # "q_proj", - # "k_proj", - # "v_proj", - # "o_proj", - # "up_proj", - # "gate_proj", - # "down_proj", - # ], - # lora_dropout=0.05, - # task_type="CAUSAL_LM", - # ) - # codec_lm = get_peft_model(codec_lm, lora_config) - # codec_lm.print_trainable_parameters() - else: - codec_lm = None - - model = SPEECH_LLM( - speech_encoder, - llm, - encoder_projector, - codec_lm, - codec_lm_padding_side= "left" if params.use_flash_attn else "right", - ) - - if params.avg > 1: - start = params.epoch - params.avg + 1 - assert start >= 1, start - checkpoint = torch.load( - f"{params.exp_dir}/epoch-{params.epoch}.pt", map_location="cpu" - ) - assert "model" not in checkpoint - # deepspeed converted checkpoint only contains model state_dict - filenames = [ - f"{params.exp_dir}/epoch-{epoch}.pt" - for epoch in range(start, params.epoch + 1) - ] - avg_checkpoint = average_checkpoints(filenames) - model.load_state_dict(avg_checkpoint, strict=False) - - filename = f"{params.exp_dir}/epoch-{params.epoch}-avg-{params.avg}.pt" - torch.save(avg_checkpoint, filename) - else: - checkpoint = torch.load( - f"{params.exp_dir}/epoch-{params.epoch}.pt", map_location="cpu" - ) - model.load_state_dict(checkpoint, strict=False) - - model.to(device) - model.eval() num_param = sum([p.numel() for p in model.parameters()]) logging.info(f"Number of model parameters: {num_param}") From 448a4eeea79281753efc7406f3130f5d498fa19b Mon Sep 17 00:00:00 2001 From: root Date: Tue, 29 Apr 2025 07:33:34 +0000 Subject: [PATCH 24/57] update hf dataset loading into lhotse --- .../local/compute_whisper_fbank.py | 63 ++++---- egs/speech_llm/SPEECH2SPEECH/prepare.sh | 27 ++-- .../SPEECH2SPEECH/slam_omni/data_module.py | 53 +++++-- .../SPEECH2SPEECH/slam_omni/decode.py | 134 ++++++++++++++---- .../SPEECH2SPEECH/slam_omni/train.py | 76 +++++----- 5 files changed, 229 insertions(+), 124 deletions(-) diff --git a/egs/speech_llm/SPEECH2SPEECH/local/compute_whisper_fbank.py b/egs/speech_llm/SPEECH2SPEECH/local/compute_whisper_fbank.py index 1c3a3d1e0..b01a35c7d 100755 --- a/egs/speech_llm/SPEECH2SPEECH/local/compute_whisper_fbank.py +++ b/egs/speech_llm/SPEECH2SPEECH/local/compute_whisper_fbank.py @@ -2,6 +2,7 @@ # Copyright 2021 Johns Hopkins University (Piotr Żelasko) # Copyright 2021 Xiaomi Corp. (Fangjun Kuang) # Copyright 2023 Xiaomi Corp. 
(Zengrui Jin) +# Copyright 2025 Nvidia (Yuekai Zhang) # # See ../../../../LICENSE for clarification regarding multiple authors # @@ -23,12 +24,7 @@ from pathlib import Path import torch from datasets import load_dataset -from lhotse import ( - CutSet, - LilcomChunkyWriter, - WhisperFbank, - WhisperFbankConfig, -) +from lhotse import CutSet, LilcomChunkyWriter, WhisperFbank, WhisperFbankConfig from icefall.utils import str2bool @@ -93,7 +89,12 @@ def get_parser(): default="answer", help="The key in the Huggingface dataset containing the text data", ) - + parser.add_argument( + "--prefix", + type=str, + default="belle", + help="""The dataset prefix to use when saving the features""", + ) return parser @@ -114,27 +115,28 @@ def compute_fbank(args): WhisperFbankConfig(num_filters=args.num_mel_bins, device=device) ) else: - extractor = KaldifeatFbank(KaldifeatFbankConfig(device=device)) + raise NotImplementedError("Only WhisperFbank is implemented.") logging.info(f"device: {device}") - start = 0 - stop = 1601 + dataset = load_dataset( + args.huggingface_dataset_path_or_name, streaming=True, split="train" + ) + num_shards = dataset.num_shards num_digits = 5 - for i in range(start, stop): + for i in range(num_shards): + shard = dataset.shard(num_shards, i) + shard = shard.take(10) # for testing + logging.info( + f"Loading dataset shard {i} from {args.huggingface_dataset_path_or_name}" + ) + idx = f"{i}".zfill(num_digits) - # dataset = load_dataset(args.huggingface_dataset_path_or_name, streaming=True, split=partition) - parquet_files = [ - f"data/train-{idx}-of-01601.parquet", - ] - parquet_files = [f"{args.huggingface_dataset_path_or_name}/{f}" for f in parquet_files] - file_name = parquet_files[0] - logging.info(f"Loading dataset from {file_name}") - dataset = load_dataset('parquet', data_files=parquet_files, streaming=True, split='train') - cut_set = CutSet.from_huggingface_dataset(dataset, audio_key=args.audio_key, text_key=args.text_key) + cut_set = CutSet.from_huggingface_dataset( + shard, audio_key=args.audio_key, text_key=args.text_key + ) - logging.info("Splitting cuts into smaller chunks") cut_set = cut_set.trim_to_supervisions( keep_overlapping=False, min_duration=None ) @@ -153,22 +155,13 @@ def compute_fbank(args): storage_type=LilcomChunkyWriter, overwrite=True, ) - cuts_path = f"{in_out_dir}/cuts_belle.{idx}.jsonl.gz" + cuts_path = f"{in_out_dir}/{args.prefix}_cuts.{idx}.jsonl.gz" logging.info(f"Saving to {cuts_path}") - # cut_set.to_file(cuts_path) - remove_recording_item(cut_set, cuts_path) + # see https://github.com/lhotse-speech/lhotse/issues/1125 + cut_set.drop_recordings().to_file(cuts_path) + if i > 1: + break -def remove_recording_item( - cuts, - output_cuts, -): - """ - don't store recording item - """ - with CutSet.open_writer(output_cuts) as writer: - for cut in cuts: - cut.recording.sources = None - writer.write(cut) def main(): formatter = "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s" diff --git a/egs/speech_llm/SPEECH2SPEECH/prepare.sh b/egs/speech_llm/SPEECH2SPEECH/prepare.sh index 1b49daa65..47320ab66 100644 --- a/egs/speech_llm/SPEECH2SPEECH/prepare.sh +++ b/egs/speech_llm/SPEECH2SPEECH/prepare.sh @@ -20,10 +20,10 @@ log() { if [ $stage -le 0 ] && [ $stop_stage -ge 0 ]; then log "stage 0: " - pip uninstall lhotse - cd /workspace/slam/lhotse - git config --global --add safe.directory /workspace/slam/lhotse - pip install -e '.[dev]' + #pip uninstall lhotse + #cd /workspace/slam/lhotse + #git config --global --add safe.directory 
/workspace/slam/lhotse + #pip install -e '.[dev]' cd - pip install -r slam_omni/requirements.txt fi @@ -31,7 +31,12 @@ fi if [ $stage -le 1 ] && [ $stop_stage -ge 1 ]; then log "stage 1: Download whisper-large-v2 multi-hans-zh fbank feature from huggingface" - python3 local/compute_whisper_fbank.py + python3 local/compute_whisper_fbank.py \ + --num-mel-bins 80 --whisper-fbank True --resample-to-16kHz True --speed-perturb False \ + --out-dir data/fbank_test \ + --huggingface-dataset-path-or-name /workspace/Belle_1.4M-SLAM-Omni \ + --audio-key question_audio --text-key answer \ + --prefix belle fi @@ -42,7 +47,7 @@ if [ $stage -le 2 ] && [ $stop_stage -ge 2 ]; then pieces=$(find $manifest_dir -name "cuts_belle.*.jsonl.gz" | sort) # # remove cust_belle_00000.jsonl.gz from pieces # pieces=$(echo $pieces | sed 's/cuts_belle.00000.jsonl.gz//g') - echo $pieces | wc + echo $pieces | wc lhotse combine $pieces data/fbank/cuts_belle_00001-01600.jsonl.gz cd $manifest_dir && ln -s cuts_belle_00001-01600.jsonl.gz cuts_belle_train.jsonl.gz && cd - fi @@ -52,16 +57,18 @@ fi if [ $stage -le 3 ] && [ $stop_stage -ge 3 ]; then log "stage 3: " exp_dir=./slam_omni/exp_speech2speech_rerun + export PYTHONPATH=$PYTHONPATH:/workspace/CosyVoice python3 ./slam_omni/decode.py \ --max-duration 1 \ --exp-dir $exp_dir \ --speech-encoder-path-or-name models/whisper/v1.1/whisper-large-v2-multi-hans-zh-epoch-3-avg-10.pt \ --llm-path-or-name models/Qwen2.5-0.5B-Instruct \ - --epoch 997 --avg 1 \ + --epoch 999 --avg 1 \ --manifest-dir data/fbank \ --use-flash-attn True \ - --method small_test_speech2speech_rerun \ + --method e2e-epoch10_speech2speech_rerun \ --enable-speech-output True \ + --token2wav-path /workspace/CosyVoice-300M-SFT \ --use-lora True # --on-the-fly-feats True fi @@ -120,7 +127,7 @@ if [ $stage -le 6 ] && [ $stop_stage -ge 6 ]; then --use-flash-attn True \ --enable-speech-output True \ --asr-model-dir local/sherpa-onnx-paraformer-zh-2023-09-14 \ - --use-lora True --token2wav-path /workspace/CosyVoice-300M-SFT --share + --use-lora True --token2wav-path /workspace/CosyVoice-300M-SFT --share fi @@ -133,4 +140,4 @@ if [ $stage -le 7 ] && [ $stop_stage -ge 7 ]; then wget -nc https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-paraformer-zh-2023-09-14.tar.bz2 tar xvf sherpa-onnx-paraformer-zh-2023-09-14.tar.bz2 -C local fi -fi \ No newline at end of file +fi diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/data_module.py b/egs/speech_llm/SPEECH2SPEECH/slam_omni/data_module.py index 11e3bc779..7cab52f73 100644 --- a/egs/speech_llm/SPEECH2SPEECH/slam_omni/data_module.py +++ b/egs/speech_llm/SPEECH2SPEECH/slam_omni/data_module.py @@ -24,7 +24,14 @@ from pathlib import Path from typing import Any, Dict, Optional import torch -from lhotse import CutSet, WhisperFbank, WhisperFbankConfig, load_manifest, load_manifest_lazy +from datasets import load_dataset +from lhotse import ( + CutSet, + WhisperFbank, + WhisperFbankConfig, + load_manifest, + load_manifest_lazy, +) from lhotse.dataset import ( # noqa F401 for PrecomputedFeatures CutConcatenate, CutMix, @@ -38,11 +45,11 @@ from lhotse.dataset.input_strategies import ( # noqa F401 For AudioSamples OnTheFlyFeatures, ) from lhotse.utils import fix_random_seed +from speech_dataset import K2SpeechRecognitionDataset from torch.utils.data import DataLoader -from datasets import load_dataset from icefall.utils import str2bool -from speech_dataset import K2SpeechRecognitionDataset + class _SeedWorkers: def __init__(self, seed: int): @@ -310,7 
+317,9 @@ class AsrDataModule: # Drop feats to be on the safe side. train = K2SpeechRecognitionDataset( cut_transforms=transforms, - input_strategy=OnTheFlyFeatures(WhisperFbank(WhisperFbankConfig(num_filters=80, device='cuda'))), + input_strategy=OnTheFlyFeatures( + WhisperFbank(WhisperFbankConfig(num_filters=80, device="cuda")) + ), input_transforms=input_transforms, return_cuts=self.args.return_cuts, ) @@ -365,7 +374,9 @@ class AsrDataModule: logging.info("About to create dev dataset") validate = K2SpeechRecognitionDataset( - input_strategy=OnTheFlyFeatures(WhisperFbank(WhisperFbankConfig(num_filters=80, device='cuda'))) + input_strategy=OnTheFlyFeatures( + WhisperFbank(WhisperFbankConfig(num_filters=80, device="cuda")) + ) if self.args.on_the_fly_feats else eval(self.args.input_strategy)(), return_cuts=self.args.return_cuts, @@ -390,7 +401,9 @@ class AsrDataModule: def test_dataloaders(self, cuts: CutSet) -> DataLoader: logging.debug("About to create test dataset") test = K2SpeechRecognitionDataset( - input_strategy=OnTheFlyFeatures(WhisperFbank(WhisperFbankConfig(num_filters=80, device='cpu'))) + input_strategy=OnTheFlyFeatures( + WhisperFbank(WhisperFbankConfig(num_filters=80, device="cpu")) + ) if self.args.on_the_fly_feats else eval(self.args.input_strategy)(), return_cuts=self.args.return_cuts, @@ -419,16 +432,27 @@ class AsrDataModule: parquet_files = [ f"data/train-{idx}-of-01601.parquet", ] - parquet_files = [f"{self.args.huggingface_dataset_path_or_name}/{f}" for f in parquet_files] + parquet_files = [ + f"{self.args.huggingface_dataset_path_or_name}/{f}" + for f in parquet_files + ] file_name = parquet_files[0] logging.info(f"Loading dataset from {file_name}") - dataset = load_dataset('parquet', data_files=parquet_files, streaming=True, split='train') - cut_set = CutSet.from_huggingface_dataset(dataset, audio_key=self.args.audio_key, text_key=self.args.text_key) + dataset = load_dataset( + "parquet", data_files=parquet_files, streaming=True, split="train" + ) + cut_set = CutSet.from_huggingface_dataset( + dataset, audio_key=self.args.audio_key, text_key=self.args.text_key + ) if self.args.resample_to_16kHz: cut_set = cut_set.resample(16000) - return {'test':cut_set} + return {"test": cut_set} else: - return {'test':load_manifest_lazy(self.args.manifest_dir / "cuts_belle.00000.jsonl.gz")} + # return {'test':load_manifest_lazy(self.args.manifest_dir / "cuts_belle.00000.jsonl.gz")} + # return {'test':load_manifest_lazy(self.args.manifest_dir / "cuts_test_small.jsonl.gz")} + return { + "test": load_manifest_lazy("data/fbank_test/belle_cuts.00000.jsonl.gz") + } @lru_cache() def dev_cuts(self) -> CutSet: @@ -436,10 +460,11 @@ class AsrDataModule: if self.args.on_the_fly_feats: pass else: - return load_manifest_lazy(self.args.manifest_dir / "cuts_belle.00000.jsonl.gz") - + return load_manifest_lazy( + self.args.manifest_dir / "cuts_belle.00000.jsonl.gz" + ) @lru_cache() def train_cuts(self) -> CutSet: logging.info("About to get train cuts") - return load_manifest_lazy(self.args.manifest_dir / "cuts_belle_train.jsonl.gz") \ No newline at end of file + return load_manifest_lazy(self.args.manifest_dir / "cuts_belle_train.jsonl.gz") diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/decode.py b/egs/speech_llm/SPEECH2SPEECH/slam_omni/decode.py index 5cda487e3..acd882d18 100755 --- a/egs/speech_llm/SPEECH2SPEECH/slam_omni/decode.py +++ b/egs/speech_llm/SPEECH2SPEECH/slam_omni/decode.py @@ -23,7 +23,7 @@ Usage: pip install huggingface_hub['cli'] mkdir -p models/whisper models/qwen 
models/checkpoint -huggingface-cli download --local-dir models/checkpoint yuekai/icefall_asr_aishell_whisper_qwen2_1.5B +huggingface-cli download --local-dir models/checkpoint yuekai/icefall_asr_aishell_whisper_qwen2_1.5B # For aishell fine-tuned whisper model huggingface-cli download --local-dir models/whisper yuekai/icefall_asr_aishell_whisper exp_large_v2/whisper-large-v2-aishell1-epoch-10-avg-6.pt @@ -48,32 +48,74 @@ python3 ./whisper_llm_zh/decode.py \ import argparse import logging +import sys from collections import defaultdict from pathlib import Path from typing import Dict, List, Optional, Tuple +import soundfile as sf import torch import torch.nn as nn import transformers import whisper +from cosyvoice.cli.cosyvoice import CosyVoice from data_module import AsrDataModule from lhotse.cut import Cut from model import SPEECH_LLM, EncoderProjector - from peft import LoraConfig, get_peft_model -from train import DEFAULT_SPEECH_TOKEN +from train import DEFAULT_SPEECH_TOKEN, add_model_arguments from transformers import AutoModelForCausalLM, AutoTokenizer, Qwen2Config from whisper_encoder_forward_monkey_patch import replace_whisper_encoder_forward -from train import add_model_arguments + from icefall.env import get_env_info from icefall.utils import ( AttributeDict, setup_logger, store_transcripts, write_error_stats, - average_checkpoints, ) +sys.path.append("/workspace/CosyVoice/third_party/Matcha-TTS") + + +def audio_decode_cosyvoice(audio_tokens, codec_decoder): + """ + Generate audio from tokens with optional tone and prompt embedding. + + Args: + audio_tokens (list): List of audio tokens to be processed. + codec_decoder: Codec decoder for generating audio. + + Returns: + torch.Tensor: Generated audio waveform. + """ + flow_embedding = codec_decoder.frontend.spk2info["中文女"]["embedding"] + flow_prompt_speech_token = torch.zeros(1, 0, dtype=torch.int32) + prompt_speech_feat = torch.zeros(1, 0, 80) + tts_mel, _ = codec_decoder.model.flow.inference( + token=audio_tokens.to(codec_decoder.model.device), + token_len=torch.tensor([audio_tokens.shape[1]], dtype=torch.int32).to( + codec_decoder.model.device + ), + prompt_token=flow_prompt_speech_token.to(codec_decoder.model.device), + prompt_token_len=torch.tensor( + [flow_prompt_speech_token.shape[1]], dtype=torch.int32 + ).to(codec_decoder.model.device), + prompt_feat=prompt_speech_feat.to(codec_decoder.model.device), + prompt_feat_len=torch.tensor( + [prompt_speech_feat.shape[1]], dtype=torch.int32 + ).to(codec_decoder.model.device), + embedding=flow_embedding.to(codec_decoder.model.device), + flow_cache=torch.zeros(1, 80, 0, 2).to(codec_decoder.model.device), + ) + + audio_hat, _ = codec_decoder.model.hift.inference( + speech_feat=tts_mel, cache_source=torch.zeros(1, 1, 0) + ) + + return audio_hat + + def get_model(params, device): """Load and prepare the speech-to-speech model.""" if params.remove_whisper_encoder_input_length_restriction: @@ -136,7 +178,7 @@ def get_model(params, device): # Determine attn_implementation and torch_dtype based on use_flash_attn if params.use_flash_attn: attn_implementation = "flash_attention_2" - torch_dtype = torch.float16 # Or torch.bfloat16 if needed/supported + torch_dtype = torch.float16 # Or torch.bfloat16 if needed/supported else: attn_implementation = "eager" torch_dtype = torch.float16 @@ -162,7 +204,7 @@ def get_model(params, device): codec_lm = AutoModelForCausalLM.from_config( config=config, attn_implementation=attn_implementation, - torch_dtype=torch_dtype + torch_dtype=torch_dtype, ) # 
cosyvoice2_token_size = 6561 codec_lm.resize_token_embeddings(codec_vocab_size) @@ -197,7 +239,7 @@ def get_model(params, device): llm, encoder_projector, codec_lm, - codec_lm_padding_side= "left" if params.use_flash_attn else "right", + codec_lm_padding_side="left" if params.use_flash_attn else "right", ) if params.avg > 1: @@ -325,6 +367,12 @@ def get_parser(): help="The experiment dir", ) + parser.add_argument( + "--token2wav-path", + type=str, + default="/workspace/CosyVoice-300M-SFT", + help="The path to the token2wav model", + ) # parser.add_argument( # "--dataset", # type=str, @@ -350,6 +398,7 @@ def decode_one_batch( params: AttributeDict, model: nn.Module, tokenizer: AutoTokenizer, + token2wav_model: nn.Module, batch: dict, ) -> Dict[str, List[List[int]]]: """Decode one batch and return the result in a dict. The dict has the @@ -431,26 +480,32 @@ def decode_one_batch( # {"role": "assistant", "content": ""}, # ] # ] * len(feature) - questions_with_history = [cut.custom["question"] for cut in batch["supervisions"]["cut"]] - history_contexts = [question.rsplit(':', 1)[0].strip() for question in questions_with_history] - last_questions = [question.split(': ')[-1].strip() for question in questions_with_history] + questions_with_history = [ + cut.custom["question"] for cut in batch["supervisions"]["cut"] + ] + history_contexts = [ + question.rsplit(":", 1)[0].strip() for question in questions_with_history + ] + last_questions = [ + question.split(": ")[-1].strip() for question in questions_with_history + ] messages = [] for i, total_round in enumerate(chat_rounds): message = [] if total_round > 1: - history_question_answer = history_contexts[i].split('USER:') + history_question_answer = history_contexts[i].split("USER:") history_question_answer = [item for item in history_question_answer if item] for j in range(total_round - 1): # USER: 生成一个关于夏天的诗歌。 ASSISTANT: 夏日炎炎,万物生长,阳光明媚,享受着夏日的美好时光。 USER: 给我列举一些新闻头条。 ASSISTANT: 当今社会的新闻永远不会停。 - question_answer = history_question_answer[j].split('ASSISTANT:') + question_answer = history_question_answer[j].split("ASSISTANT:") message += [ {"role": "user", "content": question_answer[0].strip()}, - {"role": "assistant", "content": question_answer[1].strip()} + {"role": "assistant", "content": question_answer[1].strip()}, ] message += [ {"role": "user", "content": f"{DEFAULT_SPEECH_TOKEN}"}, # {"role": "user", "content": f"{last_questions[i]}"}, - {"role": "assistant", "content": ""} + {"role": "assistant", "content": ""}, ] print(f"message: {message}, batch_size {len(chat_rounds)}") messages.append(message) @@ -461,16 +516,21 @@ def decode_one_batch( feature, input_ids.to(device, dtype=torch.long), attention_mask.to(device) ) cut_ids = [cut.id for cut in batch["supervisions"]["cut"]] - for cut_id in cut_ids: - speech_token_file_name = ( - params.log_dir / f"{cut_id}.txt" - ) - with open(speech_token_file_name, 'w') as f: - # save_path = params.exp_dir / f"speech_output/{cut_id}.wav" - #torchaudio.save(save_path, speech_output.cpu(), 16000) - # print(f"speech_output: {generated_speech_output}, cut_id: {cut_id}") - save_str = " ".join([str(i) for i in generated_speech_output]) - f.write(f"{cut_id}|{save_str}\n") + generated_speech_output = [ + generated_speech_output + ] # WAR: only support batch = 1 for now + for cut_id, audio_tokens in zip(cut_ids, generated_speech_output): + speech_file_name = params.log_dir / f"{cut_id}.wav" + audio_tokens = [token for token in audio_tokens if token < 4096] + audio_tokens = torch.tensor(audio_tokens, 
dtype=torch.int32).unsqueeze(0) + audio_hat = audio_decode_cosyvoice(audio_tokens, token2wav_model) + sf.write(speech_file_name, audio_hat.squeeze(0).cpu().numpy(), 22050) + # with open(speech_token_file_name, 'w') as f: + # # save_path = params.exp_dir / f"speech_output/{cut_id}.wav" + # #torchaudio.save(save_path, speech_output.cpu(), 16000) + # # print(f"speech_output: {generated_speech_output}, cut_id: {cut_id}") + # save_str = " ".join([str(i) for i in generated_speech_output]) + # f.write(f"{cut_id}|{save_str}\n") else: generated_ids = model.decode( @@ -486,6 +546,7 @@ def decode_dataset( params: AttributeDict, model: nn.Module, tokenizer: AutoTokenizer, + token2wav_model: nn.Module, ) -> Dict[str, List[Tuple[str, List[str], List[str]]]]: """Decode dataset. @@ -548,14 +609,23 @@ def decode_dataset( results = defaultdict(list) for batch_idx, batch in enumerate(dl): answers = batch["supervisions"]["text"] - questions_with_history = [cut.custom["question"] for cut in batch["supervisions"]["cut"]] - answer_cosyvoice_speech_token = [cut.custom["answer_cosyvoice_speech_token"] for cut in batch["supervisions"]["cut"]] - texts = [question.split(': ')[-1].strip() for question in questions_with_history] + questions_with_history = [ + cut.custom["question"] for cut in batch["supervisions"]["cut"] + ] + answer_cosyvoice_speech_token = [ + cut.custom["answer_cosyvoice_speech_token"] + for cut in batch["supervisions"]["cut"] + ] + texts = [ + question.split(": ")[-1].strip() + for question in questions_with_history + ] cut_ids = [cut.id for cut in batch["supervisions"]["cut"]] hyps_dict = decode_one_batch( params=params, model=model, + token2wav_model=token2wav_model, batch=batch, tokenizer=tokenizer, ) @@ -643,9 +713,7 @@ def main(): params.suffix = f"epoch-{params.epoch}-avg-{params.avg}" params.log_dir = Path(params.exp_dir) / f"log-{params.method}" params.log_dir.mkdir(parents=True, exist_ok=True) - setup_logger( - f"{params.exp_dir}/log-{params.method}/log-decode-{params.suffix}" - ) + setup_logger(f"{params.exp_dir}/log-{params.method}/log-decode-{params.suffix}") logging.info("Decoding started") logging.info(params) @@ -657,6 +725,9 @@ def main(): logging.info(f"device: {device}") model, tokenizer = get_model(params, device) + token2wav_model = CosyVoice( + params.token2wav_path, load_jit=False, load_trt=False, fp16=False + ) num_param = sum([p.numel() for p in model.parameters()]) logging.info(f"Number of model parameters: {num_param}") @@ -697,6 +768,7 @@ def main(): dl=test_dl, params=params, model=model, + token2wav_model=token2wav_model, tokenizer=tokenizer, ) diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py b/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py index 3b971dd89..1438a2624 100755 --- a/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py +++ b/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py @@ -66,8 +66,9 @@ from lhotse.cut import Cut from lhotse.dataset.sampling.base import CutSampler from lhotse.utils import fix_random_seed from model import IGNORE_TOKEN_ID, SPEECH_LLM, EncoderProjector + # from multi_dataset import MultiDataset -from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training +from peft import LoraConfig, get_peft_model from torch import Tensor from torch.utils.tensorboard import SummaryWriter from transformers import ( @@ -146,6 +147,7 @@ def add_model_arguments(parser: argparse.ArgumentParser): help="Whether to enable speech codec output.", ) + def get_parser(): parser = argparse.ArgumentParser( 
formatter_class=argparse.ArgumentDefaultsHelpFormatter @@ -332,9 +334,7 @@ def compute_loss( # remove too long text # texts = [ text for text in texts if len(text) < 1024 ] if len(texts) != len(messages): - logging.warning( - f"Remove too long text, {messages} " - ) + logging.warning(f"Remove too long text, {messages} ") max_len_texts = max([len(text) for text in texts]) if tokenizer.padding_side == "right": texts = [ @@ -354,10 +354,10 @@ def compute_loss( # first get the indices of the tokens mask_prompt = True if mask_prompt: - default_speech_token_id = tokenizer.convert_tokens_to_ids(DEFAULT_SPEECH_TOKEN) - mask_indices = torch.where( - input_ids == default_speech_token_id + default_speech_token_id = tokenizer.convert_tokens_to_ids( + DEFAULT_SPEECH_TOKEN ) + mask_indices = torch.where(input_ids == default_speech_token_id) for i in range(mask_indices[0].size(0)): row = mask_indices[0][i] col = mask_indices[1][i] @@ -382,30 +382,39 @@ def compute_loss( batch_idx_train = params.batch_idx_train answers = batch["supervisions"]["text"] - questions_with_history = [cut.custom["question"] for cut in batch["supervisions"]["cut"]] + questions_with_history = [ + cut.custom["question"] for cut in batch["supervisions"]["cut"] + ] chat_rounds = [cut.custom["round"] for cut in batch["supervisions"]["cut"]] - answer_cosyvoice_speech_token = [cut.custom["answer_cosyvoice_speech_token"] for cut in batch["supervisions"]["cut"]] - last_questions = [question.split(': ')[-1].strip() for question in questions_with_history] - history_contexts = [question.rsplit(':', 1)[0].strip() for question in questions_with_history] - # USER: 生成一个关于夏天的诗歌。 ASSISTANT: 夏日炎炎,万物生长,阳光明媚,享受着夏日的美好时光。 USER: 给我列举一些新闻头条。 ASSISTANT: 当今社会的新闻永远不会停。: 告诉我如何烹饪鸡肉 + answer_cosyvoice_speech_token = [ + cut.custom["answer_cosyvoice_speech_token"] + for cut in batch["supervisions"]["cut"] + ] + last_questions = [ + question.split(": ")[-1].strip() for question in questions_with_history + ] + history_contexts = [ + question.rsplit(":", 1)[0].strip() for question in questions_with_history + ] + # USER: 生成一个关于夏天的诗歌。 ASSISTANT: 夏日炎炎,万物生长,阳光明媚,享受着夏日的美好时光。 USER: 给我列举一些新闻头条。 ASSISTANT: 当今社会的新闻永远不会停。: 告诉我如何烹饪鸡肉 # : 对以下句子进行鉴赏:他心地善良。输出结果为"他是一个有善心的人。 messages = [] for i, total_round in enumerate(chat_rounds): message = [] if total_round > 1: - history_question_answer = history_contexts[i].split('USER:') + history_question_answer = history_contexts[i].split("USER:") history_question_answer = [item for item in history_question_answer if item] for j in range(total_round - 1): # USER: 生成一个关于夏天的诗歌。 ASSISTANT: 夏日炎炎,万物生长,阳光明媚,享受着夏日的美好时光。 USER: 给我列举一些新闻头条。 ASSISTANT: 当今社会的新闻永远不会停。 - question_answer = history_question_answer[j].split('ASSISTANT:') + question_answer = history_question_answer[j].split("ASSISTANT:") message += [ {"role": "user", "content": question_answer[0].strip()}, - {"role": "assistant", "content": question_answer[1].strip()} + {"role": "assistant", "content": question_answer[1].strip()}, ] message += [ {"role": "user", "content": f"{DEFAULT_SPEECH_TOKEN}"}, - {"role": "assistant", "content": answers[i]} + {"role": "assistant", "content": answers[i]}, ] messages.append(message) @@ -423,7 +432,13 @@ def compute_loss( labels=target_ids.to(device), ) else: - text_loss, acc, codec_loss, codec_acc, codec_topk_acc = model.forward_with_speech_output( + ( + text_loss, + acc, + codec_loss, + codec_acc, + codec_topk_acc, + ) = model.forward_with_speech_output( fbank=feature, input_ids=input_ids.to(device), attention_mask=attention_mask.to(device), @@ 
-445,12 +460,8 @@ def compute_loss( acc * info["frames"] ) # WAR: to avoid normalization by the number of frames if params.enable_speech_output: - info["codec_acc"] = ( - codec_acc * info["frames"] - ) - info["codec_topk_acc"] = ( - codec_topk_acc * info["frames"] - ) + info["codec_acc"] = codec_acc * info["frames"] + info["codec_topk_acc"] = codec_topk_acc * info["frames"] info["codec_loss"] = codec_loss.detach().cpu().item() info["text_loss"] = text_loss.detach().cpu().item() return loss, info @@ -469,7 +480,7 @@ def compute_validation_loss( tot_loss = MetricsTracker() for batch_idx, batch in enumerate(valid_dl): - with torch.amp.autocast('cuda', enabled=params.use_fp16): + with torch.amp.autocast("cuda", enabled=params.use_fp16): loss, loss_info = compute_loss( params=params, tokenizer=tokenizer, @@ -584,7 +595,7 @@ def train_one_epoch( f"rm -rf {params.exp_dir}/epoch-{params.cur_epoch}-checkpoint-{batch_idx}" ) try: - with torch.amp.autocast('cuda', enabled=params.use_fp16): + with torch.amp.autocast("cuda", enabled=params.use_fp16): loss, loss_info = compute_loss( params=params, tokenizer=tokenizer, @@ -722,7 +733,6 @@ def run(rank, world_size, args): # model.resize_token_embeddings(len(tokenizer)) # model.vocab_size = len(tokenizer) - llm.config.pad_token_id = tokenizer.pad_token_id llm.config.default_speech_token_id = tokenizer.convert_tokens_to_ids( DEFAULT_SPEECH_TOKEN @@ -736,12 +746,11 @@ def run(rank, world_size, args): param.requires_grad = False encoder_projector.eval() - if params.enable_speech_output: # Determine attn_implementation and torch_dtype based on use_flash_attn if params.use_flash_attn: attn_implementation = "flash_attention_2" - torch_dtype = torch.float16 # Or torch.bfloat16 if needed/supported + torch_dtype = torch.float16 # Or torch.bfloat16 if needed/supported else: attn_implementation = "eager" torch_dtype = torch.float16 @@ -766,9 +775,9 @@ def run(rank, world_size, args): # Pass attn_implementation and torch_dtype to the constructor # Use AutoModelForCausalLM.from_config for more generality codec_lm = AutoModelForCausalLM.from_config( - config=config, - attn_implementation=attn_implementation, - torch_dtype=torch_dtype + config=config, + attn_implementation=attn_implementation, + torch_dtype=torch_dtype, ) # cosyvoice2_token_size = 6561 codec_lm.resize_token_embeddings(codec_vocab_size) @@ -803,7 +812,7 @@ def run(rank, world_size, args): llm, encoder_projector, codec_lm, - codec_lm_padding_side= "left" if params.use_flash_attn else "right", + codec_lm_padding_side="left" if params.use_flash_attn else "right", ) if params.pretrained_model_path: @@ -851,12 +860,11 @@ def run(rank, world_size, args): codec_len = len(c.custom["answer_cosyvoice_speech_token"]) if codec_len > 2200: logging.warning( - f"Exclude cut with ID {c.id} from training. Duration: {c.duration}, lenth: {codec_len}" + f"Exclude cut with ID {c.id} from training. 
Duration: {c.duration}, lenth: {codec_len}" ) return False return True - train_cuts = data_module.train_cuts() train_cuts = train_cuts.filter(remove_short_and_long_utt) From 360f0aa397d822d1b0114b57af94b9308a45999b Mon Sep 17 00:00:00 2001 From: root Date: Tue, 29 Apr 2025 08:49:12 +0000 Subject: [PATCH 25/57] update README --- egs/speech_llm/SPEECH2SPEECH/README.md | 89 ++++++ .../SPEECH2SPEECH/assets/framework.jpg | Bin 0 -> 339257 bytes .../local/compute_whisper_fbank.py | 14 +- .../{slam_omni => qwen_omni}/data_module.py | 0 .../{slam_omni => qwen_omni}/decode.py | 0 .../ds_config_zero1.json | 0 .../label_smoothing.py | 0 .../{slam_omni => qwen_omni}/model.py | 271 ++++++++++++------ .../speech_dataset.py | 3 +- .../{slam_omni => qwen_omni}/train.py | 0 .../{slam_omni => qwen_omni}/web_demo.py | 263 ++++++++--------- .../whisper_encoder_forward_monkey_patch.py | 0 12 files changed, 423 insertions(+), 217 deletions(-) create mode 100644 egs/speech_llm/SPEECH2SPEECH/README.md create mode 100644 egs/speech_llm/SPEECH2SPEECH/assets/framework.jpg rename egs/speech_llm/SPEECH2SPEECH/{slam_omni => qwen_omni}/data_module.py (100%) rename egs/speech_llm/SPEECH2SPEECH/{slam_omni => qwen_omni}/decode.py (100%) rename egs/speech_llm/SPEECH2SPEECH/{slam_omni => qwen_omni}/ds_config_zero1.json (100%) rename egs/speech_llm/SPEECH2SPEECH/{slam_omni => qwen_omni}/label_smoothing.py (100%) rename egs/speech_llm/SPEECH2SPEECH/{slam_omni => qwen_omni}/model.py (77%) rename egs/speech_llm/SPEECH2SPEECH/{slam_omni => qwen_omni}/speech_dataset.py (99%) rename egs/speech_llm/SPEECH2SPEECH/{slam_omni => qwen_omni}/train.py (100%) rename egs/speech_llm/SPEECH2SPEECH/{slam_omni => qwen_omni}/web_demo.py (60%) rename egs/speech_llm/SPEECH2SPEECH/{slam_omni => qwen_omni}/whisper_encoder_forward_monkey_patch.py (100%) diff --git a/egs/speech_llm/SPEECH2SPEECH/README.md b/egs/speech_llm/SPEECH2SPEECH/README.md new file mode 100644 index 000000000..e4738eeef --- /dev/null +++ b/egs/speech_llm/SPEECH2SPEECH/README.md @@ -0,0 +1,89 @@ + +# Introduction + +This recipe includes scripts for training speech2speech models. + +# SPEECH2SPEECH + +The following table lists the folders for different tasks. + +|Recipe | Speech Input | Speech Output | Comment| +|--------------|--------------|---------------|--------| +|Qwen-omni like| Continuous Embeddins| Cosyvoice1 50Hz Single-codebook Token | Text-driven; using Thinker LLM for text token, small Talker LLM for speech token | + +### [Qwen-omni like Speech2speech Recipe](./qwen_omni) + +[Qwen2.5-Omni](https://github.com/QwenLM/Qwen2.5-Omni) style model using [worstchan/Belle_1.4M-SLAM-Omni](https://huggingface.co/datasets/worstchan/Belle_1.4M-SLAM-Omni) dataset. + +
+<p align="center">
+  <img src="./assets/framework.jpg"/>
+</p>
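+
+Below is a minimal, self-contained sketch of the text-driven Thinker/Talker flow summarized in the table above. It is illustrative only: the module and dimension names are placeholders, not the recipe's actual classes.
+
+```python
+# Illustrative sketch only (placeholder modules, not the recipe's model.py).
+import torch
+import torch.nn as nn
+
+
+class ToyTalker(nn.Module):
+    """Small codec LM: predicts CosyVoice single-codebook speech tokens,
+    conditioned on the Thinker LLM's hidden states (prefix conditioning)."""
+
+    def __init__(self, codec_vocab_size=4100, d_model=256):
+        super().__init__()
+        self.embed = nn.Embedding(codec_vocab_size, d_model)
+        self.decoder = nn.GRU(d_model, d_model, batch_first=True)
+        self.out = nn.Linear(d_model, codec_vocab_size)
+
+    def forward(self, thinker_states, codec_tokens):
+        # Prepend the Thinker's hidden states to the codec-token embeddings,
+        # then predict logits only over the codec-token positions.
+        prefix_len = thinker_states.size(1)
+        x = torch.cat([thinker_states, self.embed(codec_tokens)], dim=1)
+        h, _ = self.decoder(x)
+        return self.out(h[:, prefix_len:])
+
+
+# Shapes only: batch of 2, 20 Thinker hidden states, 50 speech tokens (~1 s at 50 Hz).
+thinker_states = torch.randn(2, 20, 256)        # from the text LLM ("Thinker")
+codec_tokens = torch.randint(0, 4096, (2, 50))  # CosyVoice codec token ids
+logits = ToyTalker()(thinker_states, codec_tokens)
+print(logits.shape)  # torch.Size([2, 50, 4100])
+```
+
+In the recipe itself the Talker is a separate small causal LM (`codec_lm`) constructed with `AutoModelForCausalLM.from_config` in `train.py`.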
+
+Command for training is:
+```bash
+pip install -r whisper_llm_zh/requirements.txt
+
+pip install huggingface_hub['cli']
+mkdir -p models/whisper models/qwen
+
+# For aishell fine-tuned whisper model
+huggingface-cli download --local-dir models/whisper yuekai/icefall_asr_aishell_whisper exp_large_v2/whisper-large-v2-aishell1-epoch-10-avg-6.pt
+# For multi-hans fine-tuned whisper model
+# huggingface-cli download --local-dir models/whisper yuekai/icefall_asr_multi-hans-zh_whisper v1.1/whisper-large-v2-multi-hans-zh-epoch-3-avg-10.pt
+
+# huggingface-cli download --local-dir models/qwen Qwen/Qwen2-7B-Instruct
+huggingface-cli download --local-dir models/qwen Qwen/Qwen2-1.5B-Instruct
+
+# First, we only train the projector and freeze other modules.
+torchrun --nproc_per_node 8 ./whisper_llm_zh/train.py \
+  --max-duration 200 \
+  --exp-dir ./whisper_llm_zh/exp_test \
+  --speech-encoder-path-or-name models/whisper/exp_large_v2/whisper-large-v2-aishell1-epoch-10-avg-6.pt \
+  --llm-path-or-name Qwen/Qwen2-1.5B-Instruct \
+  --manifest-dir data/fbank \
+  --deepspeed \
+  --deepspeed_config ./whisper_llm_zh/ds_config_zero1.json \
+  --use-flash-attn True \
+  --use-lora False --unfreeze-llm False
+
+# Then we jointly train the projector and LLM LoRA modules.
+torchrun --nproc_per_node 8 ./whisper_llm_zh/train.py \
+  --max-duration 200 \
+  --exp-dir ./whisper_llm_zh/exp_test \
+  --speech-encoder-path-or-name models/whisper/exp_large_v2/whisper-large-v2-aishell1-epoch-10-avg-6.pt \
+  --llm-path-or-name Qwen/Qwen2-1.5B-Instruct \
+  --manifest-dir data/fbank \
+  --deepspeed \
+  --deepspeed_config ./whisper_llm_zh/ds_config_zero1.json \
+  --use-flash-attn True \
+  --use-lora True --unfreeze-llm True \
+  --pretrained-model-path ./whisper_llm_zh/exp_test/epoch-3.pt
+```
+
+Command for decoding:
+```bash
+mkdir -p models/whisper models/qwen models/checkpoint
+huggingface-cli download --local-dir models/checkpoint yuekai/icefall_asr_aishell_whisper_qwen2_1.5B
+
+# For aishell fine-tuned whisper model
+huggingface-cli download --local-dir models/whisper yuekai/icefall_asr_aishell_whisper exp_large_v2/whisper-large-v2-aishell1-epoch-10-avg-6.pt
+# For multi-hans fine-tuned whisper model
+# huggingface-cli download --local-dir models/whisper yuekai/icefall_asr_multi-hans-zh_whisper v1.1/whisper-large-v2-multi-hans-zh-epoch-3-avg-10.pt
+
+huggingface-cli download --local-dir models/qwen Qwen/Qwen2-7B-Instruct
+
+mkdir -p whisper_llm_zh/exp_aishell_whisper_qwen2_1.5B
+ln -s models/checkpoint/epoch-10-avg-5.pt whisper_llm_zh/exp_aishell_whisper_qwen2_1.5B/epoch-999.pt
+
+python3 ./whisper_llm_zh/decode.py \
+  --max-duration 80 \
+  --exp-dir whisper_llm_zh/exp_aishell_whisper_qwen2_1.5B \
+  --speech-encoder-path-or-name models/whisper/exp_large_v2/whisper-large-v2-aishell1-epoch-10-avg-6.pt \
+  --llm-path-or-name models/qwen \
+  --epoch 999 --avg 1 \
+  --manifest-dir data/fbank \
+  --use-flash-attn True \
+  --use-lora True --dataset aishell
+```
diff --git a/egs/speech_llm/SPEECH2SPEECH/assets/framework.jpg b/egs/speech_llm/SPEECH2SPEECH/assets/framework.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..d708bb256f799cfd07f32c779cf1bf315a3376f5
GIT binary patch
literal 339257
z=M4QE*51B15jN&t>_vXKo(+L8L|)=R;28EEa9=Zi%u+<(0Qy%1&WG^~?z)zUsTFBj z+igQl>`Vt-UGDF~N1Y1Yr|pz+V<{TPT+yfH+R_|WuCR$ab@n&GN(HUww}?7`iL(fb zO-xbhGZKFOc!_@=F1OY<5^;F$dHJTTlhsjm)xA%ZT_8dhzHgm2N9JIOPShe2w)wMa zj8LsPpBp8!XliV0`-q#>S*ZtU3Rh?`kY%_3f?(&@;`96Y|KB8$e@BT2E0Pba9W!w@ zm4hw{R4;z{20vZ?XW07xzdDv$M`>YXYR#ldpIQ#(7`_gPTXK+l7`>uLJWCHPK#ZyQWh z8ka*sNx=!aQpZqh9MQ}}%kjVqSX{C$fa51xh_DOf&KFuw8iv9o0ncOYLcqA*)?s6mZ(i}^tems8*n30{G? zjY#GI{(TL+9`%jmgIW0c`6z+}?6IF7M4t_;65{A7rZli9+7HNG)Mi2>S5r!}@`(B} z%_cwllhwR|e}J}MevS%)IwN^vNsp@&UIimAkmZ|q@Q)fy$T@SXMY%u5i5?H!#$Sm( z(UgeqE{M3in!j&!pQ95^f;p)Ldvo(Po)xEu473}(S%rJDW;Xhw_Gil8FS5vKW#`Z= z$zhwVuUV0&)7CH^K%*XzkPo7cj_Pud&MdUuUraz5RrY_TnN?rsQP6pYk;!O{XpQN!w*^B5DaOM4ck$KdCun|pdlIhX zGf>n4Ct3Ff9|7tu)-5G)*49^b9q*jviq75=Xsj}cz1kz$g3#)lv}WJE1$&oR#1uq^ zTQVZ?Hy)AA+liMg$8|4zaA(FdaCyZVV{LV*xS0nsN$Cd`y3fY<*;w z>$%PYhr&y=OLWuPh~+kYnffe0xvvj}9{JXqJ`ER^%iFUTDA>q&cXM+`&zYCZLZk;4lhzK-Jy<+d~qyf#DdbE+b$>`TmW@TsxUS_*87)iF=N zRCUHGG^hmemblK(ay#0)PXw%oN-X={{w^+`4bkb8ExHfc7XLGk8wUjXJ3*)~ODrJF zPKmT+t5My_j*_OWfoHD;8yyue903_-Kd^9W1VNR1?XC&PJ5NCTPa`oWb9X#iIT+c)pABlI4k9o7(#EPBG*ilnz^81m@=m`Af zl68T3Y{D%@44AuLixss&MZRn0kRDMV>Xtm#Td%eEP`$dLz zGqrspJu~p(W=Z1-`P+Q9=8A9b?o}=IynJ$DKWiD<8Pm*DRvMq)q4P+mhUG{~0w<%Z zBEXtmvVR3MML5aJTN6$2Y1bxLb${yXA85$vs)oI!s$cE9Cl5|~QM!8H%JIv|Ciuqr zBA!{Li*3>Q6DhT1Lo8-4%hZZ`bpD%LS-JXT#Ay8UJ~p=xGZ#oVZRmCWpC9FfSVLe~ zjcqN${;w6_jsEDt_dTIyc>dd_5}WKj&>MBCdgIj3{`}R(b@rNI)&*~>f=Cmnx!*JW zw8zZ5UHjFA_QJD8jdgvKCJMR75-sa@?(zg}(FH^(0p#o&GOT^JI?;ahQF6YcY;8;M^6|{?@t381)=3`l)PJQyz1xU8eqz)JG5RMc(QS;xyEhxV`{! zet=lvp;=SY36Tl-xeVONj`w?B6ePRMG0sp|rl+JF@=l05YOyPKKbPVY{urXeg z0y7jq1@*oVWdIL({SLU?fFf3PZ3*oPQhWj0HY-JyZnMsm{PeCqyQ!Od-LAu4uA(6B zQk7Q#q!udujo>t@s{(S#AV$t6miIK~)eo`R$ju{XT!zd2-W^F64S))Nxo&>;txw#e zC0%_Z{Uh>=1af60A`Y3iNR6Qga42IUCi1JCx8LC?p8ZXQJump zy;=sv(wq2g^~8s8ih>PhDGdt&H=z@R(l2K)lf5oL-Kq$S`Sl8Z8P?|m6^x1-V3$>h zUg1Be7Vr8e_x)=$VtyCq?b3-wmJ?Dq7EE5Go!7l5=i)ET#&Cn)HCa6>Ym8z8{jD4rY}frUkEak{(3#{B&Yobf3e3jZ`ORg*iVzO(z#Lf znXV6NGZ*Vq&gq?oEecIVv}yB?IV$;ol0vKPt+22P$Q;uj*!lPs$4hq{L~9}sjkINQ zPR2Gi^&AVUhbw-VlZ{R_9(-8^8Xt#- z@k_F)yhy9D|^q-1x*h5-0P(?g&c1pmY5 zd^_JhI0P?>tXfLYbh z#xwPO_BJ-Bl1}g9jY-bs%$wvKd>NwLaDxEnL7f>+$e73Se%C)dV#4jY87o>o*L5Jg z(m?toWMf4h-VEE@z(wje`9i{k(e>tx-iqJy5;!0Bq3gt4obkIQ?H$f#w_IM|j#(c1 zVX~5WSZg~|WC&&L#D+s@PJhnf`Esc{tbWYJisg-Q*CN|^ZS+g`&tk(08)b)DCe9D9 z%-bjeq=C)#Fb&Zw=PZP-@IqL3l@FZ~#VL`@Bhd$z2J)yyr-tdK`SqpoH%g{D{aIC? zmxz9`%unc)xWW`C^(PXD{1b+Q>dz#hM%9+JJLqSgPbj1?lY`QygbYn|dGp7!1XyEP(KC-k?svp`1s+%4qt*3r0m9q($UpO9s#C7JmEElj7w;nX zT8o03vaEblLFIEXLel3fcD*i!$o;}n-2r07Tpd!Zq=_~~x$oJ>NUN#y4(drA12z>g zs}f;*b!05nlAsq`>cEZhXohh&z(_sA-zJ)wf`jy<9{1*L34f=0p?aj*6(XsqdAdQYJe1ViJT3K#<&zNQ zS+1S^!2bSw<;f8ep@S6c8Bk*Rs_Re60i|C-{Z=r%(!){k`PnyXG+D}O`tG%q1CfWba}#Z4R*L5BOcoM8rbh0C@X%+E`x-T0y7_M15DnTF zWzoNQ$LERcyYe2nl)Wi($xiWt$6j*pX3b@NQ#5o+%KtU9AI?$p>9&KDB^5#FJ8PZR zmU3If5mxtcki&Ev& zVsXa~$#K4Bhp>cNyXF3~3;RDh{{O+nz9F1aNu`Ob43I?HnEIO0#h)iTB{YRCRk-v% zlvi|!vq|RGQXt_q;u70E8)X6J3K=Y}`62eOJ?M?a+s*!0Wqq)0gh}$2dTyal!7kax)`(}7k zwn}`@-j6E%oXHz?E8w6bKBQ;Nrew^LnPUdoY0fc{oZc+*S#)APpz3XFr@E2nll8cj zH0V~u1JdaOVI1Z3;|DDpwPQ$6mNy+;)8kvTDJ3S;`2m=`bytJj<>|l}bq_szlZHr( z5pQXM)4r^CO!zhqfY#mB9r`-efXv#Cs8DLxBC#iOq&I8F&CKJrr{zLVh$xfLmz$DR zk|&C{@uyeT4vZ=ucwdvP^UQt7CC2TVwsD+lY3T{z9CRp8AQBlQuhYIl%e+qZ5EA)N zc4BG5m3eN|x(8D?K&*G79YRVC&z_H&v7&~S0a%2~_mLE*x@e@wtCpmIU1wf@5%DC< zNhCf7Xf9s#Af`4T(qq9q9wY3J;Qns+Qg7@@$kj#v1PjibDh_^me;4oiut|t&!bjI! 
zblDIh9d{T!;j_qrxq|r5Sd#U^#gfA1sHpEZ?mCb3eTcEosj=@~PbxhVzW>Yf$_SbJ zA}hOTzSQ1MJimWpC|IGd+j~b%+h!>$O-VGx3i z-9p=dh0O*K3aP`?ufIVYC-6Hy=NZm_nik~Op+J5J2n0f?Ag8<@4EqgovCfZWXZvq1 zbm*%9BJGR+77gg9-oWNnei%bY0h!gO*%&&gJMok6^|{Zl?O~^J2f8_>tz2zU=PeE7 zIFR)CKrEIc^z`BoBAmc5|3aZDP5S z9W4Lz7l?M`l~M@QqGT02s3uJj^bC#$yxGHqxdpv(emg%nB;X}DP;1o z=4<$Q#WGzpw4KS^r#5~@F89Zot{$VO3-{9xPl5^zboBLqWJf7Kho#C{6M_CZP5ZW( zck4i;=J+(eE#C5QinX5>Y$|GnO}d+v-%R$yRSv@Lo28Pz3;Kd!hyo#TD&vMo6jK*< zeauTkeAGnz+?k2>7FFI(rvovnyRf}tZ`d}s@1BBNE&=&f0JdpXK+DnDM^+n5NCFFc z%Kc4GQ*u@eUEe%e%I&`Tt%tc0?e za|aBQaXaxyZIcFR`B4*s~yVPa~jVO$M*q>}25prDq&&FLiwwwK{lV4c3m@(8m3P2-;xqOFJ*0pyv1bQs=&h9gpd`$6h(R}`7nbwzEaYI^4 zWkhnBc>mw=q}zXHQzsgkUx3y1nt=WIV06S4;ZiwRMi*NF^kMc zJq#smA9TA9whi>e?*Z6oVcpb*(GeJUPoe=7Y!7U*Ab=B`; zgB#8+&C+$?O2>_(-5oxo43__lHaeXNqL&xsS!OKzza!a-|87qSyxbCCWR?b&sY+kr zr!fBsNq0DY&J7g^YD>-Yv-G2DzCwW5buC5}dO+pb9b)WrCmqB4 zoI_WyzL4d7-fg$&C?XJ1cInuqQ3eO-?>(o4{co#bKrK}~hpAS@+~3LM%b{gpYVbcq zDB!14*5SmQodF7d3JVghe;k@^TT>Pp@4!!u#m{j7tcxsD07f-IfBFrg;{As43?MZ@ z;$FzV_KA|{>9v*((SQAS--9!w=Fx&MEwzj{x=JJ^CZ9JEMttY##GGYLtT*IT^Q(Oc z;;Fv_U(6k-33dU zSu4hJG2(xNxX)5O5DzVp3(S33szJc*%kHz+X_y(cucco2597hWMOkyyusg3*dlMk~eSh~`{^;uT z|0XDrgrD&s7yyWeBuxVt6WZw}x0m-x1ZH|?ig>2;8CRB`OWCy@df`E(=2~|b8?1j| z%ACmwEkn0ui$)S=+t(ts+l1{DMr!iMi{H$iO1ZeYbR+%theE^0rD3OZcv_Qp>OmBO z4ae~V&GnpkU9$oz!u;J+`?J72Rfa?6nW?8`oAaE_!gLktDrs%5lgK)!r@hc71sg>9 z7A4y4j#tNlGmkR=N@`kbV%y0~(hPai6M)m`S^9!U(side@Av zhgN4A$}!evdn9m$kav1N935LICb$G9a%IOfYo0Y zRB5S&U_m`U>;fu~T$n|c?!Z9k?!_N(Uvd*3?k$XsZUFkP+lsgBgUa&I=8pr9UhN-S z@Berwmsgs}nK&Q2#)fG=jpn5$UKq}w@K=;J8LHuOI3_u>b~x6mKKYxUkFr&zjI!gD{VM+-Sqi4-z5jle*(e3qdOkhU%l`g;U`mDS@)oT)gk z;g{joqg$4Hf9wT83;fnV{5ZTC(X7NtKV4C0f3RfcNp_#J62;?TK!RHx9|z}BlLU91 z)I;mViP#2L@>wRQJ1aE4 zNLc#rK^&F=mKfJrF?27y@^b}YVBhUb14^T}27{Q@k*tpO7Hhbkt3dA9<`vD>iG!K< zv>G`RqfHRfzFzJFQ;m4F1Fokvh* z4iAJ@*JJtUG62JsCEhvMDif{O_7YZ$`}Em5iz9yS^W4StYrKR_tTTA5Ud-|7HR55? z+Oi9zN{7FZ>*ZilP{QM;c`9{SDJL9^bIm0)W!A*eRUEnDnEip^Fia{&5y#SwqET-F{{DiCcg^+C@N4>TI}6kqj*u5=V4P5^Q|ejJ=3TKMiT}N$Md83>VcKT0`#C1kDKV>^k~nk`6K>(c?aKM?kzTW-rtc> zUOK(;W-_Yyy%3X~s=q`gKec^VY9)RiT=Nl!oF%ju{aOXQx|1#hgmp=Ul_mPI1-JQZ z3JOMQTRmKEPu%NVe;&VDm9%G%^ZqqNg%A}_Qfjrvb7H(Hy+}L6GAhd5^&s?6ky=cG54->3Ec0XB(7)0u9$&v5_OOW++ z-vnJYefYGXou9Vf>We~$?u;{@tnSx$yO7J236cx89iEuOPd`&h4R@iW721ie zE86Ot#0agw;N`5$ciZzppm)w6WD0L5CWfepImzd@frG2#?aLS2cv`NQ`UvYRc&0GV zPUDTx;`Vm7bRAN)7>15CD6lKQ_ttIQ6yoaGbD~oe5cyol>4dFz5LO-@>rVzIDQKNW zvUO|5x3CGT5slzNjU<7}qc{3BwZlpj9@t(qf*ti98F&a6@=J`vaO!F-5Rc*p3Owvk zfz3y5JiBMKCha?-^2SBB@XA6A8K8AMtwzwGDkx~RQXFM(r@H2q_d#U*o4)S^@*3}A z!sa}zLb#|D1TAbuWarurqx!00T@yxTc|gdSi$4Uv_Y9CepWj)u^QW@ktO*|zzGR{kF;6(f+$Gu|R4|g@|s+H&h4?1%y`{J1D zj2wO0A6sa-H(a{e`^Tl@Y?tNgFC=LWmpqyFd3&E{3GD-;?~`7cCFL^@fb=YJfNFgC z!}6AXpVYve`W@e8a2wrN*9Dk+H%rau!NNO_EAjWD=#&r-J$U7{%@}Fi@a!JYHrYj-mX0MKWY8KE_s8D5!l^AIT8A#V($4 zKJE*5h$z7=TY>rv-0=ZMzOD!XE=--K&9gV-Mb#~)KmYLjp#XpIwadxMo!f9vn&hxr z?Kyv4m6jb0{V1L)=gJWA28}r?EA%(Wv65c&NlY^w;{gnPE=r0iMg1~Nk)p=?1*Dal zKs^UD0)n0@yLTCTE1ozB1AUqyfHeD0MIpW3VY7SCN+8s@_O@lM`cBN}UCl!KY|nmZ&gh!?$=9y`1`LkZrmv=%t@;O}vs_Y4%T#r?`3TA) zDk-NH4s^Zhfb*XCfhS~KvgxEj>1rNqR}+lA?JQY#7ts8hlAf<$Bp4A?KwWUbd|I5s zL&mP0Bs^G)nPp=hqekfH3C*Qj3>SzWne3N#uY#?9v#`}NBt!Uggb&=rp8^0KY@#I- z7;H{FYVZwY3z#k&_4X=m=X@0BaIRfOuKwwxn_7^IcUu|@L0{}2)j|Dtwh>7{LttRr zn7su`V_o_Uvg2Q$3!UisRZsvVG{U>tpXYvqtb$5wt&j+jK`Lp3R~5CbMdE9;TDzu? 
zCfPQZy}sjO9}0Y3%V`!q-;3d)SW_v+74R| z|KJO#9TA%YsWI14mdBI>Qll*2@cbB0JUdm3VhYmVg(5Slg2LJF%SV#k8w7lUk6co; zXAet>uvO`a$|t0?qc}+Q6!h3LiUM_uc7}cmQ9~W+ynpq>&V&At2F(;>$whYRg?^-D zUi{s=UwCEbwpOIc*tQ%oPuwnQ8R|^V9y%Or_PV#U@$sAQb$oMYO}$hsvpt81S5Ng- zWOYmEV=ln&c$Ws1oLCQCJ<^sf!YNd2)$En$mWcMK*Y{jCj@4Yy)%b&6&rJpuE|L9@0B-AteT}3 zuJdqeT5ttsq}Gnh1wOCfuUzVu{eE!R!Mxz0J{Od!lfhe zw~C{bU$gCnu)fKKg0l6|&94(3@R_1Hqn3Bh&chb&)Ax;J9$oIo*K;X^#1VJ1aT@Pu z%%bHts|`@3JgiiPWQ#1N37UsvN!*CUyHkx?!}R z!}X6r$Gb{A?ti{b-kLghItF{fe^ZFYD)>s*MB!qk;g9_eyk}Vl!#`TEF8?%Qa=DY< z_tWO^ACHM4Z+7x>zW@EIkA+;!kVO}=05-{7~7r#U?wcNTFko1dpFX? zq;kua%s#Wr=jUlJwot&cq(ue6#zW+p!)THBzw87FX^#8Iah){Nw$D<-3N_!}*@On! zu_-&ISg$53^(TicsKkXPrY}LMN4DB+nTqHxsXYx1$pxc+NW__U0SBJYRHpWIeMyC; zn4FMFop|+;k%ktChf|J{ltTTN>^^BgxuTR_gq@Mx{n2y%`22~xM-FVVRKj^ng<>$2QB&SA&MS_bH*&@bQFZCfmQ_$&f)M9dCBb<1yWnbbE>^NVbf|0vQan899yrwOFvcV z%3VzHtx~UW`g&k5{}79G07pN^$#f>irB{F-CWzLhbC1>R38KSa;a+%N$Za9F9;|)) ztajLFisQtRVF0liviKXsW99_rHlBJku~q5eU`O41*rP)+4$h)>=Q`==Yk7-brUy&+ zElQ{nR!>}7g_A7d)Aj8z_G*w3s!(sGz}s>pyD9eWVDX!ao)VlQX;Ctbm8tH7i2TihV4FDQVKOcTL&1Y!$xI!6jVK~VCv ztLQc<6-L;G9vo1sT4tl)e`>LxHQ+6f#^il$S?#N|a`CD<_u-tE_^YoSfhD!Gqxh-% z`R90XG#e7uDYDOK_ve~|b{}5}1x^Km*JGvH%{$r&k(nec_1G3YLpBbx{PQBH(|47W zfK}56)169J007?opxXKRHe}~w41E&#S-xg80?V4&2JpMXda+D%YysQ}6N8sx0(^7} z0xT7+bIdFP;teB_2BYau+n~qr13$OCv#$}Scvb4wvKvG6xf#hTsT9!23oAyhwP(^X zx5Xs;3(D*beB|%M*$+q-JUI}vJhB4T+ zA+KqhjB~?^(;xG=8H3~*VN2wJkdrqMZ$=^33pvDsyu_(aCTB1KBkq=hacwRCfl8)V{q65&+G8g;$eN|5G7iGj@y8va&Cw0M^E|@r17vfi7pHWTS$T~hR?0L*Y zvGrQlhbwnD4`ynf=w>mIbYtCM|Fe@+|JtPMoOLIB+M5uOLx@-F2t;t}bGuE9H1@wL zk`J!8c^jnFprY)eZ^s%Qv3Gy>AuI9A8%BTr7OdH48OiJqFAj1VQ!{hostA~95}oclPB0rfRq!~fh3vJxkf!Y5nfGnvLl}bE-dEU zcV|p^qvC!dBYr59jlK>%xE>eUWOXZk@1G?#Rn<>U)+4?55Qrr7E^`jHLzmnxB zr*w+uXvUC3-ni#}F77m)N$2|#{Q3_kN|?>?bypY%euYDX`EZnF8$+7CLd1)BoaqFI`cHX1udjVgHbpk?j zz`RiZ2bZU7Sh|1_(|myfLgELP!Ao6SUc%%8?F*UoIXoi*Ox8ntLi9l#4K7%ww*^jN z2X(cfqYei5z*~j+l7Td0Hc>D;hvkcNx$#1nu8LC3#Piu5mS0T1g9G$9-~tI>9stj9 za8-Wuayw#O!2`CrsKpHNSjRH=`u+xyhPMyw{2ak-=)^!6dl==+llYZ6nl7@n^T~K` z;42&ZQf<`n`lL@6mIW0Sq~*%zCY||TpE`L65C08v3o+@`2CLI*rD4E$k<^z(7BXk- zvTfBHwL89^i%PC<8`gRz-pQvBUn3pIlPf`=guTF2FLti&W%l7Y{Gp=Mo+FKKY%RxU z%%X+zaqm2<@8;f7d*~{&amA@$8Er}p2Cab(#>)mcG)i9_&_bDS{wF}SMc!>(+ z3I)la-RY1U^hZ{+X#-awq=9MiIZPs$wx49nkpkm!Vkm?0P4JX{SPyu>BRAiP&ZI;? zj_^Mt{aF3hDTza>f`@%N;C3kac^$YY6=5{70R+`o#LAv)jWcf(F~_g))<-%;>=&Za zz+*c?N+%wCNk2;ZwAO(arx>F9S7}|udeJS5GAudetr5(&O|1=Fhrjfyh;|B?h)o7W^>q_ar{Z+&p#t$Nbvt{ zqm*ZDs(wM~ArnNb&3a4;1fYBlvas#Ga@SsE1|{)i%zl$*ScZ-zb6 zmjx%zM)@~T8_DNqEJkopw-I%1-mvwX;z?zF@0F*|e?6UPp}L~EOOv_w4fGzB7IYBo z%)UU3i0APd=+vsb{Am|x7v}C;WTvD8NQznxwPi&W<2UuZ9{29J z^?4MksC{KSuaeH+!w*s31rLp^7=gQc^Euvi>e3G{Kd`NAkQ6sc8uLkwwqrk>k6F{) zwhxUlIW#}FKAvs(?k!B+P9ndze7C8UEt|q2?>gdoga#A!CwVee5n;lOYljo zXck2X5}p~bmTkQseA|A|Y!`LTHKoJRtTvgP-Q8tQ;skCZV%g2K zexd=rXbF*jgwi)>MOCM7(X@qkOnI51Wvk zpR{l2_iJVt?HJ7Ar?!GpRme>z5`wfqcC@e}HIXANbX(1Qfv}EKWF4ZFQ|Zu?vcL>6 z>D+yfdodxY7nM{WTo>x%=jCAC%n79yP+Yt*O6Y7NRYs{clSrbG4K=h!QXtr`*ECZQGwX@;O?|4(f zBIie$$djMHo(*JTDB;KXO{=D@yf=n^cyj1My4m*)EKLV0TWzR?uhVb_>g*+Tmg?wf z*m~C5Qx=i77K?wL>r4MXj3@u6{rUeeEgHoITij??Lj4!>h2dcYSi;Y%h^?ThKU4V! 
zQm}0bhrRj(X(Fc{=H8{^+kl(O$=&e3!`tQ>@ZkNE-$L(`--Z9;>hemO%I!l6*me89 zi0(p-8zM7D<$Y{F;>Y4qC2N?J)guSA&*_<&rS^z8#v7*EN!_8vd1>Z)GMj-zo2mF+ z2Vw*HY{4sI%txeF;UO63k5 zTsFYO_1lT%dHi&_tCDVL^dRxg!u8Hj21(jo7@2cwpG0&aoku9ZPBcrbW_LfUO@!TS z%vialZmz8;VN7G4ufJj>vv3zKA0^%^np3-%-0c$vX9PQT{=uy9IEXEE9Ukd$;Fzrl z!HvZ&5n)4Y@W%}G0;|Y+P6?Bt=03__GBdsstVo3<>q0H;4VG3olr?)nSKpX-80F3%|F zmvD{SuxrDxNx24rmf4UslJ%S($~oJ z@CM!S+pVqFeLZ*ksf_sxGdE@7aZt7=#{uVZL_z+ymU}#p@|7jrC;nbLQ2}4e9;I z732&7K~aNPyxkg0>ixa&bp8U4VflId?F5`}PznnnMWcd?QmlJ6khZG@Z!r}NPsm26 z;;UP4bh~;jVltNbg}s_6$y_pv(e){^)6-#%Gd4Ankq)zH_+DAewTTA#$l#MB98HgOU_F?1W5TV268Sy+xy`}nW5+= zOMHM;(r%lGN~+g6uDC%@_w4!QGbSlUa>naf8E30#=wlBn(?t%z6}~6t1mnYe1Th4zT$l1r_Zad!k`bR#i{I{yFF{=T_FIgjS0pRgP3)tezprx! z9m`G2$QL|^0meqNYBZqT;zoXh1JDtI5G8cZg(J@9-A8dEY&~Rc2fW@HS58STln(TH9J#pj`F&1a9 z+R-fVWAk?ovqglR4cw+~F`a#lQG@;(b<;Zp_;gw%8rwiR~_qz`W515OEP&yOZ%^n8Qa}! zQk@@aL$i?|fF&~s2l`n5VFx1e$kV#K%qgXNg;9-hi<>XWzxwi<%J5q$jM~ZLX0+`` z@JKaJGl|P15B5UN4A9*k<2fmLQt#Q@ckahN>>mr&MN*PmyixSjpXxR>nmXxo(PX0n z`m}9>mL~x_wh%L+`1benxA2S?dT3st-+At!usmUf2+$FRcM-eWXN78#&AxB(kknI_ zoDxi3Gp~Kfr?DFN^{FiZa{M^#jx?kqUKi|_tv9q!0Y41+m3FWu0&|QSGxyw_pPfH5oz?|#QFC8?x)4t+`{dOAG$jM&4xL!( znq@@^cNg8y)I?PO`gJ5yz;9-#rg*KF=Dr$`X+BIOQRjz_F2q@R+W@RWmwiPm%8@>i(}f3`X<$9j$w^vEp$mvC>rOokv!o zKOu`8?CazE_L=)``#wLh)WIF((7yRu1X|X6=9O~drv<*Q3wi9>RNlX?pKRpIM8P72 zX*0mja6C2Nb7j4puKYj-H=1VA>?~ZqnicU%C|~q2Ttn>21^8?7C2+Z~OvNZ=?+~m6 z`t4^T_9^QPQ3dGo`9hgX`kONc(f*YC3VjdpxWK;f}7egzr zK<8}ET_usrX}0%o*tRbhihaL*aY~b;(mO(Z67)*VxqI#ggkrSp3K(_}Jp zk>NGv&|9gUke~`Me>Wvw+J8@c$R3Y4t(Llhs-9cTyu-`G8OoIS1sl#N(;mf8Nyi=r z`{ywW7qHj*9)N?zw+LwqNvW*vhqLxw+vuh5_B={~Xl2oS*MIJn*=xx&_bMBVu?fc$^8yJzU*^I^xWPPA$U}q` zpB~{~r>-P!`sl{xgQ~FIAyvqEEurT>-A)_}WoU!pZG0j!ZU!>9!njIBP3;4&{^&}5 z@&s}#!~L{jn)pECz*3W>qIS?gw=d0+EnI{;3Gzdn*UM}r-KOL)i#RS}wq~-3* z&L^jO9yjo0E<34&G8q_l`5KGHZThuLa^8Ud6jzMjRda(Uw=!7DG4zbDKQJ=da1~~I z3NQ*3xS?5q#ILwDW( zrz=1>leqw8_h{IP4Ph&McVB(=iARw4RDQP9^}fCkM)788;%`zQvUg6fnLV(&)dthS zxa~Mvb9grruKyz}ZoxyTuhh%j1g8&Qr3eeL%EyJ1bQwokh!I>fTAQ(++O-i^F@QuN4qxE zlw(g{kdZ}7pYN@3#)_Z8B_AGmxz^WbYXt{h@0A<=c*}tH^h;o&-T`HMZ9@@Oqs)N`eEgX^T!=ObE$t1g;Zcb@n1a0;oU4u?RF3k z@AIji2~66Vy2x=%b*ab3wYQuk+caSi zI-PGkjZa^r3Dk?TzH{6Ko~=fg%On`^2+jk;zHtB=1(Yj$9F=9GV^X67dpxL{pMtAl#v*I)V7SbF8T3{?q58yDnCe zttT}A&|}5?+FRi#eGCJcYaNS;Y(7oxcaR~ll@)d&^l_$_KCIegE1U_9hO1~F7}+Agr00D+yK=r!`6aqz zjueGj9UlI~PLv$_f~F%r-ggHg&iD|vSs%f`$ZfN+%17~%_-^6pF#sJJ-b*Oe`v`C9 z$dIHzLDTIH;=*+V+PgB2j5^FCvVlv&-c;00OxMQr*Hl$*j?azloDb+8f2_xSX{T+6>l5ZvEJm`jA^L8~`15j*)J`JmMN{bOac(?31lfW_5dXX%Kw*sUl4g7Z06Cm8-R7VpF5nA51{m=V2-aRsj^7FK%)o zv@`SzLmN^F)q0C6j*!FW3YgHvuwHeHWDBM) z!^qVw(&r3wzrRI2M2KEb%Q3|1-~A{-rnaH^VJY;00l3Gl{X@d=&$?`2hkZ?gSaJ00 z(OuXxD0=yC_K7fvQMpd@zv$~9sN~Q@{Pn-%6}PDzUOiT+cs0+O(VBSFISJDuOKSlJ zH!(fuRJ@6u($$p4Y64~h;6C)K&}wM-2K6Mu3vU!t`UJ`>U5RO`4%7XS>OCW5#j{fr zd{Kh6ymTr>svn)zV(pnjh;BzSkuTP$H_PCg6X$JjzPYN|h31|-$)_afd{g-o?Qh68 zhBm;FX~BZI7R%m%!2BHUAi8#&o0#OHJQ3e-Sy#iseaM`%Y$zBb$gO|DCo51&g34q~ zQX!7$5quJm7Z8_66x2fC9jPX4XpdmF-U5>)+t{ru&tI_g#k~oY^wwbO z1b9Bdp`!umc5}GW1#R-(tISSBgH?bwmtj#BlgHj;PL#1jN`T6f79S2{cp)MmB4K1N z)YDh8KT;}=n%kjNQICYN?5-HUiPM+sFLIOL zGKrZ;Za$}TOnyc4suSf?ad^dN17M<$R;XwN*}eF#7J4caVbT<}-8oB#ACL2XLw^r# z^L=w$MCoBlnYc0p>$eA3Xr>j!=N&D0OVs3U+jkFLp~h+=3@2MwJ!2>FAn9vEuj*Cu z+i5$-bdGa$p}bn*TFF1=l3SP_P_+Ko=Oo78GrFek<)`BZY=fwKs$2{Hy|>SI z=^qA)?%MlAlh3^iG`(te{?c%1jhCjnTt;?CgFZ*vBjd}EW9CwE!+Gqz@SiLDt)D2* zI4z07g!T4@v1i;ldzh!*1+19YDMn&iUEiNcvi#ag>u!bofQr=1B_zKbXIC&W`Qg;Od`0e`n7OcBeTj z<+8JfRE}e6Qm5ujxVnWGTm|dU`2o|!yZaT19=@DJ9o9wqjx8g`<-{j?25>_;< zjN94teg#{-xz4aW2afB%E=TbMXdA31x?M}F%-jY)Vp~__=5y+GoBTVKc0VVE$A%gv 
zw}6tqgXVKdJ)W1$!;4L7#F9+6IfdQYbwzicZ`xe{7@W@W2Ler(aaN&CV}2lsVhgNj zSPvad(+4% zNt(7B_TQ%elAQGL)J&Y2z=-j8wJ9B_Nwq$AZDa%!-AU13MFNmwozP+j6|gvw;;Xb* z2yn1CIalvlzPz)pu5ABB5)v7bCaZvDYV8VAqgh7%3*~pbW>lW@@<`LUch;*m29f3I z1rr?~ZLO^@er%F%EpOl}L_2E0WawO7!o|YYSJO)M|ERaXqq2P#;#9PhSvw z4&lA|b*|%0m@9?#uf`D|CO?nZ<8GgyRl%3NMROT3ou7R4erZ&~vYYu2MCW1x4cRH_ zQ-u}#ZSFU}?{Aw|kzhO8+Lk9?K1JKLl_7tivQXL7CUQyl9Nv06>b8x9R2cc}yg5f; z+=Ff}FBF}B9j>P>(eUi0qF|ay)td+W@C`|UWo3# zgwTk&3_b4|32Hgt{JqqCmK9GHx|_2E5({w-ml?akX;y<*jMU~QaGDyLv{d;82cK2s zbibW-LOi8|MRTR@sSta`dU)B{Z?7W8KKz!pDvW(!JQ8pfP|Qx8CaE}3N6^(vvsrI0 zRueIoKM9mSKP6_?UR>1+x%cJRuWmX*z14*^UEjZ2H|z&p4x$Tw%8=+qC~tSb7sfZ8 z^=`O4Zs9Sk*^{y)VeLdakYs6a*|G1@mpVgZkV@7YIEUj6L!L3A%T@Y|KHlD6cpzW4 zdfkDqTl;Lfps=~Q#ho06W!}w1lH)XT)}sKyMr6j#csj)G9Lat*y_r4LYqF4#_{^g> zi9Wz6$ax~lK$y*T&kbwz#a;qixIw|qMaD|pf9j7e)X7==YOOme**Uw^tzi&!`b8we z5(`Azwb7h`$leZ&x!$}Fv}$iKkE;1*%B&;OC2Rc1L~xmgvath%kM^VCtFlc{9#y zgyWfF)BBob?*f#RTluB2McCY#0XL!)&gYq~z$I|{*-Thr>1C~Rdms?U&xwWI$836sD)to51(za>UnjM1a znA?P(2<#f15&aP}k{cWxA9-Ce4JOT_H}Gl0MewQff>ak(1^EpLpx*E(U@FLnxBBvG zj`vqC%Xj{$>WZIsy@ki4tT_ihg%mDXCMnLu>bkl*p5%OXWV-+jL$gxF?>pTjz}vT( z17F=Z!DYWLAYVKE=?mYSIA;PxbDw*d7|^Z>i%^RXk-a!v>SwPobMcZ(q3!9WQ@}~) zL#Q+##8>M&13T#wne(qrEB<^Zip{6|UG=j&RVUm1O*O&;XI#Q>uPE@`S-7XV3o#5D zKzbp&VRfuvF3kI|n9N4}jhnH$__3z(zZgrZP+n>YFS!4K*m`dzt;z)_E#K|YFdsW= z17!sH9D(_YrdwQ-);nURnWS>{nB2X_Xwh)CUao{i+7Uq29Dq$pQBN2ELIRxC8_9v- zAASPoUY}J=^#f_{oeRwT4|gu{AI=(y(xe;PTPuxg*^Cb9CD<}tY@ zwX9}dueBcz$)-uLK&Edh3vl+xm4#bzuTfdRsfUiHABrFg=LOm|1bKt|!EDx_Gi9Dp zlA8N)ec;J0zBAJWhR!~j63Beyo|}Wb-0>(-1Ss+Dqkeamj4R>z2I!fp(+&$ z#j{gGn(V>1{h@{U8x+1^4=F;^BjphnziA8ROT~(g);88u)dtU;<4`DM%zas~^0l;Zq#qXzKAFs8 z)XF+s*qI(QsGZ(X5M@Hn|nw+y6T+p?it;q zB<^UPTUNlz^a@PqK9cJjRTMcyl{b-Zvc|GH1jO;&bMmy76xZi5lBs40nLf@dZ3qta zbvXVy`_E07Cp=Qwc9qq~V658_g2!xwpdOA%`4W6Iqf86ub`&?jH${Jj zAhl8Qeu^wfdDFx>&*%nXYaG{Rj?mV1B_Z;_QR5xZ7nw&R z0^Zl<%-b_uC9vgA47@J^?nOpANE5E);ZR*MLNuAj30t0+uXe#Wm|3~o#m;%Obcn>~AzsrACl z&uz?Eb}`In@b#wOb$3n96#aPbDMuq$<%3? zE)C|SR`l(rjJc^uI%xwCea*=ih}~0N<@EaGnC--Gl24(6kr;D zutD12-XZ@0lkhVS6E^KY--LtV&0tZ=WF<Ijhv-F^he_5^Jtq2|hEW%3x;d)f~tm*QHq5!9TCht5C^~3>|)Z=Hg z_hz|2CtJ+hba#QY*Vaod4-^@#5i<3J=x8Jd(YK>vbk8d7&O3tK)YfT~{C-rbzmb6O zP}e+Uns+a_69qx?1@2mTS#`{@X`h~#zwtuCVQrGT?8nlhYs|g;51gNm*O#{&eh2U! 
z+iJ##s=z zsCW-X>wddIDr(OQ)_x>kzFoaKX+g=}E*Yl0uu3A-y&6M-UH-sXP^MA&0?%VMk5QPGL4MGy(KdZa4D>{T#7gk!`&!7Z6&S>SB zUK{yt_M=MgR{LWgKJz(ah|Dij-V0Ve1zv<#KXT| zr=;(lY^C9o42ed`?%I89V#oNe%|{|?L@lghET@rTJmM5?h|3f@9pY(%X>wT*;y+c{ z2|+*!kEzUxIJ%M3!{Bf+wIL|RZnfJ^Oy5tDdMu>z7rq*Q;>+7VSu;oMgbuPe-+i zw=90$Lz7ql)B0B{{H`YzI!v|#(YgY&uiQ#K&d1!V|JV{H74D~Fx!`LVUCC|LCc*@A zAUFBdP|WTE@Ti&>{;3bt;xBAE-ge5kD|DZWb9w=s@GU^I4j^kO!g(qVedJe@Pi1=e z_h;~s&ZCa}8Y>C_p+<+jTa$Y%cHT_ToNzq;31(BkAB;QIcfB0fo6pnRvJ|&Vgy;R{ z3bX}FGOR|{p0oX_IvPje84sA)+f*2pFT+iDPC2_DE4R){VmKYU=1#i^Bg$5gvvSJ- z$2p#z!sj`P`qzeaANHT~rr^-+I(N)qU<7F6+t6$iXP8|NW*K&8=V> zz-B{zR3sOi-v`5!dhkijGkzZx*}V?~zJF9M!tH|j5KgeEes3}<3PO92sZkUd6&0Gg zIZT$n#t0v1MtgNEg-WH`+e466s8%)9A{FMId{7IcXvP8Y7zVXtf)oUOrZG?r!+Dc&kbk_cEt9yx>#F=zkZmi$MSECU;|H2*2Gfvv2WczJ|`IGqfVcQ z$#awm`vPHs$k;&o30CBIfo@9Bz#mAS0J#KgcS3(4JwsqsH|@<)*ydZTU1FtT=hJzN zXRcIT=y3#YvT;iYR($jqG|TCx%+}cx`0a?rCQIAQZ+jo)5^#@eG-7#u1(Q@qq{c#Z zjljRxNERX}c0wnHuKpmax;!rK#U)yiu2B1?mc_rdiubC|mO7oj07)rb$vf9a9hd{% zOE?lrZLI1hxRlnDF>3OBqOx>!vQFNc^%tg5m)1k;!KuOP z;Ms1A#$h6)#-3#z&p#7+f1D;(67$k&ChgAal_K35nnSP;(KA!tA>gFjAY}yse;=r@ z-iA8vw~3UM*F3OoTNC(9&KQAZf2g;NQ%f>9v<`EFstT2&ilJ2jfU()yLgG~Op@>B_ zD)Ir&ab|}O&zup#Dsm6b>7l+;ivQ^;fH?1KAYLNV&@G#aa=m3T_;L~-8Vh&sr1hK25p6+(+4CHxP>%O|11sMA#V zN?W&8eXew^tnFiI1Cps3U6=AMUqS4_ar+LAyb3w1H8fEMK+$s|)Gr_ZPyh23lOLt; z{iL14AD9SoJDx}@Wn=8A_y{m)Oe>lL7Y^km8eJsVT~{YMjqUaL@Tj-)-;k+emntkO zT!#S{&3Q$}Nuq@%phguD9Y)3D<}Nplyvgq4ilO0n$5Oy03X!H=f;a(xhUkqMyk{E< z3d81tlS!L@LFe2Pd4YqAF^hLyFfVu~)D(fgX>WUQivCUgLr!dR#}J^%wJK0y)8sS6 zB%5vQc8p@~%C&I^)v9xn;jOVEm2##s<9$fEY5LQO``;f-z59HZ*S2>vcb!z;X`j{N+W*s?y@tYZrR* z*uf)={eZ5%Rz@-)pT>75>k5AXlD<=c&Ywq{Z1gw=KHT|wDqgSWvDnR6ahb)BZWF!j zt_Uu$XqyoVqX_=ODTEny6knL|U~`z&I8kE-^XZf2F%yZMFlUyjq^=v7_t#XAoA|jX zKNN#|MuU3TyPvbytlPyQ}Xn=PT#BW-dv}-t%E2 zx*6f4Qhf^CZ7h@G3#L98Kd4!E#)O{+Myn_3X(4P3_tCe9A3UMR99;deQTAI{eV@0k zp}H{^IC|tQ`vg_vm zJZM8?!fjBk|CHXdT9u5u{Re_UsfUP|kliSLZmS_d(VJp~U1LfXmFvE)&bZ&erQ0KN zuHpRhAhJ7CrANYIw;|zJdvnNV9o_o57!>;+G_IL(3?VTe%>AQA*kPuud^0O<>(aE6 zXvr}e7B+oIQu-R&FK`wRY{Ta;u(nNx`7@1plJVG^@lt^cND=64>m)s|>?P%yXbWN4IXDS{D)iQ7}5mSo*$!^IU1T_!?eAFz&9sy`+NR zS4hsgq62$oD$@bUfI66o$!LM-YE9lD-d}z!Jxn$`HNj)Z9E3FJxVjQ*e}mc4EsxgR z5Ug4+Kso9sNEr|UHkAoBP^j&yBwrKkU0bi0G)(mjzEaNEP63UO+NF?wYM5Zn+4|JeMR528*CeCHZ#uc9DqR1t|Q4gI&BZ^?;m0!5_JTwhU(P~ z>=iHPy4qM5%QY*6!n>r1awNXuW6S;EI}cFKAPs)(9|tje_>-7_9K~~GQEXbbN+_SLsowO@($IXt@sZ=lF{Um$(E zvm(ao#>+BAH_V8b^gAQTiJ%n-dW+#%LayXWou9TTDBcN_B3-Srq*y}qLiuwP&e&Tb zXLIjRs{TQ7I-9tXUINyaS()_p>2J~s*6Pdl3F=@A%U%m@pyrsdTDM&^<#c6wt&Joq&sdNjjyX#mSrz`BUU%^~mq3_(WcZ zVe;7Ulc{rT`(LrgtrAjB$E07hkU`$4&6I*Et)fAOWi9rV3mDg<+;c=0+Qx`+vkSMf z{hLmmRBfN^>&K)NpAm&9S>-|Y;)h~U)h+GlDOx?^Iox*GQ(3Y;=Txsq#ap7!wOGGH z0mYi2T*qSu%I&et(mlemplHI@=|Y;un%Ri#onwUV)6LIH20R|`EMK<$Fz!h^5nx-K zI^FQ};YLf>)Plt7sNxDk;(dQ??bpMoU5U-3_!yP#6vOB>qw z5?atKMuvSu$`B>2@yW3pRyV&r3HGmRtaFvEXZQUQUdl+Pq(OH~^EyK$2;O1Pr9rq7 zC9r4#k3SGbs>pPE$*S+rSvbptdT-liuF6%rsw-7u2Nrh8n~o^g1Mq88E{V zXnLBPr$&_9Yhm_y|KJdyMFDRU^BoypFO4VFE{uWR?wWZr8uHur@929jY5hI%(c10j zB*hLd$%`jVx-7-B&%*h>hF{qIr6lXq4#%4*3X^RKWAXEIlWy~k2t6;QoLr&d_IN{n zz8hyxZ5T2(j$OEgIUx2z_Sq*NxAOpw7r2Ev&62fvOr+E|xow1^XNd<@(DYWgtuap7 z{@pRY9!r-yp*!QBx|j|<2U@U6D2}}$PUP1BT@KSv0KjZIS7OIh8qcm|J9xRWKWuQ! 
zHT_X=*Gwfm(11gq14!i`zKy)4ADZU}@#j7y5uB(@VP)=c&I+mobqIRuJ<(%cuqz<* z$|nzwaldH4xaUS0##!O$)A^~7(->)PK|)>;+UEtx$7O;1be z?rqEcYINlgN@dc=CO@0wZ*dFW)arn-YYWapc>^Ec%3z*Kw7OK=&=`1zvcaL%lhA3t z?PP~Oyk~TuB3m&u2TCOTRj*(@VrmiMA|;0^IZaj_SDYz88|EyTs%+e8?se*yyPSe3 zOwwbzAWDCoW)Ip44@XrWv?)f-o*|T1;@!Tac{H%0)!(RHCpxr$ePI|G;qmgzbO$}Z zsB=Q9B_K#C#-Z4#$_Tk1O$PXW+$W+JZlt*P!C29Io1X=Xy+SVUEiqblkk<1ezl`vN zUG*{&Ef8LbY+C6PX9(0TIODq_)t`Y#DfH$YN8?oOS1vu11*Sn^=30R(TiX#(Chc>0 zRoq}A7R_W`n-N`b*(ugJWXHHOUOVkPL95bLD(v|eS_lOBkWBY)(M9vyQSeHiL^3Za zgxI(cxmA>Biqo<8&S2IGjvd%JOzkT9%p%4h{WzB0yLLYjg0!T5Y1w%(H+i1A45R4U zj;|FUEJ@1LPTdPcd6GNg4zaOptr^;YpNogpHn)2gPG37nrIg%=TZ5f*;MR{wh??At zlHeB_p9GB**oNrW`E9ivmO_mD!viVKSo25NO-V96-B%^d2Mf_Q3(IxXmT5K4;n2 zA5=7@B^`0yH*AoKl+b1svds9C07^De8R&-1EgR0u<8)kuBFo}cQkHp74dfbk_mqX! z`%AwsMu4p=+`;o{bmy<;Gt_UnTIMm$qIKDVFKgCs?o64>-SrQ7YjVT1qtRFFmZg@J zWRbI|$V@(hX><$789*JzJSLti@d*$8+4*_n>U*v6y1AvQ)O!EEsUX29)r5q<3@l?5 z6Ztd|gD>f%3KM)nn5`WJ0m`6cWqIXJ59A&(x0w@JKZ+m1Y}ZA_ZcC2 zgMerFGJ9%_Y*I{y;#q7QlAUz(1H_3o9qCchlSUHCVF$Mk%C+{nh`sps1)m7=jWUER z(H7bsA~3lxt5q0%rKZw!wIi57O>XBK9oMQCaqqS{Jg}lf0%HmG#4$5mOZ;37Pj)j~ z`mLVF zIhD(1Vq0JJet}*Y0yzO8j}UJZO_7g*ub?xq<=R;Cz85C0R`91-_DyxIx#Ai9!E1=# zE26EWL7FZ`X!$u1Og4iIe)%rpXjH#Dg53yN6GfIe2PDnHek)_lCb{?Bv|ZTTvByq;yFDLKsRG zQUBHi_DTAuM3j11vF+M7kE7=2g?0n?r&1Lp5GEp95k(g+3^Gc55Dl>azZQsh=?1%( zQ{&VxQLt0vpkt-A*6L3#Yi-6mNr;9N=CUzd_|mhR1EVt8hT!M+bBK2r6r&rGlD)7# z%(W4d5jM4pD>s`y#Tj}mo%r~&*Y{S~XFUywOsFi_;*dPUO%HK7R_&9UFP>&37TcP* z1$ij=4=@*5-PEUZ=Ijwo>JWe&g8b2Xf}lCDun;2BY;5s(=5S2ZX#LU`%*&qMWc;~x zPx%56avKb36oRhZknHs)5Vj8^MzU2G+pg}e?Mq!7!KF6`mqd$}KK!thL?a~(k;ESJ zTm~PHu9rBDm+izHfM(idF$kAij)rPMlQ*_sD`>p974hZ*N_Z7}}DEqSgZ4HjU ziV{Sa;ZZT?eB$T06?7%Y#?Os&$MwQ@mi>m+;tLH#9JNeOz2KkXcy#4Et*iRC1dALoolw-h@j z)rR{SGQ}?!t4cns+juk7pY(bhO*W&%BYYOjr?%T*LUOfc#}t`XneSImOxs;keAS<1 z;1we#9rQ4vM{6&S4gKE%KFj|=_P-L(mLtEzs@)QFMTp+(=2c_0$(5AckefzHGl-HN zDX)!~4V6kIK8s#q22n}^^@Jz78nZEh1uikok_{uLSKReJ5z#NmD{d zu9)rCBla7;ke#9bg!{%crH)GH2}twkjm?Tc=Jxa1UP+axhaPekYs0<5pH5v%Q(sOL zu8zFWmK$iTF)YLw05Z z<%Vj9g9fpsTn^4cmJKL1_wsoYcu&Q8OTAmdSma(GFGu3kEm;k(5J@u5Qr2%P^LI{)8!(GbpWdXbdxJMfLOcWy_Ct~R^}_@d8`2OC}d z_9m_ku}`7#EVv7pnqOrTEe zelbyEKby!6oG5p@Jqgcnk4-!ryDrVQ1sl_dgtlmuo-Kf=uJS!Gd&fTM*UDmj-f+HT~B^<3L!&8Y0*gkx;Iu3-D5<0qyjqb z6w!MYDpCV=du|(LB9Y-vZjpTuSIS|YLf?P(!Y%6BLd)BKTY3M}BbFDbQe+u?MVAkg z87`TB^=!Gjs_}DEC&^*MG!^0AMyIo)LPDGgp}{BCrSn*T-wF}+S6X{F-+hedueA1r zt<8ju;a_R(-5a-jA+!bVHw+-)e*Ay-Lb{^{L8Ak@q4a+|hP&CYU0`s!1eoY%KGnaH zuEM4OsT$n2`lOF<|JCRFulWEi-e*3VoRitbPU1h8Oi_GlGeDdwpCY363yGf}`le}~ zk2@FmCFa;8pLaSvd!;aUY&Otu^)`o(y8PirR zTHRNlz1O{BE?=73_3Cw`T{TUHD9ess1u(S1f)7Wd9YE4T92+N}Coo?GWCWB=1L0iP z8&=GeX#EVW&7@&P1Md@l_ji0!FvY8<&>#O_+U%;DtZ<21U9&u6+FB*8`(;vA)!A)3 zEDK+wZk-ihnYj};`Zsfd%uhOt9EDaY6qVZ`PK+XS2n=5u(ia@g#FqC|eof##F)Uki zML0#AH||Euy|iLTx5blr5LyE75M(X`*7j%`goX#F_2<#<$uD&Z83@Fs%7)v%I_ABvbw3H&+tK_*2DmU3`X1|KAAWF zSvdb1dI|}>3~vn>6wB>1Qb_gI@bm~ibmKQ#42MXGdXvm?44nhC3s&K!WTQMgh9o_X z<ctQy~%KmV3-*6Zf(qWO~7l>m4CC)zE@l%FL^hP42%f@ zg~8`1!vc#@p6MrBon`?-T ze7WMnnq<85Z+IW6LSj!hZiURS@R=9rRQJGCiiq5BSPkfH)_}C1Wnf9rQc2MldE0P; zF!|6w7_HBL_Ox{4-*ZGf{6BlU{x|$Rkn;9Vc__P=sE>`Ps9$y!!$+%S`yr=#qe-ZF zASSX>9{Z0c3^%T&_F(fI*x3s4<7U8RTLOiD5rVObze0jeHFsAdR85zCb`}S2bm0QPRqwW+9o&6laJjnpx9K-pI#J zQ7-PdeX4J4$Q?FuOaEEj*yyoXtt^>#%|tT01YmCz{9|gO;Fr0B8L&Uft``ykK}&L^ z4ZhD<^AGc`a|n3{?Gds~0QA<-b0s3anL-yR%U3C}`ko&sNF`MRYod^+Ufjt?>+DDLz^QHS4@BGt*u3u-QjVjR zz$fVyarw7v`31CrRH6p(0ZGLAkhVaLlm2_P<_5K)of?MP2>>8rH2KmbdJl|nUyOr` z4$35-KWH5~QuCoj&H1Cq6&0iK;|L{uKzni>s_isRp1loBu_r*XlD)bh4CcD|id<_y z6;P-8+WW$qCjqq&{)a3Wbq=;4iCnOnbR@(pwGMY0YBwDWA52VzX!*l*yB$TWUfyoK 
zf*`+cn2?{(ojj<+8F25QHs6qFR?&6mV*v2$K}ucF1*%YR!rvjzu5`>zJ}un$oSgP3@u^nU4r^x;qz0uXpm=X2 zg#-g_DhI=5;%9ttYp#0cyu+LKrWGKnB7Wk1TAJLM%PhB)UpTY;{sLTeugEU?Q(kX^ z*frp%Ki4@#Gr3+B7{{os3}!{2vwzxF?#eBlz7ln}bx5NEG}8Y%G0kId`31&x10tt~ z1?{~~-mf#Vz7sJ}$Q(h^OITrZ8r&>ocdzvl^q-y z5}U$+A#vv)2#Izbdi12`ABMygGL%x(;+U=q>StCdAcwL1pjoSZRUG)-_`9Oo|H0my z$3wmM{o_+A?NixOm`;*PQd-FNsZ+KTr8Jh9lNL#mB+Bq9LRp%F&>|C(BqkvhnGh;T zLPCsv%P?aZGiK(~?=_vf>ppR=bIyHT_jP~o>;9cTJT#wtmU(|(@7L@3dcHRP4nvEF zm3>*V*mCrhI@?2d zz8@Jfsp%vN=JaFwgkaLJFbVB?(j_*;`TBQ)8}Ryo$9zTRszNdw2yUV=>Hd|!Vf8Fc{JPo;`m`PeIrijR&))Y(91M#HFrRHv#W}QSj<^0P$=y4O8jf#|zY}vX z?HaGSxy_DRzia#p7-~zKJsJdVZ;KhpVbUvNg70exhWd0!h_gEbDH02WmijHacEqUehx=+T(Hv~q>CXi`SQ(8h}?rI*pJ8Q(HSB%|C6viWmky%{o3-#3Ui>Yll zwpSe|9vN)+f1tGD*5t!Kh8T!1`FXYeDbk}P82C}t&zDw%%w!YdIW%7mX{$Jn_8xi$ zKxpSq@k{?1zZ@ohpVmxT@%R646G6u3B{Er07yCfUpa+nbD~Bd>ompFqF891&-dk~f zWZ#vB9m{`GpP|2ye&uA3-)o%8YWv5GEMo8GR>DWMEtQy>K$UwV^^?-Gp`AVE8l$>z zR`u2^9o}2NZu<*^il?#r9M0xA^Hatoyxymep=IxiHK2$4p)b5McWx6Zw@+VA=In=P zsiHS09xTf^|7d4kef8rWEd{5+N)AA@D|fI8`p6o1{{xMAU?Ik~Qx?2q1qxJA?lF2@ zDJEGH4Rfa{(Jea6@;aQ4SEqoGs97MFA1e1E&gg1O9tpAYF z5^CC1`?)SHdeKx!>>Urk3=0v=f~jQXE$oLMtuqhj%Z`!f8cJB2w>ufp{( zVFYz}xzL_;MQb)W|H8e%8Lye-2Om}0m02hnkdE!y_i#L~?^bbtZVQ(T# zFEAc&K#xWO$h^X#}K7F;)$jp0+ieC&;+Z7)m~3%UtWa*A)~7QRyHbh&!(R&v1* zRT7%QG%gihB`>^B-ol;1wkg_R-RFPIec{ce+r_KQC35c`Ii2FX`G>hWC#OxTFK(r- z5}wNy=kMiaG&z^mPi$5IyA$mtOvo*>+XYPx)FK)=boNO)His#A^1CjuU zEMMQFQ8F8dsGN^Nxs%(%Ei26q%$A=&=QY}5@y=Ht=RJWx-KUZU*`$-x+4_Rjb==DZ z&6%n8^QV10eqgK1`psf97C+US@?(@rL}tx%#-ov7o&8M|?h5fhcNBM|g6mi8+WqL~ zZtGKbZIxmzmtOL~l@{>)FI(bdmYk0ukzlC-kRL?ku$Or<>^l*yx;C4uYAkcJpJpzQ z8{C%OYiOl$ZGVXG$*aYN;z?*so-X(T2lEy{8on;_1-G?qlR^d`LGyKd79Alh01DJD z%+cjGNu`d98m@kkXL>#4_7tO0a@f|_ez%-2_)ZXSFF#LUZBNzUT;)P=?GhsDE>l9R z5f~qI*KEyxaxpbKrl>|$eH&>}z@lSqVRiJ)db;Gbz(74vrO z`pnmEaxc#whn9wu6gE$qr!q)45ne`*xzx(0?QUYc&~}maG_81V;w;r^`J%@%)329k z6L%rXLABlF$@xk0AE9nv>=SvP@ErY^nYcjx2^+bnx0tNVJ{ZM%KJqC1&7;pcdE2h= zt{SdfCF8jzz+lz`>7B^#l)>O8HGyPhaU+47d`F-rOvs(4?3Y!_dm&v{wKu~0+}p-| zcFs8g4GSNiP`>oYn-M)B#^>_838YKZ2Pl-%38o6O^;DcNoth?A2gU7Bie-_z&PlCY zX0pxGJ991D?jJvUpq*B)CD zHzJAQw1DP6Y_d>gPu7!J>jvt!h22`ZXu`_pCpUFGrE}d&dI7O-1;wB;3Nf`MHB{^! zon=a>t$)tIRp6C6*wxcOWCU#vXkA{}IQCBT1eK3Dg_>WDvX-h)H zNa6G;xf9q>-<;Ztm-LJ-(bEuhH@jEdoVt5^W!ut^JFrLEM}BISPauZII!-vLlm2$I zSuWC)D5LR+nl4i$^FTJH&bpYffzhzOcZQnl7u?EHHscO{%S$w0$p_YSCb79(QRu>n zit8NaL+;Emcd*+Irik1-@gVLXXB9V^KZh%3$M(oKQ|R$gWwmVp)mE^%Db-n~^l|!{ zmrJ5_FDwsVZ~tjU_9S0MPhHaYF30B~VoS6jLQMw!s4xiY|IEi7&7b`R*S3qB^#w<& z^T2{*&uNpOUpv#Gm5SpA79f)`N!w3Wo|Q$*D6Sq!8`PvX-aewrY&waY9?(4S%u{a!+Fo(#9T2`?tF zqf=gZVDdjbMS^yn7%jSxy{KeK47G9j-YwLt2PS31mw1e6v+qQ+TJx<(f`*@5pGDZ3 zeq&con2g)h_#c^BUXLW3JT#?vu;!CP0W-#l=& zwmwB>34Ue6^CfZ~%PeQt>aeflxH-*)LU7$!d+2jK14|mU*^g41%q}FK{2;BT@P4VA zWc{Y4nhExEn6ouLX{Dcj?O)KMO9=)jhz~{WDU*-VrFuiiB^~ybhstMaouWBF7`mVD?O9TI_R;zKf@54S?}VO5;U638o?Y30*ca&M zdFyU%TJhq<=8kiE$z|D2sX`&IVk(mglcWK{LGI+xC>+L6UNq3QZ))5hmN z&kUdC^Ak7NhMSa#=9^*5tORmd0WNY*GBOJe$`Fkf%$ORh84_(iaXK?(Re#Ltw~WZQ zQuIxbrjI=A451&SUuay!qK3HZ%9uaS`@qpi$LVAzkp1UhF z&F!wTC^(sBbcr z)Mt}ooR2jd_fD%HN%f>MP57FYFqm)p!Snkw*@t5jhjMu8G8-qyMab_;ui=_b?OOz%=>dSs2rlL1Rk2In=xiB%&EeZ&|w^s?E7EipZ2#=7n4G1fP}HjbJ`by~jQ8dso~ zFo$7NapC)D?g8E+7(@LtQ}l-P9E4!%I?NzYUM`FyJ+9`u@qwCt11j8w%Kwq>nld66 zh;8Mfd=!+r!(cS{;V=0zz;L78Q{hE(C{#on`6FjItE+JX*QHtQ(pj>!f*LC?xFu)4 z(qQwW4?|p+#h15BHKwe1Fw57#P;+TkCN$b$eF$?%bd;8Ux;NTkUcU38L(>Z<&AnR? 
zzZjaNpTNzRoY3(FH>MZA1sI#}3-zC^cV1dTdQUr5X8M}3tI}n^ms5)N<0wmqOI0bw zUC#sdnr}9ry69&om3$JOtir0<7QLMn2<1T2yC1$eelVkA<>Czg_7(RIE?;nWrNx}% z#V2l^h8r{fzyl`MpKZL9XUUSVwAq&b` zF6RVmS)Yxxi2gdzaN>Z8dS-EnL3+@8u^(V!MW}_xoGGSjfv$c-;k~5;>WZ_^y9eJ8 zIGA4za8Y4E3fgYSla!!ychH+fQ;`Kr(ILPLhw_C9SPGB@l+aj*QCfqrKWAX5C5!y0 zt2-yaw1Dr6kgd>h#lqDJ(yZ9PCg^Mz3vjM16?gfw%#ycZwS1@4K}R0IZt$}&~s zsiXeX;Z_8T+(9Dvj7fjR2ME?CaJ9E)Rb?cznT7c4SR{K=w?fB=A(wea?HuZ%eZ7hAj8fW~7vX0z7K?kojjMOv_|vpD=!`PL z?B~lnq0{Vef?bdDkgG;ti&O?gc?Og)Z&^nTC0wU@;caX$Ne-ZEs|8C)f_h$chcz|Dy3 zsfiVd{Lk`zu;Kb6?E1Qx95gx_JcNHG#F4dZ9wFre>wjLXs2o znodh}RJ|PaXq5b@)X8GnQH>%PbXsg54g z&7D*YYF90aG+G4JRn z|2$`U7-<99>uY-SdSK7bqS&SD^xY|ubndREP-5}kMzv>9c7CR%)HZ(B*;bSD{qc2E zZ6hKZA5S{(SG)t~z6bJ9$Pu6$S8>mPmrR}5SGYEP!WmiN z#rw3`o6=38ts|TT@&SX&wWsR$WCy$U)bE@SasI9L-c+bC5(hs3^z@Z;WDu8BYO_s9PD2Xl;yPkYq&ap@jSE>wfIv6RPO-!-x!Zcjv#d8gee%`qTW989v$&`n zek)cV3Mtx2-z&~%d+!~s+i{tmXxDxHW@KK%oyG!A&0Y`Jl@H1#l8K=e=V#u(?R$I9 z=0%W2NdIHEaEO+V%z}E&O0(F)0tJ~0^6~>ZdE^guOR2MB%#F|G)Y@B2et>(`iQA*F z65caKq35YjQj3fD5rxH7nSIA->z!B4NKW?_2oBcdOA&YxOge&Nc2=;mhvp57m?7nziVM?6p%BHE+D3c{TI& z=m)A)+YwXsA79>|(B$*uIXwvx^*UM3>~f=gwU2_&TfK!J$W|@S%->t~xu=vNuhT1? zrz1Dl%=VC(nsmf{Ml>mpk)snh)=l&(pyP8@n|8=%qO&}mj98rT`fN+by>qRfpETZh zdHsyHrk;J?N3+l8NpsbjLo{4HwF?&DFSO(ya=P+v=;g7flTW7a+nVw?B*p=T()pYO zS+nq&?OFI7kG>$%a!AEXz(?@BPUy6w0jyj1R9%Ld!RBM!@Be19CIKm)uiD9@Ju$h}0~ z?98eukRBa)66S7{sbcYTw!w^PTfH)T4cy+JLjVjoQ-T{2?l2_jbzTdus2x zTG+gB#>awvuN>qH#AEIFH?PW_!p(X@(8fNI%~-Rk?A^d3<;vfi%%75zWR|q8U5saM z^txY+|JrE(%Y2-#R;7*ui<(cEo7TGu`q%X1x$&K5L@8WP=7Rf$894h&<`q|OzI5wTDTn_Pe1+u#E=6#tKyQEYS zx6mplV$YN88nw2k*NZny58FH#k~pt9WsBMM7bmsF{Ja(} z0IfS9)be+K2xBv4Y2zHdf3FU*6aJabwj@oT8b-z>f~<}f451i)>{oKu-o@AGL@GJB zJiKJlad`{Mk89qwc@y-V4eodC*yC3%#|@HXuWMKl)J}S~swN_oC&en924Qa`*|C?)^v?ly{q$Vd5sKR+%z@&F4CU4rDp!8gt)eu3fgSR z{niAlTbVsM&>=Bb>Fkvi-e!`+e0A&)<}^Aux)eqrEgSjMSzmuZ1N=K(=buPlVs#3n z2C*HqLK0^~J=!hL^e)lgr}RomQR`>U&#rww5}qUu@Prp!3aw4UpbV5&rmQu~{>2a2ADy{N=m)s#9fFyu4dRER^`QR*47h}9FFeJ*x;4rUnS$irYPL^k^JLQx2A(l*QKrp-Yc~o7;b#^{F z-*Q-fo$-_2j*^CrxJXSUoB4{L=4i23ZCmk7ybsCeI82$fxvbfBO_u&=$5o$)^<=-l zvnW^O*AFt37c`icPNcVa5%weq=)n+zYJNiFan{LvT@L9?W+dRAtDdlEB!G;`AU>1H~aHZ*YB{v`~5ZM9!?>R-a}m)`xGf&EJu z{t|}2gyC=K?0-%e^yGv$=uOv1ta5Y#S-Tb5&RG%aVJ-y$Ao3*-j!bPXV?&)K^1(Zi z(^}RrVtTqYqzy=IsgTa)IWHS;YQEp=3&};d-a?ysN!_iGR^@OIXZ@4am9Dn5C1 zbHVcc*TimLESz~i$#?3KIZlJEE&YYinZoAL zUa-8IqHU^`RQm-N_EwBV(CX;8k%N%zfV1U%d&c<+=PWKuEOS6nD{x#S0$YW zl`NCc$ygO#LPzFK!zZ*r*GYcJwSF5v`-WFlkn-^R$hYBwt#*E;DJHJ87jPV7sOCYQ8*UcJHS|HVmw)QofwcGiYy3Vj#^2}N%`$n@(=ECTB zhlU{{jKrq;e0)k1cyT>?&vavcN3MRs(L+r*<@I)Gj(GGJ9Im2dFrf#ZYKb=eh*bsG zg&IPZBcxd9F(F&BRQI2vKgX70HwfPs_lXjP3c*k)>BSLPAi^R6hiaqnt&6|$- zoW)))cuwV1v19VjL!w9BW2CX{HShorYq}zRcKQ@yOPok`Hwrb}V$dTd?lS|Cq9yZy z98stzr3^C3u^ZRR(w zdkS+B>X=2jp)*+@&6b?}p-^$^M_=818t^?o;eBELcXSezhbpY7h_C5Cme%6isNHhV ze}eg&V!aUKlE!k=HE(@BvquCM^AsTk$+;X0I?B_)ra_!|v05n7tAxjbXg2ueTc%m> zI_+WT7kuI9fi8oiJ?|Os_~J(IPL<>9f}Pb&Gb^0~jgB>0MhxtnOo|U0xRG~%#N5~l zzM9ege%RqNetyv^T1g?aXf0tsx#8 zWIs;cpCDzb^FHFCVI9KC!{$RNt^q3=!$aw5J7MerPcwlwv6&(iCj*~(K8h}&7>+$) zt57}}s=bv)87U_lxe=&UnNkdp$O-k2jQ1!!{}eJ0wa-NF#e zgp7u^v&h&3y0J4bACo@3(>1jGuCOx)E#PxOU+6C^AaQlzl-4Wbjzq$sD<&xb8G=Tt z*c9t8xB*?l*aP1lSUl?uy|c<^uz}v}Eh?Uh4{cTxD8M;vU_r+oVNtoKX@!K*+1zaC zNvn$T6e!~kI6xv7HJtBP*FhTWLC*9s#xCbSvc85H`Dpno?|i148gH18JL1A6p|ETT zo+dEu5#Gp2%Gd+B%9wl#EWcl#>KEMeQW9pEFz!HY6TP!7uZi;{S};m!^vV{f(P7bc z^%{W4$cW-~j1_*S=EB+pu4^9+P3f zx-DxIWxKY+S#mTDVFLXKL(GveiEQ_j5HciBBIcxfEp*zUu{)r}z7oIk!5({`okbay z?a)7M%vVWh|ZOTP4#jJ9eZTB?3cxNt^L0v zEeYf0XBq?yj*52dt^Qz?r4B0=*7X~QIG$=ep4wyGv_oHTdI|rX>pbM8BO?$*e^U6r 
zqw#u&8cfTr9{BRja(qW$yW^$GGJTEO?24?6CwT?&OkKE3lM46xzX0tR6mFh$K({($ zneHWmgRDX=!McOq6zj9PYDm?igfL_M8584#*CTsb5m;h4X=%IRqe0mcd&`k{&!g&z zE3PbMtm;nlOA2}MW9D-EpTZ-1EA#I1k3y#4N)9+>|LfbL{ySwM{?W6}BoNS-!7T`X z3RHJ@-JwOGP1lft#oS7)U?&{7H(FZ4fR7X`KN}fcp3=tPE;R$pwhl1c6=Zb9vu#M9 zySKgPRSIwZ&Y|cw?>K0Q66N)u*vr%91-0I(DZ4;z!BM?=Y?l!51jw6{c&K9rR{HEl z>7b(3?#|27Tduh4NYseO1a+XzZDs^h{a{yfuhP_3Hb|C|o?*0NHzQ1p+8ISZy zp^iGJmOE-6#G{UTj^WRcnyI&etJYc*kVf9zw8M$-Ve}}+Y1(nUQr*1T>bbQW-drqc z51YNIMAvg&jFQyppim>@I`!>+N8*G0HjVe>xUD59YY)SpfG4}Q5M~_JBe1X31q9yx z)(XpJ(Em>JWmPT zOtY()@lP$R%$LVo&CxX9^wa!<6Aj#aolZaa*kwBE4{l?@IE~VrrzUV_k)D%g*?2;V zbqz;4%Of_` z;2a5oNCtz02$ZqK*mJCAFs}&nb93w9+31uHugUv#CV3?N<|ha7SxXy!zOjpVZ0Pl$ z{66%r>lXdrxhMZc3%!WDnQud0#vN|4q2jp`MQHugXITlPQWxP(50^)iPZ@YuZVmEr zob!|1V5OfOeP?BGhCx_(c&;r7=k1-pbP7AckikG%G^quo=5B6%=ER?2^d!e-Y8ohu z;_h1c>J0^#3)@5A8kNYOk$mr5J1tAek4f2^B@bdb*cSt(Z#-KB|B1??XHi<;W6em8b#gt~o~-9lD9r;e-2_9(vY zaUoZ!6=Ax}IjZ1L3wZxw|NcRp$8|(roBz)Jb16$sJ@1_O>~&&Mi9x91vR8Jin)N03 zTmiD^YePbHtG!cW4S%|OE@MrvxHWe-Hm!1oC$&ZS1q>m2d%H{{DyKeqZP}7EM6(9* z_rAtn!0kryWHZmCSuTkU+wpfk0@b&T2d72aD{{SCRX&j`kA@YP@x#Pm!>Cln^);)Q z&{qh|B&7OVS!^a4C9&SRW%0Qg^?#aHRhdz&pv|f&Y*1Gd6=PYQ8;jo3s8UT4*W&Jsh9WxDwyeTh^+yzD(XXXj$^I??QL?%O$uGrYx6$ESEV>GRT4V>r#wq{i`;t zR(Xb#eYfx z{i!=u^d<>tefWP^ol5Sqx-pJ>mEtkk2sRyLaC&PCF_;V-%yb&xkS|Wmd7oDc?Vw~L zJ9!d1c8AL!K5D36o2@KcY-SB$c@P~o1at~g*QeEGd6r@$N0HLk-sl7K9;HNOA^00m z76KJO(eUF6SO4yF<>)gz#?RizDT6*UA+#d7`ZWW5P(jLRWnf?%f^6#jV zbG2Jx$OPJpXQG*)A##``$AR3GYY@h_O4o3B-7sRHaUkYkr`B%Y+^|Hgp`M)0vDW~K z7=Xh66%-Nu+s6ydx7JhUmNb_LR?xXyb8XT*Nlbbj-4QFQx|1=;@unPxWW?yx4a|_v zg_pIib8leME+v%Fm3lyu6+lHgIJS3FiTHq!Lw50$^-NT?oT&0@a9^MM{>bJ-HnsIP zqUzl2YKAlTl*Sb*ERXSLCyYOoMqMzTkQgDd27%B{gFX{Zdj~ZD4pbT1$k| z1R&Fz&0ukgU_Hkj62?WtSzEUV4O{4=(p3WFjj$!2E{--I7J!cX#dSS(&x$^&s5e?K3fpVAX!XqJ8`cKs-FbTCL!tedXZ{vTv(uY9 zt|_>NuQ>UJ6@L3pZ%k^+C!s!s!M2qAX^Z$>@8bU*ZC_TCy+!eeiy)v2-C(V+=k0(b z;)CF12Dh5=r)_~lDWBDT--8}r2D8PhhcC6*BW?S)yeEZmz$>w{Vv2kdks2U`k{&4s z_Ui10a&c+)>F9f#IVrN(s}B8V5vLm#OUorsIys@K5^3J#IRiU~@HV=FfmYegxecCc z6bNtwI#ed*UR|^IBxQwJyymjq7h*nz`wZ%gV3OkJ=v@F9nK+jb6Izo9Mp8`VQKcPu&6hxt|z2qw>`0o5?(}Q&H1GE!1|Dz`;v6J%g zlO6T&H7=>^NGp2k3eLnwFn8ZfOHbeJ%hg=uPRl@>entnJeL(l6KLU+r)SJ>lzepc2 z%Qb_TQVvZhSv`~_n1_eT$QN|ZhELE&Q$Et=O}l;rV>dN?HBG_7S$0HT)Y$dJ*yg*$ z7Nw5hq|fw6F2!En+Y0J;o_8;E{Gf7ouf;53Ldd!2Ax*K~Wj+lxE&g;tEUlQzpXEdv zJ(+gh+Q27J-u+A^-3?Thzk9RUsPsA)flLu7Dp01=KNnDkRvuw+)q)|-B`qMR@Vf2W zSR>BHelF2Y68z=^(CI3lOcLxMu+nmYmH4qZ6y%3X=U(qRl5pL&FkJVt7baEXk91j9 z&{-rI=v;0nP@zpn-=!y@O?hhA(x(fSbwrEbLP zGw&9Ms;1uCT0~AZowjqfy0}8Yv&LLE&K-aj-bpZs-W(B1t#>xqQDq&&y;x)(88%wa zUju!11xsPUO^l%xmu5<(${WzlYS2>X0)w;M5g9dq}r z-f@ldYl`h=a(`$_waFiq8g2~kXxR60^SX!L_gtUUtzUV9AhsknlH)R_zLi|V`ivQm z1R%XKKx)TFJ$;h6VB6ce7@e@2XWUe}V3EEP#__^T_$tg5|EO&K^U|M$mG}@Iu(Ab` zHU(OC(c-noe&Um&r8(X?nU}5(yoh+E9APUwi9BQoZf6%FoMEW5c62(s`^u9DZ`m@t zn8i&k;Ui)EX~3TS(D-Yl`qhX}1|xo5WWXuH^luHt%f8^s2+(Y=1p8QHO5mo9cAd0` zxQ$wLhiA568GZu#PW%f_c@-iG=-twp`~|m(Acz4WZjJ2Pv)Aqe`UV>$9Wdh&Kb`6S zqd(upbP$yH#tddpFMf}#_)x#8YJZUK3#NXR>co&(Q=Giha1ebt zP?Us$vvdihcSbVkfY%@BdsBduCcK*8e9CWlKdj-JkD@h{`i(m3-x%~zgK6zH4fBHl%;IjeTV-EIG*_pdm~#hpj0(w~!OhM(PA^8HesLT1z3sGbL~{L}Q& zjaOpGnfsxx0ggMi8a=_E{vp=&}&^VbDL%=A5eMax-9-Q>l%-CmxpT#mt1*| z9foyvlRy6-&`W$S`?Z;J6ZM7c1n7x%bQU!+Gjb3Rp@gXpS%N+3YJ2XM6`kGAF4+3v zn?s8k*=vk*^P@jpl26#H?Y+ob8-Mx2MwWfpsB&a+$y1BoX-z+pNX#Am?y#B69OR(3O zRYP92&Ptx4iS?3d3n%W0+#S-o0>7Vhpk{oZv`G1*ipREOEni1Ai*NL)(CVY?Pu|R? 
zzHpq}HusakL(imLGo`9li_6O952sKaAXwRz$@K%3pSmrF5=a7@v_@o;NJ7c1cONe2 zMjS2eQ@^}m_VNZ{XM{JEa~ZT)WeZ-gB5Q?SIo}YiaAb>PQr3}Y%Os!W=BPcU-7C!D zmoMcbOnLCFlU@+G7DxAdf8Ox*Wt@TDyr$9n+VtE3mkg|eF=S2N3}#?&q9J$$NBT!q z^%encO);lg#Zz2XmG=NPMjAaFMc==SmJ&ShW1-6ORb`;__m%uK z#-vUdx4FnF&1SVZgKEykx?~ly`ez89IWLJ07V#G zbZ)hmycG3OvY?njOcW$tMhjO0*WuwsC85<8d=pNm9r>P~iUy(B}Z?5nz z3P8dIf8nyGnlOx7vNO`DuV=;TVT`7sV%JT}t&|BEt&&u8T8A|On!fV21)m5}BML4q zFh=@>@P3}ECv>Ef&Js>Lb<=_PBwz-89;v>r&>38f@N|Rp2;GCcqO)b79lkX7vlT3rQ$(&;e3%9CFvj=F-(p?5SEb9CV!Nlw1*g zsLFMZccMvSe=-X{?mA-+kkDPEtLTfvE%XjR9=;Xb&^~uR85?XOa1RmcIcQtNo4G~g7OFQA-*CPZ^@pL>Ydgzg18L!5358!ejM#AELhbo7j)2tzNcrNa9SF-!KOU-f)ttMxNHOW0tiK(US} z6k7?4DUTFTxQ3au(J5AtL7|2EAK66;@;*uz2v9GiQmyw`26i4yYSUZ@Fd*1mrV4gG z3gynGHXD9-jm%{UpDn)NI$QZ4(F2F5L$d&%<0w2?xC6zJG&B>o)N@$Y=}5D}dwQ3w z1B9EpF{wCxf;A1Ygb0m={Dtr|w~nIRN$AURp!&R^1KO=aR7@<13omafClcUFDIyRz zW2H`CHd>AHxEP1>8)qPX4XdONIHS^NuX#0n=(4q7Ig1c*s=>+dx>u=-mPO1hmo?F$_gwQ8 z?$F)ojtqdr!3byhQRt_s>Z2e~gWAQ(B|>iv>|A8K!}@#gN)xrrUCE_;dVvfc#?L${ zP&iby=K;58O$y#Yqf|3lCB>@tPAHzc=@fy-1`qN*%HD$(h~0ugfE3Ii0;q>H-ylIH z-waS4(g~SXCW3M%GOQ7Rc3EwKs?$pF%uG>ij=VTVMhDhW1r`L>xh`a|6{45m47Q}) zil;DjPywVLVUtCDR{kYty25;^Q!{4adkOY_F9*qhLbW%zX3HB7ukVq9hX$q70bYzV z%Gow5=SK)my=I=Y_kN;`x7-cVjBd99x}>L*Vkvja!b|y0j=Pfy4vq#fib`i6n3gT6 zu5{#S6W$F=?jrf1^S}8Q-+mMP=n`lfQ*Sl+F|Y2U-Ck9Zgcl5+wpj?mQFkJY+6^oD zxBvpTXKu&QoUu zq9Y*om7&!>+qry9U%Q_Va+@Gcw;H#Vg@ys}E^O4;bfFUAg~EFDmBkh;AKpXK3ScYs zrG^Zfe=TSeKVWeHKBVTaBVuVwix099v+bjbeGz0~)22zyQ5BQTkq|h0&z!+ez zNK&piw!s-j2q7!tKp}J{4d4A{e~G}i>Ea~=-Qfq-#Xa9qLq*YDuYP0 zz#V*tQW)UgMjwh8PJ={?$4JD)adFXqpUjJYZPtQ#D}xE%@N!kQhVUsk7}r;PP6-nx zBkf9g;3;R5glqL}-A{es>s@Rs6w=wN!7yL_{Eg>v z1xzFWJD7MA4HIvuLdAz^SHIw<*d;BYQwRnSEdY<^gUBTO`bUZz=K|_Kg`I>ITGwDP zDV#jIB%rvqVV`xdNu0nVs*F5k&UNT=t>Jc$#%91(Hh%Ssz8aTSYLnBH%TKD`Y+MCTd11fa~%UvS&s*Vv8) zU)>3uXR@y@@(C@5y-FfHYx!+IiGCBm-=5G-92n~v8eeehfUPF*57=s=Ews;ZuY$3e zd8BJ^V2{6X2UPsGwsxR6^3@So`K9Pbncbp=PxVP9wyOyw6frYmi-YjIJ0jd@UDH#2 zipY(Q*JRPfzWS-ZypR8?pSoV4C8BhiDg4QG@a_`R0-p4bbxFp*3vtD97+;0m&jAJ) zcti)Rkv>$W>Qg}c-7&eeK%|LA1&VHgh@z_pimuAZmWpS)q@j)w0zP-|-DYElN{xG76YiQ}2RXD+sq1n8)C;*i_+~{^!h|pg1yll{ zo@|I^~?3 zUlkG9a{H)MP(&wdn)?eN{)*d&)>NWh(@2j=nf;)>il}_aEEpyhMaTO9LU}o{70bH; zFmoGwZ9h+{Two+b3lQGQ4UU8&2-*^3XlT&P?QhS(kTeD?B@pp8c3#oMKgD(`R9LyJ12H^X}v&SWQ|%-{^SM} zGDs7VaZ#K*NPcwkE49{=3aV?xSljs(;rlED&GM>8;0VI^0gQdp3L%YFGy~=7LN8~| zf)Dhec3a6j*-<%nOv@+fK3wGO>8~e>jId`X6TaONz8RbhbA(nLuzAi9AFCe z8;K?)Rc8@ewswGjFx~L8i)iH0(i+wctVmTEOjrwTJ8iA-D!r`^lR!Tvf-luAXVD!Q zleg3v?+-UIAGZO3)R0t|h`2yNg{%;)6Q1|E6l5jIn(+#Ic6^ z=@B8#;H14?3HO(X!S~qkABY&Z!B+gVX&<)>SnQN{$4w%kzV|)iJ;_kPz*B&8@@?pZ^hF>$gTCI%l8A9QO zwCfCq3c0IjHnw!!JNarj{&xfE2|Ne>G9Z%m^K@V~owFAR2&;xs&BFf%)M539Fa|72 zv1gS6iVOHv?mQ@%Rbdr2d^2u(UP3y(?Y)i%XfDDi$pz@k?~L0%wLd1^8d?f-RLDd9h8;wf&8P3Azes!dY(w++g4@K@!HfK93Krc+{A~LhM=iX;W%I`j z_s$TG%z`OhKY+PXEQD7qfUj)+%Nb}rBAlN!S_bOp6I@A^3nJl45Nb9M+sY~Qw%vI^prE5`oQ7{;)8HAG z+OYbu7G%`*DXq%W1=D}{1-B5G&|QMftwcfO9;8uK1v__U`_ z2z3zZ3+nTPy*JLceJ?;>Xd)jig7+)MiJBd_tr{uSvbV_?MpOCNuNNbOtE zc?n-|BS3W3FZd-+@heB^6zI|*I!uCBLjMhbRzU(d*;*jL zSA!1P*ZEa5i?W9QOC!R@<>D;p%=<1zyTz)s*HB(MCYN5>L{y-J`##zM&MRF3!DSAl%AW&gMr zA~F@fg>A=@aw0!yELAr4kt@`iN~&XAzu-Kh4u2;n5p~jKhS|#C%vAJ?NBU1IUuQtoZ$vEUju0?^^phb2(oFaVNrlF7z2(907kO$ zLbdF)&tq*cvwt$u77Z9o$ zfZ^TllU36eEs*lr8pwIT+7qrGuNH9s+O^BLiP+$h>$hps5(TsYtrOw9iuboGAH25( zDmDxQcUeJT@aJyQkOUAy&o6wzEkK%ofEc8*0|eBjVe7d`)lLv_%N|)PRL5#dk*=jw zOq?*dZrObxFpN&oMyJ&Rbo&tvC47Gsy%ZAWO@9)|c?&n#4E#Dv6Otk0^x?7vTN2TC zrsZF76D0&kAeI>@ie=uUVw04)6~oqFaDz1xvJu!b!bw%^#3r$~|z(sV;r{g&9%kyB&yNHqzmZf)Y3g@$7sE2*I5CZz*8v 
zXxUG|;2{_T`uqE|U&K_fr-b&m65KUBY`ox}q5dIYq@VcxIA@CkV?c$<4I728i4Itr z17veYmYf#h9#iz}WD({O{hR@VgYv*U)RTna&nAAyoso7Fq66zeA-bU5D+Op#Id5?~ zV+8*NXSN%O>jBx@f%tt@2fv2X`HV~Sfz#CaXz#%f$k4N%ZROm;l)5AIEQcaqztVuc zi-gnEg^tI4K6T28bvYrJ1XCPC=>s#Vf*oYFxj&^_us;nYZ!?KJ8eb=vD?HPzHuvBY zmzgE_%f;^#5l4+ut)GK4&T4PC_HNfc$VgDG!wVoMq)is#UPqes-l3g*n6&T;e8-wM zkQ)h@uoEByOBM~|Bcttw$k5t2Fu=)R;te;8;5LhT1Q`@Uco<$;i%25)o8guA3;dKt z>7AUz2u3!8p*+z%@&C@VH@EilB>U-1Di&fb1pGRN;T;e!YFzc+-3)JAZ)3~L$SX^3 zfBaV7xGm(=6gp{qgj42+I3hm!wbZQ^USsHb4$+OvFUl~bTd!Jvqcb|RF^PO||`5ymRPhdSOYh|r9=a^%> z;~nqlElU97cG1CW@tp&o^m!6pgt|@5 zBJ-G}thbC4wNfc1?iHoVr(%{>f4EM1lCHvqU_a3QWq`v0ycgQ>%JZwz4L2`m2eJlM z4px*B%cha7I27NE?NOVP_cP;()$PeivT&9ct6<6oF0-ph62;eJxwasSq&lFOkz4N`|b zcXI6<*>OvdY?$fH%N_2pvKr4JVWKGB2?Sp#lY#VIkF*g)w^w&AG`koezw{&kC34aI z58dcP!GvH(7yzIOWK_BO*E}hl;JBFBK*S(sr4PWdy;<^rI z%RiFg#_aZn3H$F>*WTG8>QJAfa8GGR7VNW9e;k=bq>5k8v z6J88K19_*!N3*jNT{^7Kf4Y!mvaYv_^xyFL`_k~YB?93!g(Zn4*P$crRSA&jd(cDv zZGyVCeI800&+4T3fJ2~;W`BP z2Gi$YxUVQC*bgT{7mW!l`fFk!&;ECM;6HhRi_7m&C;v0&51TM!8lq_7VG)%wS;m^f z$cQYP`J|pPh2lakdFflW*>puL3a1&=Y7STE9S)y6b9Gl4bKkD-`Py-OpW72)EU5&9oznv_ru8 z%Y)%K!Qc*D#;miEJ%=^HoTsH4lzGOjz8EX&>gBqkZwg7#h*Q<~nn`N@N1d4fmblwL zNsadZyw%bg&r&EbYBW&8dx?~o<6lPO>>{1h9(is2CJ0T2E(NM|uvG(BLHqnvYttyC zDvc1jxy*G{AvLu!7Pr6Gw#XXH+Y7gLN8Di?s0v+b2bIQF_(2nVGDQ@Fe?)y`@#qTK z`5STw>v&3UW@%ld39+=&O8(TWCN!gW*>Q3u>gnY{>C*qetd;$21Q6Hp-#w^)XKVl1 zRsY#NsN&E-9Um%v}^ z%;-xfPUr43KCH*W+_dkGmgL+p4A8=leHR8{iG_LL@Q#$HD-V&qGuyUhYB^J)(Mzs=&fCH{3Cxb)R^tgp?`M`c*(}a zWd};6R?IG26m|^j011a5h7*VJsF+GS{8U4_;0KU{Bwtw`i12fDfl!Q{qZHO;dBktc ze3qtL*o!Rh)^j#Tc4xU-4kXGsrc|x0fYIo<+vx3gh7d7D_Ct0GiBYp?>@J|mbO|x> zdbZW#6M)O*nMix_#($qwpnt4(aOq2>120qhb5ivPjTuEgJ2cYB2xX_JF|tb-PNHr5 zu3S7_L&xey$8?)&|Eet3Mdxm2iqBCsZ>_F2ILX)GCt3egJ+KzqeIVx##NYn73wt&D z-7af?5}A8&brp63g&Tqv`%$^NK!h_v1_9IbZP=f8QLTT&-~Ye3ROU$g2Xg4b`xV;y zS~5*AZnx0jJRS^INQoRxzfhl=gqlc49Y!9Yb<=oBk0w_+NS>A-dXfFa!VlwPU4G)v zofVnU3S&C8aRKjZm;~txyU(3aUw6r{ymdUi{C?Oji1gd!Cp~FB(1|cUfm(JVxHeU@ zkcVSPWV^xR?1l=|Tt z*tzw8{>tcozwk-ov%+mkR|^zGCIINYZ59g$6!U$q7={vKJGo11i1}hLHEu_#Z+na8 z7FT>grzkN=`in3Aq?q##AozDF<_3g)2|+J!6URN*mz1QO%RW82N3AdTF}lw-SBmYi z4pUVSt6q?@U6RwbwjfSnnEeft?9I%Qr9|3v^^~?%TUK$7in!~pvlVds;Lu`MC z^|?8K8vcL*K?4O6@QxV(II+!vCiy^td=MYP8^lON6eSAo)oo3_jrroRTOzTGi5(2x z6*SP5dyK+scL7&g0@U^E>mB6vQm9-_XWR=u{Hhr6v+j*E1KO|C0D`MHn3_{`U&pguTP z_WKTySKsHV2k1TP%QVfc?Ei;7bn<`pHgWn_S`F*|lr!HAfwmU`4Dz0-2F~Z7e3h`T zXaZJxdnbQ%a4Q))!ZSbHC6ydU%)?noH{eb)Xwq9@@XK95w<~I#c6Ad1ZIe_18M&Ng_bu z5`&rnB=h?dgGk`>PeCvc9JhVX1hkUFAk`OW1uO)){#!5Y$`${u$$bN=*Ow&cRc4^*n*AJqozs?}tn97!m%b?px4xmK0rS;HP| zm&mEKilXWCPXgC0O!UOnjP=`MRrS7~5C&Yw1ABfmvi|@2_kS%<`)6|P56rq>`~JPc z<=3d$mU?xO=6M53 zj6{I(nl{L_{6;OFfq(+;F`ObTN0Ly9p}>e;VDT*tdPEgzTUU1LxrVBUOIvQX-voBA zo)^`J1N7}K+L--wvLP^crn_7`kMSKRz|pTh6?G~PX&04c%z7zFmU)>ce|Q1o2r};b z@kFcCYl{|5J+tvSVn`Pm8J_Mp70$bcvs4Xy>35;e4G#7d6X;~1;TNH5thKqqM+yT!3co`mGaml3K?%XY-q8UR$ zRMVFiL3;o|9`KIV4E}>Vg)Uz80wq}BNK3FCrM+>4{cL(|2564j;OxJ>H}r3AJ9j=B zI6T%L(c8N}H@+D-K#)zTqaYiEqNJcGaX>5Z-*2rC^_#Z`KY!8SH%~Vbm16~=GJxf- z7i?(oNPl~8nZLPduo}V!@}_7ga9k1CbgAcBP(Pr60t244=J+bFlt;p}%J6UyaYN8T!lQ{@Zf$%jEuAL;vas{aQnRIYqy`++SYq zudoFKoxj4CUvcPPgYRE)=>J4uAyM-pSqxRDg3GtD0GYDfW$3lyl*LwB=g6$vnGx^V zG-uw<13}AxLJ<19l23afLQ}Gq!AQ ziHsk-lxjMtE#U%l54ptne!i$9``IX4qUT2gVMZUdA397=^yq>Z^!&@}x<+6V$xX5P zr-wM3Ftars~sUXX9rTIWz=I04~2B9O+ccA zm(#7+m=G733cGhXiuNZ6g~Tr;4YG#+yTN+Yp-O;V?x&U9b4+=tJU*11tdZdyuN3sP zZb6~j@T4iy{+Wn&{hQlKDYMDWIWAl5-!1LWBy(@ED^Dk>TxeSe4B0QTc$z!=Rpt1U z-!cz=k$AUTk!mw37xfZ9ll1bAySuSXhnHy2UTNQE7L8@PyCc3KZOnV09)cLY`fL`4 
z<*Jk?WYwfN$5is?0G1KS(|jG-8c4izxnEeVq(#Ki*t7_YQxWtzpABF~Km9{Z>!XjKi2SFRnb? zLib6{hVT~G&R;9U@-W=~S;lykuzMH|S3mfTZlAytC9I1Wnb&LAqS)fK`}U@?yvFH> z3+!=yL^?{=r)W?!i(eRDBKb{1NrrAd^8>>R@FH95?YoCZc{rOM5;{*G3)YhSC(~5_ zwW%JaX}M()04Hk@c&HVq`6S7WwsqLB0nOZRQjY8k$_k`G#r8Iu4}}FBi$X8TG_t-1 zx+Yr)$~2vTC`1j&SCT|FYW_Y%|78u%|L5`ozn_}_x%*~#PTIwjdmFLD{S}~4PR@Sn zKd~raEpU)6Nx*@I$B{6gex#)yLSx$gsR%%fn5+F4P2@4{)=$A=zKfD9$f}ku$Rp(# zlY~a~ETYpUg)vtqBitZf?0Vbt{TG5R7-|g2EjfTEGe(j{GWkQBw59PAgycwy-mBq- zk48CNN6Uh3&x)}(vn1Ld?l-#DEcFER9Mq$I!+sz>CtD+ql;iJ>P4tu)%5q-lxu<+) za$tr*?n_2&OUueb-%;K@mo4VtVU;K{7}2$WR*!$N^>CWd^}ub+ypNUd9>3~Ti}aJx zI&Q;{6)Yd{ER>w!yY$QBrC16lYfUV^%!&|Ei#;&*LrAShK1e9>wUOq0_Ph7v0(asY z#aNTfB-lPIkF0yZ?*m4&kALB$XB6z_q6X~Ik>j5;NvXPh$9rBSH(X7yv4>l~GZ4L` z-uA8ot=Nct@!ix&ro2)Y$3x)`#9^XW+}$YmaaEUU;>)4RG;a0S6jkxUq!3&7&<6Jq zuMscuT-|rc?n*BlS;nt^I6u8C(?09DJLH~Qp)X1urEv5l{OhD>T9HaiI1}ulP`(~g+ zX<(p~aqjAg_X-6A1jBdrTFfnTUkB%4%X=pj&MhPrCcA$Z(og#1@HtG%Q$atE!j2*mC&O@Rx}9j0`2D7i3Qa zD@h}7)I~^=l<<0Jq4bOSWZM_xAjO0HE=C+F3H|dHg|p!jztldZtj7R6^EAD3G|`qw4NY*@@SCk4pHv z%Vv#vALG#9d~oXM`5gX`WyWhM^?2uHX6JXb)*V!n-8qQt7(#}M4Rqu8Toox(Fy=_} zUeA?G^KKTHtY)S@z}^IMBK-rJ*9luwp%^~fv4szoLoS>xiyngPf-e$nw5^%BRVO>Y z9{vDnJIyo#KZx=~2SX2+G;~uyRl9M5d0dY~{-#7$Z%pUh{;@`YAYA}UjVpzWsKI^*3&XzQ`hCJ&$9ywFgn z1>BSnb4uVqwGyqP$I?H3&E3En{{|3E4J*^a@8=_m{aKawS?M^=iAL6nCS^bhG_9r% zu+{_|7$}?6y3u^5d)dNga5%QEmd#=8*#nYaVfrjMV-l_7!tJgsl}3KuJ298bU6MNv zp6=p^WOED*Z@x{!60<%JqCC1t6522D#|3ZBoyRkUJIUKVy4n^I%_%e1YU|M1s{F&| zjxEE3&$Z-mITC13um}xHIPDo$ezr9FrAd7;*3Cmc;jVx&CXuKtV(B((Cv&|puX1TqtKBqO*}^m`!}26M{Iv2ebDh#bCEr+7e?vlw;@~&1Ru}KE z|9azJ=stEe7`GOdKo7Ep_so_jd`TKwo?R=*XL~r)$IXTrV;1gI-b+21_lzR)QaA5~ zy%vwDiDi)kva~2_O`myPzCx5keM6BxAFkmK_|rUOO@#WOc=u*uRJnhjuy3^G2H7}w zGqw+xnEw3)*CGE}t=u^nNt&q{&Q5ZzLmna-;Vvm!*eki1z7(tYU@XltFW}pz!)Imp z?c~PktfL))JT+L**0dHKHhxfXnWq8C-c#u{DZ;FtRPwHNwz&5sKP3az-?YDuj-Jjj zzA=h!%j_XWh$ckw>-F%g+&kF;Yj|&sO;#WNY;^;++*G3;n$FCt73;UTEl3WcbzVf) z+iT#Rn^7Ty5S*b{Ur(L;@#Dw8vtmw+?&U92JY~T3a!O4===8PB4bbB@2~_g4fq0uW z5HRhd#3>>$eEhbMbH@xdwQH@+VHE{AKUeCX!APnl8&c#+Or8SiFhW5xuGPs*SxRkM z!LIGONPtb#5ewTK{zSu;CT~Ds*oSgm4IMA$&XP@?kQ4Z}7?5`Ei<{7id|9i@d%hHz zMy_PuX_pWg4~3_}@n(>rkP+fnLRTzJ4dL*F^seuAp<4=~D{|E{MJZlaGK8g(>uw!~ zQr#L8tIzbtVh?C_J^@7JB;V|}MU!D~1s*H~#OP^x)L`jCzCq`$FZ#|{d zq){w_t zrZW-`-itH??#@BfwDYd~h<$)3EyBwlJg2>9N$5#fBw#o5&^mxSh5f+U*lW|3*O}*A zZsIjv6Mrq_BHP`zR!k~8p{9kY`@0x0j8Bjq-?{pnimFIiS!RkJEjA_}E3lDqF_VTz zeMHjtiwZSNR^{Rb8{td{yRRdbc#EiFESF0*?Q-%FyPosM5DZ0iX_?nTOM+E}r&0)> z5#xB%u3+sw_}GHTafo8!p$-8l!KR%2dSYsBa?_DB9b3$5sxcCR!8L`CXvh2&0O`Du zd>^4+MY=CD@B%;8e1=s^SJ!3wx%nyedIgV!K-DvZ^AC%H8j~fno$$u z;lpE1v6HKj<@T7|5ihUnZXdpP(RV7hB>kA-f8zU|NdQFB5b#VysV|kxhQ^7GeVcL+ zyOM~gl95lvCh9B74M`oxD%vHt*oq8Cg!RoXL@X`MhyMxxE1cK%oh!fLaU0sK7C>ufHA||%tB!Tn~c4b!vqH7{t~rQ zGxO{BlakFH3cfjC_NCAlIu>?9NBls!jzjjmwLuKb8O3q{(p(ylc6Zce$NqKcg;wsK zvtPFha@dN>^$n6*_B*Re?+qHl-<@hqhO<$-4lG=14MGI0o~Na|QataQ!(kgm5)7MbAasQ^ zt&*gg!wM+gWLJb5e(d3s`k*|2Pm|u|=}$L`q~jMX;&SfNAJw^@w#Vz+;8z&_9BNqV zva1$~7(iHxb)AATwrg4=ROG7l%c9K_MtEAU-V#;40NE*}*ZTtG8<$1kIa`q0+xF<>kouSCS6q^C8>uRLEB4H(@OB^T<(rc<~V>Bxs-reqb zy6&*8z40aXIBz)ntGT#nvK(=i`e~WsMOG&9*YB2rrECFiF88G2vC?AqtzS18kc|U`|#YQs>B;%9K4BaG0k)g zWt)XOz_kc@itO+WsGpBVT>Rjg*kmuGO*GVASvhWb-WjdG*laH~gC@zOSLHW4;FIfT zcU&R+Z}X^p{Qj)3*i#dECR0@)K1w>wfNylntGRca6?PLo1e=7{Kg+;dgdz@1b}%nr zk6q0-*GO&TZQD+m>x#Y45zF*=(?P?}Hd@w!zR{gCt*l%fz9A~2drq_I{0Q(-+LWsW zk-%|i?f9M_T`DofS)|lS^6L2Ql(|T!`375SIVo!1kup8kAI#l1$Huj}wA>0_fh{6VEu9x1`5oXq_w zbI2qcZoBO+p1^467)Q*I{&q#2g;{li6CQ^-qRrs~Es~>fp>c=1>fa_C;r~JxDZ6w$ zNXgMfyNhef#O2wk 
z@Ja57DPuKFzSm<~i7SOcYy-$o))P>kV;!Q?t`|RS)B#6i!Z};YaFg8g!vNxj{yM(VXOvvACv; z!AZzk^LpI7S{B;%>C;fSH499e(Ubb8dI20JYwu~ZX zkeqO+m~!X37iJCPHwnboBFg0k-()dg4zh}`CW#zsxzyim>N*W4iTE|YFq>qmGvh$O z1`1Z>3N)dbkgSe7#>+)%s)o88rXv0rx+#_gCI*HsniinZI94A4if~#m;IMa`hUWCo zAZ4l9)x0Fm$=N7;y)8lI8^Sw(ze#u^ku^tiRcT0sx^ULP_QU(_XAT0BRpwYJFjcmV z(Y0nSx$(P+tONs^%vx)>zLya{NjO%qMBn?2dHuOO@Rhy5(`S%qc~p zP5*p$4fnl~m^8K{+ox#EWn_QEDUudm#kRXzhH|#5gdji@Nnab2%a_|^QqFh39ep;g zQJ4X7m-3nMu2+lr(&~uAL}75H3;xuk-=mV%{kL|o(TbRTw=viHSEYN5Q{j>W_iwC? z#uN60aEaKR#)!LA~; zFdj_^L2~QTN6^062g*6-5|bNv5S^pzGcfZpc2ZfxJ47Kc_h1IDT8iZS);|;{RD|C$ zgW(_l7%UeUKJdBsiTJl`f!=$zjla5?D?bjsRTzu91|^BBM8h0`77^_jDCyI*czYY^ zi#eW{HGJ!>`SGwB0~hgicHg+^x9omw@qUO*VJPA!>~71{AcsbRxGUFYK3$3+R*gn! z?wQrNFA_JDDc#0DeJS|ddjPtKi!Z;#-#D5umFhrSe$kGS$o2o4ujF4_@}tx0#(TWS zsqBPJgOYBz_H9^Ma&77%L?MUmc--*F`A^WDBN7GTK&~d_DFW2R*h%ROZW zBgKZWgyCUT>GP#;L9uT7{iK8Z{cv$q%Z}$i6jc2LIv5}a$GK_R^T|6*^<5D?Lqp?r z#9lj_*U5KZsY;@P9zND)<`a3`aAb#-s6t}OLokt0**5+m{`{8x9$|f@107W|eZp5v z#v8_rL>`|8ojBccG-bJc7K+~-FRm^Z@{wxhkIlLo?ICZvGGM)vP6Z-D(LucbCZs^W z&H5*iyLJ z(YkQE#Y6wEwpk=Vuv!%fp41RoplfP82!_L-G~Xh21zO@=CdbK)b^xVFE(Gq+9|Zaf z`$@XgHY_Q58Y$k4T0056t_QTA0&qbuxAFg?ni=%#e#>5DZ+`=#78c);6;&+gYMo#o zJ!yF`{(T&8WYfV_`N4!)Hiw?-sQ1{iercIPcQLDF4+pDPiR$Vn_gV%X(gC=L`|1#r zDYcNsUHdBN!?->{KjD)?N(IVq#>N~d1dM{KTLB6tRCiy=p0MZ_@Y z2{44i3JFqm!{;A4T$ibeNqHLO_TZrl;{du+FO2t;e&fFWx;q&R%b4MhL|xK#0=Y)} zXljG?K1>$rf%NMX2$!`@^Wn{p8M1HQ)W|;Xyx7Wk%7JMX3I%WQkfvuI{1%K0A+8y3 z@^ZfS+k$e&bROsV<3 z;?x0&GAlW8AP<;~o zP+I_%!w>pLn=zBZ=a#nAJwCEub1%r>*k5o%&Hl6;OOA->K2e4kXhs3n4LbtCEgkZA zZh(GtZ;sM5Vv#nm>v3Dj`tVuJi9@~&!Yj^V$RQ!{;vMf^x^*<^#9b&Ks`hj>lfU!1 z#({T7d!`((GKv`I7!@lkTn^)ZY`{{pp|miLx9(tBBNGGhzFCYG9K+xI;JvU~s^+K^5+X!Y{LfDBQr?^<%{3ro#Y5{!p|zT;IYwEZr5l##xBW1Y(3`VpBcuvW zZA9xpu~A^M6_Z_&w3GPRSk64ppqxc@1p7XRFuRUh`;3-t{pzZ0h^64*pm}ru1J#Gi z0Q?kzw>9+nyup|>Go`>O!uv(C$AHh_s8bUAdO}ICh_|-%JhTLOgLh?E>r%4np`=UL zgZWDPS5`fg-uTZc1kcYZ7|*mzTyd5*o%bxfMOVCxf1a!)A zp$YdYMJaCQ`vg|pddYtOqNZn*%(RE!r_R+0O+Q#6>|nLfJnVqBfPY}EwXNi^c5IgJ z+lCLq)JX&0Go9+(ZXC^@ZVZrhylw-ldBxhmEw>6SNJbRHvekNY&G`(u8>^Hk_8IVYVc7GACY^6d_3*rCRalf0bu7BD@Yi!WcqSIa#PotW}}j&2C4MHTUwRyZWM6XVD@ zo9QNP$HSCbR3 zqaALE?X`FIk$%_694iv@K)~5$u?S!tA$14^LPW!OxHer09u=n4b$HCimD5=Bh|mkA zsfKX2eNIu9*>%9J!qB45Q5L|LCs8e#_+%Qk1WM)Ei;P96l%zD{ z#>18it^-K~LCFEN>xt*ePloASWqP8+D?C_>#aniTRV$KS;5@_S{Wa>2Y&{bDSc!Fg z`gNnV(3-Awzxkw?d&b0+)}31n8%%ZBHR)S_q1&8;(1NARKO$5(YjI)Pq@&prEF zfAAKyj=+oh*D%XNlLeB>fiOe%EW-u}oiZ6>rl?)-W9P0W4iD^EGX zz6IBOC~HgSBlVO0XkU)l3ULo=^O_MfOiP|Lx@K2nl=Ho2Byp$HJI^Oh`8u0ZCgN)7 zpzk&bg*6}pB04UFbu~Lg5&w!1aTSkH3=NoT_<9>6pTzIW9(5LZf!<#KG_%|Fa8`~} z>*LYd(zG#HiD{G<&IF2kK)U|=`$)0H;M%b9n=jo(!}T2%K?^74Ba@NLnxV}A1$vKN zGI&jEu4e0F9BeK0AGG;)&+e*{6vr#>wTTC2l8-*co5_x*(Y?!n3IGXeCdx5vDH?!% z+_wR|$wj%EWHW#~3U%(Yw~AV%a5Sf@(+_4)Zx*JZJkUejF8O$CFnun*j$U9iw4AvuW++v}bRwE}9awYT_6ipT>V5i)(47dohI_ zRoWUu8o6NUL2t})lK5_uX0AolehL8SII9H~h0Loe4{7zq*NGli^go8ua*ou5VX>h9kgHGZaX3|ykO`ke@aU7Y0HiM zs$*$__1YkL#fT)Gn!-LzmnFSVm&YS3aL?vKn)qE~ZO*6nNWF@;^^jN0uBd6z^u4|a zKMTAE9oUU7lIwUxxn;Mj&n7gE(5-cm z?t+%vJ4cYhsEH=9JU>~CBLKwSfOwfCGbwloB>B6dG&OQY4P~X>mF)aXqgIqv$=UAo z*DDy%Z(sm&EEQiy;k^w@@mSD2##zv{Ii>2U;iG&PiwANxi-WImX2wit>gN;NUt z-@QgckxwD+PzzDFA#9YJq*dG^jKLqYuQ^}t9HD(R-Z9)k`hud^&{IBU7a?2jrmqjq zEzyktn=tHB#zA4ki|w@34L~^ei^d4CR^@$pxZbyayv)00l_5p7$hBIN5rn&Q8=iODU$xsV%7{@d6;|&RjZHC!*wt;*)yk3 zY}K<$J^WNnwsmJ7FUBY~N-^Jja>7ZE<*+fL!C7g?9lux;U{<$E?;luqgv*2*{qkFV z)^OBG+t;_aPCcQM{WjY1AW3v9<6!jv0UDR1v9RK|CQAyJXI>%<@CNlTPXD?Q9TSm@ zWf-$=*_CCn^kFlVc9&yk)Uz2G;?n!*cjonw*HQJgFjvrLMTu$*5apm7RU$+mDp2cj zfknZNXr96q3Q*{=cud$mW+&sZnK_N&v5ZvhN{?3T#Gv`1dype1LQl& 
znLiU}I+2Xjn-os!1?SU%KbM5)LT?q#HQf_LyJH)WxvTHI7pl~g-5;;Lsi=s6sv=#i z$gIv zQTz&Ll7ZNpkT=Dl9)ab`?5$<5=S*IfM;7{Rj+g_zG!R^5zb7O*FHntz4+|dsRqL%M zV)Ue-ApZO1)d&N97@1+&6nekRH^xDr5)+(OyURX1JGZ!=>+4I;_o*6K>AUDE|AqMS zF!Pj%;ziTHT4w(*f4j7)(iONnp`-)KJqQ*E(Z58zd~I)h)8U?An5}+&nyMqC!SW2S zoE^Y&M$l&#xo9;8+IS!eIJu=leO7_ptXfRUuF3?Ji~9F|<{2!VeITW7+HsfL(n`j6 zU3=>{0f5KxR_!QIi2mIi;W`V>YqgclVg5IaT@&t-co^h`xL6q_+!;Ukkk#=;FCe0h zh5&RTR~+KJu-~L4Vi`N(C?g}Oc{8*kCjtsEO>wc%eMjgHE6}0j_cz2WU7nmzkwMHB zo+V?^-4F>fH1KC1@&g8T<4O5Xt<`Bej2PXaZL-GLPQLc_p3uxM&PW6=8H`8Hq4B2) zzH!v^r0pUvdgmmrnqoXMXD}uw7__hpNZjX(Sp>}3RAL=Qyq|Fd3+$guwR5}KzhTT) zX~1>4<5UFX5}05En{6itYO0X0*Qti|j|My|UI_ne%FO2Io|P4uY?k)YHPvq)OXbz? zK**BoD0kpDXvIIrp?Fb(ano_8y<*Ag*TSWBX*n=s$faa)XNG4Htn<7Fr@*r{c(JxG zdGBHD2K)jalZ91ocTGpRN!;h6J74Y-^O}%MkVueB5YU;pQYrzc&Hx1gAnF3JJTq|h zX}ASp8NZu!uqj+|0GuICi*wk4Gc1wHHYX~EW_-Mf&2Re0tet;Tv;Ys#xQ!n%p@)uX zk#h+PcUj(cUPxN!_1yd7r4h8Pfp-(hy3ajOn==3k*8Hvc{fqW=jpMy8v7VNGmmJ|c zYI+$_UpSe#o?fm1cNKvkLLt!Zr{uWHWM8}(FD&MD@%Mmjw&}$UwT;0QH`A^YgTrSy z&&3;BfDn)A^W2+7)hH?-w6h5OTj}iKJ;GgzqfXALBbR_Wtr|x;1H~v!b@_V69UTJ* z_QP*qvVuu__7pFjd$#53(l!F0DPqWIF|q2H|EeIyv?E=9&5tq4>hP*4BT2lbc%3de zrL3`F(>Zs0szUI|bZ+{WiR@(8va$T;k2)P-h3TBLy{-s^LOz57H! zSeSM?2Tt8M(Nx$l92ljE$8l~qm>bz~8zCB!wB(%PjYk8?Y@56&kDx94T8d5N^rEmC z=%`rZ*6f<}Pq`!8-6FC)l=`x$8%v2ru1`+3MWD&nxD?x7n?;pYLQrJr(vVZ}>F3Ml zk-Nnh7N@N2#ji|}(Y+RrLrOZIgHr<(%6Yr1+vasGc(# z`Hj@`pUPXXo-s_3%X-Kk$|pEbyWE$bCWqnNPLthD2tf&6?>x?0q{h9&$T@~X zE$Yp~LM}_E|CJb)E@Zy-ScQ)ay-Xa{G`S;Tw$L`GME(mx1ag5ggD7y$D|t zrOCLYk|2196>NE+uhAgru&g#K%nH32{Y)dJ7Wb;~lIc9ZmDLvmb|4oDd`CL4n^=6K z;ZVFv!xIXOB#?VD7o+7fyBccRT)zu<*O~vZ;Z^pAjU*t@6C%=+^3E;(l>W zJkkVo_a7QOXe}tN0l8;}5cw1I!v{&@@;WGNM2S$<2*~a^b1y(!?)FupQ+ds+`_cEN z-nN()Bn#Zlwv+1<=40ri{|7~*-wvPOuWRX@%69wcnPYPtl<&so=rD`9R&|xh^zAJT zBanP)#0j9@nZf24;}VvvfC5wr%KuU&;G_w8eMjCe0%QwbU`b3shW5&15>${Z(}m^= zaG#*we&dNaKj}0LR6B1iH$}d(yqoMVcgw^jRem}WaQHj(G(Tn(d)7aPWIc*f@M34- zVUf5zaZ7{YP8b7#c_TX>1Mc~>0l*G&d5xl{V{N~~s)MO{eH~~DuZAz}Tx>J!EY*;J z>Moa8QtzP)Pf+iE?77&OI_6lJxX1U0f^52UV*5o~zh>bs?)mA8toJ{DoxPj$>Z85m zI>!4CId3m_Po5qQZpj$a2|iG1DTocIs{{zoe!%(l9|Hp{uyn|uuR=`d?Ih&!c{#c^ z*$s%{6Y6uO+b$A5#w|5Fx~p}ZYAm@LlA$Z4SSDY2J5^2L&42Du|3gQ|`V!cHjxA#D z>qXB(`|=boCe)d;tAAOa9mN|q>;|DImPyhs28jIz>m#FLs`(7m8+HaU0;~DZl52J$ z#m}&<$*P2_eb2~49pWr;mx+86=B!PL z#~XL6o^Mw$O_2qvU=3 zOblI?{M?DXhb}$^H-FbZH&XcdaaJ#wiTP&x_ERBztdM(@EMvbW=iELiY^p_5>noN& zfOXp?X@p|f(raEXcB1^N_yb?>#+|3SC-@eTj6P5 zv&QOJofp+(t0rd$%NmopDgfmo+T)+c6y1z?QDljnU3Za?UdwQs2(^~G_!RBlwzJ;@PCuH|--_@(jK*gx7woPArd}r$2ZsEbo7vg`EbpNsGNZh(`p7p zRFy7F+^Mk&f13V=QTY7J$jFSNnV*N9*QSu(zzvd9LS(#FZ3gGY+b@5E%$=1NDXQwO z#8`Gm*bV4OCuMv!eru^kU^u=_(T0PQaNu^-$&eGeQh82-R;CpnUlb`je!O5FcOxRG z7A3`zj7#1JLLNSlwGkh~DAjl>(83*Hj%(03?O>cxa@e#ZNi~7E#JV9c)WqV6_&IOvo!rJlil!uBkM!!q=k^=m1GvMs#In- zy#%p%%$W$cqDa6krVK=U{en^VJb#sWdx2jmjXge#x%RS2P6HOK@7> zcLWkH(DzuQ&zeU8_s=P?U%c6UzlNVZ-AvRr5iqc`ZT#eS?bws>+e=E%dzXRlY#r|V z?0t{F)~h$ETBq=~x@;>EOp#wDq~!06GryoJ6N3QElc|9A&cNjjd93TW{xc!J=LUNA zxGq^Dfyw8FTCaV5?zNn2IZ7Rk#y?Lk8~}AUWJQ$@?xvEGiMDGcvqNG+*RY znJCYHE;Q4^GvJQG#@_Tv?HIZnO;L?L-t%lKkEt&yxu-l^u5!-4_z!m-*ZY@+L3@By z4&EvP%3;7N=Ha-2ck(psRSxb#!^T$o*UMS)E?yOf&A#+g= z`Yb})VR4;)k$687Gd*}L#j%7jvk6_xgJ{)O7kl3-y<==;c1jKKKT5DQhsg&;V6r+X ziR@Wo@oBS=-$Ue1LtW&@km<#%irPv-x*;>GW+2fEtVdV5kD)-mLl+RkRTOMb;|lTD zUwCfe+ll#fW9XwrN$c3iyoutaGR`Y=dV43~ePu}CkneDSK1QF}5D*vpUJH#v2w3|* z3w<7Gl_O-XCuoCFmKnBwVG^FmPk1^@GbGAvZ4*j*^;(WA(1f85%<5;|1trplHxBo# z&W-wy5_1f)z8zLMNAySaiAT#}2P{9cFU0msMK_JAivX#IG%1Db--s(V|XUD{7-P<2R1 zExhNGBbUzAtJZFsCX^LiV6}kp`n}PJ6oD%d+`GTtkL#gxkGI(Joo*BQ-Kr+ilTvAX z=X!bc!1{&<}h^AebKH+ 
z?b?}$N@q(h^@|D(fPvir7`hD0MHJz=KS5>^s8$W%IJzKVE!K^&nuTwbGAA7L6V2Yw z+8LdG_fC2GHqWOkY=q0Asq#=Jsx~3F%K{R{$r1uHB78{RJKA>yq&u|y%r2bRbxXJ- z(9S4R09^M1$PObk?v1!EEejruPh$_!?sB+0@ht#fYE`u$I=J;1WU@T3H!z z>HZv1l0||7k)xOu5sEj7rosXN>hJt!lSRcl`NE-;Hem5G9?f%T_E7WUDU!SqyA;0XEn$ zByS?0{^^`-sM0_>!^%LxlPBTp(q8gvdU3`~YL)#`?!@F3=ztAwRCHi8_7H$zg+cz2OOIQ~EHM_l-?}v5xJzgqwVQ z7c9Bs5A$Ixzk;9-DUg&0%sjyiaXVS z^W_Yw>B_SyKU40$W>M~JaP!*W&+JDjVl`cRf{a7(jH)UOePsPTm5t1dw_*xO>OPik zYKSQ>o~#r8Qjv72CbN&t>24xn_Pnt{qk2k8+PykLYMcv6rRLW(S9sz=xi`Go6(%Xr z`)i7E<#uGcjt&Y!9-t_}HONG~l2jK$i7BX8xrs)h}$~KeO(;etc?r zJ2}#1w*?K}Jl-Gwx0Am!xVZ$2d4iZ*BS&?rRNYf5?_F=-`}qV1rmXBeKb@ATrNxd) zK=4;le2bRYS=xG@);EZhFb-wZL?Cx_2YzWmOn>}10$N|42LdGR*Q+7!Hb~v>YM+^1 z%R@0C`eCM8Xz_pb=DWj1SlVw|=r5S^Hna`8@+astqNVL{;t<9|Rd69;q1^)9;fZ)} zA8kwKTq$?ZpCGJC*7%dZ=JJR}j3aT^5OTXy!j=TLXvB}4F-^-(N5RLM-={w0-7~eT z{!+D)BYBlq#6>q6YYLor) zlm6(IOOT*$JId;x&HjHf_SLj_s2eyHDdO#O08T~Ml45|kF109(BRJ?RyT)QbOilHj zz4F{N#q^O?dA{^?gL+hR8^0$2bwk~Eb_ergmxt5G*7r&^Az zs$V?1Y!CT$MR5F|+Uozyp?Bp1;lp+sl`rp*6kXI?$d%NQW6SR4_i$$aw6U&;iIxN( zAb#|_^kG-`^y%90jmBAvj(0G5$$4;Q0c3$enxZ=WRebwikm6;&Qh9KNa|FZ*FppAQc3(-RgKikP!HMQY{|N&1zQqIG1B>dIa^^23mU5}wa~p~S?lUgTL3$6ES# zqE%9$aB)Kvf1;~aMnnx{K4V`TIx%)!B~p%f{oge8TC~?&8rV4K$rdrf?%f%UaC1k( z&TFqX*giMiOgUu%r4PLRWjgxc(zLI*h`SLaw!tdG$E1&j(X;CUTBrx?jt&}H{jrM| zdVi(?2U)G_86EyI2a~)-Vjt|sGnEHnp>qcm;2`i0vNqy(zxZ$N-GAJ_f4}}uU*x}W zF8slQ5E)P8DacuJT<`Hh>&kT2TTI=;uog?2{u>Fd$k{uEVU@?@%rGNByk?DZnH)?i zAiMi67qQzv&N+MIP~G8+UW;{~j>jCw^**TzLx}HSE9Bn`vnl|X(_>CdbSJvnJwgL0 zVJ=V=KVbOwFQx-CfXmaqwy+YBq-Y|nV?o4>$ls9A zqOEjb8yq}6em+0nzMCi1r$#;9ZpA-Zs)CdM8L6Fy(g6Pef`I2*tzUjMV5oU@+{pO?#;bu%Msu`iE{BVYBzbE)Ri~1Kl)tgMaOm2A zGSdBZr2TpV5B?ih>R&z6fA$Q9{kUx_3JaN;)I1ZQ6jd?=c7`l?UF+#R^R-3id31hYs02=<`o6n|4jafiD}q(} zIOTs1z&3_zMvH?m!5zO}_|{@Um@@hL?a``NGgz(DK0jz_bb41fNQRs*BkC?{+w@nC zi>ys&-KQ9Yu@PD-q!V9IoX{FGHi0sJ3b?u$&x9X;gZ59U(9im z@eN~T$;5xIZS^1ZKmPvv|9^88it2*_8U}}33;1|VY`nNcno(V2Q0yds!s^$ibmgV0 z?ue@G90~ayr(rdP$ui=x3O5`|`wS>~I_rL;YlfgqCJh*HxrJk9QwCQ*^|*TmP1k(k z?xIZJ9Nb-<*IQFW+|tSfF;6xjsRFlgN~_KMFQy@?Rdvr|zbk8ewP-;q`(Lq~TsH6% zUYpCK@ZuY~ciG|h3E^>xaFKEh0N1xVjW0T6B_G<{Vw6ga5ju6d12mJq3Fd{msc@0q zTJ%4Yn{mHMQFl~GuE!s9rg4lJFH|C<`z4-+@)O;uyfr;}zMbH?+@;`7Fbg@AlN+x2 z-DY*yBy*DH<9*nB%QR2h)-{Mry>|=pNcFQ0<_;*lk#d`kaXv*$|47ISnX#Nf*>7=6Q=!%}3<0J{y)!B2w8N)$bEER1vGxb85);HuB{1y6)< zr1JaP`*YU^ch=8-l~~c<6bO?8(o& zy3c-#Kl5Qp$$3T*vX=+wB9B<;DP1BN8FoMA%*}`9wz9*SLr+pkua| zn;b(KK)rh{3F5@8Y@74fE1z8Kfdf#|$%|x}61WVY+2L63Mg(kh|b) z%o7fXw_as-jxf9Vt}wS}JR>JZTB1x>QX=Fxx?*p!NTjl8_5j@bbsTbbzhip|DZV-W zOV28=^4e&cV$I-l6HyQL=KH+uxD%EjX}C9#FnUC84qLX0Sd=2zyj8ykeW58$k*baO zqqd1-U0e@mCi;`NSCrwc56u z2~9sr-FW-qYmlO`aB;jWRQ5i@ffW@3!$C>dAIO>(cajqY3RJ<>NKhcq*0kEtmq4=F z9n;oAwk9TZ6p6ymGbgm1USfxF{i@+94h!%9)t+u+IA3k%_FFKjaYsd#8OCJnI zh@^E*0sDOm!{isAbv^6WWZw6i94EO2Fd(ri0Cwp|$Po_PQDnK?1iBbpk0|!BnNZV1 z99Bfv1oAairO*deTz(8lDQGBWIm$`b3$114foKlXzgzXY9E?`W%CMMq2d4rH!;$T+ zAr3C%cZxe(S4txXq;a-AivU9>s)NcvNUbIbFn80H6LhpO7hXnj5YtZ&S61z)@kOVy z3dxKiakXgvKrs<~No1R-r>|ZbA8aTTp6)X|;TVdzr^w84);@!TqmG|jPNrD{gucE- zM%rs2?t}*H36!IGK`HkVgSh0_<7};-|GMW# zM_5{|(1&cX;;#WW7})nSFH13g2ET?8ZlIZ=U4WiaTDKLO`SasH^7ud2eW?3c^!(ZB zE$dgD&E8|nFw_4VP6{+N1roae=0=_ZsA2G{_)USygVT;CHR!1?&U`WD+aF@YWe3m6 z$5{X2ed|&$gCtNq5J%;c!K-w)+V+t$onT}LwnOG@;kzY2UoQZJZS{!zV^e+S{0xJs zjrSl(8uuK$1(ZJhTcHiqpzn`3(tSe7whDlt-;V3;T{`Sq6#u5|i#19#PE}aX+qh$; z`A00rK>Bw~LQy_}qZNTXJ;Y^j&DaitDD&NXYsNLlOUvuVvZ%Yr!`0fOY|Nf2c zv`V6L18Ss6Pyw=xL)^Xbog~RIF4vHsuvXFdb~T;;!AfQ(@fW;fA@4{|0GS^fcj(OC zOgIyHDq6Fu&h*Q{uLog%wGl+U3hNnN4qfIvvI?E$L#eapb5t*i)o?ghFFd;^rCZ*T 
z+il?)U#eh%Age$qAVlC&4niaVO#KO5TBI`{k|$)pI|v~%@x9wzbKp^E!GUSU4@IhlVspkgi%dKS}M zpm7&!DU9nXKIlhUl%P;&XRWQd-ue4%AhLctv=+H7Lvey%uOmV`MNX@Cq8XpmeSPw% z`P)d@;$nwrRet6mWkKwn%U)>&BS`qA4{AJ*3S8S!PGf!qEUA|EwvE>_%Sro1K=WK* zU6fg{5ibUt@9O=6a?6C2@rr03l*+Q+0<$- z76R~B_XgdB%RGYf7n#EH*G798(-g=6bbs;=8MYj@3Vfj3+;R5>;$nlOLXsguiz1Uh zy22ZC%D{VV)Q(O7RiWz2{?f7;#&RG%EH2uU4?B(#UpMKc6wlZ_@gn~vwQ5DH)1v%*WCZ8wp3g{+M4(r$J|$@d`4sDWhba%6{Q{0u<1oscVW5c`7V zq+07VeQlZVNjhKaSFMFhZ1YT1YV=*Kz;XUUZIu(ltjWJAkPwBwrq=9=qEas#Z>5pe zoZWM-_TpNyLOE>LT~C+^_gbo5GIah20psu2Hz)p{ga7q8KnnmhwaD2`{2D-)!4-q~ z+XUgx1k8r&h+nDIJIw3rVemm^CSvx*{$xtI{XF1M{dCu%y+ifoB^m82Tk%cOH?Ej_Xd!t0S+8&Dj1A7L(b}e2-7hW0nArF-=nonWBX5R z=yDDzmX7RwhRPZmdhIrWsAaS^^okt1CYLr;bPWHB0*cc9Lz(>{sfCsck)htCwY9NP zO$dFl$Xco}9=m0=ZDF-d0#LlI<1bSql+WkwMV?re?|mM9suqI!g!C$6*hKUG33`Nz zNAMFy(nvYz(se6uch5Bw!TfVl>}|$(onPBUUJ?g5E=ey6D7<%?B!}>)%5Pe z#stUJgqLNX9?St0`{}kSa@|rAmfY;bgP1ZRZZh7c zvjpRllB}5F_C#L2d~*s7YL&vg$muJ(rwi$JlKJV?A5auo$TWk-l{rIzI46xWi6+wh zJoata1D~1wxp!7i(cZCEYBI+9=}p@UDZ35AAlr>N0A-iC0YT3n5~O`=(SGP;;wz!bl#&p4siLNhb~zxhmt`D>;_5IlUywD>7mCiSe69IC z;}-lirc9Z}IkpcLY(L*sGiDV%u~}H$mPpIi@Fkm~AA;k~T5cEl65Q%4ww6VrqU^{Xkcr?sN!<9d{Ca>l#s57H2%_}6dz zMQ9HmE+1S4+)bY2H}i~RJl(>)R^PYX84$8@`epCP6nM*|6@&;w zcJLUOi)U3(oe4H?Abnr9tI(-~ZgLPw&*HE1?|(k=lwfj+qWiWAz$~BCC8*I_G&>bRfHvBc1Iv@#Ast zs>qzeE2;@lQnSY@m811-ysYeuu&G3pj>ODYz}o&J5mKGPP45C48RkR1RfN5!$j`ly zu~){dt0n@&Qm#zfUr5-I^m;n+KK4?thVFU6jQ)E|tw0OvNCIykvev3yiLndG3FtVQ zYL>xx0Xc2Q@M?DNN~QAoCzOu?w_|fuBcAOCwCjLK=vjTBDOx4lOX{`x7iACr zNd7Fb9AyLt$a73&S}ve7#Yv_Rle!DF2xBDWBvkWjt+j1ZgLsJcEmMfyhexpN$fJl& zKtCsZW+j-)T}Hp&(3f^t4m}9}j`#$mY6upbfM7>3(CUM6Y5K$RE(cK6JL^^M6R?iJ zdOcg)&cWx$yZL|RIS2fgEf2;Nz}GGGaB%P5O+&M`oF^=(xY3GnZcSDBRvt^HPu{7G z76$A}9{C#+&D21wjKeY_Q^B^-WI*oxEGl+~stjkuji^y$>tVv@w=2_83;UU>{)3`4 z+zkl^DhTL(MEW)E4SmcxSJH*%MDWheuY>tDba<3(ds#n+7fuUhszVjd=Bluyb86aY z+4bJ7{|vhZCRnCnAXuOkA8_)d@qOt}EMuPgEuHY3K>$YxDjkh~ME?P+-`#0pe`i7v z!Hm8}WQD%5_FzOW)D%iT8v0`dIz2keeoas6ompZP{q1Q)Tu~q2g5%OXoGCx#QB=)KQ6Teg_dd#ICh5q!0a}*j|RKwhB zg=r?`j?=Z^$VqQ1`}+`QQ^%`>x_kw5xhNOK>&wr!tuE#S`Jc5sQTp~4`rRM@qZQ2G cX7In&4F2AJf7{*PHt@F%{GVb2;6G>oFDD19k^lez literal 0 HcmV?d00001 diff --git a/egs/speech_llm/SPEECH2SPEECH/local/compute_whisper_fbank.py b/egs/speech_llm/SPEECH2SPEECH/local/compute_whisper_fbank.py index b01a35c7d..4bc5e5a82 100755 --- a/egs/speech_llm/SPEECH2SPEECH/local/compute_whisper_fbank.py +++ b/egs/speech_llm/SPEECH2SPEECH/local/compute_whisper_fbank.py @@ -17,6 +17,16 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+""" +Usage: + python3 local/compute_whisper_fbank.py \ + --num-mel-bins 80 --whisper-fbank True --resample-to-16kHz True --speed-perturb False \ + --out-dir data/fbank \ + --huggingface-dataset-path-or-name worstchan/UltraChat-300K-SLAM-Omni \ + --audio-key question_audio --text-key answer \ + --prefix ultrachat +""" + import argparse import logging @@ -126,7 +136,7 @@ def compute_fbank(args): num_digits = 5 for i in range(num_shards): shard = dataset.shard(num_shards, i) - shard = shard.take(10) # for testing + # shard = shard.take(10) # for testing logging.info( f"Loading dataset shard {i} from {args.huggingface_dataset_path_or_name}" ) @@ -159,8 +169,6 @@ def compute_fbank(args): logging.info(f"Saving to {cuts_path}") # see https://github.com/lhotse-speech/lhotse/issues/1125 cut_set.drop_recordings().to_file(cuts_path) - if i > 1: - break def main(): diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/data_module.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/data_module.py similarity index 100% rename from egs/speech_llm/SPEECH2SPEECH/slam_omni/data_module.py rename to egs/speech_llm/SPEECH2SPEECH/qwen_omni/data_module.py diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/decode.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/decode.py similarity index 100% rename from egs/speech_llm/SPEECH2SPEECH/slam_omni/decode.py rename to egs/speech_llm/SPEECH2SPEECH/qwen_omni/decode.py diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/ds_config_zero1.json b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/ds_config_zero1.json similarity index 100% rename from egs/speech_llm/SPEECH2SPEECH/slam_omni/ds_config_zero1.json rename to egs/speech_llm/SPEECH2SPEECH/qwen_omni/ds_config_zero1.json diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/label_smoothing.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/label_smoothing.py similarity index 100% rename from egs/speech_llm/SPEECH2SPEECH/slam_omni/label_smoothing.py rename to egs/speech_llm/SPEECH2SPEECH/qwen_omni/label_smoothing.py diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/model.py similarity index 77% rename from egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py rename to egs/speech_llm/SPEECH2SPEECH/qwen_omni/model.py index 0cc93c237..97870337d 100644 --- a/egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/model.py @@ -1,11 +1,14 @@ +from typing import List, Tuple # Added for type hints + import torch from torch import nn -from transformers.trainer_pt_utils import LabelSmoother -from typing import List, Tuple # Added for type hints from torchmetrics.classification import MulticlassAccuracy +from transformers.trainer_pt_utils import LabelSmoother + IGNORE_TOKEN_ID = LabelSmoother.ignore_index import logging + class EncoderProjector(nn.Module): """ The encoder projector module. It is used to project the encoder outputs to the same dimension as the language model. 
@@ -69,7 +72,8 @@ class SPEECH_LLM(nn.Module): self.codec_lm = codec_lm if self.codec_lm: self.speech_token_projector = nn.Linear( - self.llm.config.hidden_size + self.llm.config.hidden_size, self.codec_lm.config.hidden_size + self.llm.config.hidden_size + self.llm.config.hidden_size, + self.codec_lm.config.hidden_size, ) self.codec_lm_head = nn.Linear( self.codec_lm.config.hidden_size, self.codec_lm.config.vocab_size @@ -89,6 +93,7 @@ class SPEECH_LLM(nn.Module): multidim_average="global", ignore_index=IGNORE_TOKEN_ID, ) + def _merge_input_ids_with_speech_features( self, speech_features, inputs_embeds, input_ids, attention_mask, labels=None ): @@ -274,68 +279,115 @@ class SPEECH_LLM(nn.Module): ) = self._merge_input_ids_with_speech_features( speech_features, inputs_embeds, input_ids, attention_mask, labels ) - input_seq_len = attention_mask.sum(dim=1) # shape, B - text_label_start_index_list, text_input_start_index_list, input_question_len_list = [], [], [] + input_seq_len = attention_mask.sum(dim=1) # shape, B + ( + text_label_start_index_list, + text_input_start_index_list, + input_question_len_list, + ) = ([], [], []) for i in range(labels.shape[0]): input_embeds_valid_index = torch.where(attention_mask[i] != 0)[0] input_embeds_start_index = input_embeds_valid_index[0] text_labels_valid_index = torch.where(labels[i] != IGNORE_TOKEN_ID)[0] text_labels_start_index = text_labels_valid_index[0] - assert input_seq_len[i] == input_embeds_valid_index[-1] - input_embeds_start_index + 1, f"input_seq_len: {input_seq_len[i]}, input_embeds_valid_index: {input_embeds_valid_index}, input_embeds_start_index: {input_embeds_start_index}" - assert input_embeds_valid_index[-1] == text_labels_valid_index[-1], f"input_embeds_valid_index: {input_embeds_valid_index}, text_labels_valid_index: {text_labels_valid_index}" + assert ( + input_seq_len[i] + == input_embeds_valid_index[-1] - input_embeds_start_index + 1 + ), f"input_seq_len: {input_seq_len[i]}, input_embeds_valid_index: {input_embeds_valid_index}, input_embeds_start_index: {input_embeds_start_index}" + assert ( + input_embeds_valid_index[-1] == text_labels_valid_index[-1] + ), f"input_embeds_valid_index: {input_embeds_valid_index}, text_labels_valid_index: {text_labels_valid_index}" input_question_len = text_labels_start_index - input_embeds_start_index - assert input_question_len + text_labels_valid_index[-1] - text_labels_start_index + 1 == input_seq_len[i] + assert ( + input_question_len + + text_labels_valid_index[-1] + - text_labels_start_index + + 1 + == input_seq_len[i] + ) text_label_start_index_list.append(text_labels_start_index) text_input_start_index_list.append(input_embeds_start_index) input_question_len_list.append(input_question_len) model_outputs = self.llm( - inputs_embeds=inputs_embeds, attention_mask=attention_mask, labels=labels, output_hidden_states=True + inputs_embeds=inputs_embeds, + attention_mask=attention_mask, + labels=labels, + output_hidden_states=True, ) text_loss = model_outputs.loss delay_step = 1 # prepare codec lm inputs - audio_codes_lens = [len(x) + input_question_len_list[i] + delay_step + 1 for i, x in enumerate(speech_codec_ids)] + audio_codes_lens = [ + len(x) + input_question_len_list[i] + delay_step + 1 + for i, x in enumerate(speech_codec_ids) + ] max_len_speech_codec = max(audio_codes_lens) if self.codec_lm_padding_side == "right": audio_codes = [ - [self.codec_lm.config.mask_token_id] * (input_question_len_list[i] + delay_step) + [self.codec_lm.config.bos_token_id] + x + 
[self.codec_lm.config.pad_token_id] * (max_len_speech_codec - audio_codes_lens[i]) + [self.codec_lm.config.mask_token_id] + * (input_question_len_list[i] + delay_step) + + [self.codec_lm.config.bos_token_id] + + x + + [self.codec_lm.config.pad_token_id] + * (max_len_speech_codec - audio_codes_lens[i]) for i, x in enumerate(speech_codec_ids) ] audio_labels = [ - [self.codec_lm.config.pad_token_id] * (input_question_len_list[i] + delay_step) + x + [self.codec_lm.config.eos_token_id] + [self.codec_lm.config.pad_token_id] * (max_len_speech_codec - audio_codes_lens[i]) + [self.codec_lm.config.pad_token_id] + * (input_question_len_list[i] + delay_step) + + x + + [self.codec_lm.config.eos_token_id] + + [self.codec_lm.config.pad_token_id] + * (max_len_speech_codec - audio_codes_lens[i]) for i, x in enumerate(speech_codec_ids) ] elif self.codec_lm_padding_side == "left": audio_codes = [ - [self.codec_lm.config.pad_token_id] * (max_len_speech_codec - audio_codes_lens[i]) + [self.codec_lm.config.mask_token_id] * (input_question_len_list[i] + delay_step) + [self.codec_lm.config.bos_token_id] + x + [self.codec_lm.config.pad_token_id] + * (max_len_speech_codec - audio_codes_lens[i]) + + [self.codec_lm.config.mask_token_id] + * (input_question_len_list[i] + delay_step) + + [self.codec_lm.config.bos_token_id] + + x for i, x in enumerate(speech_codec_ids) ] audio_labels = [ - [self.codec_lm.config.pad_token_id] * (max_len_speech_codec - audio_codes_lens[i]) + [self.codec_lm.config.pad_token_id] * (input_question_len_list[i] + delay_step) + x + [self.codec_lm.config.eos_token_id] + [self.codec_lm.config.pad_token_id] + * (max_len_speech_codec - audio_codes_lens[i]) + + [self.codec_lm.config.pad_token_id] + * (input_question_len_list[i] + delay_step) + + x + + [self.codec_lm.config.eos_token_id] for i, x in enumerate(speech_codec_ids) ] audio_codes = torch.tensor( - audio_codes, - dtype=torch.int64, - device=input_ids.device + audio_codes, dtype=torch.int64, device=input_ids.device ) audio_labels = torch.tensor( - audio_labels, - dtype=torch.int64, - device=input_ids.device + audio_labels, dtype=torch.int64, device=input_ids.device ) audio_attention_mask = audio_codes.ne(self.codec_lm.config.pad_token_id) audio_embeddings = self.codec_lm.get_input_embeddings()(audio_codes) - + text_last_hidden_lists, text_embeds_list, text_input_embeds_list = [], [], [] for i in range(len(text_label_start_index_list)): - text_last_hidden = model_outputs.hidden_states[-1][i, text_input_start_index_list[i]:text_input_start_index_list[i] + input_seq_len[i] - 1] + text_last_hidden = model_outputs.hidden_states[-1][ + i, + text_input_start_index_list[i] : text_input_start_index_list[i] + + input_seq_len[i] + - 1, + ] text_last_hidden_lists.append(text_last_hidden) - text_embed = inputs_embeds[i, text_input_start_index_list[i] + 1:text_input_start_index_list[i] + input_seq_len[i]] # exclude bos + text_embed = inputs_embeds[ + i, + text_input_start_index_list[i] + + 1 : text_input_start_index_list[i] + + input_seq_len[i], + ] # exclude bos text_embeds_list.append(text_embed) text_input_embeds = torch.cat( @@ -344,22 +396,34 @@ class SPEECH_LLM(nn.Module): text_embed, ], dim=-1, - )# shape, T, D1 + D2 - text_input_embeds = self.speech_token_projector(text_input_embeds) # shape, T, D_codec + ) # shape, T, D1 + D2 + text_input_embeds = self.speech_token_projector( + text_input_embeds + ) # shape, T, D_codec text_input_embeds_list.append(text_input_embeds) - + for i in range(audio_embeddings.shape[0]): text_input_embeds = 
text_input_embeds_list[i] if self.codec_lm_padding_side == "right": - audio_embeddings[i, :text_input_embeds.shape[0]] += text_input_embeds + audio_embeddings[i, : text_input_embeds.shape[0]] += text_input_embeds elif self.codec_lm_padding_side == "left": - start_idx = torch.where(audio_codes[i] == self.codec_lm.config.mask_token_id)[0][0] + start_idx = torch.where( + audio_codes[i] == self.codec_lm.config.mask_token_id + )[0][0] start_idx_re_compute = torch.where(audio_attention_mask[i] != 0)[0][0] - assert start_idx == start_idx_re_compute, f"start_idx: {start_idx}, start_idx_re_compute: {start_idx_re_compute}" + assert ( + start_idx == start_idx_re_compute + ), f"start_idx: {start_idx}, start_idx_re_compute: {start_idx_re_compute}" if text_input_embeds.shape[0] > audio_embeddings.shape[1] - start_idx: - text_input_embeds = text_input_embeds[:audio_embeddings.shape[1] - start_idx] - logging.warning(f"Truncate text_input_embeds: {text_input_embeds.shape} to {audio_embeddings.shape[1] - start_idx}") - audio_embeddings[i, start_idx:start_idx + text_input_embeds.shape[0]] += text_input_embeds + text_input_embeds = text_input_embeds[ + : audio_embeddings.shape[1] - start_idx + ] + logging.warning( + f"Truncate text_input_embeds: {text_input_embeds.shape} to {audio_embeddings.shape[1] - start_idx}" + ) + audio_embeddings[ + i, start_idx : start_idx + text_input_embeds.shape[0] + ] += text_input_embeds speech_outputs = self.codec_lm( attention_mask=audio_attention_mask, @@ -369,8 +433,10 @@ class SPEECH_LLM(nn.Module): ) last_hidden_state = speech_outputs.hidden_states[-1].clone() - audio_logits = self.codec_lm_head(last_hidden_state) # shape, B, T, vocab_size - audio_logits = audio_logits.contiguous().view(-1, self.codec_lm.config.vocab_size) + audio_logits = self.codec_lm_head(last_hidden_state) # shape, B, T, vocab_size + audio_logits = audio_logits.contiguous().view( + -1, self.codec_lm.config.vocab_size + ) audio_labels = audio_labels.contiguous().view(-1) audio_labels = audio_labels.masked_fill( audio_labels == self.codec_lm.config.pad_token_id, IGNORE_TOKEN_ID @@ -378,7 +444,6 @@ class SPEECH_LLM(nn.Module): codec_loss = self.loss_fct(audio_logits, audio_labels) audio_preds = torch.argmax(audio_logits, -1) - with torch.no_grad(): preds = torch.argmax(model_outputs.logits, -1) acc = compute_accuracy( @@ -392,12 +457,11 @@ class SPEECH_LLM(nn.Module): ignore_label=IGNORE_TOKEN_ID, ) audio_topk_acc = self.audio_accuracy_metric( - audio_logits.detach(), - audio_labels.detach()).item() - + audio_logits.detach(), audio_labels.detach() + ).item() return text_loss, acc, codec_loss, audio_acc, audio_topk_acc - + def decode( self, fbank: torch.Tensor = None, @@ -453,12 +517,12 @@ class SPEECH_LLM(nn.Module): def decode_with_speech_output( self, fbank: torch.Tensor = None, - input_ids: torch.LongTensor = None, # Prompt input_ids - attention_mask: torch.Tensor = None, # Prompt attention_mask + input_ids: torch.LongTensor = None, # Prompt input_ids + attention_mask: torch.Tensor = None, # Prompt attention_mask max_text_new_tokens: int = 1024, - max_speech_new_tokens: int = 1024, # Max length for speech tokens - llm_kwargs: dict = None, # Kwargs for text LLM generate - codec_lm_kwargs: dict = None # Kwargs for codec LM (e.g., temperature for sampling) - NOT IMPLEMENTED YET + max_speech_new_tokens: int = 1024, # Max length for speech tokens + llm_kwargs: dict = None, # Kwargs for text LLM generate + codec_lm_kwargs: dict = None, # Kwargs for codec LM (e.g., temperature for sampling) - NOT IMPLEMENTED 
YET ) -> Tuple[torch.LongTensor, List[List[int]]]: """ Generates text and corresponding speech tokens using the revised logic. @@ -479,16 +543,22 @@ class SPEECH_LLM(nn.Module): the generated speech codec tokens for a batch item. """ assert fbank.shape[0] == 1, "Batch size must be 1 for speech generation." - if not self.codec_lm or not self.speech_token_projector or not self.codec_lm_head: - raise ValueError("codec_lm and associated layers must be initialized to generate speech output.") + if ( + not self.codec_lm + or not self.speech_token_projector + or not self.codec_lm_head + ): + raise ValueError( + "codec_lm and associated layers must be initialized to generate speech output." + ) - device = next(self.parameters()).device # Use model's device + device = next(self.parameters()).device # Use model's device batch_size = fbank.shape[0] # --- 1. Prepare Prompt Embeddings --- encoder_outs = self.encoder(fbank) speech_features = self.encoder_projector(encoder_outs) - speech_features = speech_features.to(self.llm.dtype) # Ensure matching dtype + speech_features = speech_features.to(self.llm.dtype) # Ensure matching dtype prompt_embeds = self.llm.get_input_embeddings()(input_ids) @@ -511,12 +581,12 @@ class SPEECH_LLM(nn.Module): "eos_token_id": self.llm.config.eos_token_id, "pad_token_id": self.llm.config.pad_token_id, "num_beams": 1, - "do_sample": True, # Typically false for S2ST/S2TT tasks unless exploration needed + "do_sample": True, # Typically false for S2ST/S2TT tasks unless exploration needed "top_p": 0.5, "top_k": 20, "repetition_penalty": 1.1, "temperature": 0.7, - **(llm_kwargs or {}) # User-provided kwargs override defaults + **(llm_kwargs or {}), # User-provided kwargs override defaults } text_outputs = self.llm.generate( @@ -525,17 +595,22 @@ class SPEECH_LLM(nn.Module): max_new_tokens=max_text_new_tokens, return_dict_in_generate=True, output_hidden_states=True, - **final_llm_kwargs + **final_llm_kwargs, ) delay_step = 1 - generated_text_ids = text_outputs.sequences # [B, S_full] + generated_text_ids = text_outputs.sequences # [B, S_full] eos_token_id = self.llm.config.eos_token_id - eos_token_embedding = self.llm.get_input_embeddings()(torch.tensor([[eos_token_id]], device=device)) # 1,D - assert generated_text_ids[0, -1] == eos_token_id, f"Last token is not EOS: {generated_text_ids[0, -1]} != {eos_token_id}" + eos_token_embedding = self.llm.get_input_embeddings()( + torch.tensor([[eos_token_id]], device=device) + ) # 1,D + assert ( + generated_text_ids[0, -1] == eos_token_id + ), f"Last token is not EOS: {generated_text_ids[0, -1]} != {eos_token_id}" thinker_token_embeds_org = [ - token_hidden_states[0].to(self.llm.device) for token_hidden_states in text_outputs.hidden_states + token_hidden_states[0].to(self.llm.device) + for token_hidden_states in text_outputs.hidden_states ] - # shift one for thinker token_embeds, drop the first embeds, and add the eos token + # shift one for thinker token_embeds, drop the first embeds, and add the eos token first_thinker_token_embed = torch.cat( [ thinker_token_embeds_org[0][:, 1:], @@ -544,19 +619,27 @@ class SPEECH_LLM(nn.Module): dim=1, ) - thinker_token_embeds = [first_thinker_token_embed] + thinker_token_embeds_org[2:] + [eos_token_embedding] + thinker_token_embeds = ( + [first_thinker_token_embed] + + thinker_token_embeds_org[2:] + + [eos_token_embedding] + ) thinker_hidden_states = [ - token_hidden_states[-1].to(self.llm.device) for token_hidden_states in text_outputs.hidden_states + token_hidden_states[-1].to(self.llm.device) + 
for token_hidden_states in text_outputs.hidden_states ] # thinker_reply_part = torch.cat(thinker_hidden_states[1:], dim=1) + torch.cat(thinker_token_embeds[1:], dim=1) - thinker_reply_part = [torch.cat( - [ - thinker_hidden_state, - thinker_token_embed, - ], - dim=-1, - ) - for thinker_hidden_state, thinker_token_embed in zip(thinker_hidden_states[1:], thinker_token_embeds[1:]) + thinker_reply_part = [ + torch.cat( + [ + thinker_hidden_state, + thinker_token_embed, + ], + dim=-1, + ) + for thinker_hidden_state, thinker_token_embed in zip( + thinker_hidden_states[1:], thinker_token_embeds[1:] + ) ] thinker_reply_part = torch.cat(thinker_reply_part, dim=1) # thinker_prompt_part = thinker_hidden_states[0] + thinker_token_embeds[0] @@ -568,26 +651,35 @@ class SPEECH_LLM(nn.Module): dim=-1, ) - thinker_prompt_part = self.speech_token_projector(thinker_prompt_part) # [B, S_full, D_codec] - thinker_reply_part = self.speech_token_projector(thinker_reply_part) # [B, S_full, D_codec] - + thinker_prompt_part = self.speech_token_projector( + thinker_prompt_part + ) # [B, S_full, D_codec] + thinker_reply_part = self.speech_token_projector( + thinker_reply_part + ) # [B, S_full, D_codec] thinker_prompt_part_seq_len = thinker_prompt_part.shape[1] talker_input_ids = torch.full( - (batch_size, thinker_prompt_part_seq_len + delay_step + 1), self.codec_lm.config.mask_token_id, dtype=torch.long, device=self.llm.device + (batch_size, thinker_prompt_part_seq_len + delay_step + 1), + self.codec_lm.config.mask_token_id, + dtype=torch.long, + device=self.llm.device, ) - talker_input_ids[:,-1] = self.codec_lm.config.bos_token_id - talker_inputs_embeds = self.codec_lm.get_input_embeddings()(talker_input_ids) # [B, S_full, D_codec] + talker_input_ids[:, -1] = self.codec_lm.config.bos_token_id + talker_inputs_embeds = self.codec_lm.get_input_embeddings()( + talker_input_ids + ) # [B, S_full, D_codec] thinker_input_embeds = torch.cat( [ thinker_prompt_part, - thinker_reply_part[:, :delay_step + 1, :], + thinker_reply_part[:, : delay_step + 1, :], ], dim=1, ) talker_inputs_embeds += thinker_input_embeds - thinker_reply_part = thinker_reply_part[:, delay_step + 1:, :] # [B, S_full, D_codec] - + thinker_reply_part = thinker_reply_part[ + :, delay_step + 1 :, : + ] # [B, S_full, D_codec] past_key_values = None # generated_speech_tokens_list = [[] for _ in range(batch_size)] @@ -599,10 +691,14 @@ class SPEECH_LLM(nn.Module): # Get embedding for the *current* input token ID (initially BOS, then generated tokens) # current_speech_embeds = self.codec_lm.get_input_embeddings()(current_speech_input_ids) # [B, 1, D_codec] if t > 0: - talker_inputs_embeds = self.codec_lm.get_input_embeddings()(next_token_ids) # [B, 1, D_codec] + talker_inputs_embeds = self.codec_lm.get_input_embeddings()( + next_token_ids + ) # [B, 1, D_codec] if thinker_reply_part.shape[1] > 0: talker_inputs_embeds += thinker_reply_part[:, :1, :] - thinker_reply_part = thinker_reply_part[:, 1:, :] # Remove the first token for next step + thinker_reply_part = thinker_reply_part[ + :, 1:, : + ] # Remove the first token for next step # # Add the projected text embedding corresponding to the current timestep `t` # if t < text_context_len: # # Text context from the full generated text sequence @@ -611,20 +707,24 @@ class SPEECH_LLM(nn.Module): # else: # # No more text context to add # inputs_embeds = current_speech_embeds - + # Forward pass through codec LM for one step # We provide inputs_embeds directly, bypassing prepare_inputs_for_generation codec_outputs = 
self.codec_lm( - inputs_embeds=talker_inputs_embeds, # Combined embedding for this step + inputs_embeds=talker_inputs_embeds, # Combined embedding for this step past_key_values=past_key_values, use_cache=True, return_dict=True, output_hidden_states=True, # No attention mask needed here when using past_key_values and single token input ) - last_token_hidden_state = codec_outputs.hidden_states[-1][:, -1, :] # [B, D_codec] #TODO: check shape here + last_token_hidden_state = codec_outputs.hidden_states[-1][ + :, -1, : + ] # [B, D_codec] #TODO: check shape here # Get logits for the *last* token generated in this step - next_token_logits = self.codec_lm_head(last_token_hidden_state) # Use -1 index + next_token_logits = self.codec_lm_head( + last_token_hidden_state + ) # Use -1 index # suppress tokens between 4096:len(vocab)-3 # next_token_logits[:, 4096:-3] = -float("Inf") # TODO: where we should supress tokens? next_token_ids = topk_sampling( @@ -634,11 +734,14 @@ class SPEECH_LLM(nn.Module): if next_token_ids[0, 0] == self.codec_lm.config.eos_token_id: break # current_speech_input_ids = next_token_ids # Use the newly generated token ID as input for next step - past_key_values = codec_outputs.past_key_values # Update KV cache - generated_speech_tokens_list.append(next_token_ids.squeeze(1).cpu().tolist()[0]) + past_key_values = codec_outputs.past_key_values # Update KV cache + generated_speech_tokens_list.append( + next_token_ids.squeeze(1).cpu().tolist()[0] + ) # --- 6. Return Results --- return generated_text_ids, generated_speech_tokens_list + def compute_accuracy(pad_outputs, pad_targets, ignore_label): """Calculate accuracy. Copied from https://github.com/X-LANCE/SLAM-LLM/blob/main/src/slam_llm/utils/metric.py @@ -717,4 +820,4 @@ def top_k_top_p_filtering( 1, sorted_indices, sorted_indices_to_remove ) logits[indices_to_remove] = filter_value - return logits \ No newline at end of file + return logits diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/speech_dataset.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/speech_dataset.py similarity index 99% rename from egs/speech_llm/SPEECH2SPEECH/slam_omni/speech_dataset.py rename to egs/speech_llm/SPEECH2SPEECH/qwen_omni/speech_dataset.py index d0a77fd0e..43a4efb5a 100644 --- a/egs/speech_llm/SPEECH2SPEECH/slam_omni/speech_dataset.py +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/speech_dataset.py @@ -1,13 +1,12 @@ from typing import Callable, Dict, List, Union import torch -from torch.utils.data.dataloader import DataLoader, default_collate - from lhotse import validate from lhotse.cut import CutSet from lhotse.dataset.input_strategies import BatchIO, PrecomputedFeatures from lhotse.utils import compute_num_frames, ifnone from lhotse.workarounds import Hdf5MemoryIssueFix +from torch.utils.data.dataloader import DataLoader, default_collate class K2SpeechRecognitionDataset(torch.utils.data.Dataset): diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py similarity index 100% rename from egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py rename to egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/web_demo.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/web_demo.py similarity index 60% rename from egs/speech_llm/SPEECH2SPEECH/slam_omni/web_demo.py rename to egs/speech_llm/SPEECH2SPEECH/qwen_omni/web_demo.py index 3155174fb..e33d2437d 100644 --- a/egs/speech_llm/SPEECH2SPEECH/slam_omni/web_demo.py +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/web_demo.py @@ 
-1,26 +1,25 @@ # Modified from https://github.com/QwenLM/Qwen2.5-Omni/blob/main/web_demo.py import io - -import numpy as np -import gradio as gr -import soundfile as sf - -import gradio.processing_utils as processing_utils -import tempfile -from transformers import AutoModelForCausalLM, AutoTokenizer, Qwen2Config -from gradio_client import utils as client_utils - -from argparse import ArgumentParser -import whisper -import torch -from peft import LoraConfig, get_peft_model -from whisper_encoder_forward_monkey_patch import replace_whisper_encoder_forward -from model import SPEECH_LLM, EncoderProjector -from train import DEFAULT_SPEECH_TOKEN, add_model_arguments -import sherpa_onnx -from cosyvoice.cli.cosyvoice import CosyVoice import sys -sys.path.append('/workspace/CosyVoice/third_party/Matcha-TTS') +from argparse import ArgumentParser + +import gradio as gr +import gradio.processing_utils as processing_utils +import numpy as np +import sherpa_onnx +import soundfile as sf +import torch +import whisper +from cosyvoice.cli.cosyvoice import CosyVoice +from gradio_client import utils as client_utils +from model import SPEECH_LLM, EncoderProjector +from peft import LoraConfig, get_peft_model +from train import DEFAULT_SPEECH_TOKEN, add_model_arguments +from transformers import AutoModelForCausalLM, AutoTokenizer, Qwen2Config +from whisper_encoder_forward_monkey_patch import replace_whisper_encoder_forward + +# https://github.com/FunAudioLLM/CosyVoice/tree/main/third_party +sys.path.append("/workspace/CosyVoice/third_party/Matcha-TTS") def get_model(params, device="cuda"): @@ -88,7 +87,7 @@ def get_model(params, device="cuda"): codec_lm = AutoModelForCausalLM.from_config( config=config, attn_implementation=attn_implementation, - torch_dtype=torch.float16 + torch_dtype=torch.float16, ) codec_lm.resize_token_embeddings(codec_vocab_size) codec_lm.vocab_size = codec_vocab_size @@ -102,12 +101,10 @@ def get_model(params, device="cuda"): llm, encoder_projector, codec_lm, - codec_lm_padding_side= "left" if params.use_flash_attn else "right", + codec_lm_padding_side="left" if params.use_flash_attn else "right", ) - checkpoint = torch.load( - f"{params.checkpoint_path}", map_location="cpu" - ) + checkpoint = torch.load(f"{params.checkpoint_path}", map_location="cpu") model.load_state_dict(checkpoint, strict=False) model.to(device) @@ -122,27 +119,37 @@ def audio_decode_cosyvoice(audio_tokens, codec_decoder): Args: audio_tokens (list): List of audio tokens to be processed. codec_decoder: Codec decoder for generating audio. - + Returns: torch.Tensor: Generated audio waveform. 
""" - flow_embedding = codec_decoder.frontend.spk2info['中文女']['embedding'] + flow_embedding = codec_decoder.frontend.spk2info["中文女"]["embedding"] flow_prompt_speech_token = torch.zeros(1, 0, dtype=torch.int32) prompt_speech_feat = torch.zeros(1, 0, 80) - tts_mel, _ = codec_decoder.model.flow.inference(token=audio_tokens.to(codec_decoder.model.device), - token_len=torch.tensor([audio_tokens.shape[1]], dtype=torch.int32).to(codec_decoder.model.device), - prompt_token=flow_prompt_speech_token.to(codec_decoder.model.device), - prompt_token_len=torch.tensor([flow_prompt_speech_token.shape[1]], dtype=torch.int32).to(codec_decoder.model.device), - prompt_feat=prompt_speech_feat.to(codec_decoder.model.device), - prompt_feat_len=torch.tensor([prompt_speech_feat.shape[1]], dtype=torch.int32).to(codec_decoder.model.device), - embedding=flow_embedding.to(codec_decoder.model.device), - flow_cache=torch.zeros(1, 80, 0, 2).to(codec_decoder.model.device),) + tts_mel, _ = codec_decoder.model.flow.inference( + token=audio_tokens.to(codec_decoder.model.device), + token_len=torch.tensor([audio_tokens.shape[1]], dtype=torch.int32).to( + codec_decoder.model.device + ), + prompt_token=flow_prompt_speech_token.to(codec_decoder.model.device), + prompt_token_len=torch.tensor( + [flow_prompt_speech_token.shape[1]], dtype=torch.int32 + ).to(codec_decoder.model.device), + prompt_feat=prompt_speech_feat.to(codec_decoder.model.device), + prompt_feat_len=torch.tensor( + [prompt_speech_feat.shape[1]], dtype=torch.int32 + ).to(codec_decoder.model.device), + embedding=flow_embedding.to(codec_decoder.model.device), + flow_cache=torch.zeros(1, 80, 0, 2).to(codec_decoder.model.device), + ) - - audio_hat, _ = codec_decoder.model.hift.inference(speech_feat=tts_mel, cache_source=torch.zeros(1, 1, 0)) + audio_hat, _ = codec_decoder.model.hift.inference( + speech_feat=tts_mel, cache_source=torch.zeros(1, 1, 0) + ) return audio_hat + def preprocess( messages, tokenizer, @@ -178,28 +185,14 @@ def preprocess( attention_mask = input_ids.ne(tokenizer.pad_token_id) return input_ids, attention_mask - - -def _launch_demo(args, model, tokenizer, token2wav_model, asr_model): + +def _launch_demo(args, model, tokenizer, token2wav_model, asr_model): def format_history(history: list): messages = [] for item in history: if isinstance(item["content"], str): - messages.append({"role": item['role'], "content": item['content']}) - # elif item["role"] == "user" and (isinstance(item["content"], list) or - # isinstance(item["content"], tuple)): - # file_path = item["content"][0] - # # TODO: check if the file_path's transcript is already in the history - # mime_type = client_utils.get_mimetype(file_path) - # if mime_type.startswith("audio"): - # messages.append({ - # "role": - # item['role'], - # "content": item["content"][1] # append audio transcript here - # }) - print('predict history: ', messages) - # messages = messages[-2:] # TODO: WAR: add history later + messages.append({"role": item["role"], "content": item["content"]}) return messages def decode( @@ -217,9 +210,8 @@ def _launch_demo(args, model, tokenizer, token2wav_model, asr_model): dtype = torch.float32 device = model.llm.device - feature = feature.to(device, dtype=dtype)#.transpose(1, 2) - # assert feature.shape[2] == 80 - + feature = feature.to(device, dtype=dtype) + input_ids, attention_mask = preprocess([messages], tokenizer) generated_ids, audio_tokens = model.decode_with_speech_output( @@ -227,26 +219,21 @@ def _launch_demo(args, model, tokenizer, token2wav_model, asr_model): ) hyps = 
tokenizer.batch_decode(generated_ids, skip_special_tokens=True) - # print('hyps: ', hyps, 23333333333333333333333333) + yield {"type": "text", "data": hyps[0]} - # yield {"type": "text", "data": hyps} audio_tokens = [token for token in audio_tokens if token < 4096] audio_tokens = torch.tensor(audio_tokens, dtype=torch.int32).unsqueeze(0) audio_hat = audio_decode_cosyvoice(audio_tokens, token2wav_model) - audio = audio_hat.squeeze(0).cpu().numpy() - # sf.write(f'{wav_name}.wav', audio_hat.squeeze(0).cpu().numpy(), 22050) + audio = audio_hat.squeeze(0).cpu().numpy() audio = np.array(audio * 32767).astype(np.int16) - # yield {"type": "audio", "data": (22050, audio)} - # with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmpfile: - # sf.write(tmpfile.name, audio, 22050, format="WAV") - # audio_path = tmpfile.name wav_io = io.BytesIO() sf.write(wav_io, audio, samplerate=22050, format="WAV") wav_io.seek(0) wav_bytes = wav_io.getvalue() audio_path = processing_utils.save_bytes_to_cache( - wav_bytes, "audio.wav", cache_dir=demo.GRADIO_CACHE) + wav_bytes, "audio.wav", cache_dir=demo.GRADIO_CACHE + ) yield {"type": "audio", "data": audio_path} @@ -259,25 +246,27 @@ def _launch_demo(args, model, tokenizer, token2wav_model, asr_model): gr.update(visible=True), # stop_btn ) print(2333, history, audio) - history.append({"role": "user", "content": (audio,)}) - history.append({"role": "user", "content": f"{DEFAULT_SPEECH_TOKEN}"}) + history.append({"role": "user", "content": (audio,)}) + history.append({"role": "user", "content": f"{DEFAULT_SPEECH_TOKEN}"}) history.append({"role": "assistant", "content": ""}) - formatted_history = format_history(history=history) # only keep string text format + formatted_history = format_history( + history=history + ) # only keep string text format assert audio is not None audio_transcript = get_transcript( audio, asr_model, ) - print('audio_transcript: ', audio_transcript) history[-2]["content"] = audio_transcript fbank = whisper.log_mel_spectrogram(audio, device=model.llm.device) fbank = fbank.unsqueeze(0) assert fbank.ndim == 3 - # history.append({"role": "assistant", "content": ""}) - for chunk in decode(model, token2wav_model, tokenizer, fbank, formatted_history): + for chunk in decode( + model, token2wav_model, tokenizer, fbank, formatted_history + ): if chunk["type"] == "text": history[-1]["content"] = chunk["data"] yield ( @@ -287,10 +276,9 @@ def _launch_demo(args, model, tokenizer, token2wav_model, asr_model): gr.update(visible=True), # stop_btn ) if chunk["type"] == "audio": - history.append({ - "role": "assistant", - "content": gr.Audio(chunk["data"]) - }) + history.append( + {"role": "assistant", "content": gr.Audio(chunk["data"])} + ) # Final yield yield ( @@ -304,8 +292,7 @@ def _launch_demo(args, model, tokenizer, token2wav_model, asr_model): with gr.Tab("Online"): with gr.Row(): with gr.Column(scale=1): - microphone = gr.Audio(sources=['microphone'], - type="filepath") + microphone = gr.Audio(sources=["microphone"], type="filepath") submit_btn = gr.Button("Submit", variant="primary") stop_btn = gr.Button("Stop", visible=False) clear_btn = gr.Button("Clear History") @@ -315,64 +302,80 @@ def _launch_demo(args, model, tokenizer, token2wav_model, asr_model): def clear_history(): return [], gr.update(value=None) - submit_event = submit_btn.click(fn=media_predict, - inputs=[ - microphone, - media_chatbot, - ], - outputs=[ - microphone, - media_chatbot, submit_btn, - stop_btn - ]) + submit_event = submit_btn.click( + fn=media_predict, + inputs=[ + 
microphone, + media_chatbot, + ], + outputs=[microphone, media_chatbot, submit_btn, stop_btn], + ) stop_btn.click( - fn=lambda: - (gr.update(visible=True), gr.update(visible=False)), + fn=lambda: (gr.update(visible=True), gr.update(visible=False)), inputs=None, outputs=[submit_btn, stop_btn], cancels=[submit_event], - queue=False) - clear_btn.click(fn=clear_history, - inputs=None, - outputs=[media_chatbot, microphone]) + queue=False, + ) + clear_btn.click( + fn=clear_history, inputs=None, outputs=[media_chatbot, microphone] + ) - demo.queue(default_concurrency_limit=100, max_size=100).launch(max_threads=100, - ssr_mode=False, - share=args.share, - inbrowser=args.inbrowser, - server_port=args.server_port, - server_name=args.server_name,) + demo.queue(default_concurrency_limit=100, max_size=100).launch( + max_threads=100, + ssr_mode=False, + share=args.share, + inbrowser=args.inbrowser, + server_port=args.server_port, + server_name=args.server_name, + ) def _get_args(): parser = ArgumentParser() - parser.add_argument('--checkpoint-path', - type=str, - default=None, - help='Checkpoint name or path, default to %(default)r') - parser.add_argument('--token2wav-path', - type=str, - default=None, - help='Token2Wav path, default to %(default)r') - parser.add_argument('--asr-model-dir', - type=str, - default=None, - help='ASR model dir, default to %(default)r') - parser.add_argument('--flash-attn2', - action='store_true', - default=False, - help='Enable flash_attention_2 when loading the model.') - parser.add_argument('--share', - action='store_true', - default=False, - help='Create a publicly shareable link for the interface.') - parser.add_argument('--inbrowser', - action='store_true', - default=False, - help='Automatically launch the interface in a new tab on the default browser.') - parser.add_argument('--server-port', type=int, default=8001, help='Demo server port.') - parser.add_argument('--server-name', type=str, default='127.0.0.1', help='Demo server name.') + parser.add_argument( + "--checkpoint-path", + type=str, + default=None, + help="Checkpoint name or path, default to %(default)r", + ) + parser.add_argument( + "--token2wav-path", + type=str, + default=None, + help="Token2Wav path, default to %(default)r", + ) + parser.add_argument( + "--asr-model-dir", + type=str, + default=None, + help="ASR model dir, default to %(default)r", + ) + parser.add_argument( + "--flash-attn2", + action="store_true", + default=False, + help="Enable flash_attention_2 when loading the model.", + ) + parser.add_argument( + "--share", + action="store_true", + default=False, + help="Create a publicly shareable link for the interface.", + ) + parser.add_argument( + "--inbrowser", + action="store_true", + default=False, + help="Automatically launch the interface in a new tab on the default browser.", + ) + parser.add_argument( + "--server-port", type=int, default=8001, help="Demo server port." + ) + parser.add_argument( + "--server-name", type=str, default="127.0.0.1", help="Demo server name." 
+ ) add_model_arguments(parser) args = parser.parse_args() return args @@ -401,6 +404,7 @@ def read_wave(wave_filename: str): return samples_float32, sample_rate + def get_transcript(audio_path, recognizer): samples, sample_rate = read_wave(audio_path) s = recognizer.create_stream() @@ -408,10 +412,13 @@ def get_transcript(audio_path, recognizer): recognizer.decode_streams([s]) return s.result.text + if __name__ == "__main__": args = _get_args() model, tokenizer = get_model(args) - token2wav = CosyVoice(args.token2wav_path, load_jit=False, load_trt=False, fp16=False) + token2wav = CosyVoice( + args.token2wav_path, load_jit=False, load_trt=False, fp16=False + ) asr_model = sherpa_onnx.OfflineRecognizer.from_paraformer( paraformer=f"{args.asr_model_dir}/model.int8.onnx", @@ -423,4 +430,4 @@ if __name__ == "__main__": debug=False, ) - _launch_demo(args, model, tokenizer, token2wav, asr_model) \ No newline at end of file + _launch_demo(args, model, tokenizer, token2wav, asr_model) diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/whisper_encoder_forward_monkey_patch.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/whisper_encoder_forward_monkey_patch.py similarity index 100% rename from egs/speech_llm/SPEECH2SPEECH/slam_omni/whisper_encoder_forward_monkey_patch.py rename to egs/speech_llm/SPEECH2SPEECH/qwen_omni/whisper_encoder_forward_monkey_patch.py From 11bd3c9ad828ab29e068c6f7346d2827d0e34b87 Mon Sep 17 00:00:00 2001 From: root Date: Tue, 29 Apr 2025 09:46:44 +0000 Subject: [PATCH 26/57] lint --- egs/speech_llm/SPEECH2SPEECH/README.md | 86 ++++------- .../local/compute_whisper_fbank.py | 2 +- egs/speech_llm/SPEECH2SPEECH/prepare.sh | 145 ++++++++---------- .../SPEECH2SPEECH/qwen_omni/data_module.py | 66 +------- .../SPEECH2SPEECH/qwen_omni/decode.py | 143 +++-------------- .../SPEECH2SPEECH/qwen_omni/model.py | 84 +++------- .../SPEECH2SPEECH/qwen_omni/train.py | 88 +++-------- 7 files changed, 154 insertions(+), 460 deletions(-) diff --git a/egs/speech_llm/SPEECH2SPEECH/README.md b/egs/speech_llm/SPEECH2SPEECH/README.md index e4738eeef..cc5e60063 100644 --- a/egs/speech_llm/SPEECH2SPEECH/README.md +++ b/egs/speech_llm/SPEECH2SPEECH/README.md @@ -23,67 +23,33 @@ The following table lists the folders for different tasks. Command for training is: ```bash -pip install -r whisper_llm_zh/requirements.txt - -pip install huggingface_hub['cli'] -mkdir -p models/whisper models/qwen - -# For aishell fine-tuned whisper model -huggingface-cli download --local-dir models/whisper yuekai/icefall_asr_aishell_whisper exp_large_v2/whisper-large-v2-aishell1-epoch-10-avg-6.pt -# For multi-hans fine-tuned whisper model -# huggingface-cli download --local-dir models/whisper yuekai/icefall_asr_multi-hans-zh_whisper v1.1/whisper-large-v2-multi-hans-zh-epoch-3-avg-10.pt - -# huggingface-clie download --local-dir models/qwen Qwen/Qwen2-7B-Instruct -huggingface-clie download --local-dir models/qwen Qwen/Qwen2-1.5B-Instruct - -# First, we only train the projector and freeze other modules. -torchrun --nproc_per_node 8 ./whisper_llm_zh/train.py \ - --max-duration 200 \ - --exp-dir ./whisper_llm_zh/exp_test \ - --speech-encoder-path-or-name models/whisper/exp_large_v2/whisper-large-v2-aishell1-epoch-10-avg-6.pt \ - --llm-path-or-name Qwen/Qwen2-1.5B-Instruct \ - --manifest-dir data/fbank \ - --deepspeed \ - --deepspeed_config ./whisper_llm_zh/ds_config_zero1.json \ - --use-flash-attn True \ - --use-lora False --unfreeze-llm False - -# Then we jointly train the projector and LLM LoRA modules. 
-torchrun --nproc_per_node 8 ./whisper_llm_zh/train.py \ - --max-duration 200 \ - --exp-dir ./whisper_llm_zh/exp_test \ - --speech-encoder-path-or-name models/whisper/exp_large_v2/whisper-large-v2-aishell1-epoch-10-avg-6.pt \ - --llm-path-or-name Qwen/Qwen2-1.5B-Instruct \ - --manifest-dir data/fbank \ - --deepspeed \ - --deepspeed_config ./whisper_llm_zh/ds_config_zero1.json \ - --use-flash-attn True \ - --use-lora True --unfreeze-llm True - --pretrained-model-path ./whisper_llm_zh/exp_test/epoch-3.pt +torchrun --nproc_per_node $ngpu ./qwen_omni/train.py \ + --max-duration 50 \ + --enable-musan False \ + --exp-dir $exp_dir \ + --speech-encoder-path-or-name models/whisper/v1.1/whisper-large-v2-multi-hans-zh-epoch-3-avg-10.pt \ + --llm-path-or-name Qwen/Qwen2.5-0.5B-Instruct \ + --manifest-dir data/fbank \ + --deepspeed \ + --deepspeed_config ./qwen_omni/ds_config_zero1.json \ + --use-flash-attn True \ + --use-lora True --unfreeze-llm True --unfreeze-speech-projector True --enable-speech-output True ``` -Command for decoding: +Command for decoding is: ```bash -mkdir -p models/whisper models/qwen models/checkpoint -huggingface-cli download --local-dir models/checkpoint yuekai/icefall_asr_aishell_whisper_qwen2_1.5B - -# For aishell fine-tuned whisper model -huggingface-cli download --local-dir models/whisper yuekai/icefall_asr_aishell_whisper exp_large_v2/whisper-large-v2-aishell1-epoch-10-avg-6.pt -# For multi-hans fine-tuned whisper model -# huggingface-cli download --local-dir models/whisper yuekai/icefall_asr_multi-hans-zh_whisper v1.1/whisper-large-v2-multi-hans-zh-epoch-3-avg-10.pt - -huggingface-clie download --local-dir models/qwen Qwen/Qwen2-7B-Instruct - -mkdir -p whisper_llm_zh/exp_aishell_whisper_qwen2_1.5B -ln -s models/checkpoint/epoch-10-avg-5.pt whisper_llm_zh/exp_aishell_whisper_qwen2_1.5B/epoch-999.pt - -python3 ./whisper_llm_zh/decode.py \ - --max-duration 80 \ - --exp-dir whisper_llm_zh/exp_aishell_whisper_qwen2_1.5B \ - --speech-encoder-path-or-name models/whisper/exp_large_v2/whisper-large-v2-aishell1-epoch-10-avg-6.pt \ - --llm-path-or-name models/qwen \ - --epoch 999 --avg 1 \ - --manifest-dir data/fbank \ - --use-flash-attn True \ - --use-lora True --dataset aishell +python3 ./qwen_omni/decode.py \ + --max-duration 1 \ + --exp-dir $exp_dir \ + --speech-encoder-path-or-name models/whisper/v1.1/whisper-large-v2-multi-hans-zh-epoch-3-avg-10.pt \ + --llm-path-or-name models/Qwen2.5-0.5B-Instruct \ + --epoch 999 --avg 1 \ + --manifest-dir data/fbank \ + --use-flash-attn True \ + --method e2e-epoch10_speech2speech \ + --enable-speech-output True \ + --token2wav-path models/CosyVoice-300M-SFT \ + --use-lora True ``` + +Please see [`prepare.sh`](./prepare.sh) for more details. 
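Before running the training/decoding commands above, it can be useful to confirm that the manifests produced by `prepare.sh` contain the fields this recipe expects. The sketch below is only illustrative and not part of the patch; the manifest path assumes the default output directory used in `prepare.sh`, and the custom fields `question`, `round`, and `answer_cosyvoice_speech_token` are the ones read by `qwen_omni/train.py` and `qwen_omni/decode.py`.

```python
# Illustrative sanity check of the prepared Belle cuts (not part of the recipe).
# Assumes prepare.sh has been run with its default output directory data/fbank.
from lhotse import load_manifest_lazy

cuts = load_manifest_lazy("data/fbank/cuts_belle_test.jsonl.gz")
for cut in cuts:
    # Multi-turn history is stored as one "USER: ... ASSISTANT: ..." string.
    print(cut.id, f"{cut.duration:.2f}s")
    print("rounds:", cut.custom["round"])
    print("question:", cut.custom["question"][:80])
    print("answer speech tokens:", len(cut.custom["answer_cosyvoice_speech_token"]))
    break  # inspect just the first cut
```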
diff --git a/egs/speech_llm/SPEECH2SPEECH/local/compute_whisper_fbank.py b/egs/speech_llm/SPEECH2SPEECH/local/compute_whisper_fbank.py index 4bc5e5a82..f67324ba3 100755 --- a/egs/speech_llm/SPEECH2SPEECH/local/compute_whisper_fbank.py +++ b/egs/speech_llm/SPEECH2SPEECH/local/compute_whisper_fbank.py @@ -165,7 +165,7 @@ def compute_fbank(args): storage_type=LilcomChunkyWriter, overwrite=True, ) - cuts_path = f"{in_out_dir}/{args.prefix}_cuts.{idx}.jsonl.gz" + cuts_path = f"{in_out_dir}/cuts_{args.prefix}.{idx}.jsonl.gz" logging.info(f"Saving to {cuts_path}") # see https://github.com/lhotse-speech/lhotse/issues/1125 cut_set.drop_recordings().to_file(cuts_path) diff --git a/egs/speech_llm/SPEECH2SPEECH/prepare.sh b/egs/speech_llm/SPEECH2SPEECH/prepare.sh index 47320ab66..42c9b4eaa 100644 --- a/egs/speech_llm/SPEECH2SPEECH/prepare.sh +++ b/egs/speech_llm/SPEECH2SPEECH/prepare.sh @@ -2,7 +2,9 @@ # fix segmentation fault reported in https://github.com/k2-fsa/icefall/issues/674 export PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python -export PYTHONPATH=$PYTHONPATH:/workspace/slam/icefall_omni + +export PYTHONPATH=$PYTHONPATH:/workspace/icefall + set -eou pipefail stage=$1 @@ -19,18 +21,37 @@ log() { if [ $stage -le 0 ] && [ $stop_stage -ge 0 ]; then - log "stage 0: " - #pip uninstall lhotse - #cd /workspace/slam/lhotse - #git config --global --add safe.directory /workspace/slam/lhotse - #pip install -e '.[dev]' - cd - - pip install -r slam_omni/requirements.txt + log "stage 0: Clone CosyVoice repo and install requirements inside the container" + # docker: ghcr.io/swivid/f5-tts:main + pip install k2==1.24.4.dev20241030+cuda12.4.torch2.4.0 -f https://k2-fsa.github.io/k2/cuda.html + git clone --recursive https://github.com/FunAudioLLM/CosyVoice.git /workspace/CosyVoice + cd /workspace/CosyVoice + # If you failed to clone submodule due to network failures, please run following command until success + git submodule update --init --recursive + pip install -r qwen_omni/requirements.txt + pip install -r qwen_omni/requirements-cosyvoice.txt + + # For Chinese only dataset, you can use the following command to download the Chinese fine-tuned whisper model. + huggingface-cli download --local-dir models/whisper yuekai/icefall_asr_multi-hans-zh_whisper + # Cosyvoice pretrained model for speech token2wav module + huggingface-cli download --local-dir models/CosyVoice-300M-SFT FunAudioLLM/CosyVoice-300M-SFT + # Qwen Pretrained model + huggingface-cli download --local-dir models/Qwen2.5-0.5B-Instruct Qwen/Qwen2.5-0.5B-Instruct + # Qwen-Omni like speech2speech model trained on worstchan/Belle_1.4M-SLAM-Omni + huggingface-cli download --local-dir models/qwen-omni-like-speech2speech-belle-1.4M yuekai/qwen-omni-like-speech2speech-belle-1.4M + + # For Gradio demo, we follow https://arxiv.org/abs/2412.15649 to use ASR model to decode the history speech as context. + pip install sherpa-onnx + model_path=local/sherpa-onnx-paraformer-zh-2023-09-14 + if [ ! 
-d $model_path ]; then + wget -nc https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-paraformer-zh-2023-09-14.tar.bz2 + tar xvf sherpa-onnx-paraformer-zh-2023-09-14.tar.bz2 -C local + fi fi +export PYTHONPATH=$PYTHONPATH:/workspace/CosyVoice if [ $stage -le 1 ] && [ $stop_stage -ge 1 ]; then - log "stage 1: Download whisper-large-v2 multi-hans-zh fbank feature from huggingface" - + log "stage 1: Compute fbank feature from huggingface" python3 local/compute_whisper_fbank.py \ --num-mel-bins 80 --whisper-fbank True --resample-to-16kHz True --speed-perturb False \ --out-dir data/fbank_test \ @@ -39,26 +60,42 @@ if [ $stage -le 1 ] && [ $stop_stage -ge 1 ]; then --prefix belle fi - if [ $stage -le 2 ] && [ $stop_stage -ge 2 ]; then log "Stage 2: Combine features" manifest_dir=data/fbank if [ ! -f $manifest_dir/cuts_belle_00001-01600.jsonl.gz ]; then + mv $manifest_dir/cuts_belle.00000.jsonl.gz ./ + # exclude cust_belle_00000.jsonl.gz for valid and test set pieces=$(find $manifest_dir -name "cuts_belle.*.jsonl.gz" | sort) - # # remove cust_belle_00000.jsonl.gz from pieces - # pieces=$(echo $pieces | sed 's/cuts_belle.00000.jsonl.gz//g') echo $pieces | wc lhotse combine $pieces data/fbank/cuts_belle_00001-01600.jsonl.gz - cd $manifest_dir && ln -s cuts_belle_00001-01600.jsonl.gz cuts_belle_train.jsonl.gz && cd - + mv ./cuts_belle.00000.jsonl.gz $manifest_dir # put it back + cd $manifest_dir && ln -s cuts_belle_00001-01600.jsonl.gz cuts_belle_train.jsonl.gz + ln -s cuts_belle.00000.jsonl.gz cuts_belle_test.jsonl.gz && cd - fi fi - +ngpu=8 +exp_dir=./qwen_omni/exp_speech2speech if [ $stage -le 3 ] && [ $stop_stage -ge 3 ]; then - log "stage 3: " - exp_dir=./slam_omni/exp_speech2speech_rerun - export PYTHONPATH=$PYTHONPATH:/workspace/CosyVoice - python3 ./slam_omni/decode.py \ + log "stage 3: Training Speech2Speech Model" + torchrun --nproc_per_node $ngpu ./qwen_omni/train.py \ + --max-duration 50 \ + --enable-musan False \ + --exp-dir $exp_dir \ + --speech-encoder-path-or-name models/whisper/v1.1/whisper-large-v2-multi-hans-zh-epoch-3-avg-10.pt \ + --llm-path-or-name Qwen/Qwen2.5-0.5B-Instruct \ + --manifest-dir data/fbank \ + --deepspeed \ + --deepspeed_config ./qwen_omni/ds_config_zero1.json \ + --use-flash-attn True \ + --use-lora True --unfreeze-llm True --unfreeze-speech-projector True --enable-speech-output True +fi + +if [ $stage -le 4 ] && [ $stop_stage -ge 4 ]; then + log "stage 4: Decoding, only support batch_size=1 for now." 
+ cd $exp_dir && ln -s ../../models/qwen-omni-like-speech2speech-belle-1.4M/pytorch_model.bin epoch-999.pt && cd - + python3 ./qwen_omni/decode.py \ --max-duration 1 \ --exp-dir $exp_dir \ --speech-encoder-path-or-name models/whisper/v1.1/whisper-large-v2-multi-hans-zh-epoch-3-avg-10.pt \ @@ -66,78 +103,20 @@ if [ $stage -le 3 ] && [ $stop_stage -ge 3 ]; then --epoch 999 --avg 1 \ --manifest-dir data/fbank \ --use-flash-attn True \ - --method e2e-epoch10_speech2speech_rerun \ + --method e2e-epoch10_speech2speech \ --enable-speech-output True \ - --token2wav-path /workspace/CosyVoice-300M-SFT \ - --use-lora True # --on-the-fly-feats True - + --token2wav-path models/CosyVoice-300M-SFT \ + --use-lora True fi - -if [ $stage -le 4 ] && [ $stop_stage -ge 4 ]; then - log "stage 4: " - ngpu=8 -torchrun --nproc_per_node $ngpu ./slam_omni/train.py \ - --max-duration 80 \ - --enable-musan False \ - --exp-dir ./slam_omni/exp_speech2text \ - --speech-encoder-path-or-name models/whisper/v1.1/whisper-large-v2-multi-hans-zh-epoch-3-avg-10.pt \ - --llm-path-or-name models/Qwen2.5-0.5B-Instruct \ - --manifest-dir data/fbank \ - --deepspeed \ - --deepspeed_config ./slam_omni/ds_config_zero1.json \ - --use-flash-attn True \ - --pretrained-model-path slam_omni/exp_speech2text/epoch-1-checkpoint-5000.pt/pytorch_model.bin \ - --sampler-state-dict-path slam_omni/exp_speech2text/epoch-1-checkpoint-5000-sampler.pt \ - --use-lora True --unfreeze-llm True -fi - - if [ $stage -le 5 ] && [ $stop_stage -ge 5 ]; then - log "stage 5: " - ngpu=8 - exp_dir=./slam_omni/exp_speech2speech_rerun - # exp_dir_new=./slam_omni/exp_s2s - torchrun --nproc_per_node $ngpu ./slam_omni/train.py \ - --max-duration 50 \ - --enable-musan False \ - --exp-dir $exp_dir \ - --speech-encoder-path-or-name models/whisper/v1.1/whisper-large-v2-multi-hans-zh-epoch-3-avg-10.pt \ - --llm-path-or-name models/Qwen2.5-0.5B-Instruct \ - --manifest-dir data/fbank \ - --deepspeed \ - --deepspeed_config ./slam_omni/ds_config_zero1.json \ - --use-flash-attn True \ - --pretrained-model-path $exp_dir/epoch-1-checkpoint-15000.pt/pytorch_model.bin \ - --sampler-state-dict-path $exp_dir/epoch-1-checkpoint-15000-sampler.pt \ - --use-lora True --unfreeze-llm True --unfreeze-speech-projector True --enable-speech-output True - # --pretrained-model-path slam_omni/exp_speech2text/epoch-1-checkpoint-5000.pt/pytorch_model.bin \ - # --sampler-state-dict-path $exp_dir/epoch-1-checkpoint-35000-sampler.pt \ - -fi - -if [ $stage -le 6 ] && [ $stop_stage -ge 6 ]; then - log "stage 6: " - export PYTHONPATH=$PYTHONPATH:/workspace/CosyVoice - exp_dir=./slam_omni/exp_speech2speech_rerun - python3 ./slam_omni/web_demo.py \ + log "stage 5: Gradio Demo" + python3 ./qwen_omni/web_demo.py \ --speech-encoder-path-or-name models/whisper/v1.1/whisper-large-v2-multi-hans-zh-epoch-3-avg-10.pt \ --llm-path-or-name models/Qwen2.5-0.5B-Instruct \ - --checkpoint-path $exp_dir/epoch-998.pt \ + --checkpoint-path $exp_dir/epoch-999.pt \ --use-flash-attn True \ --enable-speech-output True \ --asr-model-dir local/sherpa-onnx-paraformer-zh-2023-09-14 \ --use-lora True --token2wav-path /workspace/CosyVoice-300M-SFT --share - -fi - -if [ $stage -le 7 ] && [ $stop_stage -ge 7 ]; then - log "stage 7: " - model_path=local/sherpa-onnx-paraformer-zh-2023-09-14 - - if [ ! 
-d $model_path ]; then - pip install sherpa-onnx - wget -nc https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-paraformer-zh-2023-09-14.tar.bz2 - tar xvf sherpa-onnx-paraformer-zh-2023-09-14.tar.bz2 -C local - fi fi diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/data_module.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/data_module.py index 7cab52f73..dc38f32bd 100644 --- a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/data_module.py +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/data_module.py @@ -116,28 +116,6 @@ class AsrDataModule: help="The number of buckets for the DynamicBucketingSampler" "(you might want to increase it for larger datasets).", ) - # group.add_argument( - # "--concatenate-cuts", - # type=str2bool, - # default=False, - # help="When enabled, utterances (cuts) will be concatenated " - # "to minimize the amount of padding.", - # ) - # group.add_argument( - # "--duration-factor", - # type=float, - # default=1.0, - # help="Determines the maximum duration of a concatenated cut " - # "relative to the duration of the longest cut in a batch.", - # ) - # group.add_argument( - # "--gap", - # type=float, - # default=1.0, - # help="The amount of padding (in seconds) inserted between " - # "concatenated cuts. This padding is filled with noise when " - # "noise augmentation is used.", - # ) group.add_argument( "--on-the-fly-feats", type=str2bool, @@ -256,20 +234,6 @@ class AsrDataModule: else: logging.info("Disable MUSAN") - # if self.args.concatenate_cuts: - # logging.info( - # f"Using cut concatenation with duration factor " - # f"{self.args.duration_factor} and gap {self.args.gap}." - # ) - # # Cut concatenation should be the first transform in the list, - # # so that if we e.g. mix noise in, it will fill the gaps between - # # different utterances. 
- # transforms = [ - # CutConcatenate( - # duration_factor=self.args.duration_factor, gap=self.args.gap - # ) - # ] + transforms - input_transforms = [] if self.args.enable_spec_aug: logging.info("Enable SpecAugment") @@ -426,32 +390,12 @@ class AsrDataModule: def test_cuts(self) -> CutSet: logging.info("About to get test cuts") if self.args.on_the_fly_feats: - # dataset = load_dataset(self.args.huggingface_dataset_path_or_name, streaming=True, split=partition) - i, num_digits = 0, 5 - idx = f"{i}".zfill(num_digits) - parquet_files = [ - f"data/train-{idx}-of-01601.parquet", - ] - parquet_files = [ - f"{self.args.huggingface_dataset_path_or_name}/{f}" - for f in parquet_files - ] - file_name = parquet_files[0] - logging.info(f"Loading dataset from {file_name}") - dataset = load_dataset( - "parquet", data_files=parquet_files, streaming=True, split="train" - ) - cut_set = CutSet.from_huggingface_dataset( - dataset, audio_key=self.args.audio_key, text_key=self.args.text_key - ) - if self.args.resample_to_16kHz: - cut_set = cut_set.resample(16000) - return {"test": cut_set} + pass else: - # return {'test':load_manifest_lazy(self.args.manifest_dir / "cuts_belle.00000.jsonl.gz")} - # return {'test':load_manifest_lazy(self.args.manifest_dir / "cuts_test_small.jsonl.gz")} return { - "test": load_manifest_lazy("data/fbank_test/belle_cuts.00000.jsonl.gz") + "test": load_manifest_lazy( + self.args.manifest_dir / "cuts_belle_test.jsonl.gz" + ) } @lru_cache() @@ -461,7 +405,7 @@ class AsrDataModule: pass else: return load_manifest_lazy( - self.args.manifest_dir / "cuts_belle.00000.jsonl.gz" + self.args.manifest_dir / "cuts_belle_test.jsonl.gz" ) @lru_cache() diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/decode.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/decode.py index acd882d18..e4dccf081 100755 --- a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/decode.py +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/decode.py @@ -20,30 +20,27 @@ """ Usage: # Command for decoding using fine-tuned models: +huggingface-cli download --local-dir models/whisper yuekai/icefall_asr_multi-hans-zh_whisper +# Cosyvoice pretrained model for speech token2wav module +huggingface-cli download --local-dir models/CosyVoice-300M-SFT FunAudioLLM/CosyVoice-300M-SFT +# Qwen Pretrained model +huggingface-cli download --local-dir models/Qwen2.5-0.5B-Instruct Qwen/Qwen2.5-0.5B-Instruct +# Qwen-Omni like speech2speech model trained on worstchan/Belle_1.4M-SLAM-Omni +huggingface-cli download --local-dir models/qwen-omni-like-speech2speech-belle-1.4M yuekai/qwen-omni-like-speech2speech-belle-1.4M -pip install huggingface_hub['cli'] -mkdir -p models/whisper models/qwen models/checkpoint -huggingface-cli download --local-dir models/checkpoint yuekai/icefall_asr_aishell_whisper_qwen2_1.5B - -# For aishell fine-tuned whisper model -huggingface-cli download --local-dir models/whisper yuekai/icefall_asr_aishell_whisper exp_large_v2/whisper-large-v2-aishell1-epoch-10-avg-6.pt -# For multi-hans fine-tuned whisper model -# huggingface-cli download --local-dir models/whisper yuekai/icefall_asr_multi-hans-zh_whisper v1.1/whisper-large-v2-multi-hans-zh-epoch-3-avg-10.pt - -huggingface-cli download --local-dir models/qwen Qwen/Qwen2-7B-Instruct - -mkdir -p whisper_llm_zh/exp_aishell_whisper_qwen2_1.5B -ln -s models/checkpoint/epoch-10-avg-5.pt whisper_llm_zh/exp_aishell_whisper_qwen2_1.5B/epoch-999.pt - -python3 ./whisper_llm_zh/decode.py \ - --max-duration 80 \ - --exp-dir whisper_llm_zh/exp_aishell_whisper_qwen2_1.5B \ - --speech-encoder-path-or-name 
models/whisper/exp_large_v2/whisper-large-v2-aishell1-epoch-10-avg-6.pt \ - --llm-path-or-name models/qwen \ - --epoch 999 --avg 1 \ - --manifest-dir data/fbank \ - --use-flash-attn True \ - --use-lora True --dataset aishell +cd $exp_dir && ln -s ../../models/qwen-omni-like-speech2speech-belle-1.4M/pytorch_model.bin epoch-999.pt && cd - +python3 ./qwen_omni/decode.py \ +--max-duration 1 \ +--exp-dir $exp_dir \ +--speech-encoder-path-or-name models/whisper/v1.1/whisper-large-v2-multi-hans-zh-epoch-3-avg-10.pt \ +--llm-path-or-name models/Qwen2.5-0.5B-Instruct \ +--epoch 999 --avg 1 \ +--manifest-dir data/fbank \ +--use-flash-attn True \ +--method e2e-epoch10_speech2speech \ +--enable-speech-output True \ +--token2wav-path models/CosyVoice-300M-SFT \ +--use-lora True """ import argparse @@ -183,11 +180,6 @@ def get_model(params, device): attn_implementation = "eager" torch_dtype = torch.float16 - # codec_lm = AutoModelForCausalLM.from_pretrained( - # params.llm_path_or_name, - # attn_implementation=attn_implementation, - # torch_dtype=torch_dtype, - # ) codec_vocab_size = 4096 + 4 config = Qwen2Config( vocab_size=codec_vocab_size, @@ -198,39 +190,19 @@ def get_model(params, device): intermediate_size=2048, max_position_embeddings=4096, ) - # codec_lm = Qwen2ForCausalLM(config=config) - # Pass attn_implementation and torch_dtype to the constructor - # Use AutoModelForCausalLM.from_config for more generality + codec_lm = AutoModelForCausalLM.from_config( config=config, attn_implementation=attn_implementation, torch_dtype=torch_dtype, ) - # cosyvoice2_token_size = 6561 + codec_lm.resize_token_embeddings(codec_vocab_size) codec_lm.vocab_size = codec_vocab_size codec_lm.config.pad_token_id = codec_vocab_size - 1 codec_lm.config.eos_token_id = codec_vocab_size - 2 codec_lm.config.bos_token_id = codec_vocab_size - 3 codec_lm.config.mask_token_id = codec_vocab_size - 4 - # if params.use_lora: - # lora_config = LoraConfig( - # r=64, - # lora_alpha=16, - # target_modules=[ - # "q_proj", - # "k_proj", - # "v_proj", - # "o_proj", - # "up_proj", - # "gate_proj", - # "down_proj", - # ], - # lora_dropout=0.05, - # task_type="CAUSAL_LM", - # ) - # codec_lm = get_peft_model(codec_lm, lora_config) - # codec_lm.print_trainable_parameters() else: codec_lm = None @@ -373,13 +345,6 @@ def get_parser(): default="/workspace/CosyVoice-300M-SFT", help="The path to the token2wav model", ) - # parser.add_argument( - # "--dataset", - # type=str, - # default="aishell", - # choices=["aishell", "speechio", "wenetspeech_test_meeting", "multi_hans_zh"], - # help="The dataset to decode", - # ) add_model_arguments(parser) return parser @@ -474,12 +439,6 @@ def decode_one_batch( chat_rounds = [cut.custom["round"] for cut in batch["supervisions"]["cut"]] - # messages = [ - # [ - # {"role": "user", "content": f"{DEFAULT_SPEECH_TOKEN}请转写音频为文字"}, - # {"role": "assistant", "content": ""}, - # ] - # ] * len(feature) questions_with_history = [ cut.custom["question"] for cut in batch["supervisions"]["cut"] ] @@ -496,7 +455,6 @@ def decode_one_batch( history_question_answer = history_contexts[i].split("USER:") history_question_answer = [item for item in history_question_answer if item] for j in range(total_round - 1): - # USER: 生成一个关于夏天的诗歌。 ASSISTANT: 夏日炎炎,万物生长,阳光明媚,享受着夏日的美好时光。 USER: 给我列举一些新闻头条。 ASSISTANT: 当今社会的新闻永远不会停。 question_answer = history_question_answer[j].split("ASSISTANT:") message += [ {"role": "user", "content": question_answer[0].strip()}, @@ -504,7 +462,6 @@ def decode_one_batch( ] message += [ {"role": "user", "content": 
f"{DEFAULT_SPEECH_TOKEN}"}, - # {"role": "user", "content": f"{last_questions[i]}"}, {"role": "assistant", "content": ""}, ] print(f"message: {message}, batch_size {len(chat_rounds)}") @@ -525,13 +482,6 @@ def decode_one_batch( audio_tokens = torch.tensor(audio_tokens, dtype=torch.int32).unsqueeze(0) audio_hat = audio_decode_cosyvoice(audio_tokens, token2wav_model) sf.write(speech_file_name, audio_hat.squeeze(0).cpu().numpy(), 22050) - # with open(speech_token_file_name, 'w') as f: - # # save_path = params.exp_dir / f"speech_output/{cut_id}.wav" - # #torchaudio.save(save_path, speech_output.cpu(), 16000) - # # print(f"speech_output: {generated_speech_output}, cut_id: {cut_id}") - # save_str = " ".join([str(i) for i in generated_speech_output]) - # f.write(f"{cut_id}|{save_str}\n") - else: generated_ids = model.decode( feature, input_ids.to(device, dtype=torch.long), attention_mask.to(device) @@ -560,43 +510,6 @@ def decode_dataset( Returns: Return a dict, whose key may be "beam-search". """ - - def normalize_text_alimeeting(text: str, normalize: str = "m2met") -> str: - """ - Text normalization similar to M2MeT challenge baseline. - See: https://github.com/yufan-aslp/AliMeeting/blob/main/asr/local/text_normalize.pl - """ - if normalize == "none": - return text - elif normalize == "m2met": - import re - - text = text.replace(" ", "") - text = text.replace("", "") - text = text.replace("<%>", "") - text = text.replace("<->", "") - text = text.replace("<$>", "") - text = text.replace("<#>", "") - text = text.replace("<_>", "") - text = text.replace("", "") - text = text.replace("`", "") - text = text.replace("&", "") - text = text.replace(",", "") - if re.search("[a-zA-Z]", text): - text = text.upper() - text = text.replace("A", "A") - text = text.replace("a", "A") - text = text.replace("b", "B") - text = text.replace("c", "C") - text = text.replace("k", "K") - text = text.replace("t", "T") - text = text.replace(",", "") - text = text.replace("丶", "") - text = text.replace("。", "") - text = text.replace("、", "") - text = text.replace("?", "") - return text - results = [] num_cuts = 0 @@ -634,7 +547,6 @@ def decode_dataset( this_batch = [] assert len(hyps) == len(texts) for cut_id, hyp_words, ref_text in zip(cut_ids, hyps, texts): - # ref_text = normalize_text_alimeeting(ref_text) ref_words = ref_text.split() print(f"ref: {ref_text}") print(f"hyp: {''.join(hyp_words)}") @@ -673,7 +585,6 @@ def save_results( errs_filename = ( params.log_dir / f"errs-{test_set_name}-{key}-{params.suffix}.txt" ) - # we compute CER for aishell dataset. results_char = [] for res in results: results_char.append((res[0], list("".join(res[1])), list("".join(res[2])))) @@ -732,11 +643,8 @@ def main(): num_param = sum([p.numel() for p in model.parameters()]) logging.info(f"Number of model parameters: {num_param}") - # we need cut ids to display recognition results. 
args.return_cuts = True - data_module = AsrDataModule(args) - # data_module = MultiDataset(args.manifest_dir) def remove_long_utt(c: Cut): # Keep only utterances with duration in 30 seconds @@ -748,13 +656,6 @@ def main(): return False return True - # if params.dataset == "aishell": - # test_sets_cuts = data_module.aishell_test_cuts() - # elif params.dataset == "speechio": - # test_sets_cuts = data_module.speechio_test_cuts() - # elif params.dataset == "wenetspeech_test_meeting": - # test_sets_cuts = data_module.wenetspeech_test_meeting_cuts() - # else: test_sets_cuts = data_module.test_cuts() test_sets = test_sets_cuts.keys() diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/model.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/model.py index 97870337d..a0efbd319 100644 --- a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/model.py +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/model.py @@ -1,4 +1,4 @@ -from typing import List, Tuple # Added for type hints +from typing import List, Tuple import torch from torch import nn @@ -78,7 +78,6 @@ class SPEECH_LLM(nn.Module): self.codec_lm_head = nn.Linear( self.codec_lm.config.hidden_size, self.codec_lm.config.vocab_size ) - # to torch.float16 self.speech_token_projector = self.speech_token_projector.to( dtype=torch.float16 ) @@ -498,20 +497,6 @@ class SPEECH_LLM(nn.Module): pad_token_id=self.llm.config.pad_token_id, ) - # generated_ids = self.llm.generate( - # inputs_embeds=inputs_embeds, - # max_new_tokens=kwargs.get("max_new_tokens", 200), - # num_beams=kwargs.get("num_beams", 1), - # do_sample=kwargs.get("do_sample", False), - # min_length=kwargs.get("min_length", 1), - # top_p=kwargs.get("top_p", 1.0), - # repetition_penalty=kwargs.get("repetition_penalty", 1.0), - # temperature=kwargs.get("temperature", 1.0), - # length_penalty=kwargs.get("length_penalty", 1.0), - # bos_token_id=self.llm.config.bos_token_id, - # eos_token_id=self.llm.config.eos_token_id, - # pad_token_id=self.llm.config.pad_token_id, - # ) return generated_ids def decode_with_speech_output( @@ -520,7 +505,7 @@ class SPEECH_LLM(nn.Module): input_ids: torch.LongTensor = None, # Prompt input_ids attention_mask: torch.Tensor = None, # Prompt attention_mask max_text_new_tokens: int = 1024, - max_speech_new_tokens: int = 1024, # Max length for speech tokens + max_speech_new_tokens: int = 2048, # Max length for speech tokens llm_kwargs: dict = None, # Kwargs for text LLM generate codec_lm_kwargs: dict = None, # Kwargs for codec LM (e.g., temperature for sampling) - NOT IMPLEMENTED YET ) -> Tuple[torch.LongTensor, List[List[int]]]: @@ -602,7 +587,7 @@ class SPEECH_LLM(nn.Module): eos_token_id = self.llm.config.eos_token_id eos_token_embedding = self.llm.get_input_embeddings()( torch.tensor([[eos_token_id]], device=device) - ) # 1,D + ) assert ( generated_text_ids[0, -1] == eos_token_id ), f"Last token is not EOS: {generated_text_ids[0, -1]} != {eos_token_id}" @@ -610,7 +595,7 @@ class SPEECH_LLM(nn.Module): token_hidden_states[0].to(self.llm.device) for token_hidden_states in text_outputs.hidden_states ] - # shift one for thinker token_embeds, drop the first embeds, and add the eos token + first_thinker_token_embed = torch.cat( [ thinker_token_embeds_org[0][:, 1:], @@ -628,7 +613,7 @@ class SPEECH_LLM(nn.Module): token_hidden_states[-1].to(self.llm.device) for token_hidden_states in text_outputs.hidden_states ] - # thinker_reply_part = torch.cat(thinker_hidden_states[1:], dim=1) + torch.cat(thinker_token_embeds[1:], dim=1) + thinker_reply_part = [ torch.cat( [ @@ -651,12 +636,8 @@ class 
SPEECH_LLM(nn.Module): dim=-1, ) - thinker_prompt_part = self.speech_token_projector( - thinker_prompt_part - ) # [B, S_full, D_codec] - thinker_reply_part = self.speech_token_projector( - thinker_reply_part - ) # [B, S_full, D_codec] + thinker_prompt_part = self.speech_token_projector(thinker_prompt_part) + thinker_reply_part = self.speech_token_projector(thinker_reply_part) thinker_prompt_part_seq_len = thinker_prompt_part.shape[1] talker_input_ids = torch.full( @@ -666,9 +647,7 @@ class SPEECH_LLM(nn.Module): device=self.llm.device, ) talker_input_ids[:, -1] = self.codec_lm.config.bos_token_id - talker_inputs_embeds = self.codec_lm.get_input_embeddings()( - talker_input_ids - ) # [B, S_full, D_codec] + talker_inputs_embeds = self.codec_lm.get_input_embeddings()(talker_input_ids) thinker_input_embeds = torch.cat( [ thinker_prompt_part, @@ -677,68 +656,43 @@ class SPEECH_LLM(nn.Module): dim=1, ) talker_inputs_embeds += thinker_input_embeds - thinker_reply_part = thinker_reply_part[ - :, delay_step + 1 :, : - ] # [B, S_full, D_codec] + thinker_reply_part = thinker_reply_part[:, delay_step + 1 :, :] past_key_values = None - # generated_speech_tokens_list = [[] for _ in range(batch_size)] - # unfinished_sequences = torch.ones(batch_size, dtype=torch.long, device=device) + generated_speech_tokens_list = [] next_token_ids = None - # text_context_len = projected_text_embeds.shape[1] # S_full + for t in range(max_speech_new_tokens): - # Get embedding for the *current* input token ID (initially BOS, then generated tokens) - # current_speech_embeds = self.codec_lm.get_input_embeddings()(current_speech_input_ids) # [B, 1, D_codec] if t > 0: talker_inputs_embeds = self.codec_lm.get_input_embeddings()( next_token_ids - ) # [B, 1, D_codec] + ) if thinker_reply_part.shape[1] > 0: talker_inputs_embeds += thinker_reply_part[:, :1, :] - thinker_reply_part = thinker_reply_part[ - :, 1:, : - ] # Remove the first token for next step - # # Add the projected text embedding corresponding to the current timestep `t` - # if t < text_context_len: - # # Text context from the full generated text sequence - # current_text_context_embed = projected_text_embeds[:, t:t+1, :] # [B, 1, D_codec] - # inputs_embeds = current_speech_embeds + current_text_context_embed - # else: - # # No more text context to add - # inputs_embeds = current_speech_embeds + thinker_reply_part = thinker_reply_part[:, 1:, :] - # Forward pass through codec LM for one step - # We provide inputs_embeds directly, bypassing prepare_inputs_for_generation codec_outputs = self.codec_lm( - inputs_embeds=talker_inputs_embeds, # Combined embedding for this step + inputs_embeds=talker_inputs_embeds, past_key_values=past_key_values, use_cache=True, return_dict=True, output_hidden_states=True, - # No attention mask needed here when using past_key_values and single token input ) - last_token_hidden_state = codec_outputs.hidden_states[-1][ - :, -1, : - ] # [B, D_codec] #TODO: check shape here - # Get logits for the *last* token generated in this step - next_token_logits = self.codec_lm_head( - last_token_hidden_state - ) # Use -1 index - # suppress tokens between 4096:len(vocab)-3 - # next_token_logits[:, 4096:-3] = -float("Inf") # TODO: where we should supress tokens? 
+ last_token_hidden_state = codec_outputs.hidden_states[-1][:, -1, :] + next_token_logits = self.codec_lm_head(last_token_hidden_state) + next_token_ids = topk_sampling( next_token_logits, ) - # print(next_token_ids, "next_token_ids", t, next_token_ids.shape) if next_token_ids[0, 0] == self.codec_lm.config.eos_token_id: break - # current_speech_input_ids = next_token_ids # Use the newly generated token ID as input for next step + past_key_values = codec_outputs.past_key_values # Update KV cache generated_speech_tokens_list.append( next_token_ids.squeeze(1).cpu().tolist()[0] ) - # --- 6. Return Results --- + return generated_text_ids, generated_speech_tokens_list diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py index 1438a2624..95ce16d0e 100755 --- a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py @@ -17,28 +17,22 @@ # limitations under the License. """ Usage: -# fine-tuning with whisper and Qwen2 -pip install huggingface_hub['cli'] -mkdir -p models/whisper models/qwen +# For Chinese dataset, you can use the following command to download the Chinese fine-tuned whisper model. +huggingface-cli download --local-dir models/whisper yuekai/icefall_asr_multi-hans-zh_whisper +# Qwen Pretrained model +huggingface-cli download --local-dir models/Qwen2.5-0.5B-Instruct Qwen/Qwen2.5-0.5B-Instruct -# For aishell fine-tuned whisper model -huggingface-cli download --local-dir models/whisper yuekai/icefall_asr_aishell_whisper exp_large_v2/whisper-large-v2-aishell1-epoch-10-avg-6.pt -# For multi-hans fine-tuned whisper model -# huggingface-cli download --local-dir models/whisper yuekai/icefall_asr_multi-hans-zh_whisper v1.1/whisper-large-v2-multi-hans-zh-epoch-3-avg-10.pt - -# huggingface-clie download --local-dir models/qwen Qwen/Qwen2-7B-Instruct -huggingface-clie download --local-dir models/qwen Qwen/Qwen2-1.5B-Instruct - -torchrun --nproc_per_node 8 ./whisper_llm_zh/train.py \ - --max-duration 200 \ - --exp-dir ./whisper_llm_zh/exp_test \ - --speech-encoder-path-or-name models/whisper/exp_large_v2/whisper-large-v2-aishell1-epoch-10-avg-6.pt \ - --llm-path-or-name Qwen/Qwen2-1.5B-Instruct \ - --manifest-dir data/fbank \ - --deepspeed \ - --deepspeed_config ./whisper_llm_zh/ds_config_zero1.json \ - --use-flash-attn True \ - --use-lora True --unfreeze-llm True +torchrun --nproc_per_node $ngpu ./qwen_omni/train.py \ + --max-duration 50 \ + --enable-musan False \ + --exp-dir $exp_dir \ + --speech-encoder-path-or-name models/whisper/v1.1/whisper-large-v2-multi-hans-zh-epoch-3-avg-10.pt \ + --llm-path-or-name Qwen/Qwen2.5-0.5B-Instruct \ + --manifest-dir data/fbank \ + --deepspeed \ + --deepspeed_config ./qwen_omni/ds_config_zero1.json \ + --use-flash-attn True \ + --use-lora True --unfreeze-llm True --unfreeze-speech-projector True --enable-speech-output True """ import argparse @@ -52,7 +46,6 @@ from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import deepspeed -import k2 import torch import torch.multiprocessing as mp import torch.nn as nn @@ -66,8 +59,6 @@ from lhotse.cut import Cut from lhotse.dataset.sampling.base import CutSampler from lhotse.utils import fix_random_seed from model import IGNORE_TOKEN_ID, SPEECH_LLM, EncoderProjector - -# from multi_dataset import MultiDataset from peft import LoraConfig, get_peft_model from torch import Tensor from torch.utils.tensorboard import SummaryWriter @@ -330,9 +321,6 @@ def compute_loss( truncation=False, ) ) - 
# padding texts to the same length, texts is a list of list, padding with tokenzier.pad_token_id - # remove too long text - # texts = [ text for text in texts if len(text) < 1024 ] if len(texts) != len(messages): logging.warning(f"Remove too long text, {messages} ") max_len_texts = max([len(text) for text in texts]) @@ -347,7 +335,7 @@ def compute_loss( for text in texts ] input_ids = torch.tensor(texts, dtype=torch.int) - # response = tokenizer.batch_decode(input_ids, skip_special_tokens=True)[0] + target_ids = input_ids.clone() target_ids[target_ids == tokenizer.pad_token_id] = IGNORE_TOKEN_ID # mask all tokens before token_id 151646 with IGNORE_TOKEN_ID @@ -396,8 +384,6 @@ def compute_loss( history_contexts = [ question.rsplit(":", 1)[0].strip() for question in questions_with_history ] - # USER: 生成一个关于夏天的诗歌。 ASSISTANT: 夏日炎炎,万物生长,阳光明媚,享受着夏日的美好时光。 USER: 给我列举一些新闻头条。 ASSISTANT: 当今社会的新闻永远不会停。: 告诉我如何烹饪鸡肉 - # : 对以下句子进行鉴赏:他心地善良。输出结果为"他是一个有善心的人。 messages = [] for i, total_round in enumerate(chat_rounds): @@ -406,7 +392,6 @@ def compute_loss( history_question_answer = history_contexts[i].split("USER:") history_question_answer = [item for item in history_question_answer if item] for j in range(total_round - 1): - # USER: 生成一个关于夏天的诗歌。 ASSISTANT: 夏日炎炎,万物生长,阳光明媚,享受着夏日的美好时光。 USER: 给我列举一些新闻头条。 ASSISTANT: 当今社会的新闻永远不会停。 question_answer = history_question_answer[j].split("ASSISTANT:") message += [ {"role": "user", "content": question_answer[0].strip()}, @@ -683,7 +668,6 @@ def run(rank, world_size, args): if params.use_flash_attn: attn_implementation = "flash_attention_2" - # torch_dtype=torch.bfloat16 FIX ME torch_dtype = torch.float16 tokenizer.padding_side = "left" @@ -724,14 +708,6 @@ def run(rank, world_size, args): special_tokens_dict = {"additional_special_tokens": [DEFAULT_SPEECH_TOKEN]} tokenizer.add_special_tokens(special_tokens_dict) - # original_tokenizer_vocab_size = len(tokenizer) - # cosyvoice2_token_size = 6561 - # new_tokens = [f"<|s_{i}|>" for i in range(cosyvoice2_token_size)] + [ - # "<|SPEECH_GENERATION_START|>" - # ] - # num_added_tokens = tokenizer.add_tokens(new_tokens) - # model.resize_token_embeddings(len(tokenizer)) - # model.vocab_size = len(tokenizer) llm.config.pad_token_id = tokenizer.pad_token_id llm.config.default_speech_token_id = tokenizer.convert_tokens_to_ids( @@ -755,11 +731,6 @@ def run(rank, world_size, args): attn_implementation = "eager" torch_dtype = torch.float16 - # codec_lm = AutoModelForCausalLM.from_pretrained( - # params.llm_path_or_name, - # attn_implementation=attn_implementation, - # torch_dtype=torch_dtype, - # ) codec_vocab_size = 4096 + 4 # TODO: modify above vocab size or supress_tokens when decoding config = Qwen2Config( @@ -771,39 +742,19 @@ def run(rank, world_size, args): intermediate_size=2048, max_position_embeddings=4096, ) - # codec_lm = Qwen2ForCausalLM(config=config) - # Pass attn_implementation and torch_dtype to the constructor - # Use AutoModelForCausalLM.from_config for more generality + codec_lm = AutoModelForCausalLM.from_config( config=config, attn_implementation=attn_implementation, torch_dtype=torch_dtype, ) - # cosyvoice2_token_size = 6561 + codec_lm.resize_token_embeddings(codec_vocab_size) codec_lm.vocab_size = codec_vocab_size codec_lm.config.pad_token_id = codec_vocab_size - 1 codec_lm.config.eos_token_id = codec_vocab_size - 2 codec_lm.config.bos_token_id = codec_vocab_size - 3 codec_lm.config.mask_token_id = codec_vocab_size - 4 - # if params.use_lora: - # lora_config = LoraConfig( - # r=64, - # lora_alpha=16, - # 
target_modules=[ - # "q_proj", - # "k_proj", - # "v_proj", - # "o_proj", - # "up_proj", - # "gate_proj", - # "down_proj", - # ], - # lora_dropout=0.05, - # task_type="CAUSAL_LM", - # ) - # codec_lm = get_peft_model(codec_lm, lora_config) - # codec_lm.print_trainable_parameters() else: codec_lm = None @@ -856,7 +807,6 @@ def run(rank, world_size, args): # f"Exclude cut with ID {c.id} from training. Duration: {c.duration}" # ) return False - # cut.custom["answer_cosyvoice_speech_token"] for cut in batch["supervisions"]["cut"] codec_len = len(c.custom["answer_cosyvoice_speech_token"]) if codec_len > 2200: logging.warning( @@ -873,7 +823,7 @@ def run(rank, world_size, args): if params.sampler_state_dict_path: sampler_state_dict = torch.load(params.sampler_state_dict_path) sampler_state_dict["max_duration"] = params.max_duration - # TODO: load sampler state dict + train_dl = data_module.train_dataloaders( train_cuts, sampler_state_dict=sampler_state_dict ) From 08be51a91f58686572381cc724096dd139481488 Mon Sep 17 00:00:00 2001 From: root Date: Tue, 29 Apr 2025 10:09:57 +0000 Subject: [PATCH 27/57] change pic --- egs/speech_llm/SPEECH2SPEECH/README.md | 2 +- .../SPEECH2SPEECH/assets/framework.jpg | Bin 339257 -> 0 bytes .../SPEECH2SPEECH/assets/framework.png | Bin 0 -> 103365 bytes 3 files changed, 1 insertion(+), 1 deletion(-) delete mode 100644 egs/speech_llm/SPEECH2SPEECH/assets/framework.jpg create mode 100644 egs/speech_llm/SPEECH2SPEECH/assets/framework.png diff --git a/egs/speech_llm/SPEECH2SPEECH/README.md b/egs/speech_llm/SPEECH2SPEECH/README.md index cc5e60063..9a0b62914 100644 --- a/egs/speech_llm/SPEECH2SPEECH/README.md +++ b/egs/speech_llm/SPEECH2SPEECH/README.md @@ -17,7 +17,7 @@ The following table lists the folders for different tasks.

-<img src="assets/framework.jpg">
+<img src="assets/framework.png">
diff --git a/egs/speech_llm/SPEECH2SPEECH/assets/framework.jpg b/egs/speech_llm/SPEECH2SPEECH/assets/framework.jpg deleted file mode 100644 index d708bb256f799cfd07f32c779cf1bf315a3376f5..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 339257 zcmeFYcT`i+*EVL5fs~jV1!3B2`d|fCxxO2?<4d2LS~H3kXV+-U%HM z0qMOYG%10E5(6pB_5EheteIIeYrgsGU9)~k?mxLVH)o%_&)(1T?0rtAPL=@n+qwq2 z00aU79)oY-WEQvy(9+QS`+zSx@NkLm0CEZ(?Y{y4uP+DAW>gTeCv z9mgq7vCG#_bD2J&7x&^;3{CmSAaSFjo#);#PEyIoGf}j{48#ED7&kn?}mK_F22*(8D0} zDg;{hryNF{#P7D{Rn^JOHI(p=DnBjb$}5L_NRupP{x=fmUVC^YYD}B}Va-mT$+vKcNf;#57U|;h zs-+~L*4*}f_gcrYcWp0IfAT8@`k9a`f66ZrQ6v+xERj*tEAVg6jAv9?e#{p`b!H>) zAD8zT93J|NIjpYdIG=7nMxyFG+w9_Aby=B2BBWdc1l|maM@~@HOC;a@K;O0L72XTx zQ-?axgz*RACZf8R1EW9-zG}ipH}y8lYSQVg0YtyGHz<8 z@&|^y2c2%}dbR1wo5y|dr|dm}QGR=88@r$5qxC!gT2Mxh`k?He0odJ|@e;Z65|1Y> zl?^dZ!`2qAOM3UT@%TOIygT+MTgO*R`YgiOwAe1-uABFIvk`yt}}X z61j$~uF|hKzsUXkYo?Qk`dOdRtRegm)(3|~_a+^j0JK|FjZrMwI@pi(jg?NNVSt8_ zEJWb8nBJZtuiasG;Nw^9F0_+>%9D_;BD0u%!hPa2Z^W`^)J*AQXZDtbZGX3w;~gYo zaZ6kiFBEa3ioNU%E9%0a+k7hvbp*wNayffJ9!|e}R`rp+l2mNps%?9pNON6qrRdLE zMjLiTMxo#IUrcorE$=bpT0U|};I7ilIYs+b`uhj^VVm-hR#~Dfrl4ho%7XWg!u)tX zr{{fq=`+t%M2s4AwPN;tiSDb=l$1wi0Jo_Qz-|8X1Q0m^mhwS2MnL+#3j0g@mZ2Ov z=^7@H9dq37ACeU9J0ykT(Q#~u`|;)<5kF!4WJ~-O`t`y^JTkP5D!gD+)X$Tf5+qbC zVRb6uQ>JfGM5iZA?QWqE6F_(-JE(n$I*epR&~(z}YU2~L(S`LfYheqk{U(Z{N3pj; z-&54?r?vHA>g8#W?bndbs2G9n*7T2LlaT>RSMGz$&+g8-V)T}o1lH8rr9~mRCbZJA zz%gg6?9T(}OgK6-R$Fls$>cl46^uecA ze|!>|Tb>Yz6Nx>CBUIp3O^ojG?G+fx8W#Wf9Nc}o`ga%pE1gi-TtIgaEQBgfw8JDt zeVT-eVa}>{HEJb%w{;CL(0YPn)Ri%wghW0-N4}&9(U3V`PD_j5vIKaYyS6~sJC)9N@$}kVMX#L6g1Zo%m%ZKPGfL>pwt{df*O0)5wqP#$W~Zy4 z_SQT0RMis3cT7(Db9^ombdylrI)O-p;H_CUTqj|vQwVTW{Y}p()R>lbEzd81b?-;c$wza`HQ+1%Me`y zEx-uy9JagM2|9=zC97~e&DbvXN*H?Te88xDhEPhblLABAU%ABb1L@b~Cm#d%uh&eF z)k`e48n2G*M3Ri_Ez!wAim5QRF-Q;DlkypHmO7|moc)!23lAkrOlZVo-QDD;$}Tnc z(d@`ZOL(iTOG0cz3pV{{Cf+$0SsJWc&Ua+8_f?l2II$Gc(I_%3#ZRvDk$HEi2J)Rq z66??j@QF3py$CTP!a@b$7jSn_>{f z1on17XC;<~dQ1I$?Pvq{dMzh}&%IOr8|h}bVm64W7>phWVHBl#p){IBcm>G*P!XA# zMQf+w8?@QyiRC|u(fc|cjj9B{>Ha>ZU*UrsFKv1@rx!zWgoI@$xAveIdGeQqQEI1v z9rYdd5AESz?D-GSb3`+e8Ci}vzii~wVatTqBTPms>jaElJOQe9KJ4Al7k6m2NEc+g zc4Tn4G4N^tVRQnt%OF<{TGf%0HM`WDdnii~Bs9}wRIykbGg>eDQnd*u++Y-2W39(A zZk}C?CF+~_!dS!S`v3y_H9qcRc+K`CuN={B5?Q$ zIRB|x6BB@K!O1V}RC(nG7GsRloWIRpHjI0Hj1ROwi#u0@H`Crtn%8KK2;Q2yitP3#^FKup*% zBBX7I9U@TT;Djgf7R3FP+w9Bj$KUS`7UWtk-wFa8_fa^Wn{yNU-8T6^>${pOYnf@v(lZ+ zFG?oxc5@L~FY4zUqMa7``DY6Gg+Dd-w~OHJ4o%Nucq_APQ}B9*iFo4mZ|z=RdU|sp zQop=DN(-^9~=jlT=nfx3YuMqviW8i%GdAu{$2-n#z!dIOXG1D7k1r>hQ19AS``DK1iIoFLR zNl}kWH$2SXDdd62b9t3Nt&;dlX-nACB%#ud$YPzInm9$YEs#) zi}QMrgF=QX-K)gc0rbg%q5{Cor~2|Z6hB-9C(@o4SkbZ+0en7 zB2g#Zdf93Lf?tck!`d57Vw;tMDFq*%eP_ir5rY~*oNln6(&Cf>7M!#u88x(NA< z81x{apsIOKGHT%N_Ur8wv)*4N>a+!bZ*`xOzW7TgzeMGGVD#l?%g^n|vl9guwJBB8 z1ZPgL9>Xe6Uvaa&lue>JZ4(*GFX3OGCKh@o;dYo(WJ;g}h>&3PFc;9_e)DDxEOt_m zR8HBe;#=G10j8F7I!pucSOdv~hsFxt5s^$e=*U1c=^0rDR;>i*rA{>pU*Tka@nwMe zYxM1}T>Z>1xDS4U-q+}ILIVhDL@xU6b&>`V*4}EXqL0GCg0`DvCsXU*o$eP8}L-~;sApE6kt zc#fV-k8=b;!C(a?>VuB5|84-mwM=2>MYy9rwr@B7p>WMV{D@ocyb8szl`gxh-oYg{ za1MOgKS@O;y#%7dZrj|>S()yBVXlBO^&lfFg$`wBgZFVstt;udQ&+f71_{|v+3m$%N! 
znzucz2xHuR8ZET^%P?@|!YHv~cI+;Ft*H%thg}kityR;t(w0cyBVZTvIfl~!U6swp zyLmL_6j32*+1_&~6s8gWhyr4{6tXp?6aghOkFn(~BSkheE)^BG94ujd0?y2w;S1w= z$;~5L_P}F^{~L@E_8dh2NTI_ABV>L&)Nt(3Rj8Nky|+|ls%gw=d$hlg zg@7P4ZgW{YAErFKmBYoYByh58#xCMu5-FpZaBoA5vz2iwH1W}Q#+eUb?C1vk9+(fj zf|9wz2_AUi#w?w}?yDNIg~4HlB+VvG_`1-L@&$I`)rv%)*kdJ=F*!dsX16WDx1P_- z>@7bu)Fm4TReV47$l}G%>vSCLr$a89n)w+$?7ul$rowuAZ_(s??M=zQ=J!fWZ-o~t zU4~pcRN2p{0HKK_CEHidfuy*ONq?QG&fGfKv$x4+44c$p)=LVWyGuWE_!EAQSb$Cn zTc8^wJ1xtZkOVtld8QMZ7OVPUnQOOGr29Cla+;&n?#Hj-7>l&|$PPHYo>ICk?bCSO zY)r8F8Ocfuk zm%#q^#yrAM1FF=YjzOlHnV$)q^Zu;%V=4U6Blvw14cr2UY)28L13HoH z;N)J>78;SzNTqxt7&T38W?DQj&G29U6y`S8%mEq-Q1TGsfw>}37dy^_jT+Z=YnLZY zwGGvl1y5(b3J!2eq)ySgs{c6ThU{iV9K3Rh7+C37>h|Qiz2(U~AZA9FT=C%|wwLKc zY9Nq>rZSU*aC}%+bX*cUSsPD>5Fc^hu^M#ESa9=CeEhB3N$v4I6<^-B^Y=C5ZxO07QnVjY$uR zH!!X(j|pyq5n*(Z!z6LI*1G#Rf7TQH@i#u7OW+&4J9ekdCTBVPH@P1tL1ikyWn%os ztVH5hveBM#Jk2p!`1L)@m~@l3pVlMdX-Z&l3H7#>oV)Zh+-5<{q(}D#Wt(K*dSIOd zKj$*ieongE879;HKgi8Lg7cr_zwI3G94+JP2z^8}$i5Ts(ru{FEaPzWy!Fi-Js-^3 zr^oXJejSl1;&0t&1Re;QokM1-wTmL6%=`1I3&~vBXg=(PuaCASkAr(7wd{QA1}1&= z_7=RAe*La>NY2?K-OPbB0ZhQ&V&o$3cB!Dn zUpM=i4*6^v#+2-3=T5xYZYx4Wer*37=zMFG49oANmh~Ie>WkX~gX4fR;4>LZ-XlS= z1tcD1Z!@0ydyx%`dPQ~9nu+H~YDQgFWqP@E;XU_xw6{(WYJ2rxWC8=iu6Yfv1=ot> zOUh4ox9)6QasHdB&NO^eF`?|{`wwafgV4VAHu|^7_Gc%+%7m8O3E=R)oSM97K5kbT zK+fuIN0GRcJt<<5t!7}<9zOy4N&>B^y_$9uvDWf$D97C+m85Va;hf9j@l93$J`MgY z7?#p>YEOWQjQ$fq$HevoAn|~|%ZH1B-`Rs~?p7(B1YVbxrG|2hlL~m?s%7F>bY`hy z&x4KrYn${R)}Y;OH$T>-?Onf#{z6S*pph^h zl-mgq$A_wR+DQw*xb8YV((dn8I`ZFtMERO}>(pCaOPXGR8wN+l9QwA}U6e{taLCD} zAvWPMj(5@KVOm$wi_yFb4~`uhZ-2>i_s!1y6*HlAD?l%`#9T9s{F6VymRLZNpbjk7 z^?WP}thzS&F|+HgA-7djG+YoV_)h;Gulnz(m~0Pi_fL0ZoN7a4*PyX>6|mm&DbLPV znO*X)?aq`PLRkV|UGP{RJ)6rYUMH6^Q2b7=j3z3;z5V5*pV{?*$R}i_e-xg+Q>1D= z{J=9-42HfkTDmZ@r_)5buOA~@a%s=0PCZ)JVnW zSw=gDtONzaZq+t8T4MI^n98KLUsp|&9BF)i0{B5_T}>KK0DF-c?JYt95mFF1bpLyl z@kHuNU#za6{T(?k-nkzkdpw=8?tQcM@i|XX7r?7crY76%sy%y7fEX%-8262stp~E} zHA6Z9zb+0N^~BYW4g0^o_hmeEi2}TPl_AGz%OXVS!tKAyCQyU?0bIZSNN9Y1K%=6d zW|8$7Hpv^z3QJ^Cs6_I0QF8AIKv#}G0iwyYcn8en_B!UOv5~C^qluSxrmUD*g8NSE zfLGOv`O2G8P2bW&R3F288k30Lh3(K|0fZ=WsRz74^43R63d{*PJ-ejZTl$151UEwA z<&#RDmsQVVPoDr5RrNu>mY*%_bhtU{9evHzwH#OO(hn`-EYwsC^Jch~;x(k!35XiW zQx`q@dG@4Ly`mRIIj_{RU6W2sKa_5V5zm=)m_)i4Zna9)wt|E0V%Xs_ZNQ%^4OV;F z!Y1kJkYID3vNXrMi+rfAvhPghK~jzL2=Ei2Eae2)@HIPre~rh_NT%Orv`m?DdvNjY z_lBE7ggVoU6}O|lRel!>Jy5|X_2dzyJUfwRHg9(2vDC=jT-uHQcvQM`z~|tM3bAP8 zw^F<>!y6Jxbd0h5f)`8lQka%^s$UBwv-F(+ z&lYF+2)uY|_!25X=?{#%S^k?%$wQ6O){op)y~%d{mRD#bogkJ;?>XiM>V}q+P*ioY z(g_eTfD7ocJuMKQWw>>0LtqRN*x9oji?By^HT10h__^Zftg?@6+a*eYT2JK6Z@g+k zxo4|fU|+%h>R9tF*znAw_Sa!^RUV$1R4c7_edU!!wCEFnD4n!TkMW%5>O#m*lFc#h zs%9tG1^0+tJpT2;clz z9_{fVS5W_Ae*GSMpR7B}FP_Bll_i!O2MKi%L89BzI#!;9_%kuH*yS1X&=w$?e=Rm- z3q@5~;P{CdlyD$9A= zc!Ja8n-Ym1$0G0l5S{E424$5n=fK{jvKBwg;xrQDU$E3-FguXhIa4znWcVKCtw^$T zG@Cy+{$7&*Nrq4j5G;YCCGZi|NoqQts*!N{v(y$|M*Qh|oq3XidwY5`BZm-lUOad> zC)aJUc)z$41!~bTNT4PN2}O*FF#5?ucb^CZ=3S4z+Ef8Mf1%}5@+mgUPj6DHXcS)s z58%o%>*3TZL|rHsQRGn_o-Io0?ZnQUBK8I3{Ldvt1(t?~K>Xf$+C6i$AX%FTx-OVD z%B8V({KZ#4@o01pktAT4v9!2uIOFf!L7pLY%x^Y7SEc0WkG6qH(P&9s!q_M z$uCx&cj=NscGp`OHI@=x2mnvV3A=#&KI|u4*W6Dx#qZR$-cv~_9BqecbQH)3==TE< zgL`i877%q|Y$rg+_mMKJ8Na~ZMnTXtGYx;j;I0B!s_q2D_)0`uFPJ6f%oo3BlDV_C z&{AItUivM`bN|dpvF?*FNYA}WxTnzZHAS-P&eDrgY1{PUExR84Pmhh1x)5?L z)L)5&1*0@;t8aYusfy|>*Yx)kuD$&(7NWd3eV@u$j>|@)I^GMI@CV+%gGa^t$?BK+ zY0lfqS9fy9e~eeqU6lF6X^v2ZN#;B;?*>zF{VUeh-Y{+ME&a^X9Ydz5s`N4Hih=Uo z<|n8bamFho`qCj#lNUkI64%>>h4E5(aqu9y+=@c$O@$v>5z!9~>@B&I4wQP%1bkql 
zr!w)AuKZc7$fuqruHtU+;&eZ|l`u1J$?BzA2~Z*CfJ4y|s>*H5y3(R7;*UgbVEA4c)QA)jYJAJQTlq3xoMBPL^%W$kDU z*^*v*YM7`TaLn$+M<(3(@yI;+o2p}o`Uhw-?6K$cXA??|U(y+PFmY+U$6WPQNyZhc zTYDO_QMdG*ogNNlgy|%zRaKvQ9=?}yfW(5zEZs8k>hjQQTgmK>R;E!c2@C~hoV$D* zHT>DlNlE-DOx-?dCB^1T$8Dwa_U5@LZg51}u*a|nyBbu-S%)s$bCYDVLSp!x3CYp5 zn&BPnr`I~GxA3i!btw%C%bKVV{2WEy5BwWjB9DWc%D_+@`K6~P6Tei}Bja{cZk7!D z=Qw~?G7G@MmfoO*56csfRlXU*lFiws6U`TplvH_Xv0OJhIboWC(2Tp5>V4u022#bV zo-w>xd1P*9D$|9pa5dC~hr6}_@tk4N!oo0hg}0=_MnA`0(ddPlR)_WjC(6df~GpdMaa0&lP|>dvSy^cBcI0YjmgJ4 zs~-Y>^@#BT67R{Wz>EdU~Gv?n=D zp5X+T5*`A-DF6_Lp!NxK&SRzaE^@Z=!*3IfJhU!cogb@wuTAeC$=viXR(~!2vKos} z zwRaJev<}yy)R6PE_fQF~GFSx*E8c3=$~1Dfmt!K`ZVcki?sJVIm{leV6+b3V zZ_J=PivKSa3)Be^g>ex%Fz}Nnp9Mi)4|wPQ;}>oRgjj$a1>XId+c2?^P)w{T)#19% zTTozQD{1WgL$WoYO>IZjzBK~8q!}?RFxdeO%aRT!U-dbcS46}F#RbLvvCPol{$TOz zPkjF4hAb&**bA_b>=+n8<}9R|7%pd0f?mb}2?gpI<1-U3eKc$_-ZhUJ)PL$L zSc=hI(i6TNrfU(_%g=shXD7udanw#Io~UQBKXn7Yu)-HzpTN%0M>Fe8PVm>=Mrf zbS)AsQ5AKewr)jbeymLDzRU&DC%{*dW_l#DkTU5f?_Cp!oltkbFf0a>{ZCcUm3Hnrd8-P2DPI6X_{*!q!Sr_e>H*Kfai-36XG z^NJqu3WM8G8BgH`En&{Pf`Ox5ylZRgy;;BdFILHu5mp_g{Pux^PcycKUWs(}BOi;z zU9qc-Y$6%MFHh#owl9qaVFV$F>~r;o_kQ;^MDoPP$}yel!LF2DU!pZ{i(af=C-acH z8ViW~zIKrr`+9D4iI2=mnP$I53eggZW2BjTt|WV3AKGL^yaGKy2*GEwaRM+lKfxPX zJj}Mh=YAXR_EAPEXAoAy^-HO9d&XzjlsMPb+9*sPpl2u*rLAI{B2}IV&$W4GAK4Js zs0*5!V$PCYKVu|^^S8^kKU&;y>ZENjW>|5A|0RJyqFqgk6}O0f_^z02iaVRwf`^(~ zO14(xx$iDse_ZfG-yu$(Z;q)u(fXq9H9@_vU=Z zDEKKNR;hWXK`>0_F$-n=&dSqPBnVi~ke^aMkp&BglglPp+GS0BKcfAZ18y-=zIBNE zMPrP_QyKVcZ^4~>NqLdWe*mpnD${3B{Z}OfR^vpvJtJnGBcWtHN)5r1@7n1Zag4;p z`?|3=x_;O{**njQ%1u9Z$0hZ!;-P4hF!BRZz*d%A#+O+Lt%($ZVqACQ!0Jv7fq$|~;7!2g$}>*M3T83DA-2LutWHe!gC9bVhIXG&XR8H=xXJ9qs81UkUUJmW=y~12c{#Rou9CGW+|ndV z31Q*~=zz#n`BDLj%&m&qP106}JGrXNpX)c}wQ`P}L6f6rPn$*5VzqCb$46YJR=j(+ zzfXU>zZ^j3@?6U77NL1|>>sxjEYTq>>1M!ZIDQa4GiGh``0c{2_=ptM$BLDy4|3RF z=&d8kT%QTTLxpk@!B?ZrvBQQ_7M5BYL%_N~UcSXh-I|K%n|(c}zyq0X?=++KFJ z$j)?a2ITKiJnac^`U;F3b?u)pE842*hT^2M1!lL~N;TBCjFGv5SQLzWFN<3amapFE z<(HBSLs+CmnzotR!Oefbz6Z1oh2$})n4B5F*+RIZ}RD=`@?k`Tz)Zz*$nV> z!93(UeF*P9?YyLjzmGWs{WQQP_2SSN?U$$5q z|C%*TYWH}kAs6bK;^pm*9v2EFWsVjA!M(0v3OE5MxuwbszDYzgE0zhoZpG35tpRVM zQG;upr(~kR{A6zPWcwo27pq5Q{MUN{9d14AIy+v#~$(1~|4Q9W|aK&&>PUDSZ( zkNK~*i=o83WJII+3h1gG7#JTIMi2~vZbn|0zQp9uYE*QTq&+jKFxd90&U-2nLO-&I zGq*cZx@t}B7jb60M~WtAPJqcZP~Iv? 
zF*gT|0@+oXG|}<|umy?S#)I)6pif#j>4QR+CvUMv@ztH2BF1(HLjB-Bg%PJRiELLx zt@KfOk(2Q!z_FPM5G-w%fy9}{I1y_|2sngjr;^+{j9ut)#jalav*i~Qm-FSNDQjig zR+u*GYx3UuzvfZKeOdo8=A@!=JgTY?pFQ4=6SH0(g!M>ldO>*%Gkh66H7e}bs4z#eSMoXWVhv>O9s(h|X|V#ddZl03^x zID8X$G3f5d_xAbG_A&~OqJDKbie5rIRQEp|BFdJN-{qE`08iqelrwJQ$j5m{N=dI5 z33PEMz>Q}B(UZ!QN8*QT5LIx!*N7>&UER12fr!>p-uEwEpGcG_<)<`Be#?KKDshXx zKR!xwrKo(ke7dQp4#k0TUW}wlkFB~M8d-Nv@CBqPM(j(wu>YJo3kmKD5;zK?GJ~yK z=i=N!xmYff8k^e!;!j!d5$F?df>YidMUAln2wCJ9N~50~AGkDd_eJPjMEGjtE68#c zb(IX69vjcT>zC|UKRLRUN!$NYt)pn}#s`VRnN|@?XR_IIGaW__75U+tLA&qz-N z8-_8F)GM-FAvewrc7<6icWGIJ-W7$H^ICfx-E$SIE&9&y2^pjpAnSDL` zt~Q*H9(LtS3K-;ZM#x#_-bI0$B!03I@k^ZFGK|UN&1RT#tflGGz5GbWgj+$&a9}H; zZIAKZFUWo!nMb&{%L*h%d`sB8*YXmh&6ZyIe}4b@o5s$`JTYF@#d*anlJ~}Ia$(`k zk=AMAzZJYplv+wSg`R}V(RV>PY-!JZ#fWfuG&Z!nKT5Sx;^2SUcl2^S!yy%+PeHBf z!RP@>N5-J zJK&-lI=vYKnzcW6uo~&=scaoAwS%eAjGQWnkdzjA$tRWcWxhAWg|-h=4s!Fp|9!13 z3%_ELV8>UAL_Z1q+&!tZaGke8!ibxrWH8czPc6fMpFaj3QUCxugC_u)o0h;xbo&fv z#nX3rM%fD0;$ZR3vO=$Nd|XPUn(yD_TGP2KHWkkk7sGSYBjLJxgTxP{g9-7RXZz61 zeL?`PH%Y~WGw|oERjVrd;;idrZAvH0urVHM7UcYx>6_|B#9xo_iDm=PfR~iq=CdCb z-Cpe`lHK!Nc`x(tl$ZOtF?@`mfB4i*3vx|FAJGoBc^t~mFca)>QL%@8R?A%wr|)Bh zPPBu${QO#rZy=d>6R!a@UH$`brT=-IzX@iK0=XbFl^1?C`xemvv(fB^zW>cc^O=ur zr@v08R?$GP7Xd4R`%7@awdT(t-XW#(v%V(8 z7_*oRPk1#{qkm1kze2gmfShy&kJ|XT~$W=CICP>5h@c zj?W9}_aCC5+;?KFZaH=SYxiJKSw-O zqVZ_Lj?5E8y_41hJ3J~xZa6-GY>HIC5^JC_u*#%PdG=?MoVL%*1#akgc_|#N2EO}v zX(9jU9}45d&PJexm-rhl)E#SVe^Xd+ZPZ3 zD_L&OASsAqt7%a}mv3*N=|z@le|dTvpkGB_Wr- zN%zbw6AcO8#Gr3*6TET*$1~N8%5EoNMon@`t@*ugi@_gz>80uJ(!)v+U5i3UhpF9W zJ(4#3QQBQxP3G}~1r9vCLJtV#?mPcdQhaVltP<`1gAMUQcX{^&`0k92DWXm+u;V4R zT1BdUH|wCYi%NF$M|NHl%$>%gQ@Ju^6yqacL^o&M)=nA`nyaqaWuv_ZZcL%fP#^4s)2_}J! zzHB0c^(kVCOL>>H4Sy^)L*Ds+Gv5%|{)jG~mmruCojz>MxKCsU(?^!d$@`L~av9N{ zlCnL2+7uO5*k9-bv2sI>9MGQOsA_XER}@Asc1QaRQN8sN{KCX47bR>Fo_YRjn#j)Y zcM|VzzPVJiB_5(rcispYzbJwPHG;)9jik;zMzEE9v9rm@i}lOrqOFleF3*Wu<_(ea z>2Xr|W#MZ2=B+Y^^pG%D$Gauqsc4!SGdis^V{rFj>C%!klV-BrJ zzAWMVrvNmipAS1tGAD-p{$u65U0!TK%yn|HAD4f=>+n7CdQQW>#4Y9!uXiERLb*}D zQA_Vp6-luO$Q-c>Bi}U}_mV0ZSSKFja>u*ojgeT!-Mzw}Lz3B+VNUcJ^>VY=*z>ge zAd7IueeR)NrR2Aseum)3`J*s$s>Z&mby>!lSI4fPHd5+#g;m6~@0$G5qg6RJ(BX!x z%|vZ4<7O?1#j0Hi<{eguie=R>S(igO1J>T@5v}KYpS|y0q2pSUi8_j&Pm~>T`4qym zMAa&cUtH=dRU-*XkuVW#McbnQDt$*4%3gF0L$=U@w+J{VtQeVQ;e5D&HWbv>MVo$j^2TskdBK>~ zPjy@U5m%b13oS8bny0yiKoHRFe{h$6b#<{#Bx&)xbgu~yv2&?B@;=77vz*7Uaas4# zhzY|!|8k=K`0H$pym(1*%k7E<$n%H)l!BQ&LO$hlGh;as+K#G{j&5euu5p>*)A&*& z8GVnDR`Qc-L)e1K)BAP)1~#b*FZ4FdxFP#_+Y%%h*mfH3PN2sLaPN_Mmqx+#xQgY+ z6Tp-kDlyLyLw7sY*7(m3QayqLvEtxRp-;2=QIB)bgV%nDvLlHDd3> z*3NvA6-*fx;?+e*o>{eUGQm0_EXZJtf71uQ4K^{n0y$h{h{n7FVV3e8uIi>5RRf() zC9HQdizKVKU+LJ{>@tirJ!iit$U1AnxeV-wF-%gx`lx+gbgmGw{44n-aoKk?aMv{U zrh|izjENufPbLGc?&}t_KRYB{5D)zC@d@hv0qt5@tdZXSn@=5GJaveod6q_rv`(($ z6^I0Dkq%9F-mcCt%&Ci(y?pqjyg@-^ha5n7Rn%QAsN^LRgip zLVQzQ|3#*4i9XGTF6m>BUt9@2P6GpqIT%G=k_sf8Y61T!5V{d+6!Z%Q^~qv z%6A;jFabT6t*Sv6C|Rts>)jRd_K++lIRokZMA;uWk z+kXtJIGWL=Kez6(jdG^m!X!)(|tWJf^w?aOUKj)bEQgeL^cP< zZ|E9h_;#$o(DIkc%!upn5#d6dA<|5?Q@JO#%>CR;j4t9_UinTYah_4&&yceJtmQa4 z3r0lj(+Xk$I>_y7^xncH~pKfI8=hl%rQzzUU8$ zic*>LKzTn*m@I@pTqpC=%=Y+G>G6_Os6bh^7@qExQKu`~{B|gR>u;|9wigfH&#~*7 z+I@ZVUvP(h4QvazkLF8kW+d6b9VWC4mr(4)0DV+lv$Y|`{C4gHbJ*nOrbOy3rlY$S zk8={;woPW`33lHpC9O;gL~XaCgZZZmPGic!y4Skpc7`o=afMQ6&ki>(`!6w?mHU1Q zfhlMU!aXVFNJl7(1zZC!-`2DkbF7`hH&q>c|3;!)1~2|>QK?+zZI&lj=r|=kxF4^? 
zf*`+cn2?{(ojj<+8F25QHs6qFR?&6mV*v2$K}ucF1*%YR!rvjzu5`>zJ}un$oSgP3@u^nU4r^x;qz0uXpm=X2 zg#-g_DhI=5;%9ttYp#0cyu+LKrWGKnB7Wk1TAJLM%PhB)UpTY;{sLTeugEU?Q(kX^ z*frp%Ki4@#Gr3+B7{{os3}!{2vwzxF?#eBlz7ln}bx5NEG}8Y%G0kId`31&x10tt~ z1?{~~-mf#Vz7sJ}$Q(h^OITrZ8r&>ocdzvl^q-y z5}U$+A#vv)2#Izbdi12`ABMygGL%x(;+U=q>StCdAcwL1pjoSZRUG)-_`9Oo|H0my z$3wmM{o_+A?NixOm`;*PQd-FNsZ+KTr8Jh9lNL#mB+Bq9LRp%F&>|C(BqkvhnGh;T zLPCsv%P?aZGiK(~?=_vf>ppR=bIyHT_jP~o>;9cTJT#wtmU(|(@7L@3dcHRP4nvEF zm3>*V*mCrhI@?2d zz8@Jfsp%vN=JaFwgkaLJFbVB?(j_*;`TBQ)8}Ryo$9zTRszNdw2yUV=>Hd|!Vf8Fc{JPo;`m`PeIrijR&))Y(91M#HFrRHv#W}QSj<^0P$=y4O8jf#|zY}vX z?HaGSxy_DRzia#p7-~zKJsJdVZ;KhpVbUvNg70exhWd0!h_gEbDH02WmijHacEqUehx=+T(Hv~q>CXi`SQ(8h}?rI*pJ8Q(HSB%|C6viWmky%{o3-#3Ui>Yll zwpSe|9vN)+f1tGD*5t!Kh8T!1`FXYeDbk}P82C}t&zDw%%w!YdIW%7mX{$Jn_8xi$ zKxpSq@k{?1zZ@ohpVmxT@%R646G6u3B{Er07yCfUpa+nbD~Bd>ompFqF891&-dk~f zWZ#vB9m{`GpP|2ye&uA3-)o%8YWv5GEMo8GR>DWMEtQy>K$UwV^^?-Gp`AVE8l$>z zR`u2^9o}2NZu<*^il?#r9M0xA^Hatoyxymep=IxiHK2$4p)b5McWx6Zw@+VA=In=P zsiHS09xTf^|7d4kef8rWEd{5+N)AA@D|fI8`p6o1{{xMAU?Ik~Qx?2q1qxJA?lF2@ zDJEGH4Rfa{(Jea6@;aQ4SEqoGs97MFA1e1E&gg1O9tpAYF z5^CC1`?)SHdeKx!>>Urk3=0v=f~jQXE$oLMtuqhj%Z`!f8cJB2w>ufp{( zVFYz}xzL_;MQb)W|H8e%8Lye-2Om}0m02hnkdE!y_i#L~?^bbtZVQ(T# zFEAc&K#xWO$h^X#}K7F;)$jp0+ieC&;+Z7)m~3%UtWa*A)~7QRyHbh&!(R&v1* zRT7%QG%gihB`>^B-ol;1wkg_R-RFPIec{ce+r_KQC35c`Ii2FX`G>hWC#OxTFK(r- z5}wNy=kMiaG&z^mPi$5IyA$mtOvo*>+XYPx)FK)=boNO)His#A^1CjuU zEMMQFQ8F8dsGN^Nxs%(%Ei26q%$A=&=QY}5@y=Ht=RJWx-KUZU*`$-x+4_Rjb==DZ z&6%n8^QV10eqgK1`psf97C+US@?(@rL}tx%#-ov7o&8M|?h5fhcNBM|g6mi8+WqL~ zZtGKbZIxmzmtOL~l@{>)FI(bdmYk0ukzlC-kRL?ku$Or<>^l*yx;C4uYAkcJpJpzQ z8{C%OYiOl$ZGVXG$*aYN;z?*so-X(T2lEy{8on;_1-G?qlR^d`LGyKd79Alh01DJD z%+cjGNu`d98m@kkXL>#4_7tO0a@f|_ez%-2_)ZXSFF#LUZBNzUT;)P=?GhsDE>l9R z5f~qI*KEyxaxpbKrl>|$eH&>}z@lSqVRiJ)db;Gbz(74vrO z`pnmEaxc#whn9wu6gE$qr!q)45ne`*xzx(0?QUYc&~}maG_81V;w;r^`J%@%)329k z6L%rXLABlF$@xk0AE9nv>=SvP@ErY^nYcjx2^+bnx0tNVJ{ZM%KJqC1&7;pcdE2h= zt{SdfCF8jzz+lz`>7B^#l)>O8HGyPhaU+47d`F-rOvs(4?3Y!_dm&v{wKu~0+}p-| zcFs8g4GSNiP`>oYn-M)B#^>_838YKZ2Pl-%38o6O^;DcNoth?A2gU7Bie-_z&PlCY zX0pxGJ991D?jJvUpq*B)CD zHzJAQw1DP6Y_d>gPu7!J>jvt!h22`ZXu`_pCpUFGrE}d&dI7O-1;wB;3Nf`MHB{^! zon=a>t$)tIRp6C6*wxcOWCU#vXkA{}IQCBT1eK3Dg_>WDvX-h)H zNa6G;xf9q>-<;Ztm-LJ-(bEuhH@jEdoVt5^W!ut^JFrLEM}BISPauZII!-vLlm2$I zSuWC)D5LR+nl4i$^FTJH&bpYffzhzOcZQnl7u?EHHscO{%S$w0$p_YSCb79(QRu>n zit8NaL+;Emcd*+Irik1-@gVLXXB9V^KZh%3$M(oKQ|R$gWwmVp)mE^%Db-n~^l|!{ zmrJ5_FDwsVZ~tjU_9S0MPhHaYF30B~VoS6jLQMw!s4xiY|IEi7&7b`R*S3qB^#w<& z^T2{*&uNpOUpv#Gm5SpA79f)`N!w3Wo|Q$*D6Sq!8`PvX-aewrY&waY9?(4S%u{a!+Fo(#9T2`?tF zqf=gZVDdjbMS^yn7%jSxy{KeK47G9j-YwLt2PS31mw1e6v+qQ+TJx<(f`*@5pGDZ3 zeq&con2g)h_#c^BUXLW3JT#?vu;!CP0W-#l=& zwmwB>34Ue6^CfZ~%PeQt>aeflxH-*)LU7$!d+2jK14|mU*^g41%q}FK{2;BT@P4VA zWc{Y4nhExEn6ouLX{Dcj?O)KMO9=)jhz~{WDU*-VrFuiiB^~ybhstMaouWBF7`mVD?O9TI_R;zKf@54S?}VO5;U638o?Y30*ca&M zdFyU%TJhq<=8kiE$z|D2sX`&IVk(mglcWK{LGI+xC>+L6UNq3QZ))5hmN z&kUdC^Ak7NhMSa#=9^*5tORmd0WNY*GBOJe$`Fkf%$ORh84_(iaXK?(Re#Ltw~WZQ zQuIxbrjI=A451&SUuay!qK3HZ%9uaS`@qpi$LVAzkp1UhF z&F!wTC^(sBbcr z)Mt}ooR2jd_fD%HN%f>MP57FYFqm)p!Snkw*@t5jhjMu8G8-qyMab_;ui=_b?OOz%=>dSs2rlL1Rk2In=xiB%&EeZ&|w^s?E7EipZ2#=7n4G1fP}HjbJ`by~jQ8dso~ zFo$7NapC)D?g8E+7(@LtQ}l-P9E4!%I?NzYUM`FyJ+9`u@qwCt11j8w%Kwq>nld66 zh;8Mfd=!+r!(cS{;V=0zz;L78Q{hE(C{#on`6FjItE+JX*QHtQ(pj>!f*LC?xFu)4 z(qQwW4?|p+#h15BHKwe1Fw57#P;+TkCN$b$eF$?%bd;8Ux;NTkUcU38L(>Z<&AnR? 
zzZjaNpTNzRoY3(FH>MZA1sI#}3-zC^cV1dTdQUr5X8M}3tI}n^ms5)N<0wmqOI0bw zUC#sdnr}9ry69&om3$JOtir0<7QLMn2<1T2yC1$eelVkA<>Czg_7(RIE?;nWrNx}% z#V2l^h8r{fzyl`MpKZL9XUUSVwAq&b` zF6RVmS)Yxxi2gdzaN>Z8dS-EnL3+@8u^(V!MW}_xoGGSjfv$c-;k~5;>WZ_^y9eJ8 zIGA4za8Y4E3fgYSla!!ychH+fQ;`Kr(ILPLhw_C9SPGB@l+aj*QCfqrKWAX5C5!y0 zt2-yaw1Dr6kgd>h#lqDJ(yZ9PCg^Mz3vjM16?gfw%#ycZwS1@4K}R0IZt$}&~s zsiXeX;Z_8T+(9Dvj7fjR2ME?CaJ9E)Rb?cznT7c4SR{K=w?fB=A(wea?HuZ%eZ7hAj8fW~7vX0z7K?kojjMOv_|vpD=!`PL z?B~lnq0{Vef?bdDkgG;ti&O?gc?Og)Z&^nTC0wU@;caX$Ne-ZEs|8C)f_h$chcz|Dy3 zsfiVd{Lk`zu;Kb6?E1Qx95gx_JcNHG#F4dZ9wFre>wjLXs2o znodh}RJ|PaXq5b@)X8GnQH>%PbXsg54g z&7D*YYF90aG+G4JRn z|2$`U7-<99>uY-SdSK7bqS&SD^xY|ubndREP-5}kMzv>9c7CR%)HZ(B*;bSD{qc2E zZ6hKZA5S{(SG)t~z6bJ9$Pu6$S8>mPmrR}5SGYEP!WmiN z#rw3`o6=38ts|TT@&SX&wWsR$WCy$U)bE@SasI9L-c+bC5(hs3^z@Z;WDu8BYO_s9PD2Xl;yPkYq&ap@jSE>wfIv6RPO-!-x!Zcjv#d8gee%`qTW989v$&`n zek)cV3Mtx2-z&~%d+!~s+i{tmXxDxHW@KK%oyG!A&0Y`Jl@H1#l8K=e=V#u(?R$I9 z=0%W2NdIHEaEO+V%z}E&O0(F)0tJ~0^6~>ZdE^guOR2MB%#F|G)Y@B2et>(`iQA*F z65caKq35YjQj3fD5rxH7nSIA->z!B4NKW?_2oBcdOA&YxOge&Nc2=;mhvp57m?7nziVM?6p%BHE+D3c{TI& z=m)A)+YwXsA79>|(B$*uIXwvx^*UM3>~f=gwU2_&TfK!J$W|@S%->t~xu=vNuhT1? zrz1Dl%=VC(nsmf{Ml>mpk)snh)=l&(pyP8@n|8=%qO&}mj98rT`fN+by>qRfpETZh zdHsyHrk;J?N3+l8NpsbjLo{4HwF?&DFSO(ya=P+v=;g7flTW7a+nVw?B*p=T()pYO zS+nq&?OFI7kG>$%a!AEXz(?@BPUy6w0jyj1R9%Ld!RBM!@Be19CIKm)uiD9@Ju$h}0~ z?98eukRBa)66S7{sbcYTw!w^PTfH)T4cy+JLjVjoQ-T{2?l2_jbzTdus2x zTG+gB#>awvuN>qH#AEIFH?PW_!p(X@(8fNI%~-Rk?A^d3<;vfi%%75zWR|q8U5saM z^txY+|JrE(%Y2-#R;7*ui<(cEo7TGu`q%X1x$&K5L@8WP=7Rf$894h&<`q|OzI5wTDTn_Pe1+u#E=6#tKyQEYS zx6mplV$YN88nw2k*NZny58FH#k~pt9WsBMM7bmsF{Ja(} z0IfS9)be+K2xBv4Y2zHdf3FU*6aJabwj@oT8b-z>f~<}f451i)>{oKu-o@AGL@GJB zJiKJlad`{Mk89qwc@y-V4eodC*yC3%#|@HXuWMKl)J}S~swN_oC&en924Qa`*|C?)^v?ly{q$Vd5sKR+%z@&F4CU4rDp!8gt)eu3fgSR z{niAlTbVsM&>=Bb>Fkvi-e!`+e0A&)<}^Aux)eqrEgSjMSzmuZ1N=K(=buPlVs#3n z2C*HqLK0^~J=!hL^e)lgr}RomQR`>U&#rww5}qUu@Prp!3aw4UpbV5&rmQu~{>2a2ADy{N=m)s#9fFyu4dRER^`QR*47h}9FFeJ*x;4rUnS$irYPL^k^JLQx2A(l*QKrp-Yc~o7;b#^{F z-*Q-fo$-_2j*^CrxJXSUoB4{L=4i23ZCmk7ybsCeI82$fxvbfBO_u&=$5o$)^<=-l zvnW^O*AFt37c`icPNcVa5%weq=)n+zYJNiFan{LvT@L9?W+dRAtDdlEB!G;`AU>1H~aHZ*YB{v`~5ZM9!?>R-a}m)`xGf&EJu z{t|}2gyC=K?0-%e^yGv$=uOv1ta5Y#S-Tb5&RG%aVJ-y$Ao3*-j!bPXV?&)K^1(Zi z(^}RrVtTqYqzy=IsgTa)IWHS;YQEp=3&};d-a?ysN!_iGR^@OIXZ@4am9Dn5C1 zbHVcc*TimLESz~i$#?3KIZlJEE&YYinZoAL zUa-8IqHU^`RQm-N_EwBV(CX;8k%N%zfV1U%d&c<+=PWKuEOS6nD{x#S0$YW zl`NCc$ygO#LPzFK!zZ*r*GYcJwSF5v`-WFlkn-^R$hYBwt#*E;DJHJ87jPV7sOCYQ8*UcJHS|HVmw)QofwcGiYy3Vj#^2}N%`$n@(=ECTB zhlU{{jKrq;e0)k1cyT>?&vavcN3MRs(L+r*<@I)Gj(GGJ9Im2dFrf#ZYKb=eh*bsG zg&IPZBcxd9F(F&BRQI2vKgX70HwfPs_lXjP3c*k)>BSLPAi^R6hiaqnt&6|$- zoW)))cuwV1v19VjL!w9BW2CX{HShorYq}zRcKQ@yOPok`Hwrb}V$dTd?lS|Cq9yZy z98stzr3^C3u^ZRR(w zdkS+B>X=2jp)*+@&6b?}p-^$^M_=818t^?o;eBELcXSezhbpY7h_C5Cme%6isNHhV ze}eg&V!aUKlE!k=HE(@BvquCM^AsTk$+;X0I?B_)ra_!|v05n7tAxjbXg2ueTc%m> zI_+WT7kuI9fi8oiJ?|Os_~J(IPL<>9f}Pb&Gb^0~jgB>0MhxtnOo|U0xRG~%#N5~l zzM9ege%RqNetyv^T1g?aXf0tsx#8 zWIs;cpCDzb^FHFCVI9KC!{$RNt^q3=!$aw5J7MerPcwlwv6&(iCj*~(K8h}&7>+$) zt57}}s=bv)87U_lxe=&UnNkdp$O-k2jQ1!!{}eJ0wa-NF#e zgp7u^v&h&3y0J4bACo@3(>1jGuCOx)E#PxOU+6C^AaQlzl-4Wbjzq$sD<&xb8G=Tt z*c9t8xB*?l*aP1lSUl?uy|c<^uz}v}Eh?Uh4{cTxD8M;vU_r+oVNtoKX@!K*+1zaC zNvn$T6e!~kI6xv7HJtBP*FhTWLC*9s#xCbSvc85H`Dpno?|i148gH18JL1A6p|ETT zo+dEu5#Gp2%Gd+B%9wl#EWcl#>KEMeQW9pEFz!HY6TP!7uZi;{S};m!^vV{f(P7bc z^%{W4$cW-~j1_*S=EB+pu4^9+P3f zx-DxIWxKY+S#mTDVFLXKL(GveiEQ_j5HciBBIcxfEp*zUu{)r}z7oIk!5({`okbay z?a)7M%vVWh|ZOTP4#jJ9eZTB?3cxNt^L0v zEeYf0XBq?yj*52dt^Qz?r4B0=*7X~QIG$=ep4wyGv_oHTdI|rX>pbM8BO?$*e^U6r 
zqw#u&8cfTr9{BRja(qW$yW^$GGJTEO?24?6CwT?&OkKE3lM46xzX0tR6mFh$K({($ zneHWmgRDX=!McOq6zj9PYDm?igfL_M8584#*CTsb5m;h4X=%IRqe0mcd&`k{&!g&z zE3PbMtm;nlOA2}MW9D-EpTZ-1EA#I1k3y#4N)9+>|LfbL{ySwM{?W6}BoNS-!7T`X z3RHJ@-JwOGP1lft#oS7)U?&{7H(FZ4fR7X`KN}fcp3=tPE;R$pwhl1c6=Zb9vu#M9 zySKgPRSIwZ&Y|cw?>K0Q66N)u*vr%91-0I(DZ4;z!BM?=Y?l!51jw6{c&K9rR{HEl z>7b(3?#|27Tduh4NYseO1a+XzZDs^h{a{yfuhP_3Hb|C|o?*0NHzQ1p+8ISZy zp^iGJmOE-6#G{UTj^WRcnyI&etJYc*kVf9zw8M$-Ve}}+Y1(nUQr*1T>bbQW-drqc z51YNIMAvg&jFQyppim>@I`!>+N8*G0HjVe>xUD59YY)SpfG4}Q5M~_JBe1X31q9yx z)(XpJ(Em>JWmPT zOtY()@lP$R%$LVo&CxX9^wa!<6Aj#aolZaa*kwBE4{l?@IE~VrrzUV_k)D%g*?2;V zbqz;4%Of_` z;2a5oNCtz02$ZqK*mJCAFs}&nb93w9+31uHugUv#CV3?N<|ha7SxXy!zOjpVZ0Pl$ z{66%r>lXdrxhMZc3%!WDnQud0#vN|4q2jp`MQHugXITlPQWxP(50^)iPZ@YuZVmEr zob!|1V5OfOeP?BGhCx_(c&;r7=k1-pbP7AckikG%G^quo=5B6%=ER?2^d!e-Y8ohu z;_h1c>J0^#3)@5A8kNYOk$mr5J1tAek4f2^B@bdb*cSt(Z#-KB|B1??XHi<;W6em8b#gt~o~-9lD9r;e-2_9(vY zaUoZ!6=Ax}IjZ1L3wZxw|NcRp$8|(roBz)Jb16$sJ@1_O>~&&Mi9x91vR8Jin)N03 zTmiD^YePbHtG!cW4S%|OE@MrvxHWe-Hm!1oC$&ZS1q>m2d%H{{DyKeqZP}7EM6(9* z_rAtn!0kryWHZmCSuTkU+wpfk0@b&T2d72aD{{SCRX&j`kA@YP@x#Pm!>Cln^);)Q z&{qh|B&7OVS!^a4C9&SRW%0Qg^?#aHRhdz&pv|f&Y*1Gd6=PYQ8;jo3s8UT4*W&Jsh9WxDwyeTh^+yzD(XXXj$^I??QL?%O$uGrYx6$ESEV>GRT4V>r#wq{i`;t zR(Xb#eYfx z{i!=u^d<>tefWP^ol5Sqx-pJ>mEtkk2sRyLaC&PCF_;V-%yb&xkS|Wmd7oDc?Vw~L zJ9!d1c8AL!K5D36o2@KcY-SB$c@P~o1at~g*QeEGd6r@$N0HLk-sl7K9;HNOA^00m z76KJO(eUF6SO4yF<>)gz#?RizDT6*UA+#d7`ZWW5P(jLRWnf?%f^6#jV zbG2Jx$OPJpXQG*)A##``$AR3GYY@h_O4o3B-7sRHaUkYkr`B%Y+^|Hgp`M)0vDW~K z7=Xh66%-Nu+s6ydx7JhUmNb_LR?xXyb8XT*Nlbbj-4QFQx|1=;@unPxWW?yx4a|_v zg_pIib8leME+v%Fm3lyu6+lHgIJS3FiTHq!Lw50$^-NT?oT&0@a9^MM{>bJ-HnsIP zqUzl2YKAlTl*Sb*ERXSLCyYOoMqMzTkQgDd27%B{gFX{Zdj~ZD4pbT1$k| z1R&Fz&0ukgU_Hkj62?WtSzEUV4O{4=(p3WFjj$!2E{--I7J!cX#dSS(&x$^&s5e?K3fpVAX!XqJ8`cKs-FbTCL!tedXZ{vTv(uY9 zt|_>NuQ>UJ6@L3pZ%k^+C!s!s!M2qAX^Z$>@8bU*ZC_TCy+!eeiy)v2-C(V+=k0(b z;)CF12Dh5=r)_~lDWBDT--8}r2D8PhhcC6*BW?S)yeEZmz$>w{Vv2kdks2U`k{&4s z_Ui10a&c+)>F9f#IVrN(s}B8V5vLm#OUorsIys@K5^3J#IRiU~@HV=FfmYegxecCc z6bNtwI#ed*UR|^IBxQwJyymjq7h*nz`wZ%gV3OkJ=v@F9nK+jb6Izo9Mp8`VQKcPu&6hxt|z2qw>`0o5?(}Q&H1GE!1|Dz`;v6J%g zlO6T&H7=>^NGp2k3eLnwFn8ZfOHbeJ%hg=uPRl@>entnJeL(l6KLU+r)SJ>lzepc2 z%Qb_TQVvZhSv`~_n1_eT$QN|ZhELE&Q$Et=O}l;rV>dN?HBG_7S$0HT)Y$dJ*yg*$ z7Nw5hq|fw6F2!En+Y0J;o_8;E{Gf7ouf;53Ldd!2Ax*K~Wj+lxE&g;tEUlQzpXEdv zJ(+gh+Q27J-u+A^-3?Thzk9RUsPsA)flLu7Dp01=KNnDkRvuw+)q)|-B`qMR@Vf2W zSR>BHelF2Y68z=^(CI3lOcLxMu+nmYmH4qZ6y%3X=U(qRl5pL&FkJVt7baEXk91j9 z&{-rI=v;0nP@zpn-=!y@O?hhA(x(fSbwrEbLP zGw&9Ms;1uCT0~AZowjqfy0}8Yv&LLE&K-aj-bpZs-W(B1t#>xqQDq&&y;x)(88%wa zUju!11xsPUO^l%xmu5<(${WzlYS2>X0)w;M5g9dq}r z-f@ldYl`h=a(`$_waFiq8g2~kXxR60^SX!L_gtUUtzUV9AhsknlH)R_zLi|V`ivQm z1R%XKKx)TFJ$;h6VB6ce7@e@2XWUe}V3EEP#__^T_$tg5|EO&K^U|M$mG}@Iu(Ab` zHU(OC(c-noe&Um&r8(X?nU}5(yoh+E9APUwi9BQoZf6%FoMEW5c62(s`^u9DZ`m@t zn8i&k;Ui)EX~3TS(D-Yl`qhX}1|xo5WWXuH^luHt%f8^s2+(Y=1p8QHO5mo9cAd0` zxQ$wLhiA568GZu#PW%f_c@-iG=-twp`~|m(Acz4WZjJ2Pv)Aqe`UV>$9Wdh&Kb`6S zqd(upbP$yH#tddpFMf}#_)x#8YJZUK3#NXR>co&(Q=Giha1ebt zP?Us$vvdihcSbVkfY%@BdsBduCcK*8e9CWlKdj-JkD@h{`i(m3-x%~zgK6zH4fBHl%;IjeTV-EIG*_pdm~#hpj0(w~!OhM(PA^8HesLT1z3sGbL~{L}Q& zjaOpGnfsxx0ggMi8a=_E{vp=&}&^VbDL%=A5eMax-9-Q>l%-CmxpT#mt1*| z9foyvlRy6-&`W$S`?Z;J6ZM7c1n7x%bQU!+Gjb3Rp@gXpS%N+3YJ2XM6`kGAF4+3v zn?s8k*=vk*^P@jpl26#H?Y+ob8-Mx2MwWfpsB&a+$y1BoX-z+pNX#Am?y#B69OR(3O zRYP92&Ptx4iS?3d3n%W0+#S-o0>7Vhpk{oZv`G1*ipREOEni1Ai*NL)(CVY?Pu|R? 
zzHpq}HusakL(imLGo`9li_6O952sKaAXwRz$@K%3pSmrF5=a7@v_@o;NJ7c1cONe2 zMjS2eQ@^}m_VNZ{XM{JEa~ZT)WeZ-gB5Q?SIo}YiaAb>PQr3}Y%Os!W=BPcU-7C!D zmoMcbOnLCFlU@+G7DxAdf8Ox*Wt@TDyr$9n+VtE3mkg|eF=S2N3}#?&q9J$$NBT!q z^%encO);lg#Zz2XmG=NPMjAaFMc==SmJ&ShW1-6ORb`;__m%uK z#-vUdx4FnF&1SVZgKEykx?~ly`ez89IWLJ07V#G zbZ)hmycG3OvY?njOcW$tMhjO0*WuwsC85<8d=pNm9r>P~iUy(B}Z?5nz z3P8dIf8nyGnlOx7vNO`DuV=;TVT`7sV%JT}t&|BEt&&u8T8A|On!fV21)m5}BML4q zFh=@>@P3}ECv>Ef&Js>Lb<=_PBwz-89;v>r&>38f@N|Rp2;GCcqO)b79lkX7vlT3rQ$(&;e3%9CFvj=F-(p?5SEb9CV!Nlw1*g zsLFMZccMvSe=-X{?mA-+kkDPEtLTfvE%XjR9=;Xb&^~uR85?XOa1RmcIcQtNo4G~g7OFQA-*CPZ^@pL>Ydgzg18L!5358!ejM#AELhbo7j)2tzNcrNa9SF-!KOU-f)ttMxNHOW0tiK(US} z6k7?4DUTFTxQ3au(J5AtL7|2EAK66;@;*uz2v9GiQmyw`26i4yYSUZ@Fd*1mrV4gG z3gynGHXD9-jm%{UpDn)NI$QZ4(F2F5L$d&%<0w2?xC6zJG&B>o)N@$Y=}5D}dwQ3w z1B9EpF{wCxf;A1Ygb0m={Dtr|w~nIRN$AURp!&R^1KO=aR7@<13omafClcUFDIyRz zW2H`CHd>AHxEP1>8)qPX4XdONIHS^NuX#0n=(4q7Ig1c*s=>+dx>u=-mPO1hmo?F$_gwQ8 z?$F)ojtqdr!3byhQRt_s>Z2e~gWAQ(B|>iv>|A8K!}@#gN)xrrUCE_;dVvfc#?L${ zP&iby=K;58O$y#Yqf|3lCB>@tPAHzc=@fy-1`qN*%HD$(h~0ugfE3Ii0;q>H-ylIH z-waS4(g~SXCW3M%GOQ7Rc3EwKs?$pF%uG>ij=VTVMhDhW1r`L>xh`a|6{45m47Q}) zil;DjPywVLVUtCDR{kYty25;^Q!{4adkOY_F9*qhLbW%zX3HB7ukVq9hX$q70bYzV z%Gow5=SK)my=I=Y_kN;`x7-cVjBd99x}>L*Vkvja!b|y0j=Pfy4vq#fib`i6n3gT6 zu5{#S6W$F=?jrf1^S}8Q-+mMP=n`lfQ*Sl+F|Y2U-Ck9Zgcl5+wpj?mQFkJY+6^oD zxBvpTXKu&QoUu zq9Y*om7&!>+qry9U%Q_Va+@Gcw;H#Vg@ys}E^O4;bfFUAg~EFDmBkh;AKpXK3ScYs zrG^Zfe=TSeKVWeHKBVTaBVuVwix099v+bjbeGz0~)22zyQ5BQTkq|h0&z!+ez zNK&piw!s-j2q7!tKp}J{4d4A{e~G}i>Ea~=-Qfq-#Xa9qLq*YDuYP0 zz#V*tQW)UgMjwh8PJ={?$4JD)adFXqpUjJYZPtQ#D}xE%@N!kQhVUsk7}r;PP6-nx zBkf9g;3;R5glqL}-A{es>s@Rs6w=wN!7yL_{Eg>v z1xzFWJD7MA4HIvuLdAz^SHIw<*d;BYQwRnSEdY<^gUBTO`bUZz=K|_Kg`I>ITGwDP zDV#jIB%rvqVV`xdNu0nVs*F5k&UNT=t>Jc$#%91(Hh%Ssz8aTSYLnBH%TKD`Y+MCTd11fa~%UvS&s*Vv8) zU)>3uXR@y@@(C@5y-FfHYx!+IiGCBm-=5G-92n~v8eeehfUPF*57=s=Ews;ZuY$3e zd8BJ^V2{6X2UPsGwsxR6^3@So`K9Pbncbp=PxVP9wyOyw6frYmi-YjIJ0jd@UDH#2 zipY(Q*JRPfzWS-ZypR8?pSoV4C8BhiDg4QG@a_`R0-p4bbxFp*3vtD97+;0m&jAJ) zcti)Rkv>$W>Qg}c-7&eeK%|LA1&VHgh@z_pimuAZmWpS)q@j)w0zP-|-DYElN{xG76YiQ}2RXD+sq1n8)C;*i_+~{^!h|pg1yll{ zo@|I^~?3 zUlkG9a{H)MP(&wdn)?eN{)*d&)>NWh(@2j=nf;)>il}_aEEpyhMaTO9LU}o{70bH; zFmoGwZ9h+{Two+b3lQGQ4UU8&2-*^3XlT&P?QhS(kTeD?B@pp8c3#oMKgD(`R9LyJ12H^X}v&SWQ|%-{^SM} zGDs7VaZ#K*NPcwkE49{=3aV?xSljs(;rlED&GM>8;0VI^0gQdp3L%YFGy~=7LN8~| zf)Dhec3a6j*-<%nOv@+fK3wGO>8~e>jId`X6TaONz8RbhbA(nLuzAi9AFCe z8;K?)Rc8@ewswGjFx~L8i)iH0(i+wctVmTEOjrwTJ8iA-D!r`^lR!Tvf-luAXVD!Q zleg3v?+-UIAGZO3)R0t|h`2yNg{%;)6Q1|E6l5jIn(+#Ic6^ z=@B8#;H14?3HO(X!S~qkABY&Z!B+gVX&<)>SnQN{$4w%kzV|)iJ;_kPz*B&8@@?pZ^hF>$gTCI%l8A9QO zwCfCq3c0IjHnw!!JNarj{&xfE2|Ne>G9Z%m^K@V~owFAR2&;xs&BFf%)M539Fa|72 zv1gS6iVOHv?mQ@%Rbdr2d^2u(UP3y(?Y)i%XfDDi$pz@k?~L0%wLd1^8d?f-RLDd9h8;wf&8P3Azes!dY(w++g4@K@!HfK93Krc+{A~LhM=iX;W%I`j z_s$TG%z`OhKY+PXEQD7qfUj)+%Nb}rBAlN!S_bOp6I@A^3nJl45Nb9M+sY~Qw%vI^prE5`oQ7{;)8HAG z+OYbu7G%`*DXq%W1=D}{1-B5G&|QMftwcfO9;8uK1v__U`_ z2z3zZ3+nTPy*JLceJ?;>Xd)jig7+)MiJBd_tr{uSvbV_?MpOCNuNNbOtE zc?n-|BS3W3FZd-+@heB^6zI|*I!uCBLjMhbRzU(d*;*jL zSA!1P*ZEa5i?W9QOC!R@<>D;p%=<1zyTz)s*HB(MCYN5>L{y-J`##zM&MRF3!DSAl%AW&gMr zA~F@fg>A=@aw0!yELAr4kt@`iN~&XAzu-Kh4u2;n5p~jKhS|#C%vAJ?NBU1IUuQtoZ$vEUju0?^^phb2(oFaVNrlF7z2(907kO$ zLbdF)&tq*cvwt$u77Z9o$ zfZ^TllU36eEs*lr8pwIT+7qrGuNH9s+O^BLiP+$h>$hps5(TsYtrOw9iuboGAH25( zDmDxQcUeJT@aJyQkOUAy&o6wzEkK%ofEc8*0|eBjVe7d`)lLv_%N|)PRL5#dk*=jw zOq?*dZrObxFpN&oMyJ&Rbo&tvC47Gsy%ZAWO@9)|c?&n#4E#Dv6Otk0^x?7vTN2TC zrsZF76D0&kAeI>@ie=uUVw04)6~oqFaDz1xvJu!b!bw%^#3r$~|z(sV;r{g&9%kyB&yNHqzmZf)Y3g@$7sE2*I5CZz*8v 
zXxUG|;2{_T`uqE|U&K_fr-b&m65KUBY`ox}q5dIYq@VcxIA@CkV?c$<4I728i4Itr z17veYmYf#h9#iz}WD({O{hR@VgYv*U)RTna&nAAyoso7Fq66zeA-bU5D+Op#Id5?~ zV+8*NXSN%O>jBx@f%tt@2fv2X`HV~Sfz#CaXz#%f$k4N%ZROm;l)5AIEQcaqztVuc zi-gnEg^tI4K6T28bvYrJ1XCPC=>s#Vf*oYFxj&^_us;nYZ!?KJ8eb=vD?HPzHuvBY zmzgE_%f;^#5l4+ut)GK4&T4PC_HNfc$VgDG!wVoMq)is#UPqes-l3g*n6&T;e8-wM zkQ)h@uoEByOBM~|Bcttw$k5t2Fu=)R;te;8;5LhT1Q`@Uco<$;i%25)o8guA3;dKt z>7AUz2u3!8p*+z%@&C@VH@EilB>U-1Di&fb1pGRN;T;e!YFzc+-3)JAZ)3~L$SX^3 zfBaV7xGm(=6gp{qgj42+I3hm!wbZQ^USsHb4$+OvFUl~bTd!Jvqcb|RF^PO||`5ymRPhdSOYh|r9=a^%> z;~nqlElU97cG1CW@tp&o^m!6pgt|@5 zBJ-G}thbC4wNfc1?iHoVr(%{>f4EM1lCHvqU_a3QWq`v0ycgQ>%JZwz4L2`m2eJlM z4px*B%cha7I27NE?NOVP_cP;()$PeivT&9ct6<6oF0-ph62;eJxwasSq&lFOkz4N`|b zcXI6<*>OvdY?$fH%N_2pvKr4JVWKGB2?Sp#lY#VIkF*g)w^w&AG`koezw{&kC34aI z58dcP!GvH(7yzIOWK_BO*E}hl;JBFBK*S(sr4PWdy;<^rI z%RiFg#_aZn3H$F>*WTG8>QJAfa8GGR7VNW9e;k=bq>5k8v z6J88K19_*!N3*jNT{^7Kf4Y!mvaYv_^xyFL`_k~YB?93!g(Zn4*P$crRSA&jd(cDv zZGyVCeI800&+4T3fJ2~;W`BP z2Gi$YxUVQC*bgT{7mW!l`fFk!&;ECM;6HhRi_7m&C;v0&51TM!8lq_7VG)%wS;m^f z$cQYP`J|pPh2lakdFflW*>puL3a1&=Y7STE9S)y6b9Gl4bKkD-`Py-OpW72)EU5&9oznv_ru8 z%Y)%K!Qc*D#;miEJ%=^HoTsH4lzGOjz8EX&>gBqkZwg7#h*Q<~nn`N@N1d4fmblwL zNsadZyw%bg&r&EbYBW&8dx?~o<6lPO>>{1h9(is2CJ0T2E(NM|uvG(BLHqnvYttyC zDvc1jxy*G{AvLu!7Pr6Gw#XXH+Y7gLN8Di?s0v+b2bIQF_(2nVGDQ@Fe?)y`@#qTK z`5STw>v&3UW@%ld39+=&O8(TWCN!gW*>Q3u>gnY{>C*qetd;$21Q6Hp-#w^)XKVl1 zRsY#NsN&E-9Um%v}^ z%;-xfPUr43KCH*W+_dkGmgL+p4A8=leHR8{iG_LL@Q#$HD-V&qGuyUhYB^J)(Mzs=&fCH{3Cxb)R^tgp?`M`c*(}a zWd};6R?IG26m|^j011a5h7*VJsF+GS{8U4_;0KU{Bwtw`i12fDfl!Q{qZHO;dBktc ze3qtL*o!Rh)^j#Tc4xU-4kXGsrc|x0fYIo<+vx3gh7d7D_Ct0GiBYp?>@J|mbO|x> zdbZW#6M)O*nMix_#($qwpnt4(aOq2>120qhb5ivPjTuEgJ2cYB2xX_JF|tb-PNHr5 zu3S7_L&xey$8?)&|Eet3Mdxm2iqBCsZ>_F2ILX)GCt3egJ+KzqeIVx##NYn73wt&D z-7af?5}A8&brp63g&Tqv`%$^NK!h_v1_9IbZP=f8QLTT&-~Ye3ROU$g2Xg4b`xV;y zS~5*AZnx0jJRS^INQoRxzfhl=gqlc49Y!9Yb<=oBk0w_+NS>A-dXfFa!VlwPU4G)v zofVnU3S&C8aRKjZm;~txyU(3aUw6r{ymdUi{C?Oji1gd!Cp~FB(1|cUfm(JVxHeU@ zkcVSPWV^xR?1l=|Tt z*tzw8{>tcozwk-ov%+mkR|^zGCIINYZ59g$6!U$q7={vKJGo11i1}hLHEu_#Z+na8 z7FT>grzkN=`in3Aq?q##AozDF<_3g)2|+J!6URN*mz1QO%RW82N3AdTF}lw-SBmYi z4pUVSt6q?@U6RwbwjfSnnEeft?9I%Qr9|3v^^~?%TUK$7in!~pvlVds;Lu`MC z^|?8K8vcL*K?4O6@QxV(II+!vCiy^td=MYP8^lON6eSAo)oo3_jrroRTOzTGi5(2x z6*SP5dyK+scL7&g0@U^E>mB6vQm9-_XWR=u{Hhr6v+j*E1KO|C0D`MHn3_{`U&pguTP z_WKTySKsHV2k1TP%QVfc?Ei;7bn<`pHgWn_S`F*|lr!HAfwmU`4Dz0-2F~Z7e3h`T zXaZJxdnbQ%a4Q))!ZSbHC6ydU%)?noH{eb)Xwq9@@XK95w<~I#c6Ad1ZIe_18M&Ng_bu z5`&rnB=h?dgGk`>PeCvc9JhVX1hkUFAk`OW1uO)){#!5Y$`${u$$bN=*Ow&cRc4^*n*AJqozs?}tn97!m%b?px4xmK0rS;HP| zm&mEKilXWCPXgC0O!UOnjP=`MRrS7~5C&Yw1ABfmvi|@2_kS%<`)6|P56rq>`~JPc z<=3d$mU?xO=6M53 zj6{I(nl{L_{6;OFfq(+;F`ObTN0Ly9p}>e;VDT*tdPEgzTUU1LxrVBUOIvQX-voBA zo)^`J1N7}K+L--wvLP^crn_7`kMSKRz|pTh6?G~PX&04c%z7zFmU)>ce|Q1o2r};b z@kFcCYl{|5J+tvSVn`Pm8J_Mp70$bcvs4Xy>35;e4G#7d6X;~1;TNH5thKqqM+yT!3co`mGaml3K?%XY-q8UR$ zRMVFiL3;o|9`KIV4E}>Vg)Uz80wq}BNK3FCrM+>4{cL(|2564j;OxJ>H}r3AJ9j=B zI6T%L(c8N}H@+D-K#)zTqaYiEqNJcGaX>5Z-*2rC^_#Z`KY!8SH%~Vbm16~=GJxf- z7i?(oNPl~8nZLPduo}V!@}_7ga9k1CbgAcBP(Pr60t244=J+bFlt;p}%J6UyaYN8T!lQ{@Zf$%jEuAL;vas{aQnRIYqy`++SYq zudoFKoxj4CUvcPPgYRE)=>J4uAyM-pSqxRDg3GtD0GYDfW$3lyl*LwB=g6$vnGx^V zG-uw<13}AxLJ<19l23afLQ}Gq!AQ ziHsk-lxjMtE#U%l54ptne!i$9``IX4qUT2gVMZUdA397=^yq>Z^!&@}x<+6V$xX5P zr-wM3Ftars~sUXX9rTIWz=I04~2B9O+ccA zm(#7+m=G733cGhXiuNZ6g~Tr;4YG#+yTN+Yp-O;V?x&U9b4+=tJU*11tdZdyuN3sP zZb6~j@T4iy{+Wn&{hQlKDYMDWIWAl5-!1LWBy(@ED^Dk>TxeSe4B0QTc$z!=Rpt1U z-!cz=k$AUTk!mw37xfZ9ll1bAySuSXhnHy2UTNQE7L8@PyCc3KZOnV09)cLY`fL`4 
z<*Jk?WYwfN$5is?0G1KS(|jG-8c4izxnEeVq(#Ki*t7_YQxWtzpABF~Km9{Z>!XjKi2SFRnb? zLib6{hVT~G&R;9U@-W=~S;lykuzMH|S3mfTZlAytC9I1Wnb&LAqS)fK`}U@?yvFH> z3+!=yL^?{=r)W?!i(eRDBKb{1NrrAd^8>>R@FH95?YoCZc{rOM5;{*G3)YhSC(~5_ zwW%JaX}M()04Hk@c&HVq`6S7WwsqLB0nOZRQjY8k$_k`G#r8Iu4}}FBi$X8TG_t-1 zx+Yr)$~2vTC`1j&SCT|FYW_Y%|78u%|L5`ozn_}_x%*~#PTIwjdmFLD{S}~4PR@Sn zKd~raEpU)6Nx*@I$B{6gex#)yLSx$gsR%%fn5+F4P2@4{)=$A=zKfD9$f}ku$Rp(# zlY~a~ETYpUg)vtqBitZf?0Vbt{TG5R7-|g2EjfTEGe(j{GWkQBw59PAgycwy-mBq- zk48CNN6Uh3&x)}(vn1Ld?l-#DEcFER9Mq$I!+sz>CtD+ql;iJ>P4tu)%5q-lxu<+) za$tr*?n_2&OUueb-%;K@mo4VtVU;K{7}2$WR*!$N^>CWd^}ub+ypNUd9>3~Ti}aJx zI&Q;{6)Yd{ER>w!yY$QBrC16lYfUV^%!&|Ei#;&*LrAShK1e9>wUOq0_Ph7v0(asY z#aNTfB-lPIkF0yZ?*m4&kALB$XB6z_q6X~Ik>j5;NvXPh$9rBSH(X7yv4>l~GZ4L` z-uA8ot=Nct@!ix&ro2)Y$3x)`#9^XW+}$YmaaEUU;>)4RG;a0S6jkxUq!3&7&<6Jq zuMscuT-|rc?n*BlS;nt^I6u8C(?09DJLH~Qp)X1urEv5l{OhD>T9HaiI1}ulP`(~g+ zX<(p~aqjAg_X-6A1jBdrTFfnTUkB%4%X=pj&MhPrCcA$Z(og#1@HtG%Q$atE!j2*mC&O@Rx}9j0`2D7i3Qa zD@h}7)I~^=l<<0Jq4bOSWZM_xAjO0HE=C+F3H|dHg|p!jztldZtj7R6^EAD3G|`qw4NY*@@SCk4pHv z%Vv#vALG#9d~oXM`5gX`WyWhM^?2uHX6JXb)*V!n-8qQt7(#}M4Rqu8Toox(Fy=_} zUeA?G^KKTHtY)S@z}^IMBK-rJ*9luwp%^~fv4szoLoS>xiyngPf-e$nw5^%BRVO>Y z9{vDnJIyo#KZx=~2SX2+G;~uyRl9M5d0dY~{-#7$Z%pUh{;@`YAYA}UjVpzWsKI^*3&XzQ`hCJ&$9ywFgn z1>BSnb4uVqwGyqP$I?H3&E3En{{|3E4J*^a@8=_m{aKawS?M^=iAL6nCS^bhG_9r% zu+{_|7$}?6y3u^5d)dNga5%QEmd#=8*#nYaVfrjMV-l_7!tJgsl}3KuJ298bU6MNv zp6=p^WOED*Z@x{!60<%JqCC1t6522D#|3ZBoyRkUJIUKVy4n^I%_%e1YU|M1s{F&| zjxEE3&$Z-mITC13um}xHIPDo$ezr9FrAd7;*3Cmc;jVx&CXuKtV(B((Cv&|puX1TqtKBqO*}^m`!}26M{Iv2ebDh#bCEr+7e?vlw;@~&1Ru}KE z|9azJ=stEe7`GOdKo7Ep_so_jd`TKwo?R=*XL~r)$IXTrV;1gI-b+21_lzR)QaA5~ zy%vwDiDi)kva~2_O`myPzCx5keM6BxAFkmK_|rUOO@#WOc=u*uRJnhjuy3^G2H7}w zGqw+xnEw3)*CGE}t=u^nNt&q{&Q5ZzLmna-;Vvm!*eki1z7(tYU@XltFW}pz!)Imp z?c~PktfL))JT+L**0dHKHhxfXnWq8C-c#u{DZ;FtRPwHNwz&5sKP3az-?YDuj-Jjj zzA=h!%j_XWh$ckw>-F%g+&kF;Yj|&sO;#WNY;^;++*G3;n$FCt73;UTEl3WcbzVf) z+iT#Rn^7Ty5S*b{Ur(L;@#Dw8vtmw+?&U92JY~T3a!O4===8PB4bbB@2~_g4fq0uW z5HRhd#3>>$eEhbMbH@xdwQH@+VHE{AKUeCX!APnl8&c#+Or8SiFhW5xuGPs*SxRkM z!LIGONPtb#5ewTK{zSu;CT~Ds*oSgm4IMA$&XP@?kQ4Z}7?5`Ei<{7id|9i@d%hHz zMy_PuX_pWg4~3_}@n(>rkP+fnLRTzJ4dL*F^seuAp<4=~D{|E{MJZlaGK8g(>uw!~ zQr#L8tIzbtVh?C_J^@7JB;V|}MU!D~1s*H~#OP^x)L`jCzCq`$FZ#|{d zq){w_t zrZW-`-itH??#@BfwDYd~h<$)3EyBwlJg2>9N$5#fBw#o5&^mxSh5f+U*lW|3*O}*A zZsIjv6Mrq_BHP`zR!k~8p{9kY`@0x0j8Bjq-?{pnimFIiS!RkJEjA_}E3lDqF_VTz zeMHjtiwZSNR^{Rb8{td{yRRdbc#EiFESF0*?Q-%FyPosM5DZ0iX_?nTOM+E}r&0)> z5#xB%u3+sw_}GHTafo8!p$-8l!KR%2dSYsBa?_DB9b3$5sxcCR!8L`CXvh2&0O`Du zd>^4+MY=CD@B%;8e1=s^SJ!3wx%nyedIgV!K-DvZ^AC%H8j~fno$$u z;lpE1v6HKj<@T7|5ihUnZXdpP(RV7hB>kA-f8zU|NdQFB5b#VysV|kxhQ^7GeVcL+ zyOM~gl95lvCh9B74M`oxD%vHt*oq8Cg!RoXL@X`MhyMxxE1cK%oh!fLaU0sK7C>ufHA||%tB!Tn~c4b!vqH7{t~rQ zGxO{BlakFH3cfjC_NCAlIu>?9NBls!jzjjmwLuKb8O3q{(p(ylc6Zce$NqKcg;wsK zvtPFha@dN>^$n6*_B*Re?+qHl-<@hqhO<$-4lG=14MGI0o~Na|QataQ!(kgm5)7MbAasQ^ zt&*gg!wM+gWLJb5e(d3s`k*|2Pm|u|=}$L`q~jMX;&SfNAJw^@w#Vz+;8z&_9BNqV zva1$~7(iHxb)AATwrg4=ROG7l%c9K_MtEAU-V#;40NE*}*ZTtG8<$1kIa`q0+xF<>kouSCS6q^C8>uRLEB4H(@OB^T<(rc<~V>Bxs-reqb zy6&*8z40aXIBz)ntGT#nvK(=i`e~WsMOG&9*YB2rrECFiF88G2vC?AqtzS18kc|U`|#YQs>B;%9K4BaG0k)g zWt)XOz_kc@itO+WsGpBVT>Rjg*kmuGO*GVASvhWb-WjdG*laH~gC@zOSLHW4;FIfT zcU&R+Z}X^p{Qj)3*i#dECR0@)K1w>wfNylntGRca6?PLo1e=7{Kg+;dgdz@1b}%nr zk6q0-*GO&TZQD+m>x#Y45zF*=(?P?}Hd@w!zR{gCt*l%fz9A~2drq_I{0Q(-+LWsW zk-%|i?f9M_T`DofS)|lS^6L2Ql(|T!`375SIVo!1kup8kAI#l1$Huj}wA>0_fh{6VEu9x1`5oXq_w zbI2qcZoBO+p1^467)Q*I{&q#2g;{li6CQ^-qRrs~Es~>fp>c=1>fa_C;r~JxDZ6w$ zNXgMfyNhef#O2wk 
z@Ja57DPuKFzSm<~i7SOcYy-$o))P>kV;!Q?t`|RS)B#6i!Z};YaFg8g!vNxj{yM(VXOvvACv; z!AZzk^LpI7S{B;%>C;fSH499e(Ubb8dI20JYwu~ZX zkeqO+m~!X37iJCPHwnboBFg0k-()dg4zh}`CW#zsxzyim>N*W4iTE|YFq>qmGvh$O z1`1Z>3N)dbkgSe7#>+)%s)o88rXv0rx+#_gCI*HsniinZI94A4if~#m;IMa`hUWCo zAZ4l9)x0Fm$=N7;y)8lI8^Sw(ze#u^ku^tiRcT0sx^ULP_QU(_XAT0BRpwYJFjcmV z(Y0nSx$(P+tONs^%vx)>zLya{NjO%qMBn?2dHuOO@Rhy5(`S%qc~p zP5*p$4fnl~m^8K{+ox#EWn_QEDUudm#kRXzhH|#5gdji@Nnab2%a_|^QqFh39ep;g zQJ4X7m-3nMu2+lr(&~uAL}75H3;xuk-=mV%{kL|o(TbRTw=viHSEYN5Q{j>W_iwC? z#uN60aEaKR#)!LA~; zFdj_^L2~QTN6^062g*6-5|bNv5S^pzGcfZpc2ZfxJ47Kc_h1IDT8iZS);|;{RD|C$ zgW(_l7%UeUKJdBsiTJl`f!=$zjla5?D?bjsRTzu91|^BBM8h0`77^_jDCyI*czYY^ zi#eW{HGJ!>`SGwB0~hgicHg+^x9omw@qUO*VJPA!>~71{AcsbRxGUFYK3$3+R*gn! z?wQrNFA_JDDc#0DeJS|ddjPtKi!Z;#-#D5umFhrSe$kGS$o2o4ujF4_@}tx0#(TWS zsqBPJgOYBz_H9^Ma&77%L?MUmc--*F`A^WDBN7GTK&~d_DFW2R*h%ROZW zBgKZWgyCUT>GP#;L9uT7{iK8Z{cv$q%Z}$i6jc2LIv5}a$GK_R^T|6*^<5D?Lqp?r z#9lj_*U5KZsY;@P9zND)<`a3`aAb#-s6t}OLokt0**5+m{`{8x9$|f@107W|eZp5v z#v8_rL>`|8ojBccG-bJc7K+~-FRm^Z@{wxhkIlLo?ICZvGGM)vP6Z-D(LucbCZs^W z&H5*iyLJ z(YkQE#Y6wEwpk=Vuv!%fp41RoplfP82!_L-G~Xh21zO@=CdbK)b^xVFE(Gq+9|Zaf z`$@XgHY_Q58Y$k4T0056t_QTA0&qbuxAFg?ni=%#e#>5DZ+`=#78c);6;&+gYMo#o zJ!yF`{(T&8WYfV_`N4!)Hiw?-sQ1{iercIPcQLDF4+pDPiR$Vn_gV%X(gC=L`|1#r zDYcNsUHdBN!?->{KjD)?N(IVq#>N~d1dM{KTLB6tRCiy=p0MZ_@Y z2{44i3JFqm!{;A4T$ibeNqHLO_TZrl;{du+FO2t;e&fFWx;q&R%b4MhL|xK#0=Y)} zXljG?K1>$rf%NMX2$!`@^Wn{p8M1HQ)W|;Xyx7Wk%7JMX3I%WQkfvuI{1%K0A+8y3 z@^ZfS+k$e&bROsV<3 z;?x0&GAlW8AP<;~o zP+I_%!w>pLn=zBZ=a#nAJwCEub1%r>*k5o%&Hl6;OOA->K2e4kXhs3n4LbtCEgkZA zZh(GtZ;sM5Vv#nm>v3Dj`tVuJi9@~&!Yj^V$RQ!{;vMf^x^*<^#9b&Ks`hj>lfU!1 z#({T7d!`((GKv`I7!@lkTn^)ZY`{{pp|miLx9(tBBNGGhzFCYG9K+xI;JvU~s^+K^5+X!Y{LfDBQr?^<%{3ro#Y5{!p|zT;IYwEZr5l##xBW1Y(3`VpBcuvW zZA9xpu~A^M6_Z_&w3GPRSk64ppqxc@1p7XRFuRUh`;3-t{pzZ0h^64*pm}ru1J#Gi z0Q?kzw>9+nyup|>Go`>O!uv(C$AHh_s8bUAdO}ICh_|-%JhTLOgLh?E>r%4np`=UL zgZWDPS5`fg-uTZc1kcYZ7|*mzTyd5*o%bxfMOVCxf1a!)A zp$YdYMJaCQ`vg|pddYtOqNZn*%(RE!r_R+0O+Q#6>|nLfJnVqBfPY}EwXNi^c5IgJ z+lCLq)JX&0Go9+(ZXC^@ZVZrhylw-ldBxhmEw>6SNJbRHvekNY&G`(u8>^Hk_8IVYVc7GACY^6d_3*rCRalf0bu7BD@Yi!WcqSIa#PotW}}j&2C4MHTUwRyZWM6XVD@ zo9QNP$HSCbR3 zqaALE?X`FIk$%_694iv@K)~5$u?S!tA$14^LPW!OxHer09u=n4b$HCimD5=Bh|mkA zsfKX2eNIu9*>%9J!qB45Q5L|LCs8e#_+%Qk1WM)Ei;P96l%zD{ z#>18it^-K~LCFEN>xt*ePloASWqP8+D?C_>#aniTRV$KS;5@_S{Wa>2Y&{bDSc!Fg z`gNnV(3-Awzxkw?d&b0+)}31n8%%ZBHR)S_q1&8;(1NARKO$5(YjI)Pq@&prEF zfAAKyj=+oh*D%XNlLeB>fiOe%EW-u}oiZ6>rl?)-W9P0W4iD^EGX zz6IBOC~HgSBlVO0XkU)l3ULo=^O_MfOiP|Lx@K2nl=Ho2Byp$HJI^Oh`8u0ZCgN)7 zpzk&bg*6}pB04UFbu~Lg5&w!1aTSkH3=NoT_<9>6pTzIW9(5LZf!<#KG_%|Fa8`~} z>*LYd(zG#HiD{G<&IF2kK)U|=`$)0H;M%b9n=jo(!}T2%K?^74Ba@NLnxV}A1$vKN zGI&jEu4e0F9BeK0AGG;)&+e*{6vr#>wTTC2l8-*co5_x*(Y?!n3IGXeCdx5vDH?!% z+_wR|$wj%EWHW#~3U%(Yw~AV%a5Sf@(+_4)Zx*JZJkUejF8O$CFnun*j$U9iw4AvuW++v}bRwE}9awYT_6ipT>V5i)(47dohI_ zRoWUu8o6NUL2t})lK5_uX0AolehL8SII9H~h0Loe4{7zq*NGli^go8ua*ou5VX>h9kgHGZaX3|ykO`ke@aU7Y0HiM zs$*$__1YkL#fT)Gn!-LzmnFSVm&YS3aL?vKn)qE~ZO*6nNWF@;^^jN0uBd6z^u4|a zKMTAE9oUU7lIwUxxn;Mj&n7gE(5-cm z?t+%vJ4cYhsEH=9JU>~CBLKwSfOwfCGbwloB>B6dG&OQY4P~X>mF)aXqgIqv$=UAo z*DDy%Z(sm&EEQiy;k^w@@mSD2##zv{Ii>2U;iG&PiwANxi-WImX2wit>gN;NUt z-@QgckxwD+PzzDFA#9YJq*dG^jKLqYuQ^}t9HD(R-Z9)k`hud^&{IBU7a?2jrmqjq zEzyktn=tHB#zA4ki|w@34L~^ei^d4CR^@$pxZbyayv)00l_5p7$hBIN5rn&Q8=iODU$xsV%7{@d6;|&RjZHC!*wt;*)yk3 zY}K<$J^WNnwsmJ7FUBY~N-^Jja>7ZE<*+fL!C7g?9lux;U{<$E?;luqgv*2*{qkFV z)^OBG+t;_aPCcQM{WjY1AW3v9<6!jv0UDR1v9RK|CQAyJXI>%<@CNlTPXD?Q9TSm@ zWf-$=*_CCn^kFlVc9&yk)Uz2G;?n!*cjonw*HQJgFjvrLMTu$*5apm7RU$+mDp2cj zfknZNXr96q3Q*{=cud$mW+&sZnK_N&v5ZvhN{?3T#Gv`1dype1LQl& 
zQ-zE_#yJpt`6P=5WmnWISMGQ#dz&H=gvT4x{WF%%M-!e4-f z3LOJbT}Y)~0SQ^xt7p?;W73oLskrpSyHO+I_{(Rqukcw;$LwLA7@QxYL;BFbmyG$x zgJFUzh)`HAwR`>*^H^!qen`PRTbQ8P{ZW$cG1 z1&3qX3rPQ_@}kS|b+D(HehLMZt7i6J8bTZ=<>&Y#XBX`Q?@L!|<$RBnEfAcMufw}L z#aX?+s^=(-dG6hMn9%hYH4(Fej8h!WsxymLp9|Z(us>SRhC_;>U%Q(r=?50i^nHGj{Bm6cA@PpX#m?oMcwG_$dS}g?VN=nO|Chi)`0b0*N3g20Xtc?=!10nsfzBvamLXStumE96eTi~|4yV@L1pQ>sb$d;#=JtTTI<<CZTLDDK|;vQbp;YBbSQKZtbay0*Bs4MJJ0iH1w+gcIx6B-h`^^qM^>9(J$#(N3M~LV#2OZ~4Y#CSP$}?IrrKTyeEtlaJGa1~lf(i4!_x zNK~c4ZPa{4tpxR$^;8LHgyy~(6FyasJyiFlvMD(r@T|@xt}q&K)g%B9FQF)Qo}?FVlB+2H$H8 zQX$JBzebUD{e>1UCz~8G&CMDON?QA_BF8Czq3ws6s&G0)IJ9*FqyLZFW8}}NPTuLw zm3Sjj@~#&-r=)(j*5O3=!;yyx-4(-QKecEH=$C&}7=#@~xj?s##v`%Iz^6K2WbxMG zIjsk_mVjkPjg--z-vCXnzWBFOcao`r=s6Lh5b$#2?xA+ba@6C1d2dk*efuYo*1w6h z{`&lTqtE}{GZN@(0Kz86(`7)!r#DWE3fO{@{?H=?zZwOkL`*cnI(6+4mddfY0>c}^ zww19Q02mknOt{y+S9Y-bQJ=xe!a+edkXC8>yeK^E3;8kV)>91A{@ZK*SN()Z0)a<$ zu+hSzsh9s7@qMX3`T_y@T=XDc!hQ*4!y92lwK3HEf!H7*=ll-rmb79(pz%(Q*sV5U-Lp(;VvvD9-TyzBT9<9r{J#PAhBHdgaF{Xu$Q_{Op zq#0z@#Y}4+}K79c!mDkH$xYP(A0fbfIAcqq}A4VXnYmK9zg2y15JsG;WRY^ z$|=9)H-Di!vr03&g{9oZ+Gl4j_(OO0Wr23^!aGFZF6tzr@Q&Co|Lm`p*hy#lYk&Iv z+Eveh43HC?!Y8*5Odl>4>n^v$gIWvJ`wQLFQ&hbQau~h=5 z2E6d{#Z671(|ZY&NE`|Zlm|uu5JDB;v&b``9QOAJ1Sm`T11@8+*v}PY#sD;w`+)E= zBh>y2>W4xhjV*Z_^8}DHe+Rq3evfY??riEoF9D{=S0xyQo@wwSbcZb$v>+6tIZw;| z0d+zD9?$_G(*Nm}S^tOL^3?YnvMh2)z4N(W)^~sB&f-z02?PKXQDO8D$SNXouQoW$B71`UFULoGirg}jZ zdOp3ek|*zEnw~V2SBVQQNrxzbM7`r`V507ofgiJ>d^vdn^5N zNGeVCYP2T8@zj~1f_CyT8cOa<04H~@87*s@MmGtIV~gwD1HXiXV@Qrd_Ti*89;5f zH?R+(C1rIew6$~8CS;F)A=$U9{z9|YyH@H8KTo3Ly~HPzx1Lm=Rlo;hl1H6!T|>qRzva0BgKJez^Aptt(@|k*UGufwh+p-oG!M6Xiu6Tz&nZ+0_7-G+ zS+wTfe3a)c=KTt&=00Lq?e3Y%%CZpbHq<1Px47*GdqUwc7k?3 zYEom$A!FVvDDy&v&}Ct+Mm+>vat&m+%{DrPCOmPHS6mO3{ScSuIZJQ`n zCr`SnF2An0Vz)ZumUP z+us|VSkelfF2WgzKCp{1e@zfAQg7wYcJfhUgPehQ!KHWEKNLH>SuOkkMF~ZyuMNnG z^-<|>Q05*s+xh09nH4lmeFc5`?vVaeTCmRH6)acvv0Iou)rS|;W&839_;v8h$r!Qp zL=HYiqhJovn|c%1=nMreFuCX<9u=Z$FKV;=u-}ItsD|73-IGBM6)acEWmz?Py=&xs z^7f`)a(m2^H#IH_mmL}G#+O>j9u#G2uH3z#Q+4QfF80Db37zF%Hy0htD%d^tJ6$oj z=frl!lwZ=ykN%S*Lj|D=Xl_8l#}8GWm_V^Fjt_m8AQ(q(1VV~Njfrn%4?*Nb9L=6R zSpP8i%3>~J88DSjFi{g>i<_{Ge5oZA8e0qrh26wPlp#+NN;3w^5$BJE42&|BS3L{4 z%6lm8b0_n3Fr8R*up!+>v{Wo9h@jlt3_l>P>FM`+!Tvzc%JPBjGQ!F5?}32`ZjLV; z7M8U-*jcw=`BFeA(RV0&iI(U$9*oF@%L!jwFiSg`81i9K?_k zh$|$k2}}%4gXA?C*uHw;J4xl73EX@@X@4d+hyJ2lyMUOZi{n#j!O25<@mGR7_np>` z_`lF_`T}~rBRHvf)q=>c>Gb`SOB1oCLFQUQoXRiX z@cP1s@iSK@#d9OaT^Mt(P2jz@$EF}7E3T^}Q#CZ@vTkh&OVW`pgLqcV-u7D#6_rnF z{X;H);M~3|ai6`6yKCjy=Ec*{g2V??t3tQnY?}PJ{<#f<<3r{966rrzR;)QDx5eCZ zkIOxE??*<;9C9DRxtFHpzX!_g0&3)O?N-i7qn4!j4tI~fm$f&dSN5bgYYsAz15vG@ zR1hQ+MX@^^Uzu_cGNd;+bgVXGucp()m>Jo*J|H=jXgvP0o$2{Z8v)iTnH!_&p>{FN z@s3VGK{d$8gxI25qbLfXH#CO!LUwWC9i2qsLFy~4TVXi-r)3zXWjiSvWgW% zydI&xB^o||ezrn^?o3M?vxw-SQ=w>WyfcLhi!+NvSY!c7odB~KZ9cmNk2p)SsUQI> zo6(OB^o~ZNEc>S0wfp(|=Jo|^e|S~tKun>&tCS;4{HPWhsC0VmS{5KwgaU1n-n`af z>$d7B3y4abF{qbpsc*NK6z@Fv^?NH_{}|Os;r+AquIk*8VUoxQ(wVu%Q4fw6#1LN4-YMrUGz`5 zWGC6b(emnExNd0mCDELcefLGghl903qPOS3ge>!{!$hK>lO#Qy-I=WhYqCsjf_c#c z(^Srym>%)8PS&&&_4;{P3_9B#& z%e;toYD&xxZ_b*uWO?%aG!*^eYM1+R~Of_t%I{25To(lGZ2eU{hk~ zy4+arcEa*zT%Y>}=7Xu_AFV(R+!A7G0f8M?0$~t zdGF^i~5BdN#V)&P%QET2R3166dZz^aB>ryz^_ui828utnA4)$~6 zg|*we9lm54qqez+y2plhn${I~LxoOTX2_|U^Lk`Vf1T0FcB5J>!j?-w^&E&p%d3hOmlU zwEPy*Anq>V)5|E82-(j`+=fmmlVS*)8$E>2i#@uhs>T#{9?RdXb-(+tI5S{ajVCtq zg2O%PTtO(d6HY$0*@lMbKJh+GWKL2I@7GXxIUJ$tEILX$BE!t;*KL{N81TBcqRSKl zm=nGRp_>~+6zfn%QUz$!SUVryU%nsl>L@+;)rEJnw$;(sJ{jB=wvqLAy?w1VF5m_9 zRuQ{7&aEqrnJP>eZ8=87JiHU3`NHr#?K#P;>1!b`k842#u&zWi7@ft0qPE`HLvpR} 
zl0E!0z&dg{W%;d$2&y87H%*MvfO&-S$cM8<06z^FVFylUCbX7^vcs`n4kN0#$I7Nnf zmK;N2MSd<+tR+7w>sc(|KK}%><7Lj$Jdx|+a}IjIK?D_ZIxSek@Vrk?ye%tvx13mZ#Y8en@MKG1Je{S1P(eC0ssca& z5yqobEp9xzQw=F*{jMHT@agAmwgazo;HNa0lcS{>@iMZXsOpppKvV<46a-Jy=CP@hX0%$^=}v{3gm>=`PY2kBbuc3G?NIo=(-=Jd6?t+8p2*xfaaQ`i zHs5LJRA;on#Ah`pRBn9$-?t_ms2zmn|Gar~VWLNe5qN=z@GU1Drag6!5;oFj90p;D z&rM~MG!}AC>@p>fV;z{OBdode&D9zN7e(@W{(lQN1gn>M5x|YkYBv2bid8j(oN)b*3%p5 zJkjTLW1qLUkb$qm#74Fx_Vl0$58Cr~W3-)?-U_KFz2*BH9#iRXP*%JGSDs!H!*K+X z?DnJaZlvqB|V~px#KZ<;5y#%9j4brbN5K;8j{T_OOw>0Ap zl(bM!eilSgCBK9};BqorJaPQ6O84}=@f8DZ*5EaXuO>{!cMg3G@y38OO|${klN?Ws zU$9P&PYs&hlxtyK& zpvbaI&s*4QTIxC_m~!8Q~itl9dLl??u=w^S6-im7p|>D zO6d_?XG^pjJspfQc+%Ujo-ab(w1RIO>MxS(7^9j|M3IwprAidf8m#6*j@YR)PjWk^ zbW4KVTq*>rIw!`@c#dE?H?`dfuNmx!3^yb`CC-In+B5CD_tP09 zTIwFdbGD=*{MWeOoLN)jQZn(a`>!-o6MwukwauH-Sbl%_l11M#XTHdx8vMq=%fmD!fi>FHpdTw=GUw*hu`si<{NW zt3I0h!ywXG+KffiCsJ?HFxut>`;J)_+Z`-ac(9IKw$}4=Iy<3nIuecEiMMIn_S2g^ zvS4mNu|a+V!b(_|w0Ab4QBGbhC$ai+LSh-qHA7vWB_B622ywB*^ecYmiM<8UO?!?# zD*@(Lqvxz(Q++cLyRnJeWFgyGLp%opHkM5UWJT(}6mzlrucUV9tmEgRe?x?tC5a&E z$S0;qa4Mk{7k`T=ZwNN^mA>L6Xiatuff$Lum*6mF4l;;npj<=Fa3DXS_yLbpezo0y zU2RrAo6FJ8(ZTj*VcfzA`QDp;3;H>lu0KYDFd9T=DsN0%E<$(*6W_u>$Oiba@}H_U zD*b4Vp3u{H4(&I>1BW?#hr46rKLsCZYn#&NfHsz4GVh>#W%CuL^{`-h@%jgLYs5K>f6T2oJl!c=iPjIjoKFpY2h z>_W9^K4~l^&jYXh;AU2DdP{HAqjaObR`x)I&3`CGGfHM3OIXex*BCNYZfL!7HZz=M zZ?F?fu<)JiX}|Y<*cU0|aqXK3a{E$!1GauC9gaF^&hLZtVU~(P=m2lX9eH{k$aHEg zK$$$uPuHPw)6;RIKgAK)qh$#>2foELmr(;Ep1KL&-}eHp?}ixxwE*u(QEp#s6%!d> zNgTV?GFs~`&)Ss#-G!bEmdqk+;=$ZG>RrFM} zEAM_*&T1bwvSIqlJ)>?zA{~?YIp&zO+;lXpbpFjZ#5ICXOc;*Uc!zcm*FNibe)#x< z=;h*TGV-?Xt>u`dO0SJ$6y(zVSV$MX1p9ba+QCucWd)TMnb!OLpW7vaGQFRe7LZU)Q3Fg?%>`!V(*nj5jEK&piXr) z;uQHeM72|lfmngYu{Pbcm8`aq;AeT@qMj}DCS&c~Ji*Uv1^zXcex!rY-;QR|hF64w zp5)`YibN#VgZ)(?trD8goA^;|sV-G{E9Eu$_OZ-8&DJa0#dH>B3RkGxJnH@iU7ZR` zpfbM>wOg6M=j2_F(;#fi`8#x)`j}Xma2h+rv9TW-wqg7w_8yyv5~M;2rus=X#EAsp z+OqxF)=U&sD48135R?^St9}|7d+2@IWc(r(C5UnzWQ{NzV=3v(bI4qK{w( zOu_xof@l((rjGfl`9>fi&zvm1iRWKLCByle`#0IYkF@7ZG3Tw!8b#udmTC4z^EiGg z7M?PYI{uUc+N=$b_)LPdr_CO3rhnwQby9en-e(_AMtuD?dHlA9#fmwL^lF2Kh+BE2 z=s$$K)}Ii(0L~6O&>|iRv-q;1&G|A^|7zZhkw^LWI@~F|i<1T6Fc%x4t%V@rBEq>H zPvwgF!`zc1vf_+@YcimR$#2h`>3?}d;v4C9eFH^jMmiuMc2?bznQ`}ln|~|w58eMh z_`<&hq%@FHqCqE$PZMM~QYq7B~Jgs`tNS2}f&D#n+3D@FVN{L=Su(80cQ^Sx&$RPx+K(Tf^A9?H{0Z@?q0$-NdkTWunGD1^D&L#VMspx>6V*5y z`^s)@-(bgXlvF={_3gV)@f!lJt{Nw>*PB5a`b2jc5C-%E$o&IY4%GZhbk!JkqX~yT z@|Emn)tt33UbnkDr5h8X)u)o5K?-WTa9g749YvE3d5h8A0xArqqf5i6z@{17getJb?)9@U^u=a-*(TWu zlQUh6aYl&dQi%YD#nOvAfC5@E(h5kfdN^O6DHD`)v3rP6>KhL>|K4q*qfI7CGF+TV zOg9*gU-nOz{e-$TmyBl9RgX9|8ian(a@3cRe0$I)uia6xrpZxTwKq;sOpmv^io?pL z8y9f#X);4PH1c^fD{c^9GVdFMfKmAg@s1<4KO&59UDaypIaXgjJi5KIV(R|j?eH6r zWC*Mk0ClvU0_sh$pS_MaOxDNQ)*LTen^kG4_s)eb*t=KUh^;JWxtrOYY9zQd870M> zehxx8lt|c|Yy$HAHz`+tSnrA*9eWvbW#l>cocn(B`Pr>0CI1zrI9HcnF1HzvaUJ1Z zLU3c}%i#>V!sOI-qxzs0!{ zTy1;@cB-_a{$A<*+Eaz$TuG57o}3STPqWHfN8CuizQy>;Rnq2wpV!y2e^X4ajPO`A zg95I65S)vgd#!$`=Cl&Ge@Nur>8ld3R}%9~f)eQviTenF&|EsJIDz~8sB2wS4H5P5 znUO1ho=+~YFAu}68SfGng7r*`L)3iE4-1-yav8lgcfB80I>2D4OB`R|Wu%qX3mXZ-m` zp|;F!ZJcjnT>Z@Kq(f)5UZC%>Oikw-ERF9L9)U&)(MQ92dJt3g3)qcs&)Qy5&k>V2naA@KpW02^gVhgtilX#_8ZY+Utq=gL&o;(~|RSaQyw= z%p|1KC&x7O=WWI1cd#}3uBawPnh-+eW1g2vq*}121++*JW|Xiwc@>qVopn#-czhJi z#LF~4ubhK0OIQyy7C!TOgKG1UQ+Vrh^FvibVV}Fa_Y#+poc98`??1*0LB6*@=+eIy zYLLeW)9rK^QL1f_&1g}r^26ChYucG%M9N zVirbD@QNg)b@MG5_`6}c=UleKW}f>M^`tcK^}}W%d_abwme;ABK5@M|@n_$PaFea! 
z_tofg-xo*E%qLdr%5L$nk{_ z0)`5qXX6RTfn1=0y#IPHFs|#K6f`cEnx(*x`YN^!blg5bA{0P0=1O*6NHUMw$EM~Q z)n;`~^pwT=KgMi;O8-hIl`3`_WldB8kCCy=cIj)@7pz} zlV-Xgi1w~6AT44jjnvOb9#G@H5A=t~xlr{92(ctOx z$`i}$sj{n7155x|fVKIB@vrsFl zZD{zybVKH84GvMs_e_=!YU+N_$7>;?l@GK)E*eIC0V9F>EN1jKWVq%Hs&;T=4>vf8 z!Ew`vQPs43y|N1fQppjgeO^qh@pb;pL`nLm`#(9b#V+aiwxW~q4Y=_iLN~7v+%{rW zbI|^h?}%az9>p^?4tx(@Ot3vzJ*Ga{sgPX+`8*gVZ!YmqlvK$B0UZJ51+NBNX5kEdm$T-}0d21?h` zQwF@`jkhK1sNc=BK{9IRt=1UA7-?X`h>QG>dxB|ZQrwGFuqEm_&#^y((YF!Ca!Okrkw)Ikv z@5$c-F!#kd4vQZXE3$4x_Mv!n73jk-G3{$a?yEHu+AzW?xmax6T~WKK!zVwWm-V$4 zOyBL@cHZVadP4g4VY4uhzTGq9@4`^{j)7+)suTs+FZaG#SsOPsC4k2-=!snEd}hJJ z#&ksk!Vtw2hWZEorzJ+_A85YwK#&8or>rAI@8Act{WI26mShTr71ef8R}ch%5IG=V zTENV_A)u0!K{N}v9R`@Bt z(1%Y8Hu|@!kGb}GFPkvrm4(iAs?AmUQhDsVfnE|_Dim>jl**6$pnWm{m&D3jAos$h z-uj>$OP_dX0&|$v!2?x7wUpcRiN<>TampF;%E&@URL~&VnrPo9->``0JSSJ)j5#Z1 ze5d%9U5MM%+nGC}@7CrS*L|%LS5yS7wnkkuVZI) z$tx|F$o$0J9^k`XM#a4%Y~s+ZADqmO{;;k-OCD^0<`zvbl{Fr|NIpF9E-L?#>I7%SO*n{n`vTpW!KsD74mo;o9cod+&^==gic2Fkes z>$aH$CSM2FH}hu#LzCM|0rjeOxq8E#x6s?sja(W_rLUgNbvKNz9|m?hE~Lr#eo# zQ`;xSk86HQuf^UOIPW_NH0q%Q^x`b(=DvcCRJZL9>8Z5r!!Z&Hd-FE6zE zFuj{UaITm62U93N9uF$X{zNx=E6BxsH+5WX9ZW^?(w;{!KtZo0)q(B=m91&={2 zh)>n~!n(a;XdkdkpLGHH;VTz{cEp_#{NF!RH>ugQWMF5@V61RA-^prFfP+Su6ot0$ z+6~bsNeMemTDGUB2bv3mz77T?`&E@`TJ-7|Nv&a`(1hzyfZs6hN?~2tjN5?<*m~LZ zph;oBOtozA6MfMp)8Uf+@A^7hZ<(zq6X%P}Nw<0s2T?izY3>yV)A&HK?@SniC>~tXKM}ux#5dVZy$hbGvRU zNzHE%o6!q9ha3cCS3SBgcxbgRfd+wG(4Hy9%tK6$3?(iuLR(>sUq5_C6B{FCC`J!w zI8M)c6|a;XjWr0<9pbz*Z~1?W#Yu$7)523TcCv}Eo1)Uhsgqncyt;ftcxmRlgS4C9 zh|;?&Md@sav^mZQS7J3OfhmnT_Sr6`*vKNUgclcgJ&$Dl~6D7@l1 zSWpCLV$q}r3j$`4#jR9o`PLgbAtRER>Jc`c^X+(ftrG#pTjS24by+M=r(#5AaY<(`SmK~gy_aF8DlDBV43Lx7S zQw7P#=jV=mAI~9une@m35SPru?X;Kk-7h_+#|pMO(THj9fYI}lc4 zed>{t?+Q;NF4o^-&eyiTpUicRZRn#+^ktSX^z{CuH{f3;!Xz;bkN-!of6Nt6WS_7qi>u-Rcwmb)FpAGh=MZoZ>) zN7hEG{F#a_>gSTzzg7^(MiK^pxyPF%MM+2v?cPFW#R){_m)+&hQJp z+xoIMZDK=xB+pZf^@X^TLA9+IY~0XhAWYrX7#|y<4?Ik}qraN$P2E&3_HtBwxz4#& zwYn^xeT*~N8noFmY&yJyiNSJhJeM1?4{dk9($LRQN4Wnq1lRZ&KL66rx9h8L2l`-( zNGKKr0UYvKeW?=`)v2EcO|uh(H*ar?9CLj86%zN=Iv^{fAJrzb*pM{8*;(@;<90Jb z;F44ZbKh`ep=hU{hLlb$B-CwVf=Jv!P4z5dJ89MnMyV}wMTL4}MBDae_|cykZ{|WTT9B;R zTim%$I_evt8#P@o?ezX|`7i;Jdvmo?od|9xvc5eL+6I4Ee*{IEW1o79oeb$(@)t}J zhp1#$C`AuXsO)eny>P^hZR_Wi zPMK&&%kvG#lOk#uKnx-E#tRVesoaQ;DfGjQ3YATc*{){}YgQrtHGz5o#9~w7H;}%* z<@*N@9Mg4Xb~#*=BjqN%y+ao4VdyBAv`bP*d31#!%RVavLd zh5=-}bt~);z?3Q>M6nx}fIS>Ks|xJMIWcvLG4dmV9qyUJGHp)m&KCbR`I916S^7S1 zf8tm)mszQ7Rzb3$2_#zslLja%^MxoF)=g1$bAw21-!jvct8o&X5bnPOdmL-UWp8pE zspq~3Qx2R6q-%sqfkdhs#bXUy!~Z~AG>FLG+M-aeX7?>F_h9C?A8YM=xmx<(O!(mo z=_yZZylb+Z$7g`mx}_`V=DMBO$+5*o&m%4pEGH&wFWf(G;K!DE{J2uEwy&U(ptz(lT+wn1H1MEb zVo_CnR3rOo*HgrXPslNq$XU< zlzj1Zr>?kU_r<)eK_T+~2nnR6qjN)dan_xpS^yaKK)Czffhn8yvyoZ-JONcxbv@b# z+TNBpzBcI)=U~u)?B|ofla3?|;`m$df+TYaCgR9~sFqbU?HB6-w(3jbIVul~mYYn_ zs`;L;e6w_TCV1!5rqS)zv(S+pOf2>QQkq=aZe`W)<{_W4kZq=NE~&;aQ@cpLzk0db z_)g)k6#$w2rn?HZL{ozPqEz$_>!vW7+i>)A=__~t$raq%XSW!(YdO=D976{CgnALC zlOKeQz;}2;HpLk}@hG7RlfxFFFmgL+UuwoF$Mtz?>|F!DwPdp2^Fx9+8}xdPiH-^#GTXd<=-s7dYwu+=5OO>4eGXa6EeU@C{nLio?c9SH7 zp=WgbDW;pE?x5N&D2AX~p-=J!{wn#RGku667J;tWY}}4$-mXV(xYfxDlVXV%kss#> zSHt1SNW~FV6FO|YMbzFL@2ECVVru)~7Nc+>!;K+v4))+rFI4wWNyV(_kAgZ6TM4C9Ugx$#LBfZ+T;jEMq!Iz!X(~YPN4*d|rrFe_ z7tgmEkoKf<>-{;_`FS<&HYXKj zp2xaTKE7XIsfz7?cGU=j?4H#m_Y*NV^G@hPx^n%vwk)B2mv5GfP=`6FXv}lws8*St z^sx)N+ukSLy62z8AE`i&%yGe8U{g{(LlNHb_8^t_E!a7H0kq!ByZ6z%*w7`tq|Qo;z@;yquSR99XQy7U$>?#OMWB{l{6?_xODWC z2}g{C@qzjrN_JFt7)=b*hnm9T)Y8BP%LzJs{D!bZ90tP*$4q*@i40qu+^KeBN8dS; zq#GvYGv-|-hF<$3?Y4lj`kP8MBlADirEjVFZ@?c{(%!pr|cQN=%fnSu`LDy*}1@MYSmc 
zqQye9#`Iyo^3$*8|5@!bAha*-zel?cblI?EiSid#u$tALes_Ksj+|80j~W)?#~7sw zqp_BPz{xV$2hI2L<){jaR&ieA zd&%zgkSWKU&Fpm6nB=YgiJueSSMi)gC+Ph-psu#~YD#BH{>kaC#)}Jl_L*vWf>JN{ z>{PxZclC!`T!Ecxh!9M|!8e)|5>Mluvup{DI8V-jwAskat0x*#B2#Z=G@{C2`rG{Y z7|OLdY6LAwmmyT&FwacCt3|U=qt-?|!*Qi>Aht5~e2>iS2AAx>$wT(catuFV1S5r* z0C&{kW+S4#Ri4)eukQLyHf6Kp0p8wAL2eSsxnZ9u2~sCiv_;3Zp&K1QbyKW`R*gJK zbt5le-txy0mkF&6?+W*&zxG{Kw>TJZvnX1+`1}lHQqYwR?$p8&akiRlK&WmhDLKrnpB~kwN3dc3nXtFVX99&HZu@ zzi)=FwND~2D zqjpe9Dc?aU-S`+m50eF-f8?Y-bQd}`y$HJ>Ay}rm+wtM*?5f33NruY|(cbYvt%Pyn zLNfp9CO?O4!Ou*{ho}yz2o3R(N DRR_1WZ^~3lyEnam3)&B(BIxT z7gyXAz1bz?6XWdRD`(;GR?^DGkK%em@cRAq2N_#WArS+>hQxd+A{yaY3HpYOVdAx; zH`OwmC77>g{#3d3BATI1qZfXWc7|+A$ZCf%MTomCV3@wE-DA(XlNhaK(;8+huq}!D zApSI9!$fBQ>4*A9HNsJby+4-f9@+ObS*i=o3ZfVoTZh1?3hP%n1RA!nKbTe zzQcaf>dveF3k{kAG$;Y6WJ7TZ9g+M!Las~B=f+faMx7fTZ`j~mWzww|aX|e68E56HQUen+@vE<_7B6bAR2l#04+P8mDF!Ol` zUR~I?Pb4w9;UMLO0o}<(#eNZ?gp6f2;bKf~89FyxONoa%%CHV2T&4*E709`1b&!mL zzS^5}9r2ZjH?$R&5PlGP%ON9c+3v(M3U-}8*#$b9w--)7Qb~xQiqJB&MScBQ^km^lrW~xOsZUC3z!bAU~ScvM08~ zFiii5uG;NFiPA^Umf}T;E8R!AZ8*=mBSllM4BW9(X32d0Njz1<^z|#rCrk2QZ{{pu z=ifJ;UZQi6P2Y$ct5EFmhU2aLSXM0W}@&j1y+=Na1eF}Uw)B`gn_bVy1 zRupD^;Wmd^eFS%GU(QuwrprndM$G99vsBsfJ5&P#wr#(z0OSDN=Dw3ctY%I6!psjS zdC&VtE4E^oz|+WEAHT=HVM-Ln2Y7q+;^pS`#+Jh0`!Rnxd9vXVZ~;E5 zBWLu+NDVEs!1A z)nQDwxv5^&J}?($IV~W#I-$BhfB;Ln%>;X^z?AfcpqBMERPP!1$Muf$h$_3wIF->G zktOr9oiZ9Sf`Tm$OI*+D6_Uuxq;6VOvtY?IS>1_N5Tw8ZXfn>VX-;P&r`pbNMu~Y` zyQ`$BUH#(W2SbLHC=iU3x^sVo@~M{;?YY`=DlT6HnFh5_s6gwWoWl0=Mn9awnTPyjl@f-6EVYPxzV5 zQ#RC_lOZdf%zONho@ffh!+I43O74Ivm_Zm>M8qLELuI?{m^=E9hFnOx+CaQrk$I># z`2Bmcmj))MJd^YRkZlD z@TvL0bFgDnuq{QqQROH2CUCG$sL&wgoL63!Q4p?2}Tyv)QejNI9YJUMXCYMhTlg5zAw^B3+1%18&Wx>OTTxEKfBB-HI2 ziBcOKL_11*wQ;+Nwr2SyyIIE~4t4z9M@u4dL!6&k@8_Z7;PWNGhljmwTkxGiEjT?} zFgv$ogcD^yP11sGCgbqHtvxawILdqsNWD@%-%)c4PeCrR_SPY*ya8 z1xLfhHZfk}i}bKB-!F76+0Aq`u6?`C@k}(+;UtSpW6BHpDcUW9`9>`vZDIETf5%z% z4h*}|i^sZyN)A6N9Zc>v1!rkzp7Jm`813*hCN-NN)q~x;l-q&WUo8dIk|%F|osEKn zV!a!Ny^R7EY}?HUgLwqhzPvF0jWEUE^S;Hgy1Y(L6E2zLgNDXq)>UPLC$J8(TNrvE zhK~AfgBiQ{7qB_?J(w%6$VA;^^e=z1Sj?VCiL~|>mI8dOPKZUJYqS8B68F$VTyvW8 zxHuzM!=Wz#)%2^hV^f&^agMcF+-m`IG<0IY5dET!rPW#J=hMwIjKU?s={3n={fZJx zD=lEXgMeLGe;C}iwAlT*)S6}V!cVMkY1=E~2{RTGo)9z9FY<5S=8K1ZO}CELWY(a- z>7xD=MbwZIs7%pc{prxe09w!Za#K}k1#q9uSTqNciJaAOH8jL7lhr%igpgD0L$XkK zT6sCvdudYWBe(d$8$2d_zajbQ@bjp@g8)q6b~k=ej?xF{s)X?*9F}>;gDx?aIO9>5 zQ@=kj(!h1_eDLK~S2E;R8|L}n;RhgO(7MtZw8GIM&b7Zs%7hMHO6%J1Kn!@4+IS|c z*#024f3rGqvG>!bf!u@aXBhg`Yr&JbjCDgn)?rm?iOuHBaWw<7ole3WPBxxueEuEM zhe8)KPs%X3xMZ|k^%M3A;`RDRhUF#*eTuPva05I5e$fr3$;3q|q2?cpNi-7`HcSY= zHeY7q!WF}ARrOI4615s~BuOm!9MB8A#LKbs;fU#NItyy?VY9=wW2kJ~$^v_#?bylM zswdvh>#f#>`n6M(*^eBRSIf#EL8)Bv^h;T7bHDb=K&i&1b_>v;XSd*SFv8WeWF*^| z@J96-vu#ziZJVuBrtI<)wN=5R2TV_%TY?zzFzh?yU|@ByYg;dPp^vV$FcE&u#`|-| zZlvaO6`!$*?ECi>I)=l4HKtco&<$u237ZLxLy_$CVG z!1p2QA#xyq5-7gG5fKipzagDjyxT_@j$Aun)PLh+^kK+0Iu*;JiXoq@6k2UJpOREr z*RRb7`%j`2zcqjU<4TvhR5imHPU`_hgZk5yu^9#$FD<)8!4ocqbsJ;~-LnB^Y{qh#z#9)WylB=J9hDiudWQaCp6E|yc15u zuf+Ab)Xj7hIqa1y=tnbjMaFx19Zh+wHvXl@*O%omE>4o=VbKkl`>hZ*i1YzF>F599 z5C6Mg{Quf-4FuTfIfKj}0AS5846l0&$C!5bs~>cu>;*ip;Lo4CgCgGVWHhyS`=6d# zfL^X8PHa#Jt2kf0GUet+fSt#oqkUUoWZ!lYqd5T+uli^jU<}ni_x8RG-W%?99J{iHQ7fi9G8HFjOyy@hOKuvibWvccz`W5w=vY8?!8xf$gq}E5wYD2h0n`mW zJyE`mxpE*Es}5C@OOG+z{9YNI`SI=+}=0@Bh?ZIh)7Z$@UM;I zEheKC$hjc@)yx(Bt$tY<>L>F)X2iumpY>m*h}cj9p>`cTx}4)w_1E35!@rUQch=vJ zM%f8EyU(yi>#mE9?R})2K()z6xDxb9316Ch7t=V%fe6B8{DZ9@b^8(9&6}ER$J|nl z4-{R+%kzF@e-j<1y6ke%Z7tEdMYbK7sod)% z5m5C>3G763BTpgpUQ;g-S&aqx3>wsuB;rh7gkcjs>~m!Nn1g5h7)Z3gn$@rZkbJlJ z4Vm1;S@!fmWm<+}wJerd59tELOyPZ@#Ua)ZF 
zVr6tYAF|{`iiOE;d*Z}DF=;?fAXdd(*nQ-O*omPXTDS>X%sZ~sGxv;1YJRcbb^3`~ z_af*iTJfFoTIl2=&Mo$i&)f3NpUvVZXD{EN;Pj~5bvcLCwUZCL(PR-!Ri16R2S)Bn z)fq?K_Kk8_4PotKBIy?vU@xR<>#E@Mhi^t$qe`2%_yA~%w0xn-f@ zm&Py~3YfbTuQ&aN+-eJLVzedW1YYSVDegh_Y)L36RT+52ZUDbHQVvie%b8y0@{|0% zA(Ul}Mcvt$FV6gUg`ZP%6Td(Oy0qAQ%^_(ja672BoQ5i#-TXjM$7c{syHFf(U*88P zuDb#%(}Rb|nkuJ)XnMbrG}sF>JTqsMwF53z=%pICPL&4xDf!86P7jKZL(J)4D3_Z` z^=N33K*zmYY3&DDKb5z);uZzhGg6-ySCdf-1#k*{V~V}Nmxh+hvI`6DZPCZho$3)J z?TE!Np+Wh8?FX1JfNb&Q?mB8?&n0i9zs_Jwl)iUaW3awxM*z{XMMYuAzB(!(P+kTN zw)YgM!cMcnE9?E6RDNRVJSMRgMLuO6+ahPz@e&)=taJ21UR~LK*VEe{wO>Kp5=yvk zCY3py`W(8ZyxX;!8leSlm3w5V<46&GFjcdH+W$!GyveTM-k!dH@~_?=3yVoAobVY< zLzBFjFW1utb4<#$n_Ljb=QbZLCR6S~^`ZxXD-(AYu%W=cc0U`0B}G)3c`Q$#*kk_< zi6|9uoBnc-4fgNDhLSHM20;xN_-?iMRUe<`7Qj}Uz#eS2E(PgIOpEeN+sh6Du8qrn z;O4KSGu|0HoL<7(ClM-#o^HUTbdWy|(kNkzXm)UyreGcucfZ4{7gN2kO3TdYnUUQ{ z%ss1ELg#cfl=t}{DugG;YNhc3=P4K%2ya-W#(-$1#MDMX*SjXQ&0%U+3Sx7*z-X-BrW?`OW3 z`sd@_Mb}|Bei3#_@5uhTv2Q`1lkoGyLY+ND=T1$4TER+`T3^Zitr7;MXStHjHw%hj zIVwj{q-7fVZi2SBSLD3;L%YlI2eoSLX?8;?DNT?{+2(5dzU#>xE>_Oz4BusU>^p$y ziPl0nOs^`*v?k9?5I$5<_${LC_!WsPjRmxzE_sJH;QxB&tOCf@q*iyKBJEi@T?07+ zT$9!Cz$fu^v} zJ7aL2)3O?kx-L0qG#tQy0#~4w(uDd z@u-$-sml28I)_-{@pQovF!kvJML)*D+THr7TAF(&X-SK=_0%8jtM~m3lG0D4V;7ot z;0u~4I2~B*LlgA{4UJnAppK7f)22#AQb59>qaDq$JkqR*`C(npUq2q;89YME6Pyun zoINFa&VS_TmX1L%ybd2tHK#q+JsAq`nArE8dnj312Y1cC62~Cm^{~Kp*2walrHSyv zL4l9rxBeg55%ADD~U{@X7JoeM+4C#dR$@l+{= zRggeH%}(!kQsO?g7q5^($F`qTHVzVxAfj4nj>KHfB@ZB*t-@=!5XE3{b7;Z}cq zFqeJWaM-WaC0QBy-1c8=Ln*3oc7S`|uRw>Cc}!V-KoxzKms@U#(~5srkZtRdc5E-e zdrF^84a&@1m|DG~!)Qhri>e&Hb9!g`%qJYHO{cqHs!8i^Im@KeENQjiT2=u?YikFX z=83tr_C)EIxBq9T03c4S!G)W>n5jJBresI4+WOY4P}N#n6--szwxe%)SfS6L`t!70 zgG`}cKyU)|s~VVy2C5ODa&Yh?P-EclMw7VW!AsWTPP%`N zJ5i*g4FKR|hByf}DJ9gB0kM?!0A$C9EZ2Zb8Te+MyR#G3+aCV{D~<;tdUN&kzcx_E zDAQgbm4PwTq4EJ-Am}ay2qd5(lkNcMOS4%UnLn`Yb1Q~+yJtF0J>;9_bgFXtu=nJV zTApbgr9>*&GULICV=#dF1m@!X_rE({FJAiHhxUxL{eQ|Dsfrr$Axz98Xcl}5G!-?LeAL4jT3{2Q6@|9|aYaUU4l z|I~`3T6D?ZKpuO7dh9nuFbMSkJ&E?9xREUL&1uh8!knCF)m>={{SlEHd!;WmsoSGi z*TFCW3X>JwNSI(Bh={v{G|@0OALH1Ng=^QF#7kW%)Pq^kQ5!@zyx~ zFs{C4OwNws_F8A|)d{YpbpK@bg7xn#|8*|lzseE(t6yh>{qtnge*ZrOFMYTO{}~i{ zEA(;Np1_rz+@;88pr=?)_)PbpUii0dGPKp1{LyPvQ!k%qZ%s{KIU{ z;^SE48$vh^K`kilhEW7n(`X9!eg%AeHNKqF6Z`b?bpmzhn}`%u#&F!@21btVr_;nH9Pk#4*eQr<+ z4$TOrhiYk05NWPv^N~??-(+%hU_sCD2haDLfKsV24K`R%3Hc7SvFf-Qt1exRbTfnr zI+fUTUme;L-(@7-0*#rx$Y!9ho0A1Pf%*TXb=xPzmN)7McH`}XUb4%*2`zwG1A55VJq6&s zPJ|E@=$A(pp|s?@9(wKvGlYN4v&$vCr%sy>_>*6cR&pgvx^8Jx=)CGQ1Jo7*Ozz^p z`WvJ7yb1ZrXb;5~Hizi^W6QC<+neO;bH8~1I9~M6X@B+iheL_Fe>oTSe{ky7H`Kx^ zNi0KWmlTg)_NL8c)ob23`%|};A`A9ZkQU84O$Gp4e;7^UoAuHh*szwag4t1r+E?`Y zl|?@SHrJE(giSgQzVF|Nc{=c_JNps;uUEescs<5fvW-G8-y;>lME#6*_-iMEcN)9;STE;&*`o7^Tb8Pf8WQs7f{rK+_v8k zil7RLrf`wAglYSul&}?yUQgK-!Jf&-&DYV zimtPxVM)b=lsbH2NNl>#A!~ z9_(ql;ti8TwF9J1S?OPv%JsiKu$&!2$kNJFenZgc=-&|W?ac~nx^J5ZKUHRMKN0n3 zDY}BC_#b~e;(1VB&ZyN@FeP*w5PR5zeh9)F_}ZzzWCLi~u)jVCsj>K9mp-||NT&mNOkITB$AaHvz`q~eFUR%KpHULdkfnI_iB2Lrhpj$3=1jhIFu3tfB?Uc zxfA2J;J{>5#DD*4KHsyt2Xf;MJvIQA$nX!(IC&o$1+UvFQviJ*_nKPFrGn3N2F>Wm zlAP%*Mt|lJk(<6Sx9?wz`G`1x>V4%51jK-+mSTI0$m{pj^6cfoyl?*>dtV+8b^q=? 
zsI-W(@1u~)mLe@OZK9-!(o~kIBq3=xYRp%J>>(5}6=jRDOPH|>$-dJVOR~>c#xOJA zo{#(d`knjkzMu2^J?A{nIp=wu{-`nEn(uslKG)}3-q&@#uf48B)tJ*UDpCi!om;@B zdY$`@Fhz4B!A>Ur;ay76>q6wjHD?ZoBe8(gngiuhbu2}&!vEGMf`5E~za5CbZeP$E zSj>)VfLc1Bnc#r_{pU2MA1WtmA@Ql0pY(C^Lv{QTDa%Zu$QfZ!J>36+IDBAWmt-Mr#{=e@n_eP7VdU@TL|3kwvR zYg1ZM9-Br}ID1eIKo}?VS>W%#6Pc+pRN&RC;=d!V6ME(MWz67%b;pr~-35}(>cPi@ zk8yKJ6~yi^82uelNnwJN`rDNIvo}^9`+#0-l|N#yBCsR07Yfr;`JKY~N%@eQN1sBG z47*|I-=R|ey^1Qu0VO~vN$9nL>|J9In{29Alu2Jz>GA;c<_z?hGKg&fdD=EC7tvC? z#=sy4`RTn)hTzfNKvshrf>}R}gJcJhWx;_x0JbBC6$w{%6S<0+r1xWeV<4$TOuPuh zU>$HM`Ck5ES^t5CpK2utEdj-0*{m`^bhQb%?r)~7(=5dH^kjC>vwUk}kx_f&K*G!m zm>N^;Z$b$i@!!20Yq&~EUwnfc;+&(S{u7rrqwrK8#5$7Ank08KW2PT7T`jbpoVfjd z5tE9MV;I8*Y-n*2%jFqc-QjTZ?T{Gh)3>q#)s_^=n)4_nLLcF0TRDU3^oLDIG+nOW zc{ChSVf`G-ujxxnJVZNSAbL9D_o|+772xpS5nqAj=m7`%nj>)i|J9C>!NKsT14k1H zqM0hVqrK+toU7Gl^Z13<3HTc8+GNo&vB<1@lm*3l3hflsg87b62AUj%8ozrpE3Mk~ zFN-=u`A=L_auRsRK!uCkaR$Y&bNn1(3oDA{Asd;4TX;TCI!+F}0BWG>LKCvXS9H7-QjL(49_1%um zs=zYZQ^6=xK)JBRfx2j@ewv_k7#1h2!C3%vNDm#EwsOlG?^f0tpUxXb$PV9_OC|np zjkA~7NIp_-en((|T)tGkr!)WbMoWUpF=OoW1%ir{bO8O%{WDJ^*KLMo&F?>jdPWz^8CuBcZLdZ&Ke7*k!MbY9(jQg(6n-)|y478HpnNmJ zfGgKDPx>Owk!Jzhtzyscfh$dbelY%0geS4SEpW!TonfH;VqWFz!yh4zrAvk8f&|bQ zVC`U>7U`b-V$l zb41pBBz&QTyor!6geAvw>T%h7BfXr!`@qKsb!>P6lJbA&+OvkoB# zDlr1FPAy?0)B_^B6FYMjRQu2iVrozy2z^rgK^fgGY(H0OdiPZ40^u#lS7SHg-G7J< z#)Z8G$U2NufvJ?))|-~fE>Jp!Lmc*iCfMH*CK$1)L26|ZEU^&RUPJkUnQKMn0JZhN z=Ud4SVPAU1`lIqxIK=OWKxm>CeC*5|+vLyJMuw){V^m35R}+ZoRDilt+dF!buj zozB4fw-1OwZkgI;elg9KhmVgJ-^Q=Qlb0p*8=)L&6s7{Cte= zFJKvsGYjqPtWCfRn?%?BCK*f8@za^M3cN`b1-@S%cz9q+8|3pdP^BTAk6^uh=pLb; zoe*F?wIEzxC(1o)18yYjnROv;i!NWZPi3w6_xJ=O_zt z-x2eYAlQ*!I6Lgur`$T7&wdb_2GY)K`@0}SrY&%L8x&H0P;7nkI_f} ze2uXK+eb7E`Hm?37gKM#xPM7Z!0`BH3P2zLOOjbDWj5&iY|RX+%LQyee?9Q3^5BqK zbENNjlo!zlRA51a*!N%w0Nm0bTp&F_{@J20?nJUoQHqt2JOS)G=0GLbncrZ0*ZB(K zf)~_{<%UvT)xbDd?K|R=D%=I)rEsnwrV_Yjm2iRLtfddPdvUT9sO*)C7r+7ee}N|c zNAJl89I9YQ7-&nP5a(?BHGNTz3LWw-B^NyuK zMdZAzA7%FrxJ6`~oIv8htHn|gJpOnMr*F}G5PT5}s@rpM;b&Mmf&$`Npc zYU1=$bQc;f42FYpUkbUf)-W`g0R?oZ+{$7g*u7AjH1Q_6 zX2N;Jc8hDrjCu$shli}=*JE{Xdzp{8FF}a}sVl4mDL~%m_yHYUxO*Vi!b;P!DfL2* zM&8?8B^eCCYOwS81j0nU8HddS{`N7CQs(Pw~3c=X^jx!oWfSPh%J ze5z3AkXyl(z!v<+1O>jT^9Z(lscYIh^|$YH9ryYwVnRE6Ez8S!PiBU$;tvSg)&CCj z1a8;YYbQt&>agoL=CtUk@m}ayQ|#BzQ`JG(t^1G2T=uK!y}xZE`7mG|@njZq-+^E8 z-h2tBOTr^{gI7P9G3BfN$}#ZD&=oi8<(wdKU&Y|7zR%-$f+kY)&mx2xqZZK2Puvu| z1$!_4Buk1$Sv%6DJ`o(0<9mmD%*1R}e1v4%gIy($;uFO(ZmWC|*e($yh-WkHx$$6p ziwg5y5tpd1_r?CprZ&xS;>iY%;oAcOytb$9Zi`&cbYDVDR|xYE|C7WnBUrc3B4 z{1H&0^*|%mk^7Oa?_jElxpouJ@6tmTK@nUjTZm;$*2Vd+vtP9O;EOR4+^+IWUe~F1 z>rMosfW}|f;Xo0YtMAlgo_^0rsKkmM9S9a3X-m=>D_dTaT>oH;x$)k+-byljPYODX zFYJAS=z57f(1_*3@lu)Hthv4dG_s3xkRDkkV{%?I&)!-h>`0JX!*kG+z^xlQWA8o3kAZ0c#Ed-J0yR3LEf*f+Huh)N9z2XVpg2^vgr}?W4NH` z!s+gUtr7%srrsdIpV1vrA zEWM)`Azx>30S89P?=dXXT;@ntYo8=kzy6kMM(wRxan?{eVuK+=tCDw4$$hsD;8)P} zL+1EQB*hyK5D_fTGU9TAeDz%=>67`*&8CIqK&i&wfQ%0>_KYj7tUT>52)Jx#rTtir zG-$$#Vbe#QSWm0>KxG@``n%oiH*#;lJG~}_tsRVi8mN4H>tsm7Td|WV)skK2Wow)nL%drTg%mTKGf`8ac+?R(m(4*GG*|bu~ls(bb)YKYip;&dr^*mBW-ecR3G$ zaavu@2Ur`}2aR8|eDzLAM89NJ8G3#^seTyu4ud)OIy7n#6~+Yv8fA zO(8Urmo@6|xk}-moI4j*6;%<3J%4u1(PMhXvQ5IaYtrS?jc%YXj;anLJAwzM%Ikxc zQpom1A)HUe8eFVIYeZSun{oBm(m2@fwtT)&mA_bIE;W@4)1v{i>>g8ZuR_>YrAh(Y9Y7 zaCz(J?Qd>>6~sv*=7K$u(i}ww0L^n@P5OzrO$o2i+7V-VxlQ}K2d-^X-RQ|0kZr$< zjQ}9Rac3V$E(dxC& z-MMcmj`eb#_ytVW9WLlXPTNkQWq%R-h5*1PjC z4aDfk;55idvoo$#g1YXmvpnupKkOA(tTE@bD*N&HP*Cq7si8oc-^PQRd*5gD*MZNt>Fv6r(6ax-ulp| zK&xPMKUR43-r`QLw>Ym!jj6O~hr=FCLgk@1cmqO>5W@ROXSq@EUc4nYouG}ybsx_E zK$jSwi;Kw3$PKORoa7kp(O+d3p?eu!R9L>>Qq945>MnwP#*alBAWa+qd|8xiNBjyd 
[... remainder of preceding GIT binary patch (base85-encoded image data) omitted ...]

diff --git a/egs/speech_llm/SPEECH2SPEECH/assets/framework.png b/egs/speech_llm/SPEECH2SPEECH/assets/framework.png
new file mode 100644
index 0000000000000000000000000000000000000000..6cd941a0bf594b24b237c0643d0f0b6ef3014713
GIT binary patch
literal 103365

[... base85-encoded binary patch data for assets/framework.png omitted ...]
zkB$&?)9yi#th~R@g@I>|x@zFmKYcLkV9qyyLq+_v-w^)Dx5TgBn-iGpmUCN~zxNa) ze~-XpuAt))w1096A+`(8Ff@8Rz|a|etbC-o#J(^b;LTk#NN_522i~t7nxB$h1CZ;; z!5F)*7%?#JPo|qN#9`fQyVBaLRl`J5-=neFZx*z3+8OUJ>rp8NRR#;=1U**|&TQZ4 zmS}w3*3ttnxVjKIIUo+;h@Kxg*hS8ok-_fk+S6y-{N(oZH3rqdbN^pc)>PzR)O#da z<-Ex38+V7><{l1j8Wk$fzVo8SXvf~Sp`b0A*19iX<8-cX&yfqTOJ!dc<=Sb)_b}#q zgByU>8=A9|yKuIs4`ldfyL%sVesflbCyUdv3gSl2=>2Y2>kPSaBAv5Xa5LY?P+j@M zc!waVOb}BBbu3KE+K$Fl%L7z8e<+cqBE~42hGauYf}SK`TJ)SvAl6PuTd~UAeS&~} z(*iD)u({*2>*uC@Q@2ig^`m22$~d&$PQveiLJe-X;g_vxaYR#0XJGFr&iROfpiZ$3ai^i4;Yi!@)&pzE9QEYS zBieY?v@Ea@C;@1CxZ^vskvPjFzy2fm4#Q8#1 zLUV?y(?aHc(#_AaGXNYacr>8%hk%#}2#bjM8a)xwE&t4Eay{fFAeS^ea&`^y_X@4@ zj0%6ZkYISwz{Tau;>0vuujB59aZTC3(U6ncK6g&R3U_cl)FP**D)AYk7a z(>Y4?O&pGxys>BCWkGJN0B+<58FAI!ZY-JpR;0R`U#Q}GX&dKVg8C>Bw!OAN-b-DkhX^E4s@~H&{BUb28pLRN^UV&pl))eVo=TT?;6}3utnoS{v8Wp}r zAwURgwNMo0#XVtOH~$Jih%Ga~tL59v>bfXh=5F zm3J~Vv=NqC+I|F+*lbo!TBp?_X+P@+e>bR8KlQdWKpwtaBABnV0y09IN% zHm?}4**^)E0cK?jltcap;42Z6;>Lb@zrZ!aE==T?+Ko3fU-beSbXtu|yeiI2w+ij= zKU18EP_?#qGN77f{}ouLdMA&ldAKVNp}TkTGo2N^K2)iqd=71g`d>W#ic@V=$aB$9 z`{BHDsEniLQU(=TU8r@wta8+O7sDMuKa!Jf&aKb1# zTFjyxra@?*$tR_Ep0Oo=sYPsPOf1Mifc>V6trrx0!2%Cp7vKtbdzGUfJUz0UTg@aT z)FMm#W_k=2vH!kmASx%}eQ34Lf%we#t9z6qYPzo&{)1wmJ3~erj_CUHAbFPiNp^hk z;r`p|IrG_#yOM2Jjzl9oIh&%m#NknbgZu#; zqS(WYLF!iRk0vXuw2B|jqW2NC{GPz}NmT6J@NYXiQi#S{Ap4nW_?L&+F8-|#;feEJ zTTWp2!+!QuLGLI9==Nn2XTp#_*ZSAdR-)X6G8V3t^UVO#qNMWWwOpXQ0p9z5H1VRt zb--wEo4b}tvj9ohj8Xnr4APq7 zmv980`uIWo?B1WGjpq^s(IHVpEK&?CShh!gv!p=e_m6k~N{3pc_?1bl+RVxddc{kV zO)XJRkEQpMjf(o}IlodzdtnFAX@PSptPDdST1x!2Pv{DP?~6H0VFZCL0!ym-LsY}q zVhbckttglC3>?tAW^@i(NI?AmyohKipO|yT542BEp43dV8eaG`%1W<%tYkXvF0j20 z5JR|8p=JL;cNkzA6t>vyyU$JFlxgF~wP-z7xT{L%vsM_sFIaguZSElOS7kEdcg@$-<7T9~ zSeZe(I9#1Dc~6eEBRgToN6%X>nU8C*-ttCA%_Cp~qNM?4wmBihq=Qt}>z#UhkEi<(LYTFmaYB8s@a0z`5fUN(@`e1{8 z&9VbvOrjxC6d%prK~86I!~AI*H*J6l;w-g>r{2Htgb6%UI?rn#POL>p`S;P7MGIdw z{AJUkYp6wrDzNL(xESc#CBX2mVqR-3NFSo)h6;9WF@FA`9BAV|0~09Dp6JYTxcf4c zvLIF~$Zpiyr71g9)`Yd@)`-}&Bzn{{%#Ke@yy<(W`}cAq(L;z`xxJcR5b(0AhR&E(20(%Uh-H)B-ut~vq{z=e=xgB_x1TA9 zAEBQ07e&Magk~*?rtD#vGg4ShoiA8NYkrd6m0sm%eIi|zq(vZ8Z}>(lr#K(*uK?Kq zceC0lR=DMz$EKgh{kLpjK{vbi^4ilisfIS0aJ)QT<8O!he>8FSU)@wr#c2=|dYAQf z8UVhu%l-2wl@zx2S6^1+w4L-SIHZ8(7HG;sd7*3_aJEwd`{7SCypmF*qvF1cL-Wzg z7|U^W#Lj#kM8@Dc6(%dvmcsTUBhR`9Ci!{9iA&{CftK{*M}iu%}?vM;_6`t`to#wQTa2H)EE? z2C`XBa>JSwZ!M2pKJ#~rb&UU44Fl%?TS3EhG@$k)9+vUZbWSob+@t=kia2oS|EO@V zRA=$g5uih;Az-oC@YlZTqW#g<4mR|Kge~K55wEJcEW2Lm;q4juo6WZ5h$aXv-YP@* zTj7AB(XYlqWkhKf&>s}6FfFKQ5`6FhIP7?9Qed6}qNk!Np<4|QJE*U&i_sk8wBWV& zIR&AA{lur& z%iW&{YqM7Iu_PMgLpIiWMm1n{)CmyYIuD6yKj5S7kReXgUAA;`^PIX{ zzmOYL+*B-#@#U7*Z#g}zD|XFy|5%Su?gUP$X%d&%? 
z+Qx_~=Rz1OouRcgiUvpLGqR$xVCIm;{Loc?JzzDjgmz-> zu|1mODej^I7#;%9;Ikx*eVN&GQ1)cSauj)Jzcw9s;HkXxkz0cmJSC-@-2H8bV?HDm zM^*E#!|rUz-@DCB|A5`@5epjz z6QKkB7dY%_3!(ecT{7b~n)(M6_fU_wQLq_yKcnPh?2Y?+=5YyqcjGdaj_d*(x7GQ} z9qtscgHDBWMaxZSlXtHDUuu#YeYk;;-`vYzDzm1!rl_&dTuz>6 zzVg-WZ+$%UXvz9>1txiLz!P~o!AckkCwmoeK?Lz7X(W~r>&Vg&rB;{S%A#1c20$=q z=EdDS6&Z=RyXmQ(g!zb{{D_Gj@3|IgOVQ2_X`RID}h*Y(M#KEZ&oSCd*% zmt^siXd!&+0_e#kTHDZekmceDM7IS7DSlNOvZP+N7u(0K-ak%Se{Azfr7l&wH)QT87ii#LA;YXMb6@f#J1rGq@j-)=xUMGpd8m$gk(hUf0;m z;YBVf+E36y6k!AFX&t!U!i*iwAF4XEXA!PEWp`tb#htz}Gr%>tfFsM;M< zI2tw%)k?mo5j{4i!gs?%k(V^3YDza{d9tK96lS_R3KQbPNQ@x%$Q{Rm^n5lU*%0@B z)PKs{@sJ>XSLj8VG@Jb``J@yCBn->y5ns*mD;e+U5e@~-D&|5t!78Vke&NR$D5#3JUA+6| zX#f=nTRw_Uf@0H%sl}G1)%7u#4;M#8aT?R{z`-}9@OJ?G?W=rWK55woyxGEQ|jG8th);SGyC_aT7O^j70@HJ_;?pM3J`T2>TQ{k60TYm_p*5_p#yN4Q))9M>)59Qu|s~altjI+RnWD0Sd2kR{(HODtttTBTm0TT?h}L1_%7USwGS^&J7;wR1-p&!hfmWKFW17Z9 zhVmc{SUfZfJ4AKXFVA>57sO%kv!|JcvJ2#XHiMQm{S&#ib!HPI_uI!E3o7$+!c14L z8k)AkdK93VF~1e|r*Y)nRJ`3t6Py1V^60n8r$VLA2%#X$1oeu34?48d82Oib_DC2EQF3P^)oZeixEo<^Gd_HE3(CDfCSs5W;n!N=CDKLTbiW zp{{E9*_p_ux8rvv_sCmV!<0HO7fZeyNd>;?%DTZF%d`AJK0peqB`rc(xIPIftM+2* z;*Vd&kF|z*)5)lXwb4q5EEg>+b)9ZJ#$On#z&1G)B9WOv>=IWxkIL55Y6_FZ7RH4t z)0SQRC%|?mO&2Z?W)o-PPE*c|l!tJiILl?>z6X716PR+4SQ?=0Z3G@iJ?9!#j&AsI zgT|+rhQeiE)I_WE4SJH$q6>`7e~fYVZ**o5P~g~C6dbTa#f|S@N(#my3k#~+52I=s zOj3uKZaeY*UP7@$PpD9>e%?#W&qB_bqW21^7zdm80!GU;Hf_gGo(3&=HskmJw!JcGBIc zrrrf zJiy-qg+`2caz9Rs5KN1aW7?gHmE?*1Zb#1g7tR~67mKBvPfUg@dgz|yt!%j=g``L+ zbyOuKC5MB?0V1Wv9;VUv`b{4H*|W})3fK% z&OMnnzAo;0h2mw7K~_7E1HcH2^z=Y39g!vu-HQNBsVTKQelZSG!a9>g%tIYQ!2`tf zHwv760~lg5LhFH%$qwWlsQS74*0(8XrvC-tHo;-&ZV+&~X*Q@Nzf%;+$|SIv-4zq9 zAdSDO94mwFwQc@~(mTJ7j?7{O5$o>!y{>;TtD@)L^GKh_iJ6G~bHTxK?zb`;s*BY) zdFF)j*DM_hSPTlD*3|`IJy4&onprbKqR8)n6y8^61$Krc%;9h?5@#AaaIuy0Cv z+|JPclc@Ld-BibHJww~6yXnrR`c*C4R)12$(S!d4>5Fku->z1BkiiUXe)a2SZ|#7& z;v_9`(pLVOt8zZ_U6bPh!z(mmty$4qx>nvq%BMt7@1oJ{Blw?JqL*gt)~zFWyVK1h z%c3k|WjA~b^BElh>?-A*o$5+T1Lv2Tq8ay>I}FX=_!(yhRBJ3{6pBd!kgD3HwL4+j zDJw50L~A+kPp!Us^EyqBy#l0FK-6zThUL`65>M7(=6$=&VJlGv)U(;o?jGT6fgb*W z$q9UJ2a>3kiVVQ(o~pUBG5u^VU-rWZ8ZoS>n(^K)v7_g%B%r!@MRc0);yGevaej+0 zJ#iomso;5VO)%x1&g^XJ$Ms{*UmKW+oj)|47P3F52;Kc-^Z|rKWBWNIy4wM^wTr)j zHZ+_CE&W!zujKf4B+xsuv`Y)n$3?N-qxiWvxB5@x;Q33BO-VR0Ge#>&1XqgeVf9tE%>DPh|c|YHUe_eBCI$5+lVM1^^t&&Vo)?Vd` zeR7E58fea1)Ii*Ufm@?^!z(8@iAEg;$;3RO9%?}&p--`^?ozbVS$x`;RDTQl+?Mb&D)*MO}rM#?OpGt%-ZPp1U+5G!TZw2cOuXMnlNjj<3Z&DHNA=0Zy&4|M7$Ar2-}50;STAZAP@=zBHWzTb>F!8Dzdq`bXDFXoA>rh@G1|9a*$Dc_#Kf z6uIaMCMa#|klv=j%gTZ2&^5?fk$qXDec>jKH{=N<*!krbE)xXjvuGwUqiybIz|`2S z?|xXtTi&^5whQcwuV`&VxSX|m#6Jc8X!0k{3Vnp*+SO-X7OaS^=%XNUPQ}52fhpwl z!3t1>^Uem8@;Cx;|BCtI(Xn{>&z_r&?glmbq>Wg}vr}xH;PZ6tmATaq{!P`amDiC@ z_$e_z%bG0VJVrjVJvE>V8wy|3NOU-vayJ4+($;2{Y#$-+bbwTh#?6 zD$7VA#XWJUrcsPc*8qCaza6drkc2P|Mc}+;_Q;PGLL2Jw)HB=7v8_Wfc8@Nd38Eaq zbhf$AOz&}=(Z@Ij!K#ZVrX}>PKNKYH?#*3&n#<84uZ1`pH-w=0WN{vvAjiakBOSD(6eu{E} zo4JG8wQXC1wlUury~#fG3#u}J1Wmbvzf~Mdk8AuQvTPDXQ&e1Gx%Fe=GY{2w>LD(A z&n>kL8uFu&&J!e@R~r;5c*Ke`NfQeiqLD1%i{nO+y})-d%r3Cq{82KZ{ZM4hCx^Cy zGRPy9+tfYU4pZe>;DNhZ1Mv1ktr*!;p{FOO5_?G$kw+8?d=;xzI3#I-a+a}c^DAqv z?u$5TjOO8d_6W6lq7{h&SiS-qSw`K=WJa7oTs(^=4^y2dT5(?fWs#wKWmM-DlbxLU zLzw0cvks$ixE`mqlD8ZxnfLK`CnDMW;e{T7#V(8x#Wl_DKGW3)iQ0^*i>M92D_jCM zmo7B5ZM=4R*Bz161bERl8MlQ)Tg*|=MS_6M_skX!a}-5W)CXlseEF3Hpq)W>dU|vDwFQjp3cj=C)}}3L8ZGVp*~`=e*3k+Lo@g@q5{+ZDAV;rzOa0zovx_>fDa1 zfN;coF7GGK)XF-2RX^MJYfgqsrf8)R3kM~oB^FPkp2S(D_Kcdb=X@4U=~hgPKK`cD zu_yEV9_TGr;kPwqZ5a8`+=POf_xUTk>~XkBYX#Zmj-D-d-!o{? 
z*V^J(_2Zi<;hq@qr4M4HYYqc^DZ-Yz%nezsDBie>o*_$9x7t7WZSfk3lUM;ay7-~| z0HW5}uM_Lagj(8yWJ=oT2EIb*`q`P-j)(U6WsWET^P$8{s|GnQvx-qv4=~)!n;sNa zFaR0Ro#n0#{A8Pw`Q!O)N>+t^Cim`UFT19yc|roiSMoeM<=zwn(^uHN|3HagEVO}5+0N!w<;$QPI){Zr8PIfw+-Bo4XiZt27wBskj0j7__cLIF3Mh#AxTpzacG?|T;9 zm(V*%I5}P%r2!9!F?Sk*MpkvjfPxUvOs2CH;_8!D#7__`lmpBjm6T@4c=jGVl(%Qi z0AsM~Hm6_$eqUbhEZnKUl0;y4RG5-Mt^&NAq1vXvDhZ-lAy5Iy1$-Tk@bGs{iYkSm zxgG=7gfPZ7&mD;%7QcVj_#E5-NC=Yce?7oPRzD3`R2~9S15l`-iE0}Hjm0vB1zwAx zz+RCl1X+dx+rr$ZV|HY<`FfVB&n;v`W!rGi7!#0XQC%^a{}*ZR0o7!>MGZ4|=8oeX z#SxUE2o{P+6Obxp9BgzURB1|=9w2}~z!@EouC#!(5rs$#0@9mGlOiZBbdVyF&_ah0 z@}D;_^}Ba{-&#Lw-F0UMlD9qYdCu8;pM5@=Zce*3cNFh-vs=lTB}=-Hat(5NJ08Ec zosdKXBJI}ySCV737-(I;{9h3#qUo2vQ)5QLTG1!JdcBH>%Mr67uercu0bfIyzDKs^ zSAjoeVoEJ`0Io>&ms9Z1KmNMs-jtdL8O3_V{NdNgpLE3i@t+yJ5Fq^Y%SUcv>cu@; zMnQd#4C9VNxWym#jHAYkA*O&e69R=J)own~Zy~7x(Xoy_+WX3RLR+g(Busm2sa-l8Z zmH%Y*m~QP|D*PSONu+b(_n*IeQXK;}3dHfh|MKr2`+u~u+T|&EGyeODoNRuHkdus` zNJ{d&%t1`ldRDk!r5~3!(>*_s*fe@IcWU`eQ?fh*e&MyU8)Ub;l^< zD?qO85@Rwzj162tN@I8oE8{eN=6@bxKG@OxB2l3owD}P}=-;ine&pG=p0qO`0B610 zoWDRy0KJ8C^`3brfh9RS_;>4cj{;?;ag?Pl%#&c?Yjyp&Gb*vJLaZeF=0Vl@d}g-?0@j4OzUqnf~4l$$ZA*@7L|NWIK3#8MO>2lcRI_;U!jVSt^n*nqyUpl1&jgV$cV?n?e z-g>$5ViHLEb$DCuQP)@5(-e28Njw2+79urR4Oxcb%q0!h;kU~#5Z=qYdgB5?ZRXUc zmm1QsZj9!fQdsW)e;BQaBP})WR_n61##m@(@jAqKRIE;D)6CL~$r7c9O$t}dlJ#5V zrF%vDN3-fM=W8rV%~DDzau~mgcRd-cWl|CJ4+T-Q!##1EM`*B6aciW8{PFB-quYyJ z=5HCVubau$%3&9F!{|! zmqt9`a^_~_YgDKgZCYGAr>kC&P*+KEnR3v$prj>wbTG>UW?~wOo?2JVbPd=LBoOqKEbp#OO;bde z%7p&+)<1_t72bb5dV{@1xudJeFXk?3(kcX^KGtfU zg(%I$bu9Qn8P8`sNdkn}79w%m@#Q*CaLF*);ZnAV; zVF!SIb<^y4oGzoU2L|#NY-b!v_fd^rE!~3pmKSZdF0L7qeA<<+1mY~+Z_W+^vbSELN6gHm zs3t01%v7XQd{|jMW0+r3aOW7E!x6o$8$5y6I8OL90IQZb$#_9cdzy(B{CuB|#Ih;&jtOq7VTW9(Ei8a}VIeF~%)2k+ ziM!gKWs})dV^Oyr-t%&G&@9klc>x8rgDvkG?hGbeEoiHtO?a7&Us$_@_)|Fyij`_R z*sBYc%)o0a(3`P;N zL3s0royY>CxUh*53HZ`WoaGxYSW6?sEkXxVgi27hS8IXyBODY&Eo9@rEO?abg|9tU z-xdl@FV-s97Mo3c0HSn;hPz>|x4tl5JE?*dxLLh(D4*f9T#B@-u4%l&H~Hru=eXn!3|; zhUF_QvtMox8a298U!7@mnT?T#PL)t`5YKiDdoPb>7>X%HbJt5xaM*^Ut)CFQr zoml^9<)teRnwn`HoxpU2RcD~yI=W)AxlC!E$loi}TY02aH4&Rs?>xvdn@&)b6^!}q zCNUFyqtG7+{N>J8?x>l*9WD0jkayeqO`9hhzf$#&MQhF8_h|59ApAoZfnK^%)5^Ns zb;61p2?w!>H@1cpO}7ME3Q=gpo3LAzg!Q>%RDXiUmvB~J zri7wkluu0Iu`^R#A4ikw>su-U!m)0IKCQ&{pxC3V?=5NS1!?WWqhRge-Oe#*38z;v zwhB9_Y#t*xGgLl=SY8#a)3|O)M#q6#>r|n1;zD!<|7vnNp?v3EmbxzanYyl~FkVzV z+*XC^{3Srg%&CzBj*K8QGoHSWj`-wqsEdEhJyLr7bV*JQxa7rTZZ-L)6xE7N4T%r% z^50ogJ)ZXd^TM|PH3{)mvYB6u4ZSv z2v+?1zWonb7}eC6G%R}SC*TdVyy-7Cv#7WKCw#Yz>Sqqk5Dh%=A{$|SqvAi$agkU? 
z)olJ{Dz%@GpKzXdfocXzBxKqV#gp90w?$1J-!MIW^;yEL1n#ZkDA^qZdw$V=a|3Qd zMn5hMAFKNWyLs2^Y8`Bu^LYpc#zY8%Cjylg9=g|`I2l!ZwpJ;V+HKKmoGjIqn=nb6 zES0S@8GaU>7Ur?O?g7Z^R{%liwHp~}o)^Z-&}o;_KglR>^L}J(Q-lP7^V6R=Au=FP zktWT8Ka*jMJ%`;2I50we@GQQfzueLtdSc}IN*5%!qt+vvmr?w+ant7EMcJ#q!*Uc6 zrQQ-%e|oAF)s%)^a4)*lKD}(7_HSI3Ih-+VlI4K<-aRXUdzK6CERd7{JD*|3w_jiy zZsCHa#w6w8$7PU&iFQ;=>?eqRMNl6%@vlX;-y<=by~|aWdC*XOM(LP9$H|W(-bnn8uLs`q|qGb z521XaZWv_{})XkX+hao-kjDFRLFm@o3|w_J8A%j6m9 zC#^}jsRwxmuUVu2P7p8}!Ohc5<|#Sl7_&jWo(%1ACpVRWz$|yf=oKge4;E_i5#phl z1yg!&gYDqP^d$FwlTtd8WS~nU`A;;9fxzNe`TCr0lVkNa2nF<497H60JXr5V)bt4~V$ zj)gU2yvOJQTR?>`L=0 zy$sAq$Xk=#l*688%agL*Gd@_x(7^kg1}f{OhCz93O~^@copbGz`bP(k#Iz2q2nz9Y zH(0;pFMpn6x@4iTp(b#4x)jF}+PZHC^cR1sTb*9pOS-|@Di!`lRprWHb%fYA)X-#Q-K$OArJo?D`-A8vxODVnwU~DsY zNmpIN_fA*5f3yS30mM=zy%m^;*fJJ!mqD+S^u<0~`IHj=+NmwE_AmNzLU@s8m&#%8 zR|3Vc9*z|&mo(06oPc~~)T|MM?EjA84cSA&0nRqhHIg%*Sm(*MnxuV``xN^@rUaE4 zRgs)uz9AZ#UJx-ozhKosaggxz-)gkCn4oN&{8%)1lc#k-VnuO@;djPFpqsvrm* z{geMa2T9Vt{Xv2m-BPn2`$Mxj3E8GeohnP}CK5VkbgQ=i6C6=Ha`xE^K*vnSzDF)9K@_lS8@wUQmLul z{+OHDyHyWV?No_&B1QY4^BwtsV;_RD`-o;#SpKQcVB5OBpuAI6I!_i;QZEuy9)(S; zbZji3Yjn7Smibg^b9H6`Wp;cb;EP;10$&8z56pxVsf^Fw;i?}$F-19miQMLSaR%@a zMT_Ki%;S*#N3fo%CGYsWyZPcS+)+Or5)`XLLiLtY^F1}>%m!nQ=KyoE^(s$Qqw$=b z5_ymvOJ*JHx|UYhei&cSQNeF`6-T=7nHlh|Tu;u-GfTZo&L@k)>2CI}I4_ItKIq-! zIn?<`I@VHoedZ!mGLgK;{^cfyz!+^lY$2R~gE3g`g6sRcWFIzMY?LDua!8!R-Jm8SNtD}f_ z$~0T7_+Sro2lNnU-tEkzdSSQrsdr-6oH)i)eUbOg`4b=aVXZEnK-a8QOD8|84Mu9) zg_^u$@=91M&xb=&0iBBvm!D1qMxr6vi(H1SdZs7LVlI#RIXF#09$f6y&6 zl|NRWU*g`f7CUvoxuaO9sU(jla_)Y~j@E6{;8>}o_iHWh;<_-VAb}qVcPqDA?I}GJ z)VTI&8S@aewl3a6c0tv8ef0Kk{}`VOuq_Aa*u%zU$#T(KDt^I0lUI?Y$fix7y}2{Amqm_#^&zm6i9I z?v?~P_#r$=*p5C(kM9gzwYWPxy-HtH-UjFAz;$myT38^3NTkmNk}+vSDamCxsDfR- zx&M=zB)TX;3(yozWKN&y-($~0|y^!fD#I#rOi95@f3RoNDztEti7 z1TNz0%jx5_*Jqk|K;Boe2H_V>HkOL8i&%9s*OfUc@myQQy2U#m^& zRNfR2q~YipM65Y|B9P1P)o|^qYsQKNohV4Fq`Rmo(~~;`mq&fz0+ea^f@^T9KIf@R z%TF%XDc9@6L6}~L>_U^1HEB};IUUv@L`Tr#l5;T-l_HMqUCrOZr(hBvg#+R8KlC9zB7!7A;Z;Qw9URrGB0xWptL_?gNUQ(85@ z?)%W^Kt8n^Zq{!GE?4W;c>=Xs7XpWNv)SZ5Sai&`t!7&R5{;Z{44^$Rml z>*ViW#Hcc$WWmr(M`<6p(QSaHjuq-JkV?2vfWBUp=1PV5qUF%Qd&NQDtW1cXd_&vfFFs*B)pJLMH z8}W!vpeV3N{&#RB`T!rNyAC&w@rj{gsAM%6k2s~~rhn2%;I@Y;ELUq|uA$E(^ue1xTf_I3!$4 zr`-Cs&QL5M4(?Qd>j@?hleslY6+g$EG*&`I$vDRmsAB9C*Cma^j zh(7R?M^*Q^kh;VWaLX_@BPt)VnM6QbMx=>vH{rgz-NnE&`ZIdBk1GlQA5r~R#$(P$ zyq%X*O~U`2IGY?Vn;gED-4MKtK>4hK^uMS}eL_k%B>Y1;XJ-131iq=eH>d8h<4&;+hFn4DldDzdRV5w- z@Txv02p23oA$2w)7rnT`*w(hMw55$bs``FRZo?1FU=?0_NJG$GfqHi?&yP--Ug;S> z#Q0_q*PV~KeYLG#CgwKMbd(X>2~0>QW3I;8(3P!K@(NgbgGhXR#IaMllIULE77y-$ zzBqln@RL_1qYxuc%RPq!FQx}(00hId41O##F_H&J*?(qNJIj8q^S zkVq{Z4R1F53<*hc+9ou+CCE|@=Fv|FpIqzAWL6cYYycQ1b}H+pxtbvLY`d zEn&#yq@HVLz-`P{$bJP_HX+0aGgU9$6NhpcG%(~@BF+p>un3zlimfAFx8o0{;X#vhIv?Op4` zJ2$zqux^dZ-CVQj_N-~#0?BHO?!d)BT}=(|a{l@P*R6#1lrCqR3&m&5U4u$aMPldu zbJolpWbG|bx^=&pcmRg=oPc9t*R$T?5(OIs@hqZ$+}&#(=84;7KY^%VX2<1JGI@X zb}W-X$TTE?`4W7pfW0ZpnQF?ZS?~jukd@}h$jO&Mrh)Fl?ngJyz~+#Gk*3eI{gtLjg% z2QqBfQKok($@w4zAlrKPUcQ_+#&jGNQ^K333%>uni3#680aMuYE@Z!>c$Prug10lO zMPWpyxAI(k7hY3bhmbG2-#DW$(}NU!E6!m6COn99>vqU4M9Uh4hTIk3ELGLu&Sj;h zYjet#e^W=8a2G6J&Tj3)Lk{aBx~7l2)n|b3i&P@s<7iSD_<%9BGKRZLoP^}L3lb+b zL-h+twlYQlz*v=GC)hC2$@f*p5mxVk`njZNYs+!H(dW2?vxYc5HewTPpx(E^7iTFa zSmX-0y2gJTyW+R?juXOi=#^}K~=8Es=U6wQOV`hoAWl7 zFBk4UaqRej=6-T3L1OOVG$yHUUdj23M@>eXTo+y#vQm#EKq{p_E&}ZunggfBG$smY zAMg0AEtrp?bl)+I#7O9wXWDPoQYz#PSko*ETYtc)+MuzbiFr{8FZDyG4&3Pi3nY`& z7*zdD!>Km!sm}GP7ujLKv3I%FR|yga!D2bvMUS{TQn7BUX0cqr~W1n0fOIqYbJK?lxZEy!}CP_!o9?$cL)GVOb7{ z-DMn9bhmC>G(|2NjMUtL$v1!d3B`Y+@p-nWBswm{5F0N$XheLFj>zuO@x`K8A>%Jz 
zpoMN)wujLLBCew~5lJ$LHhKE8@54)umLJtJv;~{$m<#Z zf1t>_9*hXATRK?Ge>|Qb*Z)H*GN0?&$1mcEFF{;)jjtBuEJ)dZ;#u-n3klYB*&BV< z6>>do(XdvMgLZeJ`NaPYM)Np=SP6Q%u8ClZA{(9)z!&(X>*&-IyI-FrL4&Yn4Y;Zz z&04bHXafU&Fk16@C^(I2r|=F0qQ3ZX5rTWa{F-9BM@*8lyHeVEg{}Eg1%$6fQ!K5) z4js35{9oYs2)GVW3KUTT`q4P|U2*$2d{J7}=7{;k=eH84oS>%x?1h#qS1{Ff?MoPm$MiLLB1grMCtIbCnj7C9EJm z5vgK22kspH2(=tshIhnZfRtn=q|Md+Us2cIh&z)|Uc&f0gJ(gWXxt%cDVz}`@nj$+ zhn&?YkIxd-bYIPNbd50zK$%z_&}(g74V;%}OUXWA*0AOU=EzQVIB6wsO6B)(&pdZ_ zlsAZ-wOdil^q|c^x^l@(Al-tF+YOlVcUu(1B%!$m+SFZV02=0LbJ#!}K#0@v>NlJB zEZmnMg{vn11tMpwYfusiO*gbO;4vHL7r=24?dV_+q?C}jaLn$4LeJ{SR1Zq>EQ!(M zgXcsP_D|99zAq+kFVM;K=pAMHa#NW+Er)KO%s9VYKCR3b@ZJH2`^7DX zRz7;pLl4w9ix7;C)ZvbhWv!F7(Crydnl$mT4*BBxxC*2@`cWr+J#Yu9o9T7*sXgmY zLr`H|yU4Gz$bn@w-D=}y8`Q(j+OZ>GvxYclp{gjS5IC{NaCYr)`LwknCtwM!D5mMR ziPrt5Q*jGnyH6S(5A8|q9ecfDgfhIeHm zflE9)euz#aaHnGj9s%CRU>(HX3H$RLjgX~r8JxEZqpUO!MUf+(l;ktO@Q~sjaU_|R z7x=aTo757DeRQ)VKa?unBklq|hKQ|HEJwrI1!#Ljh7YyWqln@ztb?PyL0=N2F(MaB z6j#p*uuo*&rk7#OQrs;qx*K$eUU&_xZXG&UHq#ZA`CijGWNZF-X?~6!LDP9!UBS!5 zEPK2Lv#O@pq$JQ-CusG_ykS8Uzu^xlk(SjaEoug~l>w7=U`acH)m>mvVfV5fD-JpF zt3*m8wE1brBpo3|-gouSOKT*pFp1Sd{5j7fi^>`j--4a2*VV$az4*Lbcxhk0_mp=| zX%-;{qB9G#VJekmBLw05Yv4=HA&R>kN2X`Hhg*i1HotEt9{E1Q6zje^F9fTREv<>< zF?))5e`pgLO!KWP`2idsJN(X>Sbu8ndLlZC$O4g!^k+GX>_LAb5d@GFYMtg6Mr>e? zML=v+xIT$AftQ=w+KoYCkK!-aYdzO0L^s3v$En+gx;MuFvzTwzV*_q$AMHJFn9n99 zm%V2g8t{UI#mTs9=E7j0bF+Ct^2(!kN5L7~6&YUA6mB`KkGsHT^;`G?iKF40LjLj% zLgP>RQuFQn$euQq z4^{upNye7G3zEFn1XMCyw&!^hpLfxFg^Hq_l575%Pp)A;N2d;~Xht8`y_%){r>@)u zgs!VFSdYK;49}>qM$%TiBJbyxf*F&JQ#Nq}<4;{HQMTJ%`>jrT&%VfuoSx0s9TnG* zT6H)lR*E&$Y!W%h2@R5`* zjFSRIx^1(AhsM7UN0>bjdR0pg_J)4W-X}+{e7q=VyGx*!D_L#1!E)M8m6d)-&{P{=t@{XpCX06G7ga_D^3G42H(H9J{CJRgL$Hw=n3Ranux zUU0=U95>7NAkczsuG)5Ajss|X!YElv9T+|B*D(cTI zv8X)eu2}ZKR1W&(R0h!PXn0+GnE!$c{0$2%`wurR`7r*;O4%g&{N&+urbVv$1<}%# zO+d;zKK=oDbp1^>;S)rdQF^&&Od|Sa7r5O41d)?LgO|zgnWcz2{R1kzC$KmEbZq}5 z%fZpWZ3MWk+og7=dfhDj(-M_uy%*vpNFfkMZunVz0H9#%AElovfMxJwcxe~6 zd(BGy0~9so+eI*$41e7HBO*9cAb6E*In{}1J{RUbGJJy4>uv~XZ+HZe9;b;Fx zE`MOxLuaWxtfOcrctUK=e6%pIoOXL*6{VDOoJlrfW4K`&UQPvN1Y=Hkvj{X$2Gpkr z0SAxXxh#|Vde2V142_XICi(}gu=4wYW&UrFeB|loA7qojV~MeZ+|2*HyD@v1xZAbA z;K{FV{q;dt6X|Z-4G6?4R;oM;o;})uAg&FE)DC(#7B{wl7rmpvz2NN8fjD)L5h?4I zo=aBR#O$x4GECElYb%qNkXv2G;9vg90Ks*QwU~BcZvk+JzQ@by^@mXu;<#k*9U&Tb z!&{x^pQZGb&-}PI4ix|kjj2?f@y5KKtAIPQhcdebGlB<_5hFB#Gec)9g*?UIM;4c% zpxo%-KNbMF?Cu-z$8W5Fdq+qP3z40dyXd?7?+;jR_cc7g*Zk$1;Tep^c3?N z|IISdczp-8>xh-$stNd54kD9v$ihJdwDLI*63I{?o^>#Q6rd%jxQ3B)v>2@sjd(lp zG)Yv(Y)iqxp{anP+tQ$EmY9LGWjn>l$L~9Td3)Zy4S5k46{-MB0bq9!WHfsb4b50n z>|U^G^KshNe-&-rr>W71bfZWo%{h1R&Zo)q+N^47gsI-a3^vYHyvor2X;|D+8IxtA zPw05UB8qyil%;aKa!qx917n2>{T-IF`NhtKZffjIJ8)hPj^EOObZH0l@Er>PoE+h} z7!}IB?{}AcThGG~Q|D%jncF2bFpRjAkc<@*Xx~^znqrAFr{r><+%kepkGIBAhL_@u zNL*|cK0mI@sIaaoDX6_%bpe{}ka4o{i9$j$mB_a98ugc*GsqZ*`8j%u6jl`$4w@0(0m zVPieKpp(YANvb#gcO|@0-~hqPp>(ARxj=<|yAZ|{azuvB%_)WF!MN0qxe9yDq?lO8+uDAE;|AB}ggUH{-__bW?X}m^{pGPjDs{`-T z=4K)O=6o@)-{*QSE}v5>udq?qA{H^7z*`5b0jqC15gE(W?ISRZeXpU z^kf4NBlf%7^Qg6Dms0XjpbE4a-d9T2;4h{ry*h!RAi?iP??2QN0#RCk3o@0RL3Dx) zL%jDos%{Xf5%yu?$nUe8An@C;HiAs=%^7#bpr8s(VLCJugRp;h{vddfaPhXJ44_t* z`6gQchlOWs%2+xdI*^PQa?-GzE<~>I2bcf);L`8)_18>9;Q((6Qk$6${Eynq9?d9v z8L9nio!z%^5>odkjG!?FLUM+hVa)!YDVxmY-6Dxe;IaAz|G&|$WheIP7Fo{S_j&!w z?|&gFc_`0(NZz}O(21CzjT@Pu@` zzn2Xf1By!dmIAYHjarU3LByPIIRTNvZ(>BicnoYkS~v2-K1npDsUK)609P(FEBB=3 zRJSE3V9CM-9IYG?SJ5#s*?`E2mkhg@blgr;xjg<^9y|>Yvtz(V05(}HJ{jPW;x5;O zsn&A3hARx7_0Z0qUih%_6_eIpU*k)BRhAs+30!}ej+KcNh-ogW+EkTctO88$sXP3CD>Ge|mslgS4QGbQY)8hO~Gpr7710Al(~Ov)JiHYl%CG!B}P;78xSn 
z8rJ`yGxX3+)WZ0u1$!p9fA3v)R7;{=&{?}|H0z^mM9cxZ47(v7p*9(0lkZDdU@{(^ zKY(H7f#v5~iPs^a-Iw;gihc|)>@YB^Xo1F$mkpj?E`RXJ?&{EM8j`V&;*#4!e8Mo^Ahl|^!V&H}Et z%;Ax`fOY-zMx`PSX1QRpQm`)H?F1^n3xNo|Nn|q81{A_aAgxeP<&@SqK(8#=)7ZK$QfDYi^?gRdY!Tug zt@j|m(40Tfh>IbsLYRFn7MWQ0ve(RiJ6{1> zmBu4wg|uysXBUM|TM~_1$7h2xYNi(*id*9Ee9ocrx{PhUXb-J4&!=I(V$$9NEEGJ- zkI!JaJrdtew5gR(iovvvKX)v%$!I2|cki1RnCD>=qdC;}IqLv+hytny{#z+sjLqIV z<qpZcO^9zM_{ zM3wzK&6tKmukmD21n>9Vpm8t8XLHHMw8#F!u{ja&OVxm8id&Ua?s8`-{CMFB^A^Z3 zifIP%fuzJuQj3$&f7|FVq-&wl&f^5~Q;W%i;H2HTlov30H$re0@DWDg8@wMZnj zCl%(_Sb5A@rlVXMOZdAU-P6}>lf}t?Yo`2tJBE2W`d5$rNtCJ(ERml*#tcDsX_fZ! z0zs*VoSk0?Eq71*8ET)9(Fq63tS%{e;^;@g{8BXiORUw2!69!tzH^OJ=DC=th&Q%5 z!n?b7?de=(Qx?p<@Dpbr6>IRQa=FMf*5<{vL9N|MvV*%>+JV$LO<=+VmK!7P`!O~! z=*~>%$i9W?czAQX+X&!2Y5@KJdw!#ZFf3p$fKpun`sZpZ1MFrXt#)o_mHtI}(1dN2 zHacPJ=RW1-tj*rUbm8>9Pg?`o@nqb0hPK{iqX|HjxR8a zr{ol2;iebYGxr1Tw7Y6xrB{#LdQ}wG$Bt==8uX027?4J@qIzI*NPgG_52bo=JXJE> ziuDffbInp2U449QpEfFHfU5rUx`=&ba3&AT* zkFf68j|Lq>N}B3}l=KR$2W{AMwoH*pf&EW_>h;f0i{iMTa)-Lu{X9x11^GY68<4B- zd7D;hi|T9b_GG@1d=pzV>4}X6UWFh+M)nZf3)_Ip0qx_@NIX&Rt@YvV!j7sSYPK>L zv+_XH>H4+AWuVw6Ay$Q(7X5LfJ#zSVY=Hdx`dysU|68w?;Yy~*7S zbeEB`lAC#&YijwXI1c6QAsgS*?d>e{8{}-}Cm?5&)iO5EBIH(hA$!L^itE)&v#*Lj z4Urn3YTU?;RLP!6BRD^t(8-S1H`L;)1PAkCkw$l6F^urD*o&^Zn#!NIPD<4qS%^2b zqZPw#^vK-Q^gJFbQ@zrtg^XH_uZq`wUvTi4Q3Xq?$pP5OffEyFT)KKYGK}b*A86#p zGmV#8Oh-|5a)$?#Sjo|qTF)Pi#R%m%4n`d&Z|R{?M@yqVD@~j%m`9#yeEy9qoNlnma`LGrChL zjfak}o^x-STl(&eN6YFRvs&g~T2mF7&^cD9?PLNSr0Cuf3xcyVwy&o= z(&T9Ci%2Viv*rX>XWdZgmxZf;rQ z?pSnDqI$gBvMm)WXOo2Tp#x7naHU@$^5Y|)FNGa~Ex_kGx6GJGGY#DbmTVT?%n16x z(EAlV)PoWgtW1GOhK9bS+$aGdyDN8|V1X0iQPA?NHZScJYH#eaiBCez&%V|fwvtfC zor6hYZK5^^JyO8ER8|X9v1AR(r5h3*vl)=#Uv-Gf6J*m`g((eb`1aq-RGLmTYQ=(> z$hdqtBZtGv%uXb9uzsgG#Cbv2fZ)6w8O2uPRlY2s#9V%)W3+5kuAzzO^*C(8Be|}8 z`Lv;z!Xc}nYZQ*2op}~AnqF~@ugN2Q&Qx0p98byV@E#7%9RoTlfLSmnXQ&{FB%?=& z@~{DMpLE13V$WA#NLn%jx7ZT(*(uWrBa6-t;C*8@h?z(pKLFh&E1%ydRTFh`U2z>l zu_3=2xfq&_i{b|sR9e4f3i>0fa+^5MFK}7>(_*)3Fy*sN%Fw~F%;O^zTasnLJMJ?A zBTNx8&vPK4wN#Dba}$z37~+x^1-mk(8QOW^smkQ3XQv#|ZL&axSRK$mC?W(SX|D+9 zlQ$D3LR>WGTB?b~xuSEm`jqWvK9nnLSWQ-qw01%MyF=qXE7jZvtU?~Nl(6Y+AM>d# zVeV(-@>B`9e#b7U{s%evHvfo?fuIG_hWH4L)R~vWK6{*U- zMGM9G+QtP0?AD|~{f-k0bwUua(InpYskUd$zlODXsy%dt@?=ov^H=vK_TjG|1=`1?)%f+{Q4bEJVd2`I%K0lsW-pY1Y|FwxEP z3vbHa;6yb#5NVa66TZ1VN**Utm;9&li?-ui`C?v06@xVxLCTDUp}AR?lnGr0as|g* z7mjd6NTLC0TZ%-#Tj`)8KbM!P(>Xh7g;G%&T@=5d^+QpaTZp@8+p#WzyR!z}$QcLj zEclTL%~_47*wxW(9dk#fFshL)#l_@u8rVYOKw*TGufjo1ZOoYe;6v$-H`X^g5|ZHz zo&1P2590Qa+(*OtvY|8%?-d!q!sDNcCfb({yBxAzm`CJR8=C>l1ZSqjS9=9cUdw~J z1WzN7O@6@-ytiY5_&Mm(s>i%aKg>47wQC!NAW zzX-FF)QFTjlDv^(il3)6GO+Opo*(({o?H8OVEIlXdCpowZqvydiwWt%vst|q6%-=m zUo6cDQS1>9DYEnGSH@_(sGhXjANWzqr3?|FLfQu=c09Nl&#kh0k6B%$qpqK!n_AsH zZ#QGeTE`k=mF>G}QT6~Ws6Q;?`pjYq{v=(mP+{)Pn6@qyb5 zvwX*t^phLVvUcMZ!sH=Q{Sm&7BonrnzI1$Q!=jNkh8br`|2(eTE1tSg-ti{8EKq|> zWSA^>I3{)=Ueq$}t$54m@U=rB6<`4bak$9_T#7`sKkatA0jT2r%O_M14^tU7`oPaI z$Cs>_aCXT{P&b*^Cj0e9)Jy?I(!_HCBn{*>SPwWDw*n%`7Jj4prn_%{0MoxuBOnDV zJC_k;UF>)?X1P7X^;ROD19T?Lwx6i4mG|_MqUP%P#EWyQ?GmOwzhiw)WxICnYy+Qe8HQO@jv0TxRR5i9X8A`fLN)7c*uZYd@WDIMp-bd2?WW ztvU045ZGvNyXGvjGI^=L`+>O3$--R9{>}XJpS*0&1QAa6>?1u09uG)UT#{ujdA{Y`_nNHIJo+S+ z_37tLZ~L|4Q8ypm>C2>{K||OLZpXf_xKy_xU!1q@%``T97%(=iVJGSDVXonduc8IK zD6hF6*8QOQ+}vbceK6BP*Y;C{#4xx$u?!$llt#MNT%w8|V`ON8|h-w&Ei`#;P_p=e z`dW^wnUKBgu?RM$q{g4)tGciMq#3DledTfCmNdF;q9QxD$TdqTzGCD-y^_pu`$pAm znU+X9Gs>z2*FHmCES@l{gmO~gZn}~&4#U_6V+Q7{Fcv&4t+{?$vDG-P^fKqSsVm3Z zahY`6;ds3dFWWbNsI@g&SV&^C7}Oezg;qtNOH7OX;Xq(oK?c+JZmxlu~cR}RcCa;2QrY27k%NF$qnulwdh_g0@W 
z%>LfoK?$6U(e3TPtyagDjW+2LH>);QDdjg-w{8?_kbHGM3#YMC2V7-^(Xt{+cJZpT z<0}{PST2O;qXvBgM_-i*l6I`)M$%@tpEg-&{Q;TSn zPSkL~&VN_l!A4;Z1St#pFLMd!&}VO-=ja}&_3_Il4d^S<0s?O*W{bty74`q5 zc#2{}eF_a{pYrL6=002RYmLs*A6xkGD=fr_ia3X9^ai3VUN$vJ@L{-s3$+=3cKJm? zc1`#6R5zt9DefD4k`ZRqHbjRtXUnK+0YmtAmUMV2ZZ7oNoa4FG9oEQv8_d?tcPVvQ z0W~eu8$*W6WKCyBVS?EFGfcyDeaVPA^G79E`s(MB+0rf+^gFTL%syk3E9TR~J|!KId*`gZ%oaOu&{hx$=5>};RY0=6T5-NXQ%<2Kb2 zH$ZUxAZlnpQ~(Z1t`jj(MzI*c2y1HPu;lrEN&6 zmkx^LiXG>idi(450?#r{YXcOG?S==FZPHYO>ps23A#SFo8>gkZ`|@Ab`L;xwc;yyW z6t0L{yA;LL(ZY%N(}$7&;Q;xY4tSXbQ>noRyY`aAVV z?;gdUHHK0X{mpEeE-E#Vy8Eq1-%`%-PR!+?w2cZ~H4q+2*|M@sGlk7;< zs(j6@tNvIc{CmTKLp1)8pv)$plr`6z>+%>W#a_))=M$8YUysq0Vv{Yh)=Rp}rBbxm zNx2RkWi%C-)HgWz>L6@W6n*7IPCsY zZQ#lF412RUHJ2biPJD0Z)WlEKCuUPEN>hUCZhykmeR9i?U&cr|S7Md5a=H&2$l?vM z?Br8Kx#}wB%5%2Hr*a1P2p%*x*yGlpG+?moF+Ukl8Y)1IF!yXV(j}T<8&b z9$eY;`|8J~^Ca`!c}^ECP)pPl^~YE9CF7h;s)o+d2$36Q!3P7Rj^tO)te$dB31HJE zm)IC9=T?qjEYb{12=b_ut#RUo!#qWWH8q+P>0HU4JNbPT(?3=n)Vz@Amr^nAMUemf z8GPI19=9xH;%#cq=Mv)++R#e8Q>SMle9<6limcVzo5Wu0qL23LR%~Q)`?5CUcPmQW z)@t0M(z9~-Jmf2$@(1DFYnxs^pQ5E5@N+8BSi;&naf4lGbF0u-w^rC$*RVf7VQDpX zuItwM7iQX3Vu^Z+6eVCUkQeeNj+)G_M>5T5-{EC*fz^=Ju zZ`#)Cuky>(lx(5Nc6`~~Pb*v9j<9p)HQClfM170S?h;$0B!{l^#T1(uT&vPf(VCtl z=4#GX)L`nHZpBPbKw4oRJVWt&4f!nM)cBJ{c2NNAk}^Q}B^W3a7Hh0+g4|s!yiu3v zqhP+#b|Z6eE@;z#q>P}y2G*WIcOrDqOR&*Y4r>MUW_P|y#X?s0*L;wwz7QV9o;w&w zlw;O^#?ZV-L13D zC|kzij*<~7dmNQfDeAT|Pi`SwcGeL|MpVWz;y6boj=j!t=I{C(`abvb{GRV`{QsOk z3gp$^}_)hBvmC&oM{E>4XW z`~5iVnLnSHgNv;d@9wK8#@)=Vs}zo3%3b+u)M{y<(jfFvV4}oC&J~SRqF2IwrZ45z`ahQxz!{&qd)diRGUKdh@Ri;j&^< zcG*24|K+2u=Q5F4eBOj1j%Ju=)c!d)=UCO@LrZl0&%%no01hBteChU%c1qe86+z0& zom7i#YE!t;NGa>DGq&)Wjw#cQ^-<3q==S3n`83%@uFqeg;bU4JVy*}3kX=Jpw3oZT zg@&%^spqPS>~mHYlA+%_9=G4*riC}8rg&VQLcPkI$f1^9wnFvHCxGp+`TpCM029l1 zZMdPVr+x1orUY=i_D>oG1mu<$?j)lO@K zj!v~kRH(`pWVL9-st=?URNc*d+LxvGkc^#P4tb|^%y}%P>SU$ilvTwPF-X8v8KZUI z>-;b69HK##J|+11|i?&OY~cWupY zU(`^<&vl{m-oH&QP7?I+rB_MLd4Xa6khJw7PG*;E(tcOB1f<1V#ABz7cFi%}a#M^Z zHg{E(1T_;#O2OAU?+x1zR=@I2IM+r*(C z{!~nGNBAETOsAruo8f@>$}3(zSzoN%uk=1@vyGWWpOj4026esIp_+#z3kM9q4t6Ss z*sa8sJ=Ca;%v{PE@W6Lun`pSa$CWsES>t`TR~&_AgLXWla#}Bo1)+iz%}@Sb_Rv+YOsBQ@ z^4p1=U44&8r`*yqeOP9fvr-HO3!+FC>RE|>o`EzMqp@>CSCKjhp0S&@=|rhrIkKTQ znze4aI4ji!s$Jit-`T^#+ZHQjrq9HV8_jV9f7~f=e=siac_+)sU15KU(@#0xx83`W zJIg0X(}}O0Chn+&dUkI+kP&2Uq1co@(Q{*PL4??EqKe_91?f*!70y&?z}sG=GQUR< zhO1GRk5v0jg}o4S`E=v)H6R9n+l?CEdXQrh5NK0Mkc{HJV;n`QT465j#V_ox$?e^( z6;9jRowMt&ff(h=@?#*YP$^`Frt9^&J6AE_sIjMP}IP~E+v5y9|M)J31xZ` zCok-|G1gVqcKcBmKWDpYivIlBd2oztZQSe*eNcXd&{UGVW`?}O!a4cgHxVYv1PQM# z4c;}?b3wVTmUH*2g+6W%E*XWbE-`bsYrLC$i=&7$gr`3TjuM@3d+R%WdDBtaZUr@G z;?p4+Q51J|DXFfGL|wM79o8ALzZ$S~g+kUO2!B_{{ zv$w|`7o~qL^opT_C;4chev(zyooqR5D|03%74kivw@wV`B=xaWrgW+F^Rf9~!aO72 zibs0xsxEND0}1)l_#OQph_(J9jG%0;%(jk}r2c>r)YoIpH-TFUqvwH?oUeZ)ZvSBb z4zT)pM@7OY`Z96iv7c$Cw7u~)h5h_LjNZoU3I(|o8;E@tH!HAuD6o~_^J=E1`v}Tp zeyKa~+oI&sJKm$a?_8M*a7>);(nvLWZ4$Yp0^3*vfC>p3e##3Lw5D(JX*#6@U@t0z zdD>bBUaOpBc)GFqrusgSy>$hb5WbXq>ehY5<(~KMkzNVPx4t|0xar{2?@cs?y*ykC zCW+nroF&AOUis`x=8~nm!Y3OxPn=Syb{x~Lm_Ak3UeJ-Px40xeSPrvk5v17~iB#F2 zJa`(R6$O8XB~U4THz6}U;X4?6Ai{{mmo|kJat-E^LNVtzeMsCG4$lww=)HmUDXpxb zxPg?}mwIvhn+SUUPLj&TYNs*n^667>kSOLOZn98FiXu z8EX(a?!BiYJ3T$U($P-%wdZ;?OgyTIFbJzyB}Ly6(o2mBajM_LX+0YkY3 zxYC~^UMrB5LPt!&gHhRe1?ok%gXgIA=_q%lRi!(@{@WX1cglvOX>CcnSz!>Gt#uPS zZ%Er6dEX@a^4Iy+fkLoeFUHFp>-Gd&J9H|?3H#|qY97OM2tzUNah(B^LWO2xRx_4TX4GKX#Pe`rT*b6Ufq$MBF*$pn zyx8kiF1!K;q;<6IZwg)}$qx;DLggq}UovW5q!z7yZ8Bbf8ZW!26$XT({063ubC104 z1HPwE?bJQ?W<1Opxo~7_Yu--B>|K{yEi)o@qpQk?f)YF@va2<-uR_}b^_KG^eHeP( 
z{c)M=iuzPsai!A%*8O$)U+|>mh;~U&`^4_D3+f=u#J-x3RfY@qR&;pqZlf6o)ET>4 zn!gtQQ!nzwi~wuUA$n#8nlSbe7RVMp9+ap6H@jw+YMB8Zp(F}I5Ejc2DSoYeKm*HQUZEWN>% zODP$*bH?lJNl`k+dCs_{sialpqQUVww9#%qZ@7d}&PUU2>j5XN7ys%{P9YH9-UkfP zZ|bbiWbZDI>YfNmH`SX4Yr9cBZy_+O0{%R*xz&p8d7w^;dCQsSfK!0Sa!gnH1;sRH~dG-|`m$nB&w-z$H0?1C{)~eRf|Ocv7jXrsgAu z^mMgv?-9fJEC8^*fAMjKjXsl1QZYym1%dCb3=1VJd>Sh&qraIC*}ff;V%i+GR}G^c z7F>mMLD?-5_}@gBDFT5Co`6$6|Gsi_xW?i+_RhGWoe12oAN@iM{zHV^dJgA@I{*B3>o+g{_p)DT=h_{8Ps2G2t+ybr0rM7aQ8v$C3DAh*?yMf1sgjUUDMqsryTRM4{}nDmmj8cr zyY}!}w=FO2Q3$Z5_?9tH|;V?o#|WTgWDm&t;9h+xItO z(z53}`YrOj;J|~wUK7W)Uju~jZ2hcfs66{CsQCCW?Qmytmypsfj=o3=UVeB6Hrle; z#OD@7ea_B~l(8=z2^d^kU~nIY>KAOlaoE99Tn(l-1_ zvfXXI|0}Fh(D9W%2N9E*ObacBb5&#qC#vAAJ@7`f`axC$_4bn26|WxN@V~))Y_Z>| zbspO7Z!~pi#oz+0$p$bl zo12g!M7Gx`vO-6M`d54Z4C?p3Khq+wj}nRzId2+dgW$CH7;r-)C7W-4R0VTjvHBz9Jby_IB)h7gWS~AqH`2BD92>tQ!y#$ zia|IHQ#Qr$Z3bxJ(C@_YQtd!^6avU$I-%c@toGCo;iqqitD;MShHqAkB0`j-FAY}B z;nR&cwoK(vg_S9k=hNNtxLyePI{XN~LcnBi^y-ezhLz2GMMyzys@WBJalK0Gy|90o zSd)*Sud@XxY|uO_UdCFI&Mt4qix|A_Ged<(VyyqQ?I3)a4192ixYdF^sFmBJk^n*! zVl;Lj++~l(q62e#92$53!ZJ6dU`I>mMja97>Cg>Uu z0Sl3}@>2L@Pgs^UzPRqMO&n&PVA@5qI;ppFIwpnMsp8N!ca1G}(4Yl~ z#g=a|{U)E}CO?M2yrz`Izy7{SalP%eehmlG&0+(K7tVss-<~=! zsv`AD2?ZqQ0lr3$TGB?Ll1o=5e^2DeZ= z*Nm3zK@kGnafM24cT|vc9n`fls~FriI#*ewE3F~Z7HPL@t~}&$b76KRN-ksq#9DJRx)3xkZ`$Ttek7!pF<7&?c z9QOwlc8GhHtTw}~?#IXfIIVS#v+DJrqf>hN$m z#p+7t4SWU{yqaxIu<2G$OasTa$?JB!>(t<0| zu7ehZh<|Py4Vqd>W+(eDr3)Xncf0_gSs=w9@FY+gtwnZc`w$x`SbGlEY2^_F1O`v; zVdY-<SIl(mZ&_NhOh30JswfNzi0I&+Y-I`_-{i zYI7?mAht)n-IWa|A@w5XmKEudhXb$6J5uUrmsWMmKnM?k2Qbjk(Kt|P;EII9uKm%R z)>vFm@O5jebaSQ64OAB(Js6QjjLJsDv0QWEoOi+G{26^3QZIK3r1*M_$cg{-3R3Qc zi|f<=i;4M454!vmap7N?ev(^q#O2^#BKS`!pJ{(4L|f!?2vBs2dts~vSVCM2gEu3z z^W0*Ql6f3O+p=E}5O@J>L$f1@CVU0lmyV}i)hR-uGp(^w9-r?i8{_UMRKSP=kkw+G z)!`aZk=>$!sCpSTt*Lg28xA*>O47_FB`XCcsD%CHvXTQ z*?0#Gi(~8yJIUQWhaCB%m~mW>p!EFP67h7AR8EgAg*Pobu9X02MI>IRhLzvcn=gBa zdO=O{2t2C(2Ex!;^@?(FH&^x2lP%E19-t&O=$o8wRDQ0!m&a+-wo%Xb>t1(#fj^fY z9`23lKIh}H1>iRXJ(e82)l-jB4y95VLB)kH&mL{RxWs?Z5m~&nZ-lqbo7k{O8S>aT zE`7j>zKj@4#z42hIN$<}ip&hu-GSe(>}`_c->!OWt{gOf1M`~$Z8v96)zGv}WP9bs z?7C-V4nW7c(FrAYzR1o`?ZuY27Ld3P2ZFZTJ<(=RYa`olW|)2td2wAP;)V=T45}X5ao2BduZyI5{?JYVIt58YX8qdOQ zm1mJ;6X=!r&B4u)f0bt8%P}$ww)I>P?9zI9Fc7zy6%HMV83Mndq47SB4FLsrl>po# zxOU=R$1^ZPxexvqFXgv2|79$SEygd!KNW+vz{F{TzG|%X#{PY28SM5((5P%@xPC?a z*B#yP9+{2X3T>*zR-NDNYfjm1UG{{Vp9QiS^67ZP#u-+{pwmipwUTDHb?o_CKi!Gy*u~o|F9^47g1oJeWYFXdYYuDDn!~EU(EkA$w-AljY=HH0`0{H+L%uo_^3JKf6>Pz3p$|2M9i3#j!Mf~AU*nrn;=P>yg0`edqN%cFdK#ylT zkj*|Kv^FIilCUuTJMhc>>?#wJgQF}4wE5mT8}`(799bK;7v=slbthJS+OutV<)k$i zH25r_N87g+Ozr|{KpdekH)yXtM$h36A@9{D-aIT z7J{HG0*Ef*1d)I=iKFlNKU~H7b!)H!htdSt!@3$O7VOdBjapkQMIML|ce)Xt^$v0E z>P2>~je7ZkRVR)BqJj%Sr|Ox%o>iVH?Wp{#Z$+42eai=3xmPBjz0KgqSaXzK5I(@h z9I40Nr>TWN9PotRK)g0uCS)|&j#N3BaDCX!99h!;_+FjSPSe_wA015KL=fNLz+YR_ zWyl{-t{e5>8AXr?Tg2 zd?IY!r2A_ws=VtY6#q(N-#n?hwgPQQK?^A}ykez~9A^By*+}C*hM@Ml>kPzAjoeuI zY^%EO#J}F4zO?L(zzN6*{G$uqgS!sy6F++r*)7W#oi#9>qgo&+GRYsu2-y|2QfhEp zSk^GJAex0HRS)kPUP;AooeAi46J?P%uvpW0UzHki?F{!!AfDFH)v2~C)%Qmrp>`$- zLuVyn$BwX1e%KT69!0}8bke^hX*-tbx7sYt$A^*7qY+$fh*F`Qfi90c_Fb^V?@0A& zbNi8r-(sM#)I0mFuvU?N<=tAKe0VC@m5}iuCSiH(t%9cZFdXzj?F9Lgo)uVpBO7o-p5%Zh<)e1|8OS zx5tRhOQ%<8?zUO18GFZejZzUwp)99KYjfMGxfmT4E=tWtZDEa%p^q{bI@k;7(szlV zIkN^)X4On2`w5>=tcKaG-a$TWta1{OwZR+a5N&bNx_ZYo0BBgFS|{Y=Fg1xK|+$sCMXKDm~xjjj~^v&*V=+v zz%mL6)yybOk$<#1iZK#3#D!r^f%P3eEfI8wl4npU74V}OD_=9It$B!f0-iM)Yakrp z??XI>1$y^(Eq3EszLa=2xMpkcb&mjSY|S7#x+YtWH92^b8^?Mt5Q;;OW4H<#_c7A8 
zpjqmNX-nR8Q!(2RLx0R2xq!~&+A7biq_#+F}HY#wIQ@vV_eruCrF=SXpwctyuEG28pjLJ3@cl*-Wsl2n7DCUnGc)lXOhsQW6h%V<{hY&?_DxIS!bnDfhh(YZ9yosl-T$L*PIsG+T$ zRr!Z)&jI|8@|(yf$TAwk8_BE>2F+-Yf+I0&GP>w8mYOuP1;3)rIwkYNidx?o15Y@r zM)n>Gl4lWQgHBWa?xc1{`*xJ@9nvcdU@gsBVi|V`h0vB{nKhZIG?9dt+tyj*mf9Kb z5m}2g8$Bx^vZZikF)D(!yKdTmMVRGdHf$h#_pUj@+%?w9DYFMVFX+Z+R>$&NeO3X0 z#okGr9JU5eR_RHj_RL>V5oGcvlcJby_`So_0&;RYYvm$2gm52YZNB-ph1xFG{yMsU z(5yYPsncJd^2QDQst6HRSm<0zUM4DH`eMoOIwuH z+MxFPg@F7Ag@@saWRYK^Lagnqk)?mzg*7}v>#hzmuoL<1aGfdghn>T;b{bo6C-eU6 z>a5g&+s`{S<bDE3SU3-T^_7B1#g8w);1n0Qjm`e@xW@PvH20IX)&jzp`ac6C>6zwpm`F@gn zVMPbC-uPkRzB$EBAYZx=`2wH%vubm3_#^Gjcg2uwtCKqGPj=Dam_*3JAdz_hc5I?8mb|t~%q{8Xx}bq?kZ)~>?3`c!W~2<`^7H1y zU>b%K3j=Uvvz?-}?C2G@m3wC4nqA8f(Mv!pF&@rlywNFSW&3Mt1SIJcZm7?mdobW6 za=BWN`ln*xGgrjo>$?g#81-wVd50c1$8QT`SO%>D%Y1Dqv4CeZ6>cr88zT$Et$Q^e z#+ol7a5I@?=+!fWH>&h>EM0=DtzP4{Xj-EGR(EqNT+W+W+JEz1;N=gOK0F=z)xmEo?mjAuAHOfQG5*Gyc3n41cQa8X1zr?=<<(33 zT^JMiT{KjbCW;M^XSqc(11iVTHHwRPk`)W0dJ=6!@f2>X5K|Myiqij~`8{1jo<5ka zi89~3oIuhl%fxMJ9e3+ltXLYq-m_RT^nmvFQ?LlD5%;L5IfSQWfiQdLXHwgEJnalR zKeMyoEItQb8mDn-Dvx|uMMVd{BWPOBAf+8&IWN#KB;%Q*ya(J+LX*OOR~4PxGI=nNdx4t3x)`HC{{F2zo!QkjIBOx_`UU1MVzJ_5SLdGb z>si_;tCzCH7uURX5Sf|_x^~R|iZZ0NUzvGCGqe9HRp$=sjq0SX@BaK~ed~Yy4mEcw z1;HCqbSugp+iy`IBL<$^iVn(OzIMewl6VG&qz`~(_Sp3H0$d;m;*i5#>6dMWd~tqF zK_EZ>j;#1!pQpdL1IQl^{qiD_U;gJ7WE9w+{~59W&jnNewvrKxxiu}(cX(}KBIEem zFDM83<=ijugoERSA-%G9L3v%W&i~P(zuq+EJ!JHpm8eP6fq=qi9D0UtEhEj>>)>?K zZcrG9rwa=*3Nn)Zb~~ox2HMK}&M>w9#uwI}aWqYvW%Uvr7eQ<5iej1DG1aOV1%F-| zm0paeD3|<{78(oOL9WO`zx6!K8_X8R`yv)kjHn@|SKrmKQ}!PYoRrl6w|ZDv&x5s# zxRW>_K^;55D6#qqI7=u`XKsmmGC7oKs)x431exk-Zs^zm0}`w*ZhtOSkUvWsZ>0|k zhU%;s=%q+C1ClOVS6t%m6awskN+oILs^fCQUqE46VtQU$@dABGcNn-G3kOLj-R8Chutl&RJNQs zTBla{qivxp9c|jBsrfrPI}?jovA;A4{Ol_cW2AS4llMN!LJU+_PY>DM^@pJaI%JBf26hppyX^2tBFyJUV)Uns77j}iTq8RtiJXAf>mi8v`e4|o(z1OQwbPd@wBdSWu3K7a&){nE7k~C zY`%Zm%Bsy)PWi#-0_7wquy~$zjGNySGAyWaSx)i^E_o~a7E@B&7`ky`F zNzz+;P(9(5usje@Gw#N)*}R07?kTU)Gg@*3{8{!eIQm&7rL6aXm5(*rZ?r}IR>^z! zcdw&@`;?N_?hkIfEM<@6=C8kEaRfkO$ooN<{Vh=M9N7MR5Ov?D{MGo}<{aSN*uOYD zjr9;s?HrQ`R6g_RX;2z0XO2A1;g^4`eFd_g4#kuq>=KCV?^Cck zd1!+PbWaPvO)P}qZVu;cjB2Q1I-}DCx!P`Bft<7}VmRQPH=lWuq4f=+d?74a_Q#zf zsU4R6TifT)m#s>>fqWM67eTh~rSTSbm6TpCf%gyvzGbX!XQAJg6Qye7y}MC&&wzQ%Xqn$>PdSg|rbs<>0g1uAVsL^QZ7TV>B&gG{Q153UWxlNW?; zrRa*cZ1;%`zALd zG_U(a(#H#f$8ynK2Nz;{-sDr|=gzqf-Sp1b!E-hHvRaY|rdX-pv^u7ztfeltB9~c{ zmzuZjqzSI>!Tqr8&nofbX9$CyqHf8edaot(o-fhh#4$CSq-iqgQN@Yriw);dCUFhAWhCF45028n|h}xc~OIQqwIdFJaEVs4OgH ztd}I6bEQ?;<>)%SEbn&2IK-8mKUdn8)2w!gY9>vx%wE27#tL7of*bF&_}$4u-9pey zC8cc81M`xK8X7&B^pVT8On+9XI@6w%d&R5{&$lxjeRGsra${v7px-xn>S8asw=9BO zqy5bKYDe3z4%FAZ;@Ov(Y|3E|)M)*zXr3!CfeZIuEF{1^v%uuumFk?V8%YkmTUj3? zb>3VEeP{@r9w-Y3G~`-26o0Uj6R>>HiN|Sf?IJ^ zUun-L;|fA8yVjP4xt_Dl)pKu6i8~CsY;~f3U|-w5*qWU0LAZ3M8ogPtJzAPC_adsb!)qrpLq^oAWL( z%Sp66e^S|gT@&47%i*amdr=~T@AGlU)o3_xZPHxOalGFIt>3fj!oYD81IJ5dv6cAn z?WQ6rWx9|NI4Dwa#)S2t;v_bJUUBIvs+gKP%xOpxfs}z}$w3U1i1ES}?yR`p3 znp9KXG+6PGT;qe!891;9)n(XxlvnjVu3lL>>FD5eWgt-U{7eIgW zDXCxa`&zmmJ3=c|9$%a0!y=D2HweS7ido|K>#pALoZR02_}Jd~ zfnH;d1AD?{w4Tgrk|~ST5-?EkZqik<%Bg{>b}vaVmMebsb+B|fcazaNSRyP zfSiq4kfm9Fy!7v;rYP|O?|!AQzZf|KaaBau>p5a)zjA8k25p%xptL*nNWA-rm+pyz!$>~+3 zze4~q1sgAsJX(MQ15pH>rkKx-X)Y*TvP;;Eg*ix-d~@%Sm!;FGM7;*IX! 
zDg&g7(B|*g4!(=+H&IW2ch!;94f#p}kxP`m!P|ZLADZ7#FO$H!F%BtC=x$-3!Mg?!sW_NTyJ% zS1RguPD-v#mwpZIWr0K1jd)zSYu%V|Vx?ZA2Tj^JNqez-5YNvXGSCoVcH$UGvn_mq z&Xh?h{B4927P2z8TZ_C)vuajlZ71|)UJ+|_7y9EuU<*collP>sN*))#gJIfz2eC}$ zck)8K(WG+^H@JB0m2^XD_u!MdW6|_6$%&00wf@9({OuX3ElC$3r`2@hCsI#jSLM80 zQtYpw^fgb#uv3)HhNe#a;USoL&jFZ9+uecN$Xz%XTg8y7W3o3rI?*f8)iMhs>))yR z2N0rW-JYtP9t~UuAXuFKwNQ;;Tyt)LV98v%N?2eGlf(U~_&plS7g_q0+h{V2m-S7P zX-@f$LR->{AyS2>yg?s=u=VsR`x?4v>+BTH(>NDThZHY)m%WkN+7>^HaHL})sKiG) zPB}YevC{8$hnpvE{)4!DWDsZF6D{dxXjbDAeYr9bx_H1RW?)%6b|AjeHCJJ)RTDab zb}^Qn_LW^(mq~+#ms6|tcPhoxg!sMFkRn;v(G>OZiOxwEzsCmJSUew+PdpIsI=obN zOrspGiLV_K&Or!m3%-!T<1Z{1K&wWp(xXnY&Qd;OSQ)cnCz&pkRMzZ!xd2LdqZxfv zln3pyNL`)953*Ftv;{@{4iS1BTBMFiD-0J680`#0Vm5O*TpA0bHv5KWdoThTWWJzV z><|z#%Y8Xux45hQX&7B(r_7t}a8O8T+ZFr75Y^V09^t6PvV!96T?G>l+sv}1xNq6- z`shkje)qQ4;ch3zZO%DYLA}|l2Q$j3LbP+;1&!xWrdzP0O~YB%x5nLRcOz8u5caT+ zZm}nP{^$V7Q7yX*$7=fC1wmIYyHaNP!Xa~mJD#;nYWPkdH8AkA1`P}}&69Auj(`a< zW96fTG-$2lkWQ)~o?j+tip9^Y9~o?HMz7$vi87kZ8r$k-NsUshsLPZ^ES20a7SO1< z6*Wt)(+ud*WKLN@cwj-7P}dAS%dgL9Ij+Fo8{&t>}>g z{4UvFTGkS$Y!4i(Fi4g?|Sf`;)fVb#GOl%1M2?cjcJKwy3Vx*`rS< zE@l_I4*U`53Hh#9E}Vb_GqMYmOozp8Y|~@hlcW06(8ZK;q>- zB)3{sHDFK>O%&(z05!ub`fDBtU|wgst@ektAj;FANsmBF2P{jVb!(2S?dHk$RQr7|GnlQHtVVIT$r0 zhRQ~$jz#br9KW;Y>~S@qJS$s}S5v-AZ{5{SYg}q_wH~|jcHFnOWQpqZda@0NY{ zYu>iO%T3K^#y(gn$d*XieC_AUZkAB(>ohxXcrr2 z1?omop>t@)8=uQ-L)Vv(AI@_k`-Pp_m%$qkv~Pe026}E;X7?+*>YT9}Ajx`sHa*)2 zwNgXJYd&MhP@Tl4@;%h|YrV|teUuwvBtm%>f%21Zyx^Aox12=w1>E%UnYH!#mTMy@ zvXX-fb4D$_Aq<$+D$(oB&^LV=QH+{dw&}Mi0@Xd3)54ut&B&tgUg{io9dM&x(}CoExW zl||vWhG*`lGV*4Ri+bq%3?k};%Q6m7;=9M*O4oJuz? z%5*7xn~8XLPqW&^q001-QQ%RNIKQb(mz=sBX3axCNJ>wGI#g~ab0S?6y04Zb)ui&B zOVOOHKw_u_K6Hb%-Qvjga8WGvFaa+DwFP$xXlU3->oQ55tI0&rpd!U5+F>NMdBvrH zDzOKdV$q)+jh{#9G`B|R)LnEPlTZKrs(19Zw~0aC%_BDxs+wHns;>AG+NBFQ zA8CvrtKC`$CM#P#90zpfdwpGnJp@nYyY(=?mZq8uG3`}*_igg-U!vAH`qetkr&bhr zcAlP~#YFj8Q2bGM&`2;m2>mudL`y1!ti~Ark6nd~lyQ-Z+}?L*%9;+oQQUgEd)sdg z#HzSW5kxujuE0D?T*1Wo&|xA+tGr(0G1`%y%_;iV^8*DTSLF0tWyg4=AS2Z$dK40_s8)jUU$taY@Xz0o3MN77&Oj2FPr| zr^xf7b{K#YDf**-zqfnoPES;RQ6{}{M-tR_AnEnQa-}#J+O>+?6;*kHTDr8eJx8$p zfsSwl&hv<2UI~>FH0wv_qf#M7qbvvKK0HFj)g76AZ||%NLT*t|Tru@&RH_k+D1V=4&$${`15?4Y1p9yofa?9!1 zU%glB`~xu|96#q2Bo4pXZp)>(EHzn_T09yjoSRnwney2ZytlSNk~iUV<*}Rz%QF_L z-!imLWnYcCQ4Ga~nw!O>PYr`;hFtMVpVJoBVmNGTM?6J{TakQ1BP>CWJf;GbuPhxu zg?YY%QfG^4RfdE7s12mj&yt2TLvo_UVbAvBAPiRDo=`4jRv;~n&1OUnmP}mXuejJV z|JnziyL@oCjuP;7(ch*9?Q3D}F!Q~UP&7NBGG+>;_7scqo)DK?jLS$C$JbItqgN2t zDWt?oOW!tK%_lusTUDjWhG@b`4sJ(=S%K8KlY4$3Jft$Mumoj=*N*^@g4(E(+Uq=C z*lJ)3jnQ2k4Xdg3xqfk>9XZ`F4v{XZ4@z=82gC}I8f8Z46b^%7NlypQcVpH=qdP>K z#V2}q^uEfvA!z!L#;7uMEt+`kC|M@EZFrAlzF&Jo82QxA&KzeGsJ+|POk@-c#QV^^ z=29yrsM-pTY=;(d^?^JudlRD@%}}>rjsDeJAR%`5y^^Bw$xzAIVEtx2Bp)B@BS4+5 zQ`@2TiF4GRUUsnyH!GYZt#8UpkW-jlsS7Hi)Ob}pBtnttVy_Qpe>yo0F{?W~UhNug z0oD10YFdu6d-9nACazgNG`aW`oVk&)P;djU98u0lt(aPBhVgL130z!)$_mxJNMRxN zXGwu;UznBoDq!)PPSg}auV6Nn6u&TzzWk!qUM$_X)*>Z$W63-*P`72-6~$f^;FE$${#Yg6eoBD zCpt}*UPcOMNvY1j8kwMtec#{RU5;PK>BY7zyn+;&Igg(TJ)CjlkR0ne$;G#Z!qs;u z_*u=%G)0~L+Vyc`aG7uC)qPPH*;~6q68S-y=ZVf5M;Pf-2j@RSUM)a88Hs^=5*Fx{ z_Q>}m0dw#}FvI@;2Wy|ckJL-S|7t+Of6K5zp3c#?=Rbu?`~~IV^oP)`ko!O5%f0}j zFLeWh{|EuQSFJH;kgtOe{j>G|CSV64fM2%+r@#?Vp>SYfQv}@Y|Hm5J4|&u-B7FaG zL*E0WH1C#n70{V2^R+geS5F|(>g5M(4a$N%D>*J1DXk{~R1HoA^Tb&Duhi*->9gxO+^?+# zy4{@sLbcjhR6*3Kxn1YLu#-4zuRBuVJbX=Xg4Di(;ll9Xz`xMR@5LK~UCu5)h?v}) zA>#iR%X329?L7=|y)ROZ1Jbr*Aa^QFj<4ugKVLLspGS6YF^-D-}~dg_QFYb0yt*S|6M14;_YZ4lg&3n(>!?1A6GuHOau6rnn&^d!$_d<}PQMhag|klXB#h1SBZ zf~ASZAKIhYs=zDJAt~a0C1cV9lN6M9#GQ(ma=*(6rpeU_>XA`#UQ6%0`LDmeSSShy~XQWA7(GvNA=J0 
z`P<^Zxv5bcn9K)QX(&*cv2JNnU7UCSd(=&64#)S^bZLpxm`u$*otm}cg3qKC@0l$- zelnIj{zp2xDJ|WdPAyU95=g%8aqgx{O5AX+-Vo>GnZubc8y@tlH$&5=ylD0s1rn1t zth1f%R`YBxp_W-IFRo4OuP!Ve@tX+Tpx@@G@I`iAaEj^zY;Z>+Z0DMh;;Y2zPmZcp?1Lc+Ik8#hD%`# zD%yvVz7+pJD~&lm{FJtVa;MR3>)Lj+1q|2sc@qX51cSRJ7&^4ZQ>3ChW) z5+9zQ-!varwje{gz{sCoQZ>P8v~=VLaEL5h>`{Wq5%&y)oRjWs7i*l{;6o==bZEvA z`RESwq)rx>XG&GlgNobzRV65cCQ)KZ(vCKpq@&K!bU#wwbpfTo4cB^>zSB+zbjjNo z^sDG=mltL;@+V~Hiao!RB>jiPsUFPd-$*UJDfw=wo*P-_7WjM`HOAFu%y2MCn5;6G zj4zbjT)Qk1Orv+0$FrUe$cUErkKw zc*F(L09AaG$0X-5o={p9&@YLov3AX!ca(+p*a$>wo*2`)3`i+=VW-AIbJ}y|7w>{D zE)`~yPiPUh?#T{{O%TSDJI`ue+!QC?97X7uF3FS1Ey=6%@gKFlVm()7G*vxA416W{ zAp-r}%1czrWVkeiSk+pfimZ3$9&)ZEh$WBD*{ygtoA1dFX5364&J?~kB-@oZ7&WUD zPFBbFuKVge^j>|G;veE8-2Xa?)LwqjCWt?Ksj$Q#?a*#@wp2eqEdfN=_6m@7P+xXo zdqd6O)O)l{teP_n5ntXn~dcQk&_xA+u^ z@-!z^!^CcsS<2N<_Wnk+ghV6{kDBOmv5OIftKAAnr5E}r3FG14W3rT3HE7Zkx#HCp z_XKBM?&bjZWlqnvyE#MY*1(HU-?r5KYtFK!#{2A}mO6Dz3jINkg!RdA-C-Tz5iGcp=Uy3`seY5>eNzcs{-u`;kL-YvA+jA!CozW0Y$^-p)hWouy zP)ZxqRL6=eIaSF=^m-*V%qsRbpSzV?4ks3Tb+BI8+;(hNVfqctuOU#O=9AY-_VmhL zY{7;8`L+%M-PkK&)Y+=v_&UZbd)lk8m@zl^5mPvaZ_O-TE->`Q&JXTpbx?0ckol73c;PAmkbGO+Cr2UVnfC(kcCV`{)9h$;833s4pkN1rZrp1Z`f zK(|z&uE{4HLLU<#H#tltwUDbR0b4C6n~}(nIoV?8jHQ?QBicwGFa{1RaYs;a zY7R^9+G6|94w+0{@>XAXx13S#oN9i(|JI7b{8BuFSP)6C>b2-pdo9{FAkMn@!lBLe zR^Z%nzLZ8C=@DAkq?eD#ICm@_U#`-ui1MxpsKI1Ye<)VKGf;KI#W>@KxROUc)veOb zxiu7@|Ha;WMm4#1>%!R3wG^?6A*f4GT6Br@A|fC) z(n2qa5FkK6N`wHB8X<%pT1fKU0e$zo$2t3qeZKD-prb7Ql*2c3+Q?sjh~DcIR)yajr=_F4mW z5?@oGXR4BBPZ)fN=-RhqgodAI%<(t}GoY7?GP_6w-R;EEnYHHh{^Y*Skb5NF0ZNdk zhA??+qPpYVqOTgJZSphe*z>uZM(#Ep|IbE`_J~M-1MBbKj z;ymm@j4u#GlAYCEP% z@j*-U)4^91bh+M^G^(f%7~g1tJ*3@rB+L#ZnhjW+3F+Z~V|zK+-f((_>F=~Wc9x-S z(ClLs1fhvRaczFqO)MU~#R#LF6!yt3UI zl2P4hdNGd1BYJD}f*poF=dEosNZ-ab;jX22FD{zwY7>ln?=+H9U&-vNBE5P+)9zIspSiW5pCUQo08bezbO`uUf=Flg!ghT(6<$cg77nidN3L5u|$8d^Dd ztdn*+#7FCP$e~48I89$;ZStit#h_jqgRDCA0t1(DAy%TKXco1j_j*GFol`2pMTM6e zX8qADyvv|Xll^U%iZ75*;kht{w;(BOVto4oBy{>^tVu(Hz)`(~EEIdK;rzDW8A|uu zXN;+zwX|+VBT2H@AtA7F=wq{mueHy-SSaqR3y-Z{cwLR*(+nw{4Kc9});wRYbT4b( zu1V`%QT4jlI88r1SpN$g=-iw~dRv2(gG!-65kSEK%z<|8vpCYq5UEqq9t2|uajd!v zsbIG}3|EydAFU?Q6~AEKcTw{}_%p>je_pG9;-e9)KcL}xeC`&fvS5Qj-==j=?sjIZ z(J~RFjrSE-oM}>JCS0b46RXiy>$@_pU7AuWB17bm`|ii9u?LOTt^?D3*G7$o+SKa- zO^mO;i4BNY+#*9HOen2@(hgCUf5SZehO4z@8Lq&29$F)US8?YutRI=3w;Nt7u9PIF z*ug$WE(MY;FD37v$eZO-AiKT;8WWJ`7^RcTC5;b;ImzEvtA_uq6@d+*XZ@#{5Z)tk zqqVI=aGI8wk>>Of@TMa$c_)fh5{4FIs*#O!y1~AIAiNvI3Q1K-v z*GkT#5@efaPVgYIGb7PbMgNPVmKvwI%2tQ#la9+6K+FA|z3EC6g zU02lDfi-zlb~wX$U)UyM(oZ$jyw?eS@Q}Pr`1X(9TF^DX0vOA_ z7MwZR4agb;A+W96YW$}lu%~B%c2}|vsZIoSnaIAx5o$bzwAtPhAhb!LV4A#cxE(AZ{(8xpuJU%tVYQBWwRMn?t%@cUVS=r zJjoPB8G5s^?KDL4h3T~+8VuKf(8$ZP4=c|tX-a_N#-F2qivSMZt>-|3SfUKLN*al? z&LUjDK|p*Sl`I&}S^5%t2K(fu8KGHZC<$$~ozQ!a!EV&crSH)S=u;rBby%cD2^LLU zt*Ye$vK2F^3tIC%y~ap1OAG3raj>TSApN^?;~gdcH}tn9ZQR6z#ObJZ!$*dtk_^?-j= zeQVG!>hG-c*XksHLs^ljfo%&kHn#RdeDgPzWW2^MH|aYzR65HF*?tFg*CeB!zk!G1 z)?9#Bp_|J^$L?wx$Fp-De0mK`ZqKD_t;i%kFQuz@pgc_Ta^3AQIYwh_f4npHTfhv4 zWCqM^KK5kzmGPjgMCH@| zhsJ|s?bpl3Ujl?Q9F)C1Iq!l)E@(Ltg(h>B{D(JWYCMlRzhM~qs@nob!af!6W)b4? 
zLW|x-k^m6>;RobAF$Tk9kV-4-+bo)s2ax*f#~dbA`aoVZ`Q=#5l-B9jZC?&1{jGdI zf$2e<+!t(x-5Vah!8NBG8%r;d<-A(>Zthi^G4i&99NIH;`Y3Hf5jdxxJ0r4}@kp4v ztLuv8sFLyHCg>_0Jp1?H$c5tAkxP+?ZC+x9vJWUb!+F)cy^Ub{O1wzKlYg4N!{XuoK zdK&-|J)9iapi2o1pB9!l$aebT!%91Vs(5yPl=x> z!6i7Lh$!)QNCZ9fO4YuQz8+GW*BAW~)~v*-bOA9p!Ig_m>9?zG6L_5ri6?(+S+ZQK z&^@Kx(O{}rcZ(HA`%7Ce_V&o(m<4;Onu=3Iti{v|siiVX&>y+focWQ-1+HdVz^Pq{ z-+yTJF&n}UUehdd8TWjd8grFJeE~1cwQp}v%w8-x_N_$wm9(1m=BEp`uO}{j05OIk z0!O&0z&&u%Nu>B~dW@B<2(g_$_t5|MBz6w$6D#b)h8uvid06S%npH>;d9>g;^MPPZ zz{>QkpcE=s)F%2Syc?-7BeH}#Q_8hymWqwR2aqwz|1`+ltr*D@ZWbmg7kKM~F>hay z_!ItiKCskIP;>md{1;M3N%CDW*wBX}Xc`s<-SU^Bu|SH(qUMYU|FZg*m6v#k7HWsb zkD3Ihi`7eWhYxZZ*AfAP&nKqI#U3aJyKw80*J+2>JIPV+lmO9l%bF3C6$`L5gz3>3 z0xGWvNJcHWkcS;R(%#BOr9C_uv+#*CMkLE}J)tCR!`g3s!^wfHo6K7=q&GRkp-UeB_;xh z(tSR-V6POvI??|{q%wa9`sT|0RPT3}D-+s4G8?&*_%X_THZ`m=9iT1kinuTMcUEhI z#$8jY=m2#xTAbBj#i@nY&FlT+G#=x1HCxYnKqGnm^aM?J37%m!x;~VBR}|I$P*!Yz z@9f_j%j;npbo0F8;uIjcfasvh3&w4L_tlySS zZt&^i%d7t|*uYU` zk8~{^`e08<8##}yFf>btdQrX&R1Ke3uAG@jB<9$|RsN(|4y1uzVgs&@a%zLNNceuE zb+zSjnDI!^t;DH6?qlauUGHpLU4K4r(O2HN_WCBYRi$Eq*JCPvPujqIB$Sr7%*LWxFr zQ^qloJ)zM@W!3GMq^H;~G_~bCxiN_P)E@HNsoNp9(1!=5>u2>nD`iLVqf4l{lH7jh z)W4mw}Y}Tv&0`P}mpEl&*vO zCXSlShG<*Y7Tx`pdHWL2Uh>_)@rq)ce>v>IZiE;LzE^)(`h}~2g*^d-ws%M4aQ)WG zddnSfKms_b4KMXt<7*iZKRQX&v2surOzZ3M0q%t+c3x`$5bpp6&ABq6Erz++Bl^jhv3jHyHSMkdS^o8zDsiuQ2X23KRv&i4bvKSuzPhtcaQu3n}p zX~ZP@GutO=P-ClHMPyDvU6Nq0D#;8Dh2cs!m(~W%{6gqr`GOPM)p*04&IzZ%`iF6a zN+I+T`%6fW`LnESM_3Nd<>|^8NEo8ppWWY}O7phcqCT z^Q$&Ytp{(~4N8By(~zWc9;i9`XqMU#S`$^8b2~=sU~Tb98c{(O2r3s z+U$=@O&7BVKCo20l?~Q)%in=6HH!Mq&sydwPyBuAdmi;|7`7$)TP;0$j{O45220$? zk_fnD{!cKVd#HdgpGo_R^5tB1vbwNZ(8C2#k0JqtdNQFPln<(h_yVy0FbqVQCpDS? zW7JhUgq}alQ}|E0@#~-*c^(^sWXnrh=+!iHJEH+73t?vWhueT44!-{;;WM<*&2dw*l_pGwFUM_+ize%$d4eRus>@ z1Wq34WVEkGf>7wuBA4Tj+KFl$7!>OFV%Dy!JrB4>U-7P)(%I-UsJ5z-g@G#0$!2=? zEzJk4j0jepCy<5_mxk1%8TC%B3}~$5bAw)wQy!IwEV;x}KuN8WHlWzv(>{eVH@K%p zV#!ts-D_kIn{V6Au*TcVBEGZ1S9dsk-%K|RvEy$sptk@ev348q)BpnqocKUj7!dQs zbwSaJGYMZQzOO(112s1xnhEzq`{6no(GjH=f<9wQjtb-_>gPP$1Q9b9%SGaXFvY!p z#ZQ2hQ?Le%jdqvoW{n<+!0U2_{%OD#D*H#+Rl7|t&!vMGStHqn>dTZ>k_7$2bss-i z41;^mNGpwLMZtgfV!BSfl#;|R~E z@q6OL4^Nhx7e3?-0$B+r`Q;w8jSZ%8uMZc1+mq~x9hAI z9=2nCE~P zR@l7``QVcBez4l)V@*LNt$C_Sg~X$Tf4Jh)o5_-(SRY99eFOOaZLuWNo!p&9vtyc^ zL6CHJi7#IPm1}SKQXlZ7@h9BL7n@!Hz*V{Sw9P3D^0iZr$btAgrw@{j0rR>3GfV&j8TEc zELR4o!U8N4ctjL$w~@8LfzmW%e~@W(dAm**Z2&-#;B`O5`mF$G#4>)tn7ohe+q0iy z5doWAGJ~iO`;YAL{hzNOw);loe__oS{2;{qj790#e~=8m5!4*~;QQ@`++?RaCr}lE zGnozRb!UdFPyTW0h@5)}VZ|Npa;Smk*MJZPV%kY`%YDl6%MH%~P`fvbH3Dpo-5^h@ zBb}=4$AXTu&EqNFip%%b{K*$Hb3I#vDfqA}$#t5G)bCHv1o!YHDmBz>DmAFgzXIaj z4?y5PRAW~U?!M``ArR*ryCKqi#PPElk?(W*#)$oCb0Y(9u)Z{Cm;q z%-;wmqoGKH*@;+j50j>q8CaM%Y_hL_94(f}*{_=tSRl%wGsCq3ax7B#(JM!8$o%c? z2v!AHpgit>lYe7t9Am+RECCDf5RKW!&t62?9wHf?f{ca%Swx;apc*bKu)nq|o%hiR z`$yPLe|>R_Jk07f5OlTywhJ$3_|D4`zodJVwZY~JPXA|4Km`#Hv!z*TjXPhm^G|G7 zfwuPUZ{X+uiIj7YGndloz~W|sqBvkf*mnHa<9u%fZ| zdijA6SRx>f0;u<3I{+R#OF(Ks{(r(i5@s=wJddgDBy2w4_YPs&i%}Av_xAH4nj~l< zcPgKta8Q$B(mfxOtrWYuVKbGkns=JUy*X$tqJ@3~K@eUzWH#d-11m4}D%iu;Rnq?I zNKl=%SGCtvdzL#|Acll*OnLdHUWe`!k6@bEvZ$pIum1Z@NDILUIpS@ipawr5D16Q3d(^yOV665$! 
zci6_&(^np^Rt=pS{OErBj8BZTY&>0C*YnVx7hX5&lCFo(BXiC2#AOP(<) z4uARjs?y^#wYMI&%;KIF^|%Lg0_wod<%UvdT{Lv9 zAe337dP5ovBAfr{-nTd(%-B|;tkW`KmF)PRx+QG>2w_O;m8~tROYjT7JP9m*icim~ zJ{O50II#oT5;i!Ey&A*4Z13#2q&+5E{Awmp}9mL@85~u32ehUmq#Qo za=x)oJOxTLFS`~Q*5lGO8-(3KEuD%gI}y#0DCtkemH)aMt(X7dhA7CD0oF}!ULKgO z;{-MX_j2>p{fH(7^>GFKT*1%AcqE2%$?pGNb5-lpv&B@d}0wk~hsvnwtA256dGgC6+X0WWB+{sRoc^1q$ zn3G?ZOsTvl5i^RNDt6Lkejs~1pAK<1t5@uV&jL-;JLV|-CCuDE`8j5pIO`@!Om70i z&|Pk_vOG|tS-p!rCTGfPlm9Q6vfwx&bE?HRwIPEtI9!p)MB7!o@tj-J(hcYk+cNFF z3%}g{CO~oA(vYmGt)NgbD5?kW1_TY7=2BOFTcvif6A)Z`zPvXYG zhkcLFR87PfA8ahZljn+O)vfutR|bZ*w5`HP`&-nLRU0px8szpk<`m^u2pi;PBHq6u zA*Xag?@J+bWzvS0@5N1#L&BTR0G?B2+kI-lStT@i8{E9K#dpMXD8?#Xu4^RZc?L$% z9Pr9a2W9pVtL*hNkk_djg67lSuzq-88|GyR$4n-bY;2s>@BGFjzyJEg&-uqTTB8Ie z;66_0>xQ*bdXZOL2;6RoYbD2yzEKsn=dTct7>Aw8?sV`jqXgCo3s&AasHK-tI6AAY zNJeo+RQWF6rZk0f6;k_y;h@vNycMhyY4)0KkH3L#%F33oDXcOmbuC_KUcGbfbY3B@ zWl3KC`3D2ROQ5A_j-sx^{9&uE~-!O|fc+BoH-=ppIt zCF(O4Rs_=Q`AhNPmKt1=1a2rkHlMkb&{?vuPBWYkjmJ%UxeP>y#HpDaS!{*AmfdtANKF?)YaHr9AH7G4Xl8@5Lqz`b)1IY+50S=k_FLPVD~45s zp)3^R-8}|+f^($k6xPHAYvhVYPY`uB!yd)u7DAJg?8PI_!nan%Eb8x7SYhV#QTG*Py?PKuhku2Mw>E)C^1bm=)U(R-@2%%ijhlXZo zUgRoHtJ#|AgYs0k2pCj(yUZ%P73830o0s1oqsm_&!33k<Lc(b;RHz7Yg5+)c zeY#=%{Gy8KID@^nggtNMq8YDbOGz5Gn^MQHH3W(|*}2?YC_0C?L%vTVIwuz=lH7SQ zs9@ErHru5`5$}paU;Q}bR-tp<&ZN%kyKWX$MU>Q=W3ZHENHBO6{>{yb?la3cNhpJ= z_mc*y8MGt6UD1`cD@(+={K%uB@yWIVq}fY#wW9^;{zbp$N=s1h>8AR|C)4L_COY)u zt9^&;_P(WZ3>;dF=WWHa>Gb&FY9U^qWSI&Eck^(Pzzgq3YbFm3{Cddsnyh{<3V;6BDObep3~*o(OKeDRPlt< zO@U^aQ`l3@-)qIF<&J^5So0zS1sigHrUNtBo#e2)AF)-2DIJy-Jl@gG6YEjrl4BJ& z^*!_h_mk5}pN?52*|#3}EU&OHt*gId;^mf3q7Q93KsszcXO{T>e>5V##T2I!#|`MHjUmrZp;?g*5?ZT>QsKYvA< zLwUZBE?&eXMtd3B=-oA_MSfIp#-86iUhBv$uAdLbJKy=_NV>6EoKx|=+M@LXe_j`X z1mCnbTa(hOx|-m^&woTVdl)GiCymAi9V2ISc8Bz?c1(z|M}U0|n5199c|)MAowup@ zi@W*O$Yfl0?q1h$_Rm&?1hd7Wo`#myg^Vt~Al$e{JS&#Zy0Xu60b@B#QFIE3R`LNSu0_bIr*7drmwV1`ZU~BeDy%TrC$< zAb2+r)BE%Y%hq}$4P8P8@GcEUV!lkNV>Pfbcdo{={e1eO>=1C~Z$1(NBL7i2BX04W z;aWqkic5vD)=xNjBCnfE#hY>{{v9-svI%Zj5%Ub3u7PzSR){b*ouBvdcX@K;(&|ia zPUh8&qN~pp!TxS+tl(@`XtH!=adEWQKM6+yZ0oGNJo9~=WHWZmz$f18hADlRms53t zu?(sNIGk3un5v8Dw3$)~Y}tq#(qT|gl_vJ{{@r>n$G(7#zUQ1YN{D;)REv5<30WpX zyy5+-em#ejTn1yDIMeW$xcU`{W4f zKA`^p9_7rg7xf!^uA}>`>e^k~x4kM}a{dZrC*8q?YX$3;SFz{fQF=u}82;Y$G*9)c z$20wV!of^Y_UNDls1C5IWf%90KgN zdLN}(NW8W1Q$R}uWf-uK7}bGaedxh=EzDG2{>o+cKPS;IlJ;DzNGKMY_PlvoSMK8l zrE~ZCQm#ql@ICZmnMRWF*p1~^(^2M5-#dcW@6?M$aomrfUs%gIljfv;U2EB2uXAmV zoFlgX8|Msd{cT}`e6wdd`7r*6O=pP4dq(*FeJxi<>w`Ievkcbhgzyk{+s?*TDFvPZ zz!otBgeR>@NG;0YFemoU!(L&H8?6Iq^*H;0e=wH!^?v`P2mJ)6E)C5}U1HBDn=03{ zu)d>n<$LBEX-D0+gU;ouvq4o}{oqE1r)^6`qN%zQxqK=3c#Okeg9^*v!0s7hF0K^{ zJ>pQ~Xhp5HltMn4tA27RXWk|q0uQ7kOzH9Vi6ET*_I@KM|0XJ`q~Ks4%#EEYDVO;9 zq>Zcl`9`lE$!kCGa&qqI_8a|E&htQbBxdi| zhUX1*OMdeAd^Srz>WSr zmLQCw_S^D36dt8>C@NIF#koi5${(fL6%B2#;1AfQd- z)W$I^{sOzI0+O$v9{2+S@=X6>t8x@D=-kR*LEn?mpZ(Z1QC@2tG^49DAXs8|;kt?(X_zbo`- zEVkF7^p>$PM?0=APCBQs0}2GBdLW(+)kR<{BXxM#Y4rL77ow2jPgbvIJ!z??e7}7m zX8bRsbyT(VTl3rR)CgV7`%cq0Nnn z9c-1+4xwWijwY?KcP?X>MJqi@2ctNAZBWgGG}|LAxe_)`M%OM z6)1epjIe29i&*Asb9`^nvmP_!htEW%1`aT?3k@_}EcUUf%UugQ-Fo!9o5DVJ%>KiW z-T-SEn38e8Pv6ZXIA>ImtcMPU6! 
zOD|nrT?pAyR#zldh2Xr>dIv_*vQpizBW809titQ@dJV}j^#coLTgmA8b%`BIuLC5S z4cCEFFfZ?Ra_96Thow9D_GRrr2I z3S^iUnuvdT_Vbfh&RqNTPasoa0Qkq3>kqLt!oM--h?k5t1I8G6Yk|4p_jPtXWc?9~ zDZsiaPs!~duDgGk0mpUsH!ve5{{4P&r?q~)!FF2b-_gno|CZ9!di>v{%O3s)V8co~ z=CTcavu8_WTcmvzv58>BYomUCM`QV+-A}w`enPB2qT4W=ZQcXDedZN znJ!z>%m(J%4;C|pbtQcL)iU}wIK~|KrheucGl4OY&fr-pEU?)UCmJxD5VVHW5V8`Za}|%E zaAumH#@F5rIpVu8cfuj7*wbSCo)8Ih~OmX@Mvr^ZFxq#p$@=rBT z#hIO}WM+$Wjt#TFAueQsxZyH`M^NK?xfnw>dJWq;hzs;(Y5JR0aqyxwlZi!8QSTep zh#TTFJoW2!tC0y1B z4f|p(Ybn;ALl_#*Aeoc6uOiG+L>dE()F7FOU^p^m=_`m$)K^VrJhLGl!CYoC$8F%r zjBOnAEPd0aRsz95*)VLfG?^=iR7N~=<=L-!7kUZi3q)~P#_{{uJHT=TYXU6TUsveC zwYrU8J_wlq|2`4Rj{SN(@adgd`Rl(*SWo-w`cIR;U}t|QcB-;$iMIwD0-nip-E-BN z0^B1fP(hTXD!zM#s^JA50yJh^C(~6i8B|vr|JB(52ufYq&1u~E=;?f=){l94b6&u` zt^m;0r3HMX*?+9J?=n>Oe|wsR*S}g0{`tSV;yUy4^2Q^zV(k;_L+D9&ZqERmf%Ulm z-cP>1|F(J;ht;K6l?ryR|8e>Ldgs6HsQw4Q%LEM(F-+%S+ ze{Zk(&v)_p>0+}eglug73&RF`l+zRcqu>7j*{61H4u1SM8~R^whYG5#9yKG`Q(geO zvrp=D&<6ILE;xtY*z@^KM6irWf!gvC2^-q2IS^z8RhDm->LSg<6pK^4tctP{TT1)s z_c~<|^IoW=fMB$5eamJhKtk<&Q<-G%<-0S#rt1Rc`)L3g0_0T}z=~jkmJt^wE&?1a zhZ>lAD)Kt8B$uFRmLu8#19j1C%cZr>UHE=9lspJ5Owwet?U|jjKCD*r+W+-)U8U@g zK6!nq`cE>o+S}wpnvReqFYTuD>g$y1^h1G4UIE`-oc1-P~rIf*w!(_i*}+rOc= zV{1vI@r=U@m~bu;Yg8wIi~wAu^lObh>KAy(E8-ZWENH%udl#p@^f=5RH^>?ozM_pE zIj_2q&&_0|e`zdVJzj9Av251Gn|geeekgb+g%Gqcl-48ga08$WtY!R)Ww+5(WS)P@ z&YBUQ zoI4Yvk`^$b=nOOg3LM|TiDvCmbp%oC%j8EF;(MGnYVZ} zU7Cm)_9tJ)e4~UXEGHP11@)E>shUm?VguZwSfI0e?{GGXj%(ANSB9yvJakwMfN>R* zJIx-GS1Roj@yK=?+gybWWTCpu0JtWYBdyqXg7iZL6=!5{>v|e>r0O zA#+$(9;teai19guL})=1&nCw3*g7VG)r}cFrEeVN?kz1rr$p}k7vul~iiv2p~(K--dW2Go!CDc17?w6dv^hV0v z&@GiRwQ{WTh31!<7oqwne`!wFKpYAPMRug+z*~s6DLHD?>vy6hrO(sOtrgc#OfM5m z55h6fuEqhA_2N;&mi=&%w7V7M1o`5~{cVL_T^_+p&I3yhr7E{G^96yim=zl{RO>=3 z+M2^^;Z;gd!r89teZ9CG2;zObx0N8dQ`K2OOC}vk?K533m!a_W7HMksfx{s%Rv%yd zV{I4DB)EqJma!^tv9T?VR;BOtc+3k+IN{joES21u?RO(P1#O2G67S0H%c`=%OXzgk z`9gVChT<^F7Dfc$C4Z=#L-RyA=nIrMJp(f*S2mx09Q)1KQHyrFNNQ+SLRhZ6&;3oC z3&B^0j;7@4rn?aor%RzV#a^XU7~ha)*nshyM0-ho^R)~oPAKr|iYOB@HM7||y55(I zk5~mnQ8j#Nic9`hm?-L?uk{C6Se#4xW$%s*LU}Z{sdYtr7+t7atPT_Qvb83BnScq& zO_vs`YoYm)G^;nUYCG);b=w*C#gT2^{Vy#P&+X;(-g6voYpxQ%U#onoE)hIZxr0V| z7E#?A{02bgRXeLSA#YcqDS0ww*eg=qwrPJX^ltJ|<+HL7} zxTj|j{Q5t2qjLjePNeoipoIRGKKo?LB)d4pPP?^!19he7k$NaW22&sl>v8Ci!8xr% znj0ZQ!`YN!<4D+09KO;jd0NpbpnwWhlOMUl1G%GNdem5cb{#sA9tQ}Hxjg6^s zPOUPu7*F@E+){^{Y+bc4)Jk~Q%h0r3nvSK&Ke)gs6XSk?MPH82;z6GC#x7|(UY~A5 zL44#kO1$xkRR;WBJ>!+|qiv2E7dwJEH>jMFQ{1hu`Gozk57-J9bn?Ifq;cPOcJ(p!jUd@3y_*EYn}ZN$ORF z#t+5r;a_O(R*i&b;aD3n=uz=0@7tai(cWd9>U=*#m4P;)4}7B60>nG7PM*Qu4f_{% zJ8!o&@|PA4hWqg!*zJ7J;I%M4neSKF)8zL(F3ROQR6<j?975EZuY&dx2mFIXAI`*C%1g_H^VPEim! 
zR!3a0&^#DxA0Yp9MsEMTpSDV)T%vmGN5H9$8F1IoF$8BV@{mx84>%Dn5!NM(h10Gd z+3R#zn9mlNO@pp$)#``oQ-Q2FB6sWX$;Q4Lv|589I(iqoZMJ;{%qkX4hRg!h<()iR z3f})34T=vtpEdt>e{j^plz=G0L2G?6_Q#fY;v9%Sf>wVd_=I(R1cihv)(&mDvZu$i zEvO*VtqM)F2>@g3c^u}ED?<;Ck^#-%38y^4lc{98++;akH^t=FE{A`9$>N&o(<3>3 z!L{>Mwrqw+ZanHzRJgd9nPw;^tXa2TqpxK@yi7-&KVeEV@S0i{I^#I(GcvS0&R??m$ zT6>qDZxqYR3LV+xUZnKC8*aT6yo~@%G9{Wgx=b@Lr zfII=h(|3H|-SD+H^$@teb@*Dm{FQW4$M4E-?xEmQxzqzmzI-Ph`BT=$pu>TdEg_JH zmYgaGc^acTIL&*s?#l@b)5%7=Bt z3>*<|6OtSF^m18g-!0p!i9j2V+rgUCB*`h^r&X3x2m1{J#VksThW3uzCfcP=%F}LJ zbPs*Q1aSQ}{USi<4v!W6g1tl3oZS04yu4yVv4%IS+Yu~Gg`se=lM5!>pVC_1qcBA0 zCn0$&N8)j39!I+82Ee012I20d9@BNYX;qf?7g$9%2^6%##)BP0UPaWuAXmKmql&;C zNuJeL)_})AR&{9KFV(_^1UgXYuu<{dVKrO`X3^}RZ&T$FRgt{`Ue zs3zM%pg>Ky(=u-AmOvTxjX^r3j?s{hij_u+2=jNy!4NMt7k;0NvVTPG#mir8U`6{E z7mpS*zj)t(&aRDBR}YC=yHs!C`Y0jME0|!22)9Hsznml_q6l!@b}tX&4u07Dp=1AM zzip^l-zoJ|Pa`AV`HwU}rZGs7Qn?H;X}Zi_d2LEYyjwAhg>Du!-;d+oYhE9`>9rhU z3aN=P#YLRlr$AW77hsDhNdUPfGY|Sawys*1S0E_yCy^}qieZ zyYF?K<-is9PcQ|@8m#7> zC~%7N-ihkR9E)!nJIeJ2Fdf*1q^XmoxrwAAV76Y{6!fl@s;ZvrRcGAIo84acObqU^R-Urt1O>Wc`xCZ3_+f3#_<>55&rOYznpkw+q8G6ELNy^ss0=i!dNOyz7eluTzTvDi! z{}{#{e`dyejt4<=7nIx`PeE_Z1K)$|kUB+x!fl>1){G883^cI zKIE{hyRaMS?Czvv8oY02cGc0BTC77?x7C?6F(KJ*iFSt+r*6SI{R#FqhSCA zxqk=5D5IsL*On$Q8^?cr4?7FHdF>3D_OG+f0iMh#P7EsxLZNu-4x{2%RJE1^Kv|8G zU?tF*U|j=v7g|BM8#mPhXf(a9iaL?a<1ktVa;LB?>()i?CdS4mZEByffMGjuTI~YE zp$E~HQh>jNl=cJO?cO7}pHzVV!E}bOFb>mV0tNWNV8jGFUBBJsz7FszR=goEZ$Iy^ zQ!zE5l!am>gf-Oe9<)EE!4%$PA;Z6}|0Y)TpCFk38kzWC1GT@t^Zzpd^1t^r9w+V1 zSYQcogfC-d>p9rUJUA#hF>GvZ+Pg57DIkl0cfg>5=oc6;H8a48B6IwahViY@a3Y_H zWGo5-cO96*ED!|j&9=oy**wCOA=!CN{=+U&6%{w_+Etw+oB&_7vh95m2{+}%M-(my zpi`>Z4|iwWY6u82vev@U)fFGUAW;)cf)Tfk26_~}R0kRW8}Twtqx>+N2V@rywY(00 z8VCSLV1^8kVM7b+o~5yOZqw)tD*M>nR7-iX;ezQL7rypdCpDP9rBELW=g3|I_R+@I z>r%fht7!xHvd+4p2mv62AM`V`Y{k>m3NpXt!5_+`diSkPdaISRH>%FDYOA%bg#y-d z*`TX6KBr!4ADg?@Zg;%Se|k9i+Ht!%(AawtZo=O_!9h6Q0M9dfdN}ad-NU_u3+u0= zb4w&Fo)#2k@kJSOUk6cf?FBzE8)2cLli!FroQr9BoK=V|jw|J0q0}-E?POWDMc>`< z6C0Z_=09z&jgr7Pg&-Ki0Z*$%BUZr*#8ahh>ff(=t>kusiti^Meu4w0=cUKloo0Go z;4Jw%#1rxECfJthw_n_Vb3=m7RkFNhIsdH0|dqRVb?K_tQcFV2BF)jDEf7Z$j%3th$ zLVM|g%h1#Y)HK(lO{K$cqhQ{YfL(pdX0cM?{s&+u_H4dOb6Ph6 zbMN;Z)MTMT%tCmkcF+}s+6B13%cbLhR2epVrk0knD)4=X1!g&J^0xlYrhe+@Bn3uC z+VF=W$B%r+EqTii!~@786Yn2VW%hD^%3iJ1p!!xTd=Ui*N@4^!Kw!l@j(P=)XAeK$Y+FE zc&LwEKs2<5&DGA7I zmsoFIv*hvO>MyDh@DfR+(b||xRA-8%O1A!lMkN02a1_*PJb}-fQ`OXUopp*7R=l`| z2NV*86IR?Jo{8pr741Y~7W`h=h0HNLk8r4Hm#1nwoWso+s_Xapx>cR;)TxCvys))- zPQX**TuLqB+D7PrVa=qHLc5UkYXsf`^4t)NyEiJ^wZXf&%rQ~sRd3w-;DL1M@ybZd zb0{AXrLd0#7&c^I5Ie{RX!oOr4FY1~(H^$S^4PC!vRUs$1Chk_kYrF!qGTa~4hm@W zgaCiQ&Fj2rT+x(=@)xbL*>;^^6cq3Rek8=VX9dMrs+ITaSp;~&h{mIqNme@e;=H(O z-VzXgMVFzhy7~;)RXmAO#W8M!;oj7N@~GUW*%>qPSYe`?5=B(Fq7Y>@F8TEX`BFVZ z9*}`%A#cb}jUq52XXoXzQ?$NRw+d-apQ$HaP!evL6IrMHBd&v&U9&vCjsoVUtBN=F zu3e3RYpHz6C~b1KdSOGxppzn<+hA$!g-uwCp$!!E40txUwDXSXvts^wCpEsv4nv`J{pO*^0&{S>JMqpGNu|TrQ?AY4sp5M- zu6x#VAY~&Yc~~OK=?lXU-(duNn}^eHA>f#wt9{?18|gVvs|eT>YWdhq0gH8evzWBa zf})z?gvvL?Q4G{o&;yVI3LPmBQ_a?d<#z z#Gys=?`7F4135|2MW?{*kll>J*KLHv5Bu|$B8;{LyH`y}t6PG};*MQ~U4`r2?qR$^ zqfu0)X@0-1^2`})!j|>$xOvEnAg6|5(yOMimqJG6!w`5&NZ5-mm7EsYFDplZ1+^QY z@<3a@w61;OlRF%};BR-C{cOuRp=@MM-H(YbA-S=g+OY_MVS>_GF|;kucr4Qc9!=kk zZNOgX<1*;|YpkB!>4~x2~AvJzEMLU%{qB^NJq;D8-pWx@ku!TA|`hY$DSW$H3mJUzf_ahhgz zjzt8FcZHB68Ub9Ag@Vvx9QXBC;PmQoQSnT!$ZN=$ih?^mQkk%`b2sjCp^kk+6)7m% zS%>wl14@uej*N9t-H=0iMCpwddXdwQxhlP?5WLbF?v=m^8HztW(oQ@_MatF+GeP>2 zi18ZTN`XwK2=?CIheZ?(7uPKN6`i3wJlDcS)dQ92ayogfYbr}|a%Jl1`nQ4gX>yeoYta7DU$@%(mb&S8F5bL|rk6*H 
zIdVTeNa&D5rfZN4gX}ExxiD*01K*-%-XEjiemf>_8$^*qevd-tc37P#5@Y{*gc7-K z>l$K5M}UR)LvTnT`;LQUxfOH5Gc2Y~{*wy4Lq#RK*)J@O=}plrV|Y)fui+qCy|))C z3&YX&MH)pENdii;@uRfnQm{`U?g}&v?U?SpcMdda*V+!Pi9dWfQt9!8UTM96Y3JLR zVUF1%y4?^|NoYdGR+#o>qQ$2ss%k7jR__T)ea&tu%iT&!JG)xso7^d1Rlm7`jz6LELezn8>3t^CP}U zjJ9piPpls?E>aFMc1!W2hyYl4tH@}(M|N3*vvCf7m3N8D5eZ@uScdY?Fpq_ z`Z<0*z=(rQTprcUcjxCEL3~h_8^~%I-Kf_FmZV=|r^6*BYZv_Qj2AEg_wz`F>&&XS z9kyu``MCR5%rP>>aM;Q(!>YMeBju_tUMEd2o++u2=nOTBPIx!Alm(S-OmMh-lX zCQt3reSElAYV(b;aF>7_Q^(iYY+D+-8Yu|B46(=mN zl;Elew)AUM^*7D7o?nIGI!9injNe!+1cv@@+!Od(ugxyn@uhz%Y{?cNyMt`*boH}d z#rKZ;kL8OFG0*;U$+=i4bC1mZ`k-FS(bhbrFx=eyj|+nE*|#G&Qil4PxzFCoS)RBK+s#W*eStkg{XMO`Va7XQv9+L+DX!_==W&8#dw( zZQkqmC^$;9b)=c7CAf;pN)1vt>9hddwdaFFZtk77_tpib{gD#@R{;*6Zl`aigK!dg zpYAIVPja|;Z1>NB98dqrN7ObL{1VYEaIQker7W7c90V#-L0=eG_MW4vWHD+(HPob~ zAw%*a(34226n^?=spp!12GQ|19w%hmKK@Nf>g5iXvr(F>j%~k7M7^z-T>`G>#*SD! za!bHp-bKn?#YgRd-xf=-05b;Z=*E?JTC@yKT0lGJSxCt53(ASZLAp9cc>gAjDT;MibY+?l)0FaIoq{5dXJTOO`@`_+lw zom1V8MGut^=p}{dr@L&pB7Gz+o4+1ko(kff{&+;LwCG_aW7eqv(u_vomzlXUQ@Xn; zCa6{E@_mK8)+rHIrMOSR-qEg%_yDtka~9U-U3C3c4F>|D%dSoH6s5rXh{5ZmYZAxL zE8YFkq&YdTxi&3mgV!g?MI&C}k0(PEi5Q-Q5iU4P7&w|6{nXs<=l9% zal)kTCJ6n=_v6*CgTI1>?E(~6jy2BSicF<9&m0|W!f(iPe0+u$TVTYyq{K-zQx@S5 zZ4KS$Q1_gm83m3+kS1^EhkApRZ?4mJc=J5S88chcM~qx-V7QsgZ%`?<;L03u$`-KUz#+5^x~1?w`7!OXQm*#Nl>I{ zp!=7$P>$E{HKYd&S|4IwdN%l%I7U&rQj1Vx0|dZC!3>Xxu9y8Ja!^9j%L?l$Y_aqVPBIea`Y$u%OW^1He?D zjwqV24Ur#Pu?tCih}M=&k<2p^#z+fUi8Y`ta3~S3k%EQ-Cj^!Y3af*0FZS=5JvTel8h9={#OirYg z!@JXTM9Z!YLin9NXXD2P`S;;tE>*qbNY8%q%Fg?ZuZ+BQq@RfEOyFpEd{Dr$ERd>Y zD48dwwBQ{{zGse4+>0nGD;f+mb!&(%%nHKC`U*5DzcIi#3?$6C>uSawTU98hfa!h3 zQ*iT%aMQFUP2u>f)5*sAUyyn^ObD5G^vX|piHUWTm9kY`h&saDgY$~d*2aDv*}(dh5>Nn5cxW)4K<4zPg*vfV9E zn=7_2#5SmpF84~9Ml-1yz&FEirbLTG%`5ni;N_}zzv$MJ5Xh4?qk8yCYu%GdD_}@O zAyzQ>{vzCwX0}VQ6=3@v(d4>;SjZ_iLmYk*`O-Yu)~e%EVz95~44V(eBuBCFb*56A zid5a_c}G3KRij2k+lG$aI}!Ej>$mWn%y5vfbC)q|*c4WhX~U}zvE6qTp37lW+Qc@~ z1>l@_G&M*+zw_ZzWfq90_{GT=OH5`H&X4S@ck9r08kh{|o3Tp&m_!|N4!0rA6QzH+ z4)}WclmW7e`dCZ&70b&GQ@TBe<7(VeJE7gxT-l&P-!tt3A&DE}o!5HOEDGx?kAiK1 zO#QwIQfnZnXROE?ZiUq99gTW*@t9rJp0hi*>s_?b7Xf9|*d4;;{y6np`=1Vi=tw`P z$605hQ@<>Eq4-jhS;dFeCuVOz7~V8a)o*{N%Y!$aJ#jvj@W?l^xC+}MK&Y{drc9_T z^tKbxT2q?N`;*9r{N){>NdQqpH;OOAKnaz%z#p%1ch|2ZhVbX4XL^wWOs6)wz=QY( zMw@N#iEGakXqFyu`=p{)UCxk(hZ}UJTy6Szs8L%u-T~h+caq#&qI}^*56Z#D;c3rx zxYtB%zl!}c!R(GXy34l(lcG3$JEmwkCwrSOZM$7XoJ}R*3M5e`@=lc>=9xiSTn-zE z!p4UQfNJKVF8Ata6rc{m(l%v#9L~&8-DOu1Zf17`*~RrX4N39RPtVTHJ;!4-JFDil zj>}^Mw{r7e8<@cNar%_VWhH~6cNtu*`bGuUyxgRGd96#|YPA1$Jj}~#<^iQBRrtj^Y26{>d-X=H zJby@u?Cc=ZJd)mt3vzGv0cf<~xb8dtT@2q5-K2`uG5%5l(xD>UFB-*vE!*$KYV`vQ zhFP%6?O^3ZkUQkeolP22ZOR1rzjnU%x)FdKSmRW#g9OPjR=;55dTNB@*@gge#RK6x zFmr_!_Iu7QkYymc#X)+2S!B7rUxXcFJD05=k^x@G6-f2;#=)l|WQgjsgQ8*Zat4&; zra=%e!Js?>`-BO|A$e!UbH>HRj0?Y|r`# zm+r8z(LGq>DX?o^l7CQnU9m%qU$m$_2$70rF!6du$`Efod7K1fdPtPf(Y?R;Ljg=a zN20d)j{aYZ`>@8aMioGaUi3YIy|(#A%s5Y%F;c$e9|4;xkQEP9N!|si(rl)j$5Yv_ zdH9$0nA%smov@&#Y#a4l&1z9`2dpYs3f9FJ&nLjDC^{eN@D>fcWz>Ve1qb7{VR zciMls(~1FlVeuCT1Un)?EMEx|3vxjswlOesogf*m^goy{{<{_NKmgzT|GNPC|H{Pr z=hJx~5bdYKMjOBP5M=t?f1!`oT8ZlkKe;*sj&1YBvqucsq848R3fa))+8G}7qwKC* zL4bxs2MqY(=cv69&VLYA$}^qe+@NmX_pP9&ly%4$LQ<2aD z)1`LS(gZJiUcinPPE~JNf9S($%gq1Yg68YAHYFgOnizy$7j3bD5!tjM&pK$qavSSG z5AXEfHiDzKc)L>_yCR9U0jWXXIf!-Iv`%Q$Q2~=1C z{En#P6P&x4@QXDyq+ZTo=+BpTN%)!bX*n*FUj6*u;*B(k1zO}f`{9U zB&EZ{9sc{~nf`1MSLA#GZu2?xB_q9qQ>eU9$h%=;ifNwKEInjANECWL-ei6;T#z2Oc6yDxQ2jH~f!AK8J8~Au(KH^hT&|UqR_-9Ff=}Zv{#sut}kN3&>Q|` zhq>?%QefM;f4q6Do|l^&^>=Y=@Wq6GTh{*>@*(}vWI(bAOAGLZ-xb0%MV4vk!y33{ 
zzuqU}=mF$lY@Vaoti3`leBU!%cAv@hR11rd)-?FNAHN(GSu@H!cf@OC*4 za(TA__|bEH?S>?=QWL<7h-5`c&%;teLuZsjErF;BUp4rW)20Z*I}i#M39A4(U_1d# zobIK`#GE=3h}?99=_5iUBPgOo>Cjz}>;Sm0Wu5yO%1qwDmD{sT&?1JJ)w--}TU@v9 zdyN$>?;)i+hVUt4Ma%4)(G(J*yF~LP8j2g!g$WP_9Q)hl~M(GPJ9C-4>k`{>5)OFJ__7F%i6XGkb8YYe*wB32uO$! zbN(U<#2U76!M2FOIl@V9R(tJi4M3|Tz4LmqS5wd7j*S9`j2E?IR)q`oUwR-iHWmhA2&n+x{41~b2l6BHIYjzpKQx) zZ3}RPAz9@_hhh)Zq*V>n0a{Tq;r2ij-!$gAN5P@I{U@>zC7y!*V=#k;U9poK59VtP z4*^-{5r+!m!vVB$edhCIW*b#AGOjd>UY^(bsClF`)y7v?4(KF-Db67PzjzPh3;-xQgveu%N?YwGiVS7nGf%xKf1uNfvZXoU>h7$Y&4LUzLVt>U~-R zyBY(B)tzqJ)uB5+u~ll7>gT#cS0(6N;Y&`Ke4%V|8bfb*p=gRmlHGf@JrQedW5wba z@Nzk!Q^a5_4ks{lHYkf2P&>gJcEnXF~WOL zY&XHnxR6z@Jy@m|{_d-wBi&}|%Y%p_Cp?ahRX5MFe*m*8ZM{yF=)npS-SW#Xy=24 z*ZMO-XtbVYT!kNZgruKxBG6$a*Elh<>x!}4Cw=03?wGP=I9f2;G12G%OC|AJGCPe6 zmC}hN{8@_NUGR_R#z{^2${tB$_UkJnDoLDt zgO-t*igv;!>VjRcD~!0%fF^ua;%Qh!jtl6^6rmfD9YA}U$I&;y4!Ex_+B;MfDc3vA z)z<9Yp!@75RjbX|sc6EA(CEl0yHB~Nde0$PTKwe-hmAT{UVfec2CAl@q3uRJcZa`8zw z>d502VC`_nvUbnCXpzQoi$;8sTx0YOLvVA=fIG3?1F@saAt;-lp@1oxI_249l$x&Z zfrxo?7x6^5+pWoq=oW7ZDN;# zz7;vjSCYN(gAX3M`n?d+SL{=-Ncjkg0Ftc{+R4sEP1$@aq`G%%t&r!tU%?ezEC(Hf z$)HhTwMP|Yw#0+#blV=8MvmfkSUiBI7CA*t_)Q|5X(h3I5$PU&s! zzC=GvS~c!xW%ljEJruq#G#Obi;7#@y!VE1>k#)*T=(vWlnpSP6hZAZzJU`AgbKS_V zgi8+cD1n4Yp^-y4akLi_qq;162m(2>E%9YaNFtw7dVKCr0AnTUO77*ED3IP!!s0Fj zUMxaKjw9Vykb1E@J7ugX=Hm~NNob_Lpy9=CtdEHY;?s5|j(VN`UUspUaopZcX*Xl& zBPZE@grltgWZ;EhM;)~PflgKfb&tjY#Js~P5B%#sre}L3bxIRSxorAv#3MMy)ju_~ zzm9)f^;d))pgFS+jWbhJB-Pa}c{rZ3Q!T8j#BuA}S$9iK32$@(wsQ zEZ2EFfT4$7J{)XJm@}di0&XXwRSABXQiKB)qw`}q<$p0(+&*)w^^2C#NZqXV{A7HH zJB(O%p#O+qHc;77FLRy8lV^E2{Re47?@$7iuBq7omZc`z?wMH!8hxjH@CT{q&_d|y)0B?i6gB{saXTdv3t5weMk1=#9uyg^<`k1(lN%eelM~w!3GGs=2U)N zEiH0-NW)%Go}yCAd;}R2C8g}g`t3l8v)}SC1V#-nT@QI_zPvMIX0GC0xZby3oRunO$QI zsusHEXj(B&+S!A{aNptJOeKUJ_=UTR0H9RY9QBy9U59!Gw7IRqXyF@TO6cGxE;pkw zVMa>uwaw|a3{@XPFKq2vdPI}7t4K+vs13{Q=H;~*6O^jU2Y?TuPVQG4n`InI+tSa! 
zX=eO-rD;#+q)n2?sWLB#&rH3COZwTXqpZs2-T-MEtcPH{zYv2MM{wsqYnaK6$oM)H zHaaNJRCoD!Htwh4o1NTc8KKtxGBaE_H<(kw8k3=BXq^+pK?zW^RU(=)pA1%+U-rH^ z___T1BrxM+a|+?E^&XV?6M^{T_vmd(HG3Ttlpt}jl_22k{btE2pb5uE)hoB!AvRx2 zgvyhplP%1qw!8x)=Mx(cF}kfy;yy(LID(n%g23&yH6y z4o_J+HApkCg3LITlFwXX=wl#L_r5+v#yZkgCI^0wg|eXr5h*3P#x-wwURjo_2+!R- zw>+NVo<mcay(>3BhQ zLfb!d^?xB5(HK+72ZQn}=21cTd_}x%h~2Q%H#j+Kuievy=fl`vxK~#%YyTPa&E}C4 z`$X%WrIcZ5X4Duu8XDs}GMt25>EOrt_u#1I=wiHpZG!(w4Rv>dF1Q^(g`1T=g3cjA2He0HxxmVl+ z*Hm;s#vPzh7B#$pk|*F^3^#rk*Z~Q1Alu>!PHU3^vYMf0fWroejB|h@h>aIwpn<_U zYpgbK?zaO35Aw1S#mLSJ8Q;5Qs-OjsQ$V{w$EuDHbQv6#fo_iIG~nJf0*-&CJy*v@`Bt!`V zb&V^nKr`opft+IBN)?0C$`9*vzdx>?YkvnwssstcLzay7&bhPkn1B;S2LR2}Wz{5c z&c;Cx#6F(A-%hk=l9&uL>tmf+26#haN!kFWCVjq=`8t|W2p8fzNTn3v8HpFF19DVDx z*yFko+n9lz+2?4#{eeL)DKv@P2llvn*)Fc+yiq)%yzDeU?y~(4*&~wE@8kD^jy)UD zG*)+`7_MnOLVRUXbyrs{_CH3URfBlrQxUc1;JRN+>7_jRE-H)eA5z1p!Ds|wBQYzn zLcQ3pvP9HSFJ^I{xSJQz3YdKt6K@bQxcdo@sOcH3RwQvDXN*s7D{Fkgk*^DADm7_I zSf&7+>Zj|C=vJ(MpI>8%FQpT9T!=i{EE3&rfdI z@8RpXf|^XRw#TA#E*YQbahi;G&1?!S@|q`(`_IZ(JwODYZ~1&f+|OW+5PS>66Shs; zivr6DpKO|Z>w^3rldGE~0M&HC@JZJXSEE!J?);@oVy zq-{S;Dso6KmZK25j(m?{ z4ApmX_M^>Fd*?s;xw;zjyTGdDs=5dh*|v5gC&z0z#Rg`=B|@J3Fxw2Q+MpCd%Oz8x zRhd2P0^5Sg54CW3yHziFS;t>Tf)EngqHuZy>s2*1`%v( zcix8W{p+y{mLOO)!wSgVpx(d!EtxK%OA%put2aAzpSjq|zwfHI+m?n@^7Ju7{{(u2 z(HInL#Kk5O;mLoMkD+t!ODYq;0FBOem1|SyrV@n8dtHv^)Ke*gFHJYFe+~4wO)E2U zur_;RmTtfrvub@|>{Y`EOhaT{4Igjf@{ZNdjgsIBvyXpH)~D*PT<687r`VJVL$3rZ z@7&)Rovtl^wu$$+^&#L^Eb$_6hL|tk{c4TKXZ8#AmJ_SS;5-U3u&&GR@GRF(0uJd^QQRv(l-h*Qp<_w~FDL)O8y zx*jL@U6I@3p?PKmD==u?YEop{@ArdyV({3b_0Vb-PnueVisx(QG%ldbtHO)`^+^l#9qMQ6L&BVkzA>XFlk?_mvi8=aKjnpo-w{j^!|x#i z#wP1HOK9p_x1;H!V9?VkiiW$p`qcX`Y6W_#zs{K5xZ zm-6zM?tMX)c^jsa&IBQCMP|YV)BijcfqzS@&YPTAz5krAilnK>@RZV|Dq7fZtrM6P zS2G7yvZ7BIQy*t?C@z*8GLUGm>Q%HdXuT}a*PUiEL*L1MvybL<8>&T7>Wo4s`wVtM zEA@Xu^HsRK$Mw&IH+5=NMWh681DwbPG$%eHw;c6}hUTLX@w$1F0im-a!FM)FO{L8j z?eo$br_2|*VR5}A@VAO}{*UlbRj@T!SL-&&RY(xObUHUlzO;2XaObPmaR+%} zZ)%5-Qf5BL=`1k+lZSCiqGl&BD`qtnX)D0yJ zDVodc8Fr~8MSOm~q`05Go*YyM=Kbaw+b(~Ym)YRg?ROqD*dg($g?P?Et{0_s_Q3=M zYYOr_wX#|+U=lP6-!3QuR^3wj*pofV*hF;C&|hEaaHzn$s;J}8;JdH8I{}T3oG?3z zX=@Tr2TL|XRlQyuZp|cpnFBDwMT?8aQmgbwvu@ql&qNAtJG{IJt34^1(0&VG5@VJ? 
zjgv%+kPFI#=Q&0vS{BRf3%8^?I11~R&%slH<@b#_Ds_v2+omSDbpOwKNBHe9kygzS zw7R+{3(!ab1eE=rHfcZS|43&M4|AX{N~?^&`rbk4xK?x_m?Z_1m9s1PB4QKHN}k4Q zayb_n#kh|`A9o^c8N+YFDq>n1?V_(9_CC_X(0fR&173pA*cybGDH=5tm6@Q(uz;#@ zXeCM0hfemgv&T9?S+ zU^{XfI~2`~3y($(miTg|Tv)eZG@R8gabFVs7L--v)H`$7FsTv{)9w#0+3oUGaR1Mc zFrcQ=u6dY{*r$=4-sidVq&co|+h1;5YfouTPxVuvZ2Q^Wt;AztD*(-J=_N3{?4aq( zeG%q7x_%~19>L3O;=Po9PTkY006)K&GS<(GpwFPFMdSX*^1$7l@;PgVC}AJd$?N zMqIwzf9MKB(lavJKR7QjBXaY-vGsWVEOp=Xj74NT7m!18LWozI10!RWdJXxWYB=TM zyp*|*VNI0BD+%Y~aNUwF8kN7JRq+LK-jYPC4B-8-V?T%FxM)5cS@zb7VHJzy^D7dq zvdvqgELIW6qa3HV3xWF``mw|`4`yVONZvR)#!^;y4t9ePeI*NO4o-CWgW$|d6ZFS& z)*_ggM(@6~$)@-icd15JWjaIbg zI8UgvP~-B=YbIph{r=&lWz1mB}TJT1?8!c{g*q3C7U@2KuLb6N5~n zn3J%Xus~&x=dy(}w)DfiEl&*gaF#_APy{WCI5lE4?>C-U^NWM+P3O0Z$c~jmI}}^e z_SDP%lry81I*uXA+5%v+g6AatJ&QV7!8%MeTu?4{?&gxviApD0@K-@~h*gen-pnqq zif@H##eIkH!R}wd`fAZ3q=zDX2X}4k%}O)55hGFrNSZ*IKJJ`~F(jm+kUY8{IerVs zyTkyi7ZB5Zrh&^vWMyc9U|>7tEs{6m2TeX-pS4|D@N# zfR6&ju2n`CyAz#qaTV_9Va)XHwfs%|P=9&`2J%mM5&$N96Wt07*_z90lPn_x-2M57oX~u-a z0E{WK^k^7k=Q>)ayO5ga7=i?`>^JFY$@cU@`>d*S16SonkSxcy3FjOf9>P8|GF=(> z@hWvZKK}+C^Xd8{5uvDK>}grF<6yF@gLM`V`&^X?j$wX4u}<~)(BPhl7B40YnK2_l zF(i^vWp4m_5*{EpS~eh!%hJoFmtXi&8H#$8{+i_>s6&$<27d$jPKGT@1w7@QXG?KV z>NB4or`1H*x!TUk`lie2`e>b{t*r+=;n7`PElMYtM1Lr;K8QHh+XsR<4gv9P-2j=RGt#k`# zytPvNl@Js$AF6xxR(_ z6^EI?0hQ+}?uuEiS1%QtC#t;=4oXh0G*bzPU1w*-IE?;7c<;vvi3u~ zLAE%*u*lBwvSHPizOfWahI;Ie*R_tMiuOFWu_KsBnIu!X*H@%@<9po@xv$kxW3txD z>gfn#rgL}!C0rtmbt;_-n-IX8)qZ~Ga~z(k)267O%iBpF1*(xC8E=!0d-x$5d2hy7 z_%aU$R@k!QQ|q`VsLw7>0Q^7{-2gSYz{DT4uR?IW0%vsTE+|U)W!m1`37v|Ky&;gH zGj%zV0A--Sz=T$Y0p?{C|cma%O(%R*i7CN%|H}T5kq)PS~9BexRi2- zJM?)^7(AB0=u-oOr^2_n(Dz`M+{P;l?b9yc<+Jv6er0cMKeSY-HW}KXXs9bCF)?RC z6bksEaxSj5OGiAzb8O|abXQ92L!gx@G*d2VW_cgQ5JfvUb+DkT9Ohwv&kLzX;rg9p z9H@XoYC{88(;L=728$8G&RlC)&DI`kSRMXp%Kpld(xO)EPXC0z*#@ z^pFhVi?~4?*KXH3s(^LU6X92rJ)gdOBz#FQKwwyOhfhPV6EKzB)n&x$)G9D zvH{|4h*`p!@k7IvGZD)lIV-|uMpzU#eZ_F~3jf6bta+s%{DiQ<6Sp!|v&y+twi38H z5YGEmkL^{m(pV!Gss>PCx4%d9tbk-^ZZ3enV7(;4CJ1C~9x3yHlE(dxDv=F3ivhxu zYURQtktUklwwW;tz+401mc`it9fJLkQrPk$oHT#uEAeHIMi~zjxf9n$l2ZcO6}N%( z+@O0tAyv7O#gqqd8W0V4#O_-6_x>i zE!YHPlzfHxN2A)2lgF0(3p3|A!q8*FW#%zqK@E4nt*3x-N5};Iq7IQajM3Q$iExW9 zmNX{n&Ngj=cq`5HvqOFym9v%)zTS#?v72xJIXBz;k=}Hvc|W1WKX!yN65|uQtkzAz zQskeumZ5=>5?ruZeOo^gx6_3nbLy0ZO&@t2(B=Xv&Ltvc(CC8|zlP(o&|Z#q##ge+ zNQ|-=khD4VB8YwriUM`}($VO>h z>-G{_m}$d=Jd)-_2uzRd0#3e{ox1~}z3U)DSq<-FlugF%R>t5E zKfnnT%Vq&lArQBWD+8ZT*0s91j^=0C0o^eE>{#RH{68ngP+x%L>bY!D#hae&iUDc% z7bO=9xnIV4DH_p~a-PcEADERnjeT_<5Vi*aWUNH0%e>Qn!|kTC zfY3}kUtuGy{`4mi+kX(qi9hodsYcueHM^$nQVwlcYtqY^Xe-~maq%ctA`Yu#svNuu zWXVMuS7NcROdJ|$|BF;nilOq`9(>aW7*tS4E*@!vP(y&I^JkM8_nf>nvW30S~=i9bF#DlW)(LG+J-Cja@5c4s27 z(l?}mcECIc+&;hBu=zj_*39%+ss2?^U|=d%)S05!&|%**k^VyiuXTR`e;@o#>=u7? 
zy61lolK8&~**iT7j*EW_0y>3>OQO6K!&V`X%^+q2cED;F*eyM0J=NKuyt$p;V=v#o^P(@QDWiKn50;ffqir_Vrp@?gY-mn$Ly76($_gJs^{>4F2P!!;x zep)Fp<%4G) ztM6akjZd`Kb;k8Z!Y9i#-U_Vfn0$H>dxXkS-yi{=7=WyRgQ*5M`ElxXV7;y}oI#H& ziK~WIY5vUqLyJz3+$gv3Zumzfe7TS&U|R5Zhg~J z(HKmhU*o2<9hABO@;jioz(_mNq6QT2kgR1T--J0iCxUpw(Po|MEL&@?u^aqn<*9U< zCb!8Gr=nU7I`SE^JNUe^|5NJ@S)HwRHW4>fQ|F)d0F>o>5A|PDpSwQT!x9NA^r_S@ zTy!Z5t#Awhh?cUr$rK~yg&f;~>~%{Q6X88Q&P3A5(M9I*#`pfV*|;za5GcGeA+j~K z)jPle00L?IWwTA@x-%xsjlb1H-dQ{BFr#5g0Vn=7YBn(sYi((LZ1?&2&2MljPVQSz zyG6OC7GIKi2iz}fhdy6{y=wJ!tR3RVfKrrYdJ{K?q*7nPFKf+gH;>P$3p~jp&7#oyL>e0R%PB8WQ$v|-Jo4^i4)2yM^_;KWWFAx3n zn$5Cg{djx+Hy~koOl)d&Ro0gEe)y25@$|KZjl|d=b!23`<5lijA@8j?zO!&>bGWvr z{w=`tC2uV^O5{gz`;KJor4=6|X=)UJgkC*PXF#XgD0af^SmzO>P0w&sBK(I^lP075 zRvS=l9#qIF-7LG%+#%+SVSg7J>(l2{hhUpcQWZ8|GjY0oqengESwMbc8KmgC1N>*- zd_1IK(O%;6x83&#`E;FX7b*5!Jg^VBx$i%S)z+pZ)=b()57ok0AJ@xtrK+><`rB&j z%7#9|9Zw@$_IT)#!TfhIi+yr%GR#9H{4g^JMinxxxc1HYUJegf)>v!NT83x+kQBNZ z{?7fYqMs;D7bl1h`gU*~hOq5=rGJRAsG5c?tQajRY9Q^A*ltDqte=acRhqZ;6wvrR z69dIs&&JylA%KJ4&Ts33XNz=Qf%Ox$mcFKo2IQ*@3&oRJ;rpgt^+;5mmju$aJ+I#@ zUb?qa43I7bSxF#5*b!1aB>}54$A~UEWbyrZXP%J6o+8Y;$z8)VEM^+ashrLtqZDl{ zfdIUcevr!paP35N2^Vbk!4km|SF0lVPd>rR;nnk)Wu6zp=(8O{#!n#u}L$MK55N)n76W=iev-O~8yNLK5 zuWB{L?>Y4GWYcC5e6?cFZ_YwLo#t`3{JvGgRIYU^p4Tn;@g#enG_uM5t{hS~+)Iw~ zAn>Gn(l}iSE-8h%We3`UEN=TljGtHOc-h%w8e-D<$E%|)BOFG}gb}HluyM8HgjXl~ z{t?%N6Oxz$A0l9O=Wm7!5+hnI=V)b2jvlyc%T}lI zAEYqX)~*5j`x?_Wus{4R$In^9q#N0PHf%@NBJ)UzjVtg^?p~_!Q+aG_><>a~~EC!6Q}VK>UV>b;*%iLNM-WEThSpH(j{ z<(BFHS52#5a8D=mSfg)1%Y^ECGI%I+*T;dec=m7R=EcGgY^IB7c|kCh16py&3ekR1 zyE@o;2G0@H?*MA0$<2i2$}HC;D=Mi1U#L?zP@EZH?e^2d%nZ^YIN8K*BvLkY2etPq zTgg9*cZuiIOhgph^Z825DsJM%Rr`1hf$+`XPNTN?BDAmKr+W9&gVub2>KGIKB{%&> z4a2KHnZ(h2@Buhx(i9)m7EqQ^v%8;f1!IJ>Is5c81}&clq{YA^EAR#kp5<#id+xly z9d9a5Y}HPuiEU%;PiAc%Kb;ygUYFw|XFw}M=-<#vT*$(+-4`c)9R`b!c+dt5cCT!j z;ghzHXnC?onpQf@#@5+*-X*=tLCdEBx+%Xsr;NaXIB%Yhu6(P4luq?GP=pV>bmQ3K z%`GCkvg)s;0_uTm>gm4;>63;z|es^P| zQAJ6^gx^K+{E67IHYvdeY_?lP$wjJvF)>ekg`t_XxgU{z@syN>`QFHz2?a`E=rpkK zmT}SLYLVZ_K60IBMZZU20fxc+$X^xs)^&BZmVe!p{tLACHup}MJW|4g)*>$98$sFe z{VvYJZkN9hdHxVcm?CxUFp~btgLawfUr}XkM_s?RM||-6{{I7hduJ#78Ui#y$M5(} zOK#i2KX$}Xkh9`CwL=PPYe3W*@Q(3-I`a4H{n{gl-$p2k?a$grKO3#N+2VJfzITrJ l&A-1QnlR$mzxEg#;jd(@s^DwYBKq*cS<~O~zuma^{{T^`*pUDL literal 0 HcmV?d00001 From 2dd40b62efbca2054b75b41ba0d660c540c4eff8 Mon Sep 17 00:00:00 2001 From: root Date: Thu, 8 May 2025 06:29:46 +0000 Subject: [PATCH 28/57] add vocalnet en data --- .../local/compute_whisper_fbank.py | 115 +++++++++++++++++- .../local/vocalnet_lhotse_cutset.py | 99 +++++++++++++++ egs/speech_llm/SPEECH2SPEECH/prepare.sh | 53 ++++++++ 3 files changed, 262 insertions(+), 5 deletions(-) create mode 100644 egs/speech_llm/SPEECH2SPEECH/local/vocalnet_lhotse_cutset.py diff --git a/egs/speech_llm/SPEECH2SPEECH/local/compute_whisper_fbank.py b/egs/speech_llm/SPEECH2SPEECH/local/compute_whisper_fbank.py index f67324ba3..58d7cf3d6 100755 --- a/egs/speech_llm/SPEECH2SPEECH/local/compute_whisper_fbank.py +++ b/egs/speech_llm/SPEECH2SPEECH/local/compute_whisper_fbank.py @@ -35,6 +35,7 @@ from pathlib import Path import torch from datasets import load_dataset from lhotse import CutSet, LilcomChunkyWriter, WhisperFbank, WhisperFbankConfig +from vocalnet_lhotse_cutset import LazyCustomDatasetIterator from icefall.utils import str2bool @@ -105,9 +106,50 @@ def get_parser(): default="belle", help="""The dataset prefix to use when saving the features""", ) + parser.add_argument( + "--json-file-path", + type=str, + default=None, + help="The path to the json file containing the 
vocalnet data", + ) + parser.add_argument( + "--drop-recordings", + type=str2bool, + default=True, + help="Drop recordings. Default: False.", + ) + parser.add_argument( + "--subset", + type=str, + default=None, + help="The subset to use from the Huggingface dataset", + ) + parser.add_argument( + "--split", + type=str, + default="train", + help="The split to use from the Huggingface dataset", + ) return parser +def remove_short_and_long_utt(c): + # Keep only utterances with duration between 1 second and 20 seconds + # + # Caution: There is a reason to select 20.0 here. Please see + # ../local/display_manifest_statistics.py + # + # You should use ../local/display_manifest_statistics.py to get + # an utterance duration distribution for your dataset to select + # the threshold + if c.duration < 1.0 or c.duration > 50.0: + # logging.warning( + # f"Exclude cut with ID {c.id} from training. Duration: {c.duration}" + # ) + return False + return True + + def compute_fbank(args): in_out_dir = Path(args.out_dir) in_out_dir.mkdir(parents=True, exist_ok=True) @@ -130,11 +172,14 @@ def compute_fbank(args): logging.info(f"device: {device}") dataset = load_dataset( - args.huggingface_dataset_path_or_name, streaming=True, split="train" + args.huggingface_dataset_path_or_name, + args.subset, + streaming=True, + split=args.split, ) num_shards = dataset.num_shards num_digits = 5 - for i in range(num_shards): + for i in range(252, num_shards): shard = dataset.shard(num_shards, i) # shard = shard.take(10) # for testing logging.info( @@ -147,6 +192,64 @@ def compute_fbank(args): shard, audio_key=args.audio_key, text_key=args.text_key ) + cut_set = cut_set.filter(remove_short_and_long_utt) + if args.resample_to_16kHz: + cut_set = cut_set.resample(16000) + if args.speed_perturb: + cut_set = cut_set + cut_set.perturb_speed(0.9) + cut_set.perturb_speed(1.1) + + logging.info("Computing features") + cut_set = cut_set.compute_and_store_features_batch( + extractor=extractor, + storage_path=f"{in_out_dir}/feats_{idx}_{args.subset}", + num_workers=num_workers, + batch_duration=batch_duration, + storage_type=LilcomChunkyWriter, + overwrite=True, + ) + # cut_set = cut_set.trim_to_supervisions( + # keep_overlapping=False, min_duration=None + # ) + cuts_path = f"{in_out_dir}/cuts_{args.prefix}.{idx}.{args.subset}.jsonl.gz" + logging.info(f"Saving to {cuts_path}") + # see https://github.com/lhotse-speech/lhotse/issues/1125 + if args.drop_recordings: + cut_set.drop_recordings().to_file(cuts_path) + else: + cut_set.to_file(cuts_path) + + +def compute_fbank_vocalnet(args): + in_out_dir = Path(args.out_dir) + in_out_dir.mkdir(parents=True, exist_ok=True) + # number of workers in dataloader + num_workers = 4 + + # number of seconds in a batch + batch_duration = 10 + + device = torch.device("cpu") + if torch.cuda.is_available(): + device = torch.device("cuda", 0) + if args.whisper_fbank: + extractor = WhisperFbank( + WhisperFbankConfig(num_filters=args.num_mel_bins, device=device) + ) + else: + raise NotImplementedError("Only WhisperFbank is implemented.") + + logging.info(f"device: {device}") + + num_shards = 50 + num_digits = 5 + for i in range(num_shards): + logging.info(f"Processing shard {i}") + idx = f"{i}".zfill(num_digits) + cut_set = CutSet( + LazyCustomDatasetIterator( + json_file_path=args.json_file_path, shard_id=i, num_shards=num_shards + ) + ) cut_set = cut_set.trim_to_supervisions( keep_overlapping=False, min_duration=None ) @@ -168,7 +271,7 @@ def compute_fbank(args): cuts_path = 
f"{in_out_dir}/cuts_{args.prefix}.{idx}.jsonl.gz" logging.info(f"Saving to {cuts_path}") # see https://github.com/lhotse-speech/lhotse/issues/1125 - cut_set.drop_recordings().to_file(cuts_path) + cut_set.to_file(cuts_path) def main(): @@ -178,8 +281,10 @@ def main(): parser = get_parser() args = parser.parse_args() logging.info(vars(args)) - - compute_fbank(args) + if args.json_file_path is not None: + compute_fbank_vocalnet(args) + else: + compute_fbank(args) if __name__ == "__main__": diff --git a/egs/speech_llm/SPEECH2SPEECH/local/vocalnet_lhotse_cutset.py b/egs/speech_llm/SPEECH2SPEECH/local/vocalnet_lhotse_cutset.py new file mode 100644 index 000000000..f7519fbfe --- /dev/null +++ b/egs/speech_llm/SPEECH2SPEECH/local/vocalnet_lhotse_cutset.py @@ -0,0 +1,99 @@ +# https://huggingface.co/datasets/VocalNet/UltraChat-vocalnet/blob/main/UltraChat.json +# https://huggingface.co/datasets/VocalNet/VoiceAssistant-430K-vocalnet/blob/main/VoiceAssistant-430K.json +import json +import os + +import numpy as np +from lhotse import CutSet +from lhotse.audio import Recording +from lhotse.supervision import SupervisionSegment + + +class LazyCustomDatasetIterator: + """ + Thin wrapper on top of HF datasets objects that allows to interact with them through a Lhotse CutSet. + It can be initialized with an existing HF dataset, or args/kwargs passed on to ``datasets.load_dataset()``. + Use ``audio_key``, ``text_key``, ``lang_key`` and ``gender_key`` options to indicate which keys in dict examples + returned from HF Dataset should be looked up for audio, transcript, language, and gender respectively. + The remaining keys in HF dataset examples will be stored inside ``cut.custom`` dictionary. + Example with existing HF dataset:: + >>> import datasets + ... dataset = datasets.load_dataset("mozilla-foundation/common_voice_11_0", "hi", split="test") + ... dataset = dataset.map(some_transform) + ... cuts_it = LazyHFDatasetIterator(dataset) + ... for cut in cuts_it: + ... pass + Example providing HF dataset init args/kwargs:: + >>> import datasets + ... cuts_it = LazyHFDatasetIterator("mozilla-foundation/common_voice_11_0", "hi", split="test") + ... for cut in cuts_it: + ... 
pass + """ + + def __init__(self, json_file_path: str, shard_id: int = 0, num_shards: int = 100): + self.json_file_path = json_file_path + self.shard_id = shard_id + self.num_shards = num_shards + + def __iter__(self): + + with open(self.json_file_path, "r", encoding="utf-8") as f: + list_data_dict = json.load(f) + list_data_dict = list_data_dict[self.shard_id :: self.num_shards] + for item in list_data_dict: + custom_data = item.copy() + json_file_parent_of_parent_dir = os.path.dirname( + os.path.dirname(self.json_file_path) + ) + units_path = os.path.join( + json_file_parent_of_parent_dir, custom_data["units"] + ) + speech_token_dict = np.load(units_path, allow_pickle=True).item() + speech_token = speech_token_dict["speech_token"].squeeze(0).tolist() + speech_token_len = speech_token_dict["speech_token_len"] + + assert len(speech_token) == speech_token_len + custom_data["speech_token"] = speech_token + audio_path = custom_data.pop("speech", None) + audio_path = os.path.join(json_file_parent_of_parent_dir, audio_path) + item_id = item.get("id") + recording = Recording.from_file(path=audio_path, recording_id=item_id) + + conversations = item.get("conversations") + assert isinstance(conversations, list) and len(conversations) == 2 + for conv in conversations: + if isinstance(conv, dict) and conv.get("from") == "gpt": + gpt_text = conv.get("value") + break + assert gpt_text is not None + + supervision = SupervisionSegment( + id=item_id, + recording_id=recording.id, + start=0.0, # Assuming the supervision covers the entire recording + duration=recording.duration, + text=gpt_text, + ) + + cut = recording.to_cut() + # cut.id will be the same as recording.id + + cut.supervisions = [supervision] + # custom_data contains the original item's fields, minus "speech". + # So, "id", "conversations", "units", etc., are preserved here. 
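+            # The bulky raw fields are dropped before attaching the metadata:
+            # the assistant reply already lives in the supervision text and the
+            # codec targets in custom_data["speech_token"], so only the compact
+            # leftover fields need to ride along on cut.custom.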
+ custom_data.pop("conversations") + custom_data.pop("units") + cut.custom = custom_data + + yield cut + + +if __name__ == "__main__": + json_file_path = ( + "/workspace/slam/VoiceAssistant-430K-vocalnet/VoiceAssistant-430K.json" + ) + cut_set = CutSet(LazyCustomDatasetIterator(json_file_path=json_file_path)) + + for cut in cut_set: + print(cut) + input() diff --git a/egs/speech_llm/SPEECH2SPEECH/prepare.sh b/egs/speech_llm/SPEECH2SPEECH/prepare.sh index 42c9b4eaa..cff7a45fa 100644 --- a/egs/speech_llm/SPEECH2SPEECH/prepare.sh +++ b/egs/speech_llm/SPEECH2SPEECH/prepare.sh @@ -120,3 +120,56 @@ if [ $stage -le 5 ] && [ $stop_stage -ge 5 ]; then --asr-model-dir local/sherpa-onnx-paraformer-zh-2023-09-14 \ --use-lora True --token2wav-path /workspace/CosyVoice-300M-SFT --share fi + +if [ $stage -le 6 ] && [ $stop_stage -ge 6 ]; then + log "stage 1: Compute fbank feature from huggingface" + # CUDA_VISIBLE_DEVICES=0 python3 local/compute_whisper_fbank.py \ + # --num-mel-bins 80 --whisper-fbank True --resample-to-16kHz True --speed-perturb False \ + # --out-dir data/fbank_voice_assistant \ + # --huggingface-dataset-path-or-name worstchan/VoiceAssistant-400K-SLAM-Omni \ + # --audio-key question_audio --text-key answer \ + # --prefix voice_assistant + CUDA_VISIBLE_DEVICES=0 python3 local/compute_whisper_fbank.py \ + --num-mel-bins 80 --whisper-fbank True --resample-to-16kHz True --speed-perturb False \ + --out-dir data/fbank_voice_assistant_cosy2 \ + --json-file-path /workspace/slam/VoiceAssistant-430K-vocalnet/VoiceAssistant-430K.json \ + --prefix voice_assistant +fi + +if [ $stage -le 7 ] && [ $stop_stage -ge 7 ]; then + log "stage 7: Compute fbank feature from huggingface" + # CUDA_VISIBLE_DEVICES=1 python3 local/compute_whisper_fbank.py \ + # --num-mel-bins 80 --whisper-fbank True --resample-to-16kHz True --speed-perturb False \ + # --out-dir data/fbank_ultrachat \ + # --huggingface-dataset-path-or-name worstchan/UltraChat-300K-SLAM-Omni \ + # --audio-key question_audio --text-key answer \ + # --prefix ultrachat + CUDA_VISIBLE_DEVICES=1 python3 local/compute_whisper_fbank.py \ + --num-mel-bins 80 --whisper-fbank True --resample-to-16kHz True --speed-perturb False \ + --out-dir data/fbank_ultrachat_cosy2 \ + --json-file-path /workspace/slam/UltraChat-vocalnet/UltraChat.json \ + --prefix ultrachat +fi + +if [ $stage -le 8 ] && [ $stop_stage -ge 8 ]; then + log "stage 8: Compute fbank feature from huggingface" + + CUDA_VISIBLE_DEVICES=1 python3 local/compute_whisper_fbank.py \ + --num-mel-bins 80 --whisper-fbank True --resample-to-16kHz True --speed-perturb False \ + --out-dir data/fbank_gigaspeech \ + --huggingface-dataset-path-or-name speechcolab/gigaspeech \ + --subset test --split test \ + --audio-key audio --text-key text \ + --prefix gigaspeech +fi + +if [ $stage -le 9 ] && [ $stop_stage -ge 9 ]; then + log "stage 9: Compute fbank feature from huggingface" + CUDA_VISIBLE_DEVICES=0 python3 local/compute_whisper_fbank.py \ + --num-mel-bins 80 --whisper-fbank True --resample-to-16kHz True --speed-perturb True \ + --out-dir data/fbank_gigaspeech \ + --huggingface-dataset-path-or-name speechcolab/gigaspeech \ + --subset xl --split train \ + --audio-key audio --text-key text \ + --prefix gigaspeech +fi From 7cc366d82d3359d2175f12ce3034ea99587cf23d Mon Sep 17 00:00:00 2001 From: root Date: Thu, 8 May 2025 07:23:22 +0000 Subject: [PATCH 29/57] add en data, cosy2 token for training --- egs/speech_llm/SPEECH2SPEECH/prepare.sh | 19 +++ .../SPEECH2SPEECH/qwen_omni/data_module.py | 40 +++++- 
.../SPEECH2SPEECH/qwen_omni/train.py | 136 ++++++++++++------ 3 files changed, 150 insertions(+), 45 deletions(-) diff --git a/egs/speech_llm/SPEECH2SPEECH/prepare.sh b/egs/speech_llm/SPEECH2SPEECH/prepare.sh index cff7a45fa..58465c448 100644 --- a/egs/speech_llm/SPEECH2SPEECH/prepare.sh +++ b/egs/speech_llm/SPEECH2SPEECH/prepare.sh @@ -173,3 +173,22 @@ if [ $stage -le 9 ] && [ $stop_stage -ge 9 ]; then --audio-key audio --text-key text \ --prefix gigaspeech fi + + +ngpu=2 +exp_dir=./qwen_omni/exp_speech2speech_en +if [ $stage -le 10 ] && [ $stop_stage -ge 10 ]; then + log "stage 10: Training Speech2Speech Model" + torchrun --nproc_per_node $ngpu ./qwen_omni/train.py \ + --max-duration 50 \ + --enable-musan False \ + --exp-dir $exp_dir \ + --speech-encoder-path-or-name models/large-v2.pt \ + --llm-path-or-name Qwen/Qwen2.5-0.5B-Instruct \ + --dataset-format vocalnet \ + --manifest-dir data/fbank \ + --deepspeed \ + --deepspeed_config ./qwen_omni/ds_config_zero1.json \ + --use-flash-attn True \ + --use-lora True --unfreeze-llm True --unfreeze-speech-projector True --enable-speech-output True +fi diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/data_module.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/data_module.py index dc38f32bd..1a513fe40 100644 --- a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/data_module.py +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/data_module.py @@ -411,4 +411,42 @@ class AsrDataModule: @lru_cache() def train_cuts(self) -> CutSet: logging.info("About to get train cuts") - return load_manifest_lazy(self.args.manifest_dir / "cuts_belle_train.jsonl.gz") + slam_omni_zh_cuts = load_manifest_lazy( + self.args.manifest_dir / "cuts_belle_train.jsonl.gz" + ) + return slam_omni_zh_cuts + + @lru_cache() + def train_cuts_en_vocalnet(self) -> CutSet: + logging.info("About to get train cuts") + VoiceAssistant_cuts = load_manifest_lazy( + self.args.manifest_dir / "cuts_voice_assistant_00001-00049.jsonl.gz" + ) + ultrachat_cuts = load_manifest_lazy( + self.args.manifest_dir / "cuts_ultrachat_train.jsonl.gz" + ) + return CutSet.mux( + VoiceAssistant_cuts, + ultrachat_cuts, + weights=[ + len(VoiceAssistant_cuts), + len(ultrachat_cuts), + ], + ) + + # valid cuts_voice_assistant.00000.jsonl.gz + @lru_cache() + def valid_cuts_en_vocalnet(self) -> CutSet: + logging.info("About to get valid cuts") + VoiceAssistant_cuts = load_manifest_lazy( + self.args.manifest_dir / "cuts_voice_assistant.00000.jsonl.gz" + ) + return VoiceAssistant_cuts + + @lru_cache() + def test_cuts_en_vocalnet(self) -> CutSet: + logging.info("About to get test cuts") + VoiceAssistant_cuts = load_manifest_lazy( + self.args.manifest_dir / "cuts_voice_assistant.00000.jsonl.gz" + ) + return VoiceAssistant_cuts diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py index 95ce16d0e..7665a7680 100755 --- a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py @@ -73,10 +73,9 @@ from whisper_encoder_forward_monkey_patch import replace_whisper_encoder_forward from icefall import diagnostics from icefall.dist import get_rank, get_world_size from icefall.env import get_env_info -from icefall.utils import ( +from icefall.utils import ( # filter_uneven_sized_batch, AttributeDict, MetricsTracker, - filter_uneven_sized_batch, setup_logger, str2bool, ) @@ -222,6 +221,13 @@ def get_parser(): default=False, help="Whether to unfreeze speech adaptor during training.", ) + + parser.add_argument( + "--dataset-format", + type=str, + 
default="slam_omni", + help="The format of the dataset.", + ) parser = deepspeed.add_config_arguments(parser) add_model_arguments(parser) @@ -271,6 +277,58 @@ def get_params() -> AttributeDict: return params +def process_batch_slam_omni(batch: dict): + answers = batch["supervisions"]["text"] + questions_with_history = [ + cut.custom["question"] for cut in batch["supervisions"]["cut"] + ] + chat_rounds = [cut.custom["round"] for cut in batch["supervisions"]["cut"]] + answer_cosyvoice_speech_token = [ + cut.custom["answer_cosyvoice_speech_token"] + for cut in batch["supervisions"]["cut"] + ] + last_questions = [ + question.split(": ")[-1].strip() for question in questions_with_history + ] + history_contexts = [ + question.rsplit(":", 1)[0].strip() for question in questions_with_history + ] + + messages = [] + for i, total_round in enumerate(chat_rounds): + message = [] + if total_round > 1: + history_question_answer = history_contexts[i].split("USER:") + history_question_answer = [item for item in history_question_answer if item] + for j in range(total_round - 1): + question_answer = history_question_answer[j].split("ASSISTANT:") + message += [ + {"role": "user", "content": question_answer[0].strip()}, + {"role": "assistant", "content": question_answer[1].strip()}, + ] + message += [ + {"role": "user", "content": f"{DEFAULT_SPEECH_TOKEN}"}, + {"role": "assistant", "content": answers[i]}, + ] + messages.append(message) + return messages, answer_cosyvoice_speech_token + + +def process_batch_vocalnet(batch: dict): + answers = batch["supervisions"]["text"] + answer_cosyvoice_speech_token = [ + cut.custom["speech_token"] for cut in batch["supervisions"]["cut"] + ] + messages = [] + for i in range(len(answers)): + message = [ + {"role": "user", "content": f"{DEFAULT_SPEECH_TOKEN}"}, + {"role": "assistant", "content": answers[i]}, + ] + messages.append(message) + return messages, answer_cosyvoice_speech_token + + def compute_loss( params: AttributeDict, tokenizer: AutoTokenizer, @@ -350,15 +408,16 @@ def compute_loss( row = mask_indices[0][i] col = mask_indices[1][i] # + 6 to skip: 'assistant', '\n' 151665, 151645, 198, 151644, 77091, 198 + # WAR: TODO FIXME check qwen3 target_ids[row, : col + 6] = IGNORE_TOKEN_ID attention_mask = input_ids.ne(tokenizer.pad_token_id) return input_ids, attention_mask, target_ids - max_frames = params.max_duration * 1000 // params.frame_shift_ms - allowed_max_frames = int(max_frames * (1.0 + params.allowed_excess_duration_ratio)) - batch = filter_uneven_sized_batch(batch, allowed_max_frames) + # max_frames = params.max_duration * 1000 // params.frame_shift_ms + # allowed_max_frames = int(max_frames * (1.0 + params.allowed_excess_duration_ratio)) + # batch = filter_uneven_sized_batch(batch, allowed_max_frames) device = next(model.parameters()).device feature = batch["inputs"] @@ -369,39 +428,13 @@ def compute_loss( batch_idx_train = params.batch_idx_train - answers = batch["supervisions"]["text"] - questions_with_history = [ - cut.custom["question"] for cut in batch["supervisions"]["cut"] - ] - chat_rounds = [cut.custom["round"] for cut in batch["supervisions"]["cut"]] - answer_cosyvoice_speech_token = [ - cut.custom["answer_cosyvoice_speech_token"] - for cut in batch["supervisions"]["cut"] - ] - last_questions = [ - question.split(": ")[-1].strip() for question in questions_with_history - ] - history_contexts = [ - question.rsplit(":", 1)[0].strip() for question in questions_with_history - ] - - messages = [] - for i, total_round in enumerate(chat_rounds): - message 
= [] - if total_round > 1: - history_question_answer = history_contexts[i].split("USER:") - history_question_answer = [item for item in history_question_answer if item] - for j in range(total_round - 1): - question_answer = history_question_answer[j].split("ASSISTANT:") - message += [ - {"role": "user", "content": question_answer[0].strip()}, - {"role": "assistant", "content": question_answer[1].strip()}, - ] - message += [ - {"role": "user", "content": f"{DEFAULT_SPEECH_TOKEN}"}, - {"role": "assistant", "content": answers[i]}, - ] - messages.append(message) + # WAR: TODO FIXME merge process_batch_slam_omni and process_batch_vocalnet + if params.dataset_format == "slam_omni": + messages, answer_cosyvoice_speech_token = process_batch_slam_omni(batch) + elif params.dataset_format == "vocalnet": + messages, answer_cosyvoice_speech_token = process_batch_vocalnet(batch) + else: + raise ValueError(f"Unknown dataset format: {params.dataset_format}") input_ids, attention_mask, target_ids = preprocess(messages, tokenizer) @@ -730,8 +763,12 @@ def run(rank, world_size, args): else: attn_implementation = "eager" torch_dtype = torch.float16 - - codec_vocab_size = 4096 + 4 + if params.dataset_format == "slam_omni": + codec_vocab_size = 4096 + 4 + elif params.dataset_format == "vocalnet": + codec_vocab_size = 6561 + 4 + else: + raise ValueError(f"Unknown dataset format: {params.dataset_format}") # TODO: modify above vocab size or supress_tokens when decoding config = Qwen2Config( vocab_size=codec_vocab_size, @@ -802,12 +839,16 @@ def run(rank, world_size, args): # You should use ../local/display_manifest_statistics.py to get # an utterance duration distribution for your dataset to select # the threshold - if c.duration < 1.0 or c.duration > 20.0: + if c.duration < 1.0 or c.duration > 30.0: # logging.warning( # f"Exclude cut with ID {c.id} from training. Duration: {c.duration}" # ) return False - codec_len = len(c.custom["answer_cosyvoice_speech_token"]) + codec_len = ( + len(c.custom["answer_cosyvoice_speech_token"]) + if "answer_cosyvoice_speech_token" in c.custom + else len(c.custom["speech_token"]) + ) if codec_len > 2200: logging.warning( f"Exclude cut with ID {c.id} from training. 
Duration: {c.duration}, lenth: {codec_len}" @@ -815,9 +856,17 @@ def run(rank, world_size, args): return False return True - train_cuts = data_module.train_cuts() + if params.dataset_format == "slam_omni": + train_cuts = data_module.train_cuts() + valid_cuts = data_module.dev_cuts() + elif params.dataset_format == "vocalnet": + train_cuts = data_module.train_cuts_en_vocalnet() + valid_cuts = data_module.valid_cuts_en_vocalnet() + else: + raise ValueError(f"Unknown dataset format: {params.dataset_format}") train_cuts = train_cuts.filter(remove_short_and_long_utt) + valid_cuts = valid_cuts.filter(remove_short_and_long_utt) sampler_state_dict = None if params.sampler_state_dict_path: @@ -828,7 +877,6 @@ def run(rank, world_size, args): train_cuts, sampler_state_dict=sampler_state_dict ) - valid_cuts = data_module.dev_cuts() valid_dl = data_module.valid_dataloaders(valid_cuts) if args.tensorboard and rank == 0: From e41c1cabd5e29b8b284befbdc1e5a80aff4f8075 Mon Sep 17 00:00:00 2001 From: root Date: Thu, 8 May 2025 07:56:14 +0000 Subject: [PATCH 30/57] add dependency --- .../qwen_omni/requirements-cosyvoice.txt | 23 +++++++++++++++++++ .../SPEECH2SPEECH/qwen_omni/requirements.txt | 12 ++++++++++ 2 files changed, 35 insertions(+) create mode 100644 egs/speech_llm/SPEECH2SPEECH/qwen_omni/requirements-cosyvoice.txt create mode 100644 egs/speech_llm/SPEECH2SPEECH/qwen_omni/requirements.txt diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/requirements-cosyvoice.txt b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/requirements-cosyvoice.txt new file mode 100644 index 000000000..8962f76e3 --- /dev/null +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/requirements-cosyvoice.txt @@ -0,0 +1,23 @@ +conformer==0.3.2 +diffusers==0.29.0 +gdown==5.1.0 +gradio +hydra-core==1.3.2 +HyperPyYAML==1.2.2 +inflect==7.3.1 +librosa==0.10.2 +lightning==2.2.4 +matplotlib==3.7.5 +#modelscope==1.15.0 +networkx==3.1 +omegaconf==2.3.0 +onnx==1.16.0 +onnxruntime-gpu==1.18.0 +protobuf==4.25 +pydantic==2.7.0 +pyworld==0.3.4 +rich==13.7.1 +soundfile==0.12.1 +tensorboard==2.14.0 +wget==3.2 +WeTextProcessing==1.0.3 diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/requirements.txt b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/requirements.txt new file mode 100644 index 000000000..2db53f3ff --- /dev/null +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/requirements.txt @@ -0,0 +1,12 @@ +openai-whisper +kaldialign +lhotse +sentencepiece +pypinyin +tensorboard +librosa +deepspeed +transformers>=4.37.0 +flash-attn +peft +torchmetrics From 37db65984c9d2018e1788788884532ccb3eda134 Mon Sep 17 00:00:00 2001 From: root Date: Thu, 8 May 2025 03:02:34 -0700 Subject: [PATCH 31/57] remove k2 dependency --- .../SPEECH2SPEECH/qwen_omni/data_module.py | 2 +- .../SPEECH2SPEECH/qwen_omni/train.py | 10 +- .../SPEECH2SPEECH/qwen_omni/utils.py | 224 ++++++++++++++++++ 3 files changed, 230 insertions(+), 6 deletions(-) create mode 100644 egs/speech_llm/SPEECH2SPEECH/qwen_omni/utils.py diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/data_module.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/data_module.py index 1a513fe40..7bd0a174a 100644 --- a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/data_module.py +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/data_module.py @@ -48,7 +48,7 @@ from lhotse.utils import fix_random_seed from speech_dataset import K2SpeechRecognitionDataset from torch.utils.data import DataLoader -from icefall.utils import str2bool +from utils import str2bool class _SeedWorkers: diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py 
b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py index 7665a7680..0b2642bf0 100755 --- a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py @@ -70,10 +70,10 @@ from transformers import ( ) from whisper_encoder_forward_monkey_patch import replace_whisper_encoder_forward -from icefall import diagnostics -from icefall.dist import get_rank, get_world_size -from icefall.env import get_env_info -from icefall.utils import ( # filter_uneven_sized_batch, +# from icefall import diagnostics +from utils import get_rank, get_world_size +# from icefall.env import get_env_info +from utils import ( # filter_uneven_sized_batch, AttributeDict, MetricsTracker, setup_logger, @@ -270,7 +270,7 @@ def get_params() -> AttributeDict: "log_interval": 50, "reset_interval": 200, "valid_interval": 5000, - "env_info": get_env_info(), + # "env_info": get_env_info(), } ) diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/utils.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/utils.py new file mode 100644 index 000000000..fe65a8042 --- /dev/null +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/utils.py @@ -0,0 +1,224 @@ +import argparse +import collections +import json +import logging +import os +import pathlib +import random +import re +import subprocess +from collections import defaultdict +# from contextlib import contextmanager +from dataclasses import dataclass +from datetime import datetime +from pathlib import Path +# from shutil import copyfile +from typing import Dict, Iterable, List, Optional, TextIO, Tuple, Union + +import torch +import torch.distributed as dist +from torch.utils.tensorboard import SummaryWriter + +Pathlike = Union[str, Path] + +def get_world_size(): + if "WORLD_SIZE" in os.environ: + return int(os.environ["WORLD_SIZE"]) + if dist.is_available() and dist.is_initialized(): + return dist.get_world_size() + else: + return 1 + + +def get_rank(): + if "RANK" in os.environ: + return int(os.environ["RANK"]) + elif dist.is_available() and dist.is_initialized(): + return dist.get_rank() + else: + return 0 + +def str2bool(v): + """Used in argparse.ArgumentParser.add_argument to indicate + that a type is a bool type and user can enter + + - yes, true, t, y, 1, to represent True + - no, false, f, n, 0, to represent False + + See https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse # noqa + """ + if isinstance(v, bool): + return v + if v.lower() in ("yes", "true", "t", "y", "1"): + return True + elif v.lower() in ("no", "false", "f", "n", "0"): + return False + else: + raise argparse.ArgumentTypeError("Boolean value expected.") + +class AttributeDict(dict): + def __getattr__(self, key): + if key in self: + return self[key] + raise AttributeError(f"No such attribute '{key}'") + + def __setattr__(self, key, value): + self[key] = value + + def __delattr__(self, key): + if key in self: + del self[key] + return + raise AttributeError(f"No such attribute '{key}'") + + def __str__(self, indent: int = 2): + tmp = {} + for k, v in self.items(): + # PosixPath is ont JSON serializable + if isinstance(v, pathlib.Path) or isinstance(v, torch.device): + v = str(v) + tmp[k] = v + return json.dumps(tmp, indent=indent, sort_keys=True) + +def setup_logger( + log_filename: Pathlike, + log_level: str = "info", + use_console: bool = True, +) -> None: + """Setup log level. + + Args: + log_filename: + The filename to save the log. 
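+            A timestamp (and the rank, when running distributed) is appended
+            to this base name before the log file is created.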
+ log_level: + The log level to use, e.g., "debug", "info", "warning", "error", + "critical" + use_console: + True to also print logs to console. + """ + now = datetime.now() + date_time = now.strftime("%Y-%m-%d-%H-%M-%S") + if dist.is_available() and dist.is_initialized(): + world_size = dist.get_world_size() + rank = dist.get_rank() + formatter = f"%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] ({rank}/{world_size}) %(message)s" # noqa + log_filename = f"{log_filename}-{date_time}-{rank}" + else: + formatter = "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s" + log_filename = f"{log_filename}-{date_time}" + + os.makedirs(os.path.dirname(log_filename), exist_ok=True) + + level = logging.ERROR + if log_level == "debug": + level = logging.DEBUG + elif log_level == "info": + level = logging.INFO + elif log_level == "warning": + level = logging.WARNING + elif log_level == "critical": + level = logging.CRITICAL + + logging.basicConfig( + filename=log_filename, + format=formatter, + level=level, + filemode="w", + force=True, + ) + if use_console: + console = logging.StreamHandler() + console.setLevel(level) + console.setFormatter(logging.Formatter(formatter)) + logging.getLogger("").addHandler(console) + +class MetricsTracker(collections.defaultdict): + def __init__(self): + # Passing the type 'int' to the base-class constructor + # makes undefined items default to int() which is zero. + # This class will play a role as metrics tracker. + # It can record many metrics, including but not limited to loss. + super(MetricsTracker, self).__init__(int) + + def __add__(self, other: "MetricsTracker") -> "MetricsTracker": + ans = MetricsTracker() + for k, v in self.items(): + ans[k] = v + for k, v in other.items(): + if v - v == 0: + ans[k] = ans[k] + v + return ans + + def __mul__(self, alpha: float) -> "MetricsTracker": + ans = MetricsTracker() + for k, v in self.items(): + ans[k] = v * alpha + return ans + + def __str__(self) -> str: + ans_frames = "" + ans_utterances = "" + for k, v in self.norm_items(): + norm_value = "%.4g" % v + if "utt_" not in k: + ans_frames += str(k) + "=" + str(norm_value) + ", " + else: + ans_utterances += str(k) + "=" + str(norm_value) + if k == "utt_duration": + ans_utterances += " frames, " + elif k == "utt_pad_proportion": + ans_utterances += ", " + else: + raise ValueError(f"Unexpected key: {k}") + frames = "%.2f" % self["frames"] + ans_frames += "over " + str(frames) + " frames. " + if ans_utterances != "": + utterances = "%.2f" % self["utterances"] + ans_utterances += "over " + str(utterances) + " utterances." + + return ans_frames + ans_utterances + + def norm_items(self) -> List[Tuple[str, float]]: + """ + Returns a list of pairs, like: + [('ctc_loss', 0.1), ('att_loss', 0.07)] + """ + num_frames = self["frames"] if "frames" in self else 1 + num_utterances = self["utterances"] if "utterances" in self else 1 + ans = [] + for k, v in self.items(): + if k == "frames" or k == "utterances": + continue + norm_value = ( + float(v) / num_frames if "utt_" not in k else float(v) / num_utterances + ) + ans.append((k, norm_value)) + return ans + + def reduce(self, device): + """ + Reduce using torch.distributed, which I believe ensures that + all processes get the total. 
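+        After the call every rank holds the summed value for each key,
+        because dist.all_reduce(op=SUM) is followed by writing the reduced
+        tensor back into the tracker.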
+ """ + keys = sorted(self.keys()) + s = torch.tensor([float(self[k]) for k in keys], device=device) + dist.all_reduce(s, op=dist.ReduceOp.SUM) + for k, v in zip(keys, s.cpu().tolist()): + self[k] = v + + def write_summary( + self, + tb_writer: SummaryWriter, + prefix: str, + batch_idx: int, + ) -> None: + """Add logging information to a TensorBoard writer. + + Args: + tb_writer: a TensorBoard writer + prefix: a prefix for the name of the loss, e.g. "train/valid_", + or "train/current_" + batch_idx: The current batch index, used as the x-axis of the plot. + """ + for k, v in self.norm_items(): + tb_writer.add_scalar(prefix + k, v, batch_idx) \ No newline at end of file From bd2df570ad4b993ea100f381867b98a6dd620f70 Mon Sep 17 00:00:00 2001 From: root Date: Thu, 8 May 2025 03:37:26 -0700 Subject: [PATCH 32/57] add debug script --- .../SPEECH2SPEECH/debug/data_module.py | 480 +++++++++ egs/speech_llm/SPEECH2SPEECH/debug/model.py | 795 ++++++++++++++ egs/speech_llm/SPEECH2SPEECH/debug/prepare.sh | 195 ++++ egs/speech_llm/SPEECH2SPEECH/debug/train.py | 977 ++++++++++++++++++ 4 files changed, 2447 insertions(+) create mode 100644 egs/speech_llm/SPEECH2SPEECH/debug/data_module.py create mode 100644 egs/speech_llm/SPEECH2SPEECH/debug/model.py create mode 100644 egs/speech_llm/SPEECH2SPEECH/debug/prepare.sh create mode 100755 egs/speech_llm/SPEECH2SPEECH/debug/train.py diff --git a/egs/speech_llm/SPEECH2SPEECH/debug/data_module.py b/egs/speech_llm/SPEECH2SPEECH/debug/data_module.py new file mode 100644 index 000000000..5a7c04b6d --- /dev/null +++ b/egs/speech_llm/SPEECH2SPEECH/debug/data_module.py @@ -0,0 +1,480 @@ +# Copyright 2021 Piotr Żelasko +# Copyright 2022 Xiaomi Corporation (Author: Mingshuang Luo) +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import argparse +import inspect +import logging +from functools import lru_cache +from pathlib import Path +from typing import Any, Dict, Optional + +import torch +from datasets import load_dataset +from lhotse import ( + CutSet, + WhisperFbank, + WhisperFbankConfig, + load_manifest, + load_manifest_lazy, +) +from lhotse.dataset import ( # noqa F401 for PrecomputedFeatures + CutConcatenate, + CutMix, + DynamicBucketingSampler, + PrecomputedFeatures, + SimpleCutSampler, + SpecAugment, +) +from lhotse.dataset.input_strategies import ( # noqa F401 For AudioSamples + AudioSamples, + OnTheFlyFeatures, +) +from lhotse.utils import fix_random_seed +from speech_dataset import K2SpeechRecognitionDataset +from torch.utils.data import DataLoader + +from utils import str2bool + + +class _SeedWorkers: + def __init__(self, seed: int): + self.seed = seed + + def __call__(self, worker_id: int): + fix_random_seed(self.seed + worker_id) + + +class AsrDataModule: + """ + DataModule for k2 ASR experiments. + It assumes there is always one train and valid dataloader, + but there can be multiple test dataloaders (e.g. LibriSpeech test-clean + and test-other). 
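+    This is a stripped-down debug copy: the *_cuts_en_vocalnet helpers below
+    all read the same small cuts_debug.jsonl.gz manifest.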
+ + It contains all the common data pipeline modules used in ASR + experiments, e.g.: + - dynamic batch size, + - bucketing samplers, + - cut concatenation, + - augmentation, + - on-the-fly feature extraction + + This class should be derived for specific corpora used in ASR tasks. + """ + + def __init__(self, args: argparse.Namespace): + self.args = args + + @classmethod + def add_arguments(cls, parser: argparse.ArgumentParser): + group = parser.add_argument_group( + title="ASR data related options", + description="These options are used for the preparation of " + "PyTorch DataLoaders from Lhotse CutSet's -- they control the " + "effective batch sizes, sampling strategies, applied data " + "augmentations, etc.", + ) + group.add_argument( + "--manifest-dir", + type=Path, + default=Path("data/fbank"), + help="Path to directory with train/valid/test cuts.", + ) + group.add_argument( + "--max-duration", + type=int, + default=300.0, + help="Maximum pooled recordings duration (seconds) in a " + "single batch. You can reduce it if it causes CUDA OOM.", + ) + group.add_argument( + "--bucketing-sampler", + type=str2bool, + default=True, + help="When enabled, the batches will come from buckets of " + "similar duration (saves padding frames).", + ) + group.add_argument( + "--num-buckets", + type=int, + default=30, + help="The number of buckets for the DynamicBucketingSampler" + "(you might want to increase it for larger datasets).", + ) + group.add_argument( + "--on-the-fly-feats", + type=str2bool, + default=False, + help="When enabled, use on-the-fly cut mixing and feature " + "extraction. Will drop existing precomputed feature manifests " + "if available.", + ) + group.add_argument( + "--shuffle", + type=str2bool, + default=True, + help="When enabled (=default), the examples will be " + "shuffled for each epoch.", + ) + group.add_argument( + "--drop-last", + type=str2bool, + default=True, + help="Whether to drop last batch. Used by sampler.", + ) + group.add_argument( + "--return-cuts", + type=str2bool, + default=True, + help="When enabled, each batch will have the " + "field: batch['supervisions']['cut'] with the cuts that " + "were used to construct it.", + ) + + group.add_argument( + "--num-workers", + type=int, + default=2, + help="The number of training dataloader workers that " + "collect the batches.", + ) + + group.add_argument( + "--enable-spec-aug", + type=str2bool, + default=True, + help="When enabled, use SpecAugment for training dataset.", + ) + + group.add_argument( + "--spec-aug-time-warp-factor", + type=int, + default=80, + help="Used only when --enable-spec-aug is True. " + "It specifies the factor for time warping in SpecAugment. " + "Larger values mean more warping. " + "A value less than 1 means to disable time warp.", + ) + + group.add_argument( + "--enable-musan", + type=str2bool, + default=True, + help="When enabled, select noise from MUSAN and mix it" + "with training dataset. 
", + ) + + group.add_argument( + "--input-strategy", + type=str, + default="PrecomputedFeatures", + help="AudioSamples or PrecomputedFeatures", + ) + + group.add_argument( + "--huggingface-dataset-path-or-name", + type=str, + default="/workspace/Belle_1.4M-SLAM-Omni", + help="The path or name of the Huggingface dataset", + ) + group.add_argument( + "--audio-key", + type=str, + default="question_audio", + help="The key in the Huggingface dataset containing the audio data", + ) + group.add_argument( + "--text-key", + type=str, + default="answer", + help="The key in the Huggingface dataset containing the text data", + ) + group.add_argument( + "--resample-to-16kHz", + type=str2bool, + default=True, + help="Resample audio to 16kHz. Default: False.", + ) + + def train_dataloaders( + self, + cuts_train: CutSet, + sampler_state_dict: Optional[Dict[str, Any]] = None, + ) -> DataLoader: + """ + Args: + cuts_train: + CutSet for training. + sampler_state_dict: + The state dict for the training sampler. + """ + transforms = [] + if self.args.enable_musan: + logging.info("Enable MUSAN") + logging.info("About to get Musan cuts") + cuts_musan = load_manifest(self.args.manifest_dir / "musan_cuts.jsonl.gz") + transforms.append( + CutMix(cuts=cuts_musan, p=0.5, snr=(10, 20), preserve_id=True) + ) + else: + logging.info("Disable MUSAN") + + input_transforms = [] + if self.args.enable_spec_aug: + logging.info("Enable SpecAugment") + logging.info(f"Time warp factor: {self.args.spec_aug_time_warp_factor}") + # Set the value of num_frame_masks according to Lhotse's version. + # In different Lhotse's versions, the default of num_frame_masks is + # different. + num_frame_masks = 10 + num_frame_masks_parameter = inspect.signature( + SpecAugment.__init__ + ).parameters["num_frame_masks"] + if num_frame_masks_parameter.default == 1: + num_frame_masks = 2 + logging.info(f"Num frame mask: {num_frame_masks}") + input_transforms.append( + SpecAugment( + time_warp_factor=self.args.spec_aug_time_warp_factor, + num_frame_masks=num_frame_masks, + features_mask_size=27, + num_feature_masks=2, + frames_mask_size=100, + ) + ) + else: + logging.info("Disable SpecAugment") + + logging.info("About to create train dataset") + train = K2SpeechRecognitionDataset( + input_strategy=eval(self.args.input_strategy)(), + cut_transforms=transforms, + input_transforms=input_transforms, + return_cuts=self.args.return_cuts, + ) + + if self.args.on_the_fly_feats: + # NOTE: the PerturbSpeed transform should be added only if we + # remove it from data prep stage. + # Add on-the-fly speed perturbation; since originally it would + # have increased epoch size by 3, we will apply prob 2/3 and use + # 3x more epochs. + # Speed perturbation probably should come first before + # concatenation, but in principle the transforms order doesn't have + # to be strict (e.g. could be randomized) + # transforms = [PerturbSpeed(factors=[0.9, 1.1], p=2/3)] + transforms # noqa + # Drop feats to be on the safe side. 
+ train = K2SpeechRecognitionDataset( + cut_transforms=transforms, + input_strategy=OnTheFlyFeatures( + WhisperFbank(WhisperFbankConfig(num_filters=80, device="cuda")) + ), + input_transforms=input_transforms, + return_cuts=self.args.return_cuts, + ) + + if self.args.bucketing_sampler: + logging.info("Using DynamicBucketingSampler.") + train_sampler = DynamicBucketingSampler( + cuts_train, + max_duration=self.args.max_duration, + shuffle=self.args.shuffle, + num_buckets=self.args.num_buckets, + buffer_size=self.args.num_buckets * 2000, + shuffle_buffer_size=self.args.num_buckets * 5000, + drop_last=self.args.drop_last, + ) + else: + logging.info("Using SimpleCutSampler.") + train_sampler = SimpleCutSampler( + cuts_train, + max_duration=self.args.max_duration, + shuffle=self.args.shuffle, + ) + logging.info("About to create train dataloader") + + if sampler_state_dict is not None: + logging.info("Loading sampler state dict") + train_sampler.load_state_dict(sampler_state_dict) + + # 'seed' is derived from the current random state, which will have + # previously been set in the main process. + seed = torch.randint(0, 100000, ()).item() + worker_init_fn = _SeedWorkers(seed) + + train_dl = DataLoader( + train, + sampler=train_sampler, + batch_size=None, + num_workers=self.args.num_workers, + persistent_workers=True, + pin_memory=True, + worker_init_fn=worker_init_fn, + ) + + return train_dl + + def valid_dataloaders(self, cuts_valid: CutSet) -> DataLoader: + """ + Args: + cuts_valid: + CutSet for validation. + """ + logging.info("About to create dev dataset") + + validate = K2SpeechRecognitionDataset( + input_strategy=OnTheFlyFeatures( + WhisperFbank(WhisperFbankConfig(num_filters=80, device="cuda")) + ) + if self.args.on_the_fly_feats + else eval(self.args.input_strategy)(), + return_cuts=self.args.return_cuts, + ) + if self.args.bucketing_sampler: + valid_sampler = DynamicBucketingSampler( + cuts_valid, + max_duration=self.args.max_duration, + shuffle=False, + ) + else: + valid_sampler = SimpleCutSampler( + cuts_valid, + max_duration=self.args.max_duration, + shuffle=False, + ) + logging.info("About to create dev dataloader") + valid_dl = DataLoader( + validate, + sampler=valid_sampler, + batch_size=None, + num_workers=2, + persistent_workers=False, + ) + + return valid_dl + + def test_dataloaders(self, cuts: CutSet) -> DataLoader: + logging.debug("About to create test dataset") + test = K2SpeechRecognitionDataset( + input_strategy=OnTheFlyFeatures( + WhisperFbank(WhisperFbankConfig(num_filters=80, device="cpu")) + ) + if self.args.on_the_fly_feats + else eval(self.args.input_strategy)(), + return_cuts=self.args.return_cuts, + ) + sampler = DynamicBucketingSampler( + cuts, + max_duration=self.args.max_duration, + shuffle=False, + ) + logging.debug("About to create test dataloader") + test_dl = DataLoader( + test, + batch_size=None, + sampler=sampler, + num_workers=self.args.num_workers, + ) + return test_dl + + @lru_cache() + def test_cuts(self) -> CutSet: + logging.info("About to get test cuts") + if self.args.on_the_fly_feats: + pass + else: + return { + "test": load_manifest_lazy( + self.args.manifest_dir / "cuts_belle_test.jsonl.gz" + ) + } + + @lru_cache() + def dev_cuts(self) -> CutSet: + logging.info("About to get test cuts") + if self.args.on_the_fly_feats: + pass + else: + return load_manifest_lazy( + self.args.manifest_dir / "cuts_belle_test.jsonl.gz" + ) + + @lru_cache() + def train_cuts(self) -> CutSet: + logging.info("About to get train cuts") + slam_omni_zh_cuts = 
load_manifest_lazy( + self.args.manifest_dir / "cuts_belle_train.jsonl.gz" + ) + return slam_omni_zh_cuts + + # @lru_cache() + # def train_cuts_en_vocalnet(self) -> CutSet: + # logging.info("About to get train cuts") + # VoiceAssistant_cuts = load_manifest_lazy( + # self.args.manifest_dir / "cuts_voice_assistant_00001-00049.jsonl.gz" + # ) + # ultrachat_cuts = load_manifest_lazy( + # self.args.manifest_dir / "cuts_ultrachat_train.jsonl.gz" + # ) + # return CutSet.mux( + # VoiceAssistant_cuts, + # ultrachat_cuts, + # weights=[ + # len(VoiceAssistant_cuts), + # len(ultrachat_cuts), + # ], + # ) + + # valid cuts_voice_assistant.00000.jsonl.gz + # @lru_cache() + # def valid_cuts_en_vocalnet(self) -> CutSet: + # logging.info("About to get valid cuts") + # VoiceAssistant_cuts = load_manifest_lazy( + # self.args.manifest_dir / "cuts_voice_assistant.00000.jsonl.gz" + # ) + # return VoiceAssistant_cuts + + # @lru_cache() + # def test_cuts_en_vocalnet(self) -> CutSet: + # logging.info("About to get test cuts") + # VoiceAssistant_cuts = load_manifest_lazy( + # self.args.manifest_dir / "cuts_voice_assistant.00000.jsonl.gz" + # ) + # return VoiceAssistant_cuts + def train_cuts_en_vocalnet(self) -> CutSet: + logging.info("About to get train cuts") + VoiceAssistant_cuts = load_manifest_lazy( + self.args.manifest_dir / "cuts_debug.jsonl.gz" + ) + return VoiceAssistant_cuts + + @lru_cache() + def valid_cuts_en_vocalnet(self) -> CutSet: + logging.info("About to get valid cuts") + VoiceAssistant_cuts = load_manifest_lazy( + self.args.manifest_dir / "cuts_debug.jsonl.gz" + ) + return VoiceAssistant_cuts + + @lru_cache() + def test_cuts_en_vocalnet(self) -> CutSet: + logging.info("About to get test cuts") + VoiceAssistant_cuts = load_manifest_lazy( + self.args.manifest_dir / "cuts_debug.jsonl.gz" + ) + return VoiceAssistant_cuts \ No newline at end of file diff --git a/egs/speech_llm/SPEECH2SPEECH/debug/model.py b/egs/speech_llm/SPEECH2SPEECH/debug/model.py new file mode 100644 index 000000000..dfeb94956 --- /dev/null +++ b/egs/speech_llm/SPEECH2SPEECH/debug/model.py @@ -0,0 +1,795 @@ +from typing import List, Tuple + +import torch +from torch import nn +from torchmetrics.classification import MulticlassAccuracy +from transformers.trainer_pt_utils import LabelSmoother + +IGNORE_TOKEN_ID = LabelSmoother.ignore_index +import logging +from utils import get_rank + +class EncoderProjector(nn.Module): + """ + The encoder projector module. It is used to project the encoder outputs to the same dimension as the language model. + Modified from https://github.com/X-LANCE/SLAM-LLM/blob/main/src/slam_llm/models/projector.py. + Args: + encoder_dim (:obj:`int`): The dimension of the encoder outputs. + llm_dim (:obj:`int`): The dimension of the language model. + downsample_rate (:obj:`int`, `optional`, defaults to 5): The downsample rate to use. 
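+    Shape sketch (illustrative dims only: 1280 would match a Whisper large-v2
+    encoder and 896 a Qwen2.5-0.5B LLM)::
+        >>> proj = EncoderProjector(encoder_dim=1280, llm_dim=896, downsample_rate=5)
+        >>> x = torch.randn(2, 101, 1280)  # (batch, frames, encoder_dim)
+        >>> proj(x).shape  # 101 % 5 = 1 frame discarded, then 100 // 5 = 20 frames
+        torch.Size([2, 20, 896])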
+ """ + + def __init__(self, encoder_dim, llm_dim, downsample_rate=5): + super().__init__() + self.downsample_rate = downsample_rate + self.linear1 = nn.Linear(encoder_dim * self.downsample_rate, llm_dim) + self.relu = nn.ReLU() + self.linear2 = nn.Linear(llm_dim, llm_dim) + + def forward(self, x): + + batch_size, seq_len, feat_dim = x.size() + num_frames_to_discard = seq_len % self.downsample_rate + if num_frames_to_discard > 0: + x = x[:, :-num_frames_to_discard, :] + seq_len = x.size(1) + + x = x.contiguous() + x = x.view( + batch_size, seq_len // self.downsample_rate, feat_dim * self.downsample_rate + ) + + x = self.linear1(x) + x = self.relu(x) + x = self.linear2(x) + return x + + +class SPEECH_LLM(nn.Module): + """ + The Speech-to-Text model. It consists of an encoder, a language model and an encoder projector. + The encoder is used to extract speech features from the input speech signal. + The encoder projector is used to project the encoder outputs to the same dimension as the language model. + The language model is used to generate the text from the speech features. + Args: + encoder (:obj:`nn.Module`): The encoder module. + llm (:obj:`nn.Module`): The language model module. + encoder_projector (:obj:`nn.Module`): The encoder projector module. + """ + + def __init__( + self, + encoder: nn.Module, + llm: nn.Module, + encoder_projector: nn.Module, + codec_lm: nn.Module = None, + codec_lm_padding_side: str = "left", + ): + super().__init__() + self.encoder = encoder + self.llm = llm + self.encoder_projector = encoder_projector + self.codec_lm = codec_lm + if self.codec_lm: + self.speech_token_projector = nn.Linear( + self.llm.config.hidden_size + self.llm.config.hidden_size, + self.codec_lm.config.hidden_size, + ) + self.codec_lm_head = nn.Linear( + self.codec_lm.config.hidden_size, self.codec_lm.config.vocab_size + ) + self.speech_token_projector = self.speech_token_projector.to( + dtype=torch.float16 + ) + self.codec_lm_head = self.codec_lm_head.to(dtype=torch.float16) + self.loss_fct = torch.nn.CrossEntropyLoss() + self.codec_lm_padding_side = codec_lm_padding_side + + self.audio_accuracy_metric = MulticlassAccuracy( + self.codec_lm.vocab_size, + top_k=10, + average="micro", + multidim_average="global", + ignore_index=IGNORE_TOKEN_ID, + ) + + def _merge_input_ids_with_speech_features( + self, speech_features, inputs_embeds, input_ids, attention_mask, labels=None + ): + """ + Merge the speech features with the input_ids and attention_mask. This is done by replacing the speech tokens + with the speech features and padding the input_ids to the maximum length of the speech features. + Modified from https://github.com/huggingface/transformers/blob/main/src/transformers/models/llava/modeling_llava.py#L277. + Args: + speech_features (:obj:`torch.Tensor`): The speech features to merge with the input_ids. + inputs_embeds (:obj:`torch.Tensor`): The embeddings of the input_ids. + input_ids (:obj:`torch.Tensor`): The input ids to merge. + attention_mask (:obj:`torch.Tensor`): The attention mask to merge. + labels (:obj:`torch.Tensor`, `optional`): The labels to merge. + Returns: + :obj:`Tuple(torch.Tensor)`: The merged embeddings, attention mask, labels and position ids. + """ + num_speechs, speech_len, embed_dim = speech_features.shape + batch_size, sequence_length = input_ids.shape + left_padding = not torch.sum( + input_ids[:, -1] == torch.tensor(self.llm.config.pad_token_id) + ) + # 1. 
Create a mask to know where special speech tokens are + special_speech_token_mask = input_ids == self.llm.config.default_speech_token_id + num_special_speech_tokens = torch.sum(special_speech_token_mask, dim=-1) + # Compute the maximum embed dimension + max_embed_dim = ( + num_special_speech_tokens.max() * (speech_len - 1) + ) + sequence_length + batch_indices, non_speech_indices = torch.where( + input_ids != self.llm.config.default_speech_token_id + ) + + # 2. Compute the positions where text should be written + # Calculate new positions for text tokens in merged speech-text sequence. + # `special_speech_token_mask` identifies speech tokens. Each speech token will be replaced by `nb_text_tokens_per_speechs - 1` text tokens. + # `torch.cumsum` computes how each speech token shifts subsequent text token positions. + # - 1 to adjust for zero-based indexing, as `cumsum` inherently increases indices by one. + new_token_positions = ( + torch.cumsum((special_speech_token_mask * (speech_len - 1) + 1), -1) - 1 + ) + nb_speech_pad = max_embed_dim - 1 - new_token_positions[:, -1] + if left_padding: + new_token_positions += nb_speech_pad[:, None] # offset for left padding + text_to_overwrite = new_token_positions[batch_indices, non_speech_indices] + + # 3. Create the full embedding, already padded to the maximum position + final_embedding = torch.zeros( + batch_size, + max_embed_dim, + embed_dim, + dtype=inputs_embeds.dtype, + device=inputs_embeds.device, + ) + final_attention_mask = torch.zeros( + batch_size, + max_embed_dim, + dtype=attention_mask.dtype, + device=inputs_embeds.device, + ) + if labels is not None: + final_labels = torch.full( + (batch_size, max_embed_dim), + IGNORE_TOKEN_ID, + dtype=input_ids.dtype, + device=input_ids.device, + ) + # In case the Vision model or the Language model has been offloaded to CPU, we need to manually + # set the corresponding tensors into their correct target device. + target_device = inputs_embeds.device + batch_indices, non_speech_indices, text_to_overwrite = ( + batch_indices.to(target_device), + non_speech_indices.to(target_device), + text_to_overwrite.to(target_device), + ) + attention_mask = attention_mask.to(target_device) + + # 4. Fill the embeddings based on the mask. If we have ["hey" "", "how", "are"] + # we need to index copy on [0, 577, 578, 579] for the text and [1:576] for the speech features + final_embedding[batch_indices, text_to_overwrite] = inputs_embeds[ + batch_indices, non_speech_indices + ] + final_attention_mask[batch_indices, text_to_overwrite] = attention_mask[ + batch_indices, non_speech_indices + ] + if labels is not None: + final_labels[batch_indices, text_to_overwrite] = labels[ + batch_indices, non_speech_indices + ] + + # 5. Fill the embeddings corresponding to the speechs. Anything that is not `text_positions` needs filling (#29835) + speech_to_overwrite = torch.full( + (batch_size, max_embed_dim), + True, + dtype=torch.bool, + device=inputs_embeds.device, + ) + speech_to_overwrite[batch_indices, text_to_overwrite] = False + speech_to_overwrite &= speech_to_overwrite.cumsum(-1) - 1 >= nb_speech_pad[ + :, None + ].to(target_device) + + if speech_to_overwrite.sum() != speech_features.shape[:-1].numel(): + raise ValueError( + f"The input provided to the model are wrong. The number of speech tokens is {torch.sum(special_speech_token_mask)} while" + f" the number of speech given to the model is {num_speechs}. This prevents correct indexing and breaks batch generation." 
+ ) + + final_embedding[speech_to_overwrite] = ( + speech_features.contiguous().reshape(-1, embed_dim).to(target_device) + ) + final_attention_mask |= speech_to_overwrite + position_ids = (final_attention_mask.cumsum(-1) - 1).masked_fill_( + (final_attention_mask == 0), 1 + ) + + # 6. Mask out the embedding at padding positions, as we later use the past_key_value value to determine the non-attended tokens. + batch_indices, pad_indices = torch.where( + input_ids == self.llm.config.pad_token_id + ) + indices_to_mask = new_token_positions[batch_indices, pad_indices] + + final_embedding[batch_indices, indices_to_mask] = 0 + + if labels is None: + final_labels = None + + return final_embedding, final_attention_mask, final_labels, position_ids + + def forward( + self, + fbank: torch.Tensor = None, + input_ids: torch.LongTensor = None, + attention_mask: torch.Tensor = None, + labels: torch.LongTensor = None, + ): + encoder_outs = self.encoder(fbank) + + speech_features = self.encoder_projector(encoder_outs) + + inputs_embeds = self.llm.get_input_embeddings()(input_ids) + + ( + inputs_embeds, + attention_mask, + labels, + _, + ) = self._merge_input_ids_with_speech_features( + speech_features, inputs_embeds, input_ids, attention_mask, labels + ) + + rank = get_rank() + print(f"Current rank: {rank}, input_ids: {input_ids.shape}, input_ids: {input_ids}") + print(f"Current rank: {rank}, input_embeds: {inputs_embeds.shape}, input_embeds: {inputs_embeds}") + print(f"Current rank: {rank}, attention_mask: {attention_mask.shape}, attention_mask: {attention_mask}") + print(f"Current rank: {rank}, labels: {labels.shape}, labels: {labels}") + model_outputs = self.llm( + inputs_embeds=inputs_embeds, + attention_mask=attention_mask, + labels=labels, + output_hidden_states=True, + ) + print(f"Current rank: {rank}, model_outputs: {model_outputs}") + + with torch.no_grad(): + preds = torch.argmax(model_outputs.logits, -1) + acc = compute_accuracy( + preds.detach()[:, :-1], + labels.detach()[:, 1:], + ignore_label=IGNORE_TOKEN_ID, + ) + return model_outputs.loss, acc + + def forward_with_speech_output( + self, + fbank: torch.Tensor = None, + input_ids: torch.LongTensor = None, + attention_mask: torch.Tensor = None, + labels: torch.LongTensor = None, + speech_codec_ids: torch.LongTensor = None, + ): + encoder_outs = self.encoder(fbank) + + speech_features = self.encoder_projector(encoder_outs) + + inputs_embeds = self.llm.get_input_embeddings()(input_ids) + + ( + inputs_embeds, + attention_mask, + labels, + _, + ) = self._merge_input_ids_with_speech_features( + speech_features, inputs_embeds, input_ids, attention_mask, labels + ) + input_seq_len = attention_mask.sum(dim=1) # shape, B + ( + text_label_start_index_list, + text_input_start_index_list, + input_question_len_list, + ) = ([], [], []) + for i in range(labels.shape[0]): + input_embeds_valid_index = torch.where(attention_mask[i] != 0)[0] + input_embeds_start_index = input_embeds_valid_index[0] + text_labels_valid_index = torch.where(labels[i] != IGNORE_TOKEN_ID)[0] + text_labels_start_index = text_labels_valid_index[0] + + assert ( + input_seq_len[i] + == input_embeds_valid_index[-1] - input_embeds_start_index + 1 + ), f"input_seq_len: {input_seq_len[i]}, input_embeds_valid_index: {input_embeds_valid_index}, input_embeds_start_index: {input_embeds_start_index}" + assert ( + input_embeds_valid_index[-1] == text_labels_valid_index[-1] + ), f"input_embeds_valid_index: {input_embeds_valid_index}, text_labels_valid_index: {text_labels_valid_index}" + 
input_question_len = text_labels_start_index - input_embeds_start_index + assert ( + input_question_len + + text_labels_valid_index[-1] + - text_labels_start_index + + 1 + == input_seq_len[i] + ) + text_label_start_index_list.append(text_labels_start_index) + text_input_start_index_list.append(input_embeds_start_index) + input_question_len_list.append(input_question_len) + + rank = get_rank() + print(f"Current rank: {rank}, input_ids: {input_ids.shape}, input_ids: {input_ids}") + print(f"Current rank: {rank}, input_embeds: {inputs_embeds.shape}, input_embeds: {inputs_embeds}") + print(f"Current rank: {rank}, attention_mask: {attention_mask.shape}, attention_mask: {attention_mask}") + print(f"Current rank: {rank}, labels: {labels.shape}, labels: {labels}") + model_outputs = self.llm( + inputs_embeds=inputs_embeds, + attention_mask=attention_mask, + labels=labels, + output_hidden_states=True, + ) + print(f"Current rank: {rank}, model_outputs: {model_outputs}") + text_loss = model_outputs.loss + delay_step = 1 + # prepare codec lm inputs + audio_codes_lens = [ + len(x) + input_question_len_list[i] + delay_step + 1 + for i, x in enumerate(speech_codec_ids) + ] + max_len_speech_codec = max(audio_codes_lens) + + if self.codec_lm_padding_side == "right": + audio_codes = [ + [self.codec_lm.config.mask_token_id] + * (input_question_len_list[i] + delay_step) + + [self.codec_lm.config.bos_token_id] + + x + + [self.codec_lm.config.pad_token_id] + * (max_len_speech_codec - audio_codes_lens[i]) + for i, x in enumerate(speech_codec_ids) + ] + audio_labels = [ + [self.codec_lm.config.pad_token_id] + * (input_question_len_list[i] + delay_step) + + x + + [self.codec_lm.config.eos_token_id] + + [self.codec_lm.config.pad_token_id] + * (max_len_speech_codec - audio_codes_lens[i]) + for i, x in enumerate(speech_codec_ids) + ] + elif self.codec_lm_padding_side == "left": + audio_codes = [ + [self.codec_lm.config.pad_token_id] + * (max_len_speech_codec - audio_codes_lens[i]) + + [self.codec_lm.config.mask_token_id] + * (input_question_len_list[i] + delay_step) + + [self.codec_lm.config.bos_token_id] + + x + for i, x in enumerate(speech_codec_ids) + ] + audio_labels = [ + [self.codec_lm.config.pad_token_id] + * (max_len_speech_codec - audio_codes_lens[i]) + + [self.codec_lm.config.pad_token_id] + * (input_question_len_list[i] + delay_step) + + x + + [self.codec_lm.config.eos_token_id] + for i, x in enumerate(speech_codec_ids) + ] + audio_codes = torch.tensor( + audio_codes, dtype=torch.int64, device=input_ids.device + ) + audio_labels = torch.tensor( + audio_labels, dtype=torch.int64, device=input_ids.device + ) + + audio_attention_mask = audio_codes.ne(self.codec_lm.config.pad_token_id) + audio_embeddings = self.codec_lm.get_input_embeddings()(audio_codes) + + text_last_hidden_lists, text_embeds_list, text_input_embeds_list = [], [], [] + for i in range(len(text_label_start_index_list)): + text_last_hidden = model_outputs.hidden_states[-1][ + i, + text_input_start_index_list[i] : text_input_start_index_list[i] + + input_seq_len[i] + - 1, + ] + print(233336666666, text_last_hidden, text_last_hidden.shape) + text_last_hidden_lists.append(text_last_hidden) + text_embed = inputs_embeds[ + i, + text_input_start_index_list[i] + + 1 : text_input_start_index_list[i] + + input_seq_len[i], + ] # exclude bos + text_embeds_list.append(text_embed) + + text_input_embeds = torch.cat( + [ + text_last_hidden, + text_embed, + ], + dim=-1, + ) # shape, T, D1 + D2 + text_input_embeds = self.speech_token_projector( + 
text_input_embeds + ) # shape, T, D_codec + text_input_embeds_list.append(text_input_embeds) + + for i in range(audio_embeddings.shape[0]): + text_input_embeds = text_input_embeds_list[i] + if self.codec_lm_padding_side == "right": + audio_embeddings[i, : text_input_embeds.shape[0]] += text_input_embeds + elif self.codec_lm_padding_side == "left": + start_idx = torch.where( + audio_codes[i] == self.codec_lm.config.mask_token_id + )[0][0] + start_idx_re_compute = torch.where(audio_attention_mask[i] != 0)[0][0] + assert ( + start_idx == start_idx_re_compute + ), f"start_idx: {start_idx}, start_idx_re_compute: {start_idx_re_compute}" + if text_input_embeds.shape[0] > audio_embeddings.shape[1] - start_idx: + text_input_embeds = text_input_embeds[ + : audio_embeddings.shape[1] - start_idx + ] + logging.warning( + f"Truncate text_input_embeds: {text_input_embeds.shape} to {audio_embeddings.shape[1] - start_idx}" + ) + audio_embeddings[ + i, start_idx : start_idx + text_input_embeds.shape[0] + ] += text_input_embeds + + speech_outputs = self.codec_lm( + attention_mask=audio_attention_mask, + inputs_embeds=audio_embeddings, + return_dict=True, + output_hidden_states=True, + ) + last_hidden_state = speech_outputs.hidden_states[-1].clone() + + audio_logits = self.codec_lm_head(last_hidden_state) # shape, B, T, vocab_size + audio_logits = audio_logits.contiguous().view( + -1, self.codec_lm.config.vocab_size + ) + audio_labels = audio_labels.contiguous().view(-1) + audio_labels = audio_labels.masked_fill( + audio_labels == self.codec_lm.config.pad_token_id, IGNORE_TOKEN_ID + ) + codec_loss = self.loss_fct(audio_logits, audio_labels) + audio_preds = torch.argmax(audio_logits, -1) + + with torch.no_grad(): + preds = torch.argmax(model_outputs.logits, -1) + print(23333444444, preds) + print(233335555555, labels) + acc = compute_accuracy( + preds.detach()[:, :-1], + labels.detach()[:, 1:], + ignore_label=IGNORE_TOKEN_ID, + ) + audio_acc = compute_accuracy( + audio_preds.detach(), + audio_labels.detach(), + ignore_label=IGNORE_TOKEN_ID, + ) + audio_topk_acc = self.audio_accuracy_metric( + audio_logits.detach(), audio_labels.detach() + ).item() + + return text_loss, acc, codec_loss, audio_acc, audio_topk_acc + + def decode( + self, + fbank: torch.Tensor = None, + input_ids: torch.LongTensor = None, + attention_mask: torch.Tensor = None, + **kwargs, + ): + + encoder_outs = self.encoder(fbank) + speech_features = self.encoder_projector(encoder_outs) + speech_features = speech_features.to(torch.float16) + inputs_embeds = self.llm.get_input_embeddings()(input_ids) + ( + inputs_embeds, + attention_mask, + _, + _, + ) = self._merge_input_ids_with_speech_features( + speech_features, inputs_embeds, input_ids, attention_mask + ) + generated_ids = self.llm.generate( + inputs_embeds=inputs_embeds, + attention_mask=attention_mask, + max_new_tokens=kwargs.get("max_new_tokens", 1024), + num_beams=kwargs.get("num_beams", 1), + do_sample=kwargs.get("do_sample", True), + min_length=kwargs.get("min_length", 1), + top_p=kwargs.get("top_p", 0.5), + top_k=kwargs.get("top_k", 20), + repetition_penalty=kwargs.get("repetition_penalty", 1.1), + temperature=kwargs.get("temperature", 0.7), + bos_token_id=self.llm.config.bos_token_id, + eos_token_id=self.llm.config.eos_token_id, + pad_token_id=self.llm.config.pad_token_id, + ) + + return generated_ids + + def decode_with_speech_output( + self, + fbank: torch.Tensor = None, + input_ids: torch.LongTensor = None, # Prompt input_ids + attention_mask: torch.Tensor = None, # Prompt 
attention_mask + max_text_new_tokens: int = 1024, + max_speech_new_tokens: int = 2048, # Max length for speech tokens + llm_kwargs: dict = None, # Kwargs for text LLM generate + codec_lm_kwargs: dict = None, # Kwargs for codec LM (e.g., temperature for sampling) - NOT IMPLEMENTED YET + ) -> Tuple[torch.LongTensor, List[List[int]]]: + """ + Generates text and corresponding speech tokens using the revised logic. + + Args: + fbank: Input audio features. + input_ids: Input token IDs for the text prompt. + attention_mask: Attention mask for the text prompt. + max_text_new_tokens: Max new tokens for text generation. + max_speech_new_tokens: Max new tokens for speech generation. + llm_kwargs: Additional arguments for self.llm.generate. + codec_lm_kwargs: Additional arguments for self.codec_lm.generate. + + Returns: + Tuple[torch.LongTensor, List[List[int]]]: + - generated_text_ids: Tensor of generated text token IDs (including prompt). + - generated_speech_tokens: List of lists, where each inner list contains + the generated speech codec tokens for a batch item. + """ + assert fbank.shape[0] == 1, "Batch size must be 1 for speech generation." + if ( + not self.codec_lm + or not self.speech_token_projector + or not self.codec_lm_head + ): + raise ValueError( + "codec_lm and associated layers must be initialized to generate speech output." + ) + + device = next(self.parameters()).device # Use model's device + batch_size = fbank.shape[0] + + # --- 1. Prepare Prompt Embeddings --- + encoder_outs = self.encoder(fbank) + speech_features = self.encoder_projector(encoder_outs) + speech_features = speech_features.to(self.llm.dtype) # Ensure matching dtype + + prompt_embeds = self.llm.get_input_embeddings()(input_ids) + + # Merge speech features with prompt embeddings + ( + merged_prompt_inputs_embeds, + merged_prompt_attention_mask, + _, + _, + ) = self._merge_input_ids_with_speech_features( + speech_features, prompt_embeds, input_ids, attention_mask + ) + + # --- 2. 
Generate Text using LLM --- + # Use merged embeds/mask as input to generate + # Ensure kwargs passed are suitable for llm.generate + # Note: Using default generation params from `decode` if not provided in kwargs + final_llm_kwargs = { + "bos_token_id": self.llm.config.bos_token_id, + "eos_token_id": self.llm.config.eos_token_id, + "pad_token_id": self.llm.config.pad_token_id, + "num_beams": 1, + "do_sample": True, # Typically false for S2ST/S2TT tasks unless exploration needed + "top_p": 0.5, + "top_k": 20, + "repetition_penalty": 1.1, + "temperature": 0.7, + **(llm_kwargs or {}), # User-provided kwargs override defaults + } + + text_outputs = self.llm.generate( + inputs_embeds=merged_prompt_inputs_embeds, + attention_mask=merged_prompt_attention_mask, + max_new_tokens=max_text_new_tokens, + return_dict_in_generate=True, + output_hidden_states=True, + **final_llm_kwargs, + ) + delay_step = 1 + generated_text_ids = text_outputs.sequences # [B, S_full] + eos_token_id = self.llm.config.eos_token_id + eos_token_embedding = self.llm.get_input_embeddings()( + torch.tensor([[eos_token_id]], device=device) + ) + assert ( + generated_text_ids[0, -1] == eos_token_id + ), f"Last token is not EOS: {generated_text_ids[0, -1]} != {eos_token_id}" + thinker_token_embeds_org = [ + token_hidden_states[0].to(self.llm.device) + for token_hidden_states in text_outputs.hidden_states + ] + + first_thinker_token_embed = torch.cat( + [ + thinker_token_embeds_org[0][:, 1:], + thinker_token_embeds_org[1], + ], + dim=1, + ) + + thinker_token_embeds = ( + [first_thinker_token_embed] + + thinker_token_embeds_org[2:] + + [eos_token_embedding] + ) + thinker_hidden_states = [ + token_hidden_states[-1].to(self.llm.device) + for token_hidden_states in text_outputs.hidden_states + ] + + thinker_reply_part = [ + torch.cat( + [ + thinker_hidden_state, + thinker_token_embed, + ], + dim=-1, + ) + for thinker_hidden_state, thinker_token_embed in zip( + thinker_hidden_states[1:], thinker_token_embeds[1:] + ) + ] + thinker_reply_part = torch.cat(thinker_reply_part, dim=1) + # thinker_prompt_part = thinker_hidden_states[0] + thinker_token_embeds[0] + thinker_prompt_part = torch.cat( + [ + thinker_hidden_states[0], + thinker_token_embeds[0], + ], + dim=-1, + ) + + thinker_prompt_part = self.speech_token_projector(thinker_prompt_part) + thinker_reply_part = self.speech_token_projector(thinker_reply_part) + + thinker_prompt_part_seq_len = thinker_prompt_part.shape[1] + talker_input_ids = torch.full( + (batch_size, thinker_prompt_part_seq_len + delay_step + 1), + self.codec_lm.config.mask_token_id, + dtype=torch.long, + device=self.llm.device, + ) + talker_input_ids[:, -1] = self.codec_lm.config.bos_token_id + talker_inputs_embeds = self.codec_lm.get_input_embeddings()(talker_input_ids) + thinker_input_embeds = torch.cat( + [ + thinker_prompt_part, + thinker_reply_part[:, : delay_step + 1, :], + ], + dim=1, + ) + talker_inputs_embeds += thinker_input_embeds + thinker_reply_part = thinker_reply_part[:, delay_step + 1 :, :] + + past_key_values = None + + generated_speech_tokens_list = [] + next_token_ids = None + + for t in range(max_speech_new_tokens): + if t > 0: + talker_inputs_embeds = self.codec_lm.get_input_embeddings()( + next_token_ids + ) + if thinker_reply_part.shape[1] > 0: + talker_inputs_embeds += thinker_reply_part[:, :1, :] + thinker_reply_part = thinker_reply_part[:, 1:, :] + + codec_outputs = self.codec_lm( + inputs_embeds=talker_inputs_embeds, + past_key_values=past_key_values, + use_cache=True, + return_dict=True, + 
output_hidden_states=True, + ) + last_token_hidden_state = codec_outputs.hidden_states[-1][:, -1, :] + next_token_logits = self.codec_lm_head(last_token_hidden_state) + + next_token_ids = topk_sampling( + next_token_logits, + ) + if next_token_ids[0, 0] == self.codec_lm.config.eos_token_id: + break + + past_key_values = codec_outputs.past_key_values # Update KV cache + generated_speech_tokens_list.append( + next_token_ids.squeeze(1).cpu().tolist()[0] + ) + + return generated_text_ids, generated_speech_tokens_list + + +def compute_accuracy(pad_outputs, pad_targets, ignore_label): + """Calculate accuracy. + Copied from https://github.com/X-LANCE/SLAM-LLM/blob/main/src/slam_llm/utils/metric.py + Args: + pad_outputs (LongTensor): Prediction tensors (B, Lmax). + pad_targets (LongTensor): Target label tensors (B, Lmax). + ignore_label (int): Ignore label id. + + Returns: + float: Accuracy value (0.0 - 1.0). + + """ + mask = pad_targets != ignore_label + numerator = torch.sum( + pad_outputs.masked_select(mask) == pad_targets.masked_select(mask) + ) + denominator = torch.sum(mask) + return numerator.float() / denominator.float() + + +def topk_sampling( + logits, + top_k=50, + top_p=0.95, + temperature=0.8, +): + if temperature != 1.0: + logits = logits / temperature + # Top-p/top-k filtering + logits_filtered = top_k_top_p_filtering( + logits.clone(), top_k=top_k, top_p=top_p, min_tokens_to_keep=2 + ) + # Sample + probs = torch.nn.functional.softmax(logits_filtered, dim=-1) + tokens = torch.multinomial(probs, num_samples=1) + + return tokens + + +# https://github.com/microsoft/unilm/blob/master/xtune/src/transformers/modeling_utils.py +def top_k_top_p_filtering( + logits, top_k=20, top_p=0.5, filter_value=-float("Inf"), min_tokens_to_keep=1 +): + """Filter a distribution of logits using top-k and/or nucleus (top-p) filtering + Args: + logits: logits distribution shape (batch size, vocabulary size) + if top_k > 0: keep only top k tokens with highest probability (top-k filtering). + if top_p < 1.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering). + Nucleus filtering is described in Holtzman et al. 
(http://arxiv.org/abs/1904.09751) + Make sure we keep at least min_tokens_to_keep per batch example in the output + From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317 + """ + if top_k > 0: + top_k = min(max(top_k, min_tokens_to_keep), logits.size(-1)) # Safety check + # Remove all tokens with a probability less than the last token of the top-k + indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None] + logits[indices_to_remove] = filter_value + + if top_p < 1.0: + sorted_logits, sorted_indices = torch.sort(logits, descending=True) + cumulative_probs = torch.cumsum( + torch.nn.functional.softmax(sorted_logits, dim=-1), dim=-1 + ) + + # Remove tokens with cumulative probability above the threshold (token with 0 are kept) + sorted_indices_to_remove = cumulative_probs > top_p + if min_tokens_to_keep > 1: + # Keep at least min_tokens_to_keep (set to min_tokens_to_keep-1 because we add the first one below) + sorted_indices_to_remove[..., :min_tokens_to_keep] = 0 + # Shift the indices to the right to keep also the first token above the threshold + sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone() + sorted_indices_to_remove[..., 0] = 0 + + # scatter sorted tensors to original indexing + indices_to_remove = sorted_indices_to_remove.scatter( + 1, sorted_indices, sorted_indices_to_remove + ) + logits[indices_to_remove] = filter_value + return logits diff --git a/egs/speech_llm/SPEECH2SPEECH/debug/prepare.sh b/egs/speech_llm/SPEECH2SPEECH/debug/prepare.sh new file mode 100644 index 000000000..aa3d34e9d --- /dev/null +++ b/egs/speech_llm/SPEECH2SPEECH/debug/prepare.sh @@ -0,0 +1,195 @@ +#!/usr/bin/env bash + +# fix segmentation fault reported in https://github.com/k2-fsa/icefall/issues/674 +export PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python + +export PYTHONPATH=$PYTHONPATH:/workspace/icefall + +set -eou pipefail + +stage=$1 +stop_stage=$2 +# All files generated by this script are saved in "data". +# You can safely remove "data" and rerun this script to regenerate it. +mkdir -p data + +log() { + # This function is from espnet + local fname=${BASH_SOURCE[1]##*/} + echo -e "$(date '+%Y-%m-%d %H:%M:%S') (${fname}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*" +} + + +if [ $stage -le 0 ] && [ $stop_stage -ge 0 ]; then + log "stage 0: Clone CosyVoice repo and install requirements inside the container" + # docker: ghcr.io/swivid/f5-tts:main + pip install k2==1.24.4.dev20241030+cuda12.4.torch2.4.0 -f https://k2-fsa.github.io/k2/cuda.html + git clone --recursive https://github.com/FunAudioLLM/CosyVoice.git /workspace/CosyVoice + cd /workspace/CosyVoice + # If you failed to clone submodule due to network failures, please run following command until success + git submodule update --init --recursive + pip install -r qwen_omni/requirements.txt + pip install -r qwen_omni/requirements-cosyvoice.txt + + # For Chinese only dataset, you can use the following command to download the Chinese fine-tuned whisper model. 
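+    # The fine-tuned Whisper checkpoint downloaded below is referenced later as
+    # models/whisper/v1.1/whisper-large-v2-multi-hans-zh-epoch-3-avg-10.pt
+    # in the training and decoding stages.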
+ huggingface-cli download --local-dir models/whisper yuekai/icefall_asr_multi-hans-zh_whisper + # Cosyvoice pretrained model for speech token2wav module + huggingface-cli download --local-dir models/CosyVoice-300M-SFT FunAudioLLM/CosyVoice-300M-SFT + # Qwen Pretrained model + huggingface-cli download --local-dir models/Qwen2.5-0.5B-Instruct Qwen/Qwen2.5-0.5B-Instruct + # Qwen-Omni like speech2speech model trained on worstchan/Belle_1.4M-SLAM-Omni + huggingface-cli download --local-dir models/qwen-omni-like-speech2speech-belle-1.4M yuekai/qwen-omni-like-speech2speech-belle-1.4M + + # For Gradio demo, we follow https://arxiv.org/abs/2412.15649 to use ASR model to decode the history speech as context. + pip install sherpa-onnx + model_path=local/sherpa-onnx-paraformer-zh-2023-09-14 + if [ ! -d $model_path ]; then + wget -nc https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-paraformer-zh-2023-09-14.tar.bz2 + tar xvf sherpa-onnx-paraformer-zh-2023-09-14.tar.bz2 -C local + fi +fi +export PYTHONPATH=$PYTHONPATH:/workspace/CosyVoice + +if [ $stage -le 1 ] && [ $stop_stage -ge 1 ]; then + log "stage 1: Compute fbank feature from huggingface" + python3 local/compute_whisper_fbank.py \ + --num-mel-bins 80 --whisper-fbank True --resample-to-16kHz True --speed-perturb False \ + --out-dir data/fbank_test \ + --huggingface-dataset-path-or-name /workspace/Belle_1.4M-SLAM-Omni \ + --audio-key question_audio --text-key answer \ + --prefix belle +fi + +if [ $stage -le 2 ] && [ $stop_stage -ge 2 ]; then + log "Stage 2: Combine features" + manifest_dir=data/fbank + if [ ! -f $manifest_dir/cuts_belle_00001-01600.jsonl.gz ]; then + mv $manifest_dir/cuts_belle.00000.jsonl.gz ./ + # exclude cust_belle_00000.jsonl.gz for valid and test set + pieces=$(find $manifest_dir -name "cuts_belle.*.jsonl.gz" | sort) + echo $pieces | wc + lhotse combine $pieces data/fbank/cuts_belle_00001-01600.jsonl.gz + mv ./cuts_belle.00000.jsonl.gz $manifest_dir # put it back + cd $manifest_dir && ln -s cuts_belle_00001-01600.jsonl.gz cuts_belle_train.jsonl.gz + ln -s cuts_belle.00000.jsonl.gz cuts_belle_test.jsonl.gz && cd - + fi +fi + +ngpu=8 +exp_dir=./qwen_omni/exp_speech2speech +if [ $stage -le 3 ] && [ $stop_stage -ge 3 ]; then + log "stage 3: Training Speech2Speech Model" + torchrun --nproc_per_node $ngpu ./qwen_omni/train.py \ + --max-duration 50 \ + --enable-musan False \ + --exp-dir $exp_dir \ + --speech-encoder-path-or-name models/whisper/v1.1/whisper-large-v2-multi-hans-zh-epoch-3-avg-10.pt \ + --llm-path-or-name Qwen/Qwen2.5-0.5B-Instruct \ + --manifest-dir data/fbank \ + --deepspeed \ + --deepspeed_config ./qwen_omni/ds_config_zero1.json \ + --use-flash-attn True \ + --use-lora True --unfreeze-llm True --unfreeze-speech-projector True --enable-speech-output True +fi + +if [ $stage -le 4 ] && [ $stop_stage -ge 4 ]; then + log "stage 4: Decoding, only support batch_size=1 for now." 
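+    # Expose the released speech2speech checkpoint as epoch-999.pt so that
+    # decode.py below can load it via --epoch 999 --avg 1.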
+ cd $exp_dir && ln -s ../../models/qwen-omni-like-speech2speech-belle-1.4M/pytorch_model.bin epoch-999.pt && cd - + python3 ./qwen_omni/decode.py \ + --max-duration 1 \ + --exp-dir $exp_dir \ + --speech-encoder-path-or-name models/whisper/v1.1/whisper-large-v2-multi-hans-zh-epoch-3-avg-10.pt \ + --llm-path-or-name models/Qwen2.5-0.5B-Instruct \ + --epoch 999 --avg 1 \ + --manifest-dir data/fbank \ + --use-flash-attn True \ + --method e2e-epoch10_speech2speech \ + --enable-speech-output True \ + --token2wav-path models/CosyVoice-300M-SFT \ + --use-lora True +fi + +if [ $stage -le 5 ] && [ $stop_stage -ge 5 ]; then + log "stage 5: Gradio Demo" + python3 ./qwen_omni/web_demo.py \ + --speech-encoder-path-or-name models/whisper/v1.1/whisper-large-v2-multi-hans-zh-epoch-3-avg-10.pt \ + --llm-path-or-name models/Qwen2.5-0.5B-Instruct \ + --checkpoint-path $exp_dir/epoch-999.pt \ + --use-flash-attn True \ + --enable-speech-output True \ + --asr-model-dir local/sherpa-onnx-paraformer-zh-2023-09-14 \ + --use-lora True --token2wav-path /workspace/CosyVoice-300M-SFT --share +fi + +if [ $stage -le 6 ] && [ $stop_stage -ge 6 ]; then + log "stage 1: Compute fbank feature from huggingface" + # CUDA_VISIBLE_DEVICES=0 python3 local/compute_whisper_fbank.py \ + # --num-mel-bins 80 --whisper-fbank True --resample-to-16kHz True --speed-perturb False \ + # --out-dir data/fbank_voice_assistant \ + # --huggingface-dataset-path-or-name worstchan/VoiceAssistant-400K-SLAM-Omni \ + # --audio-key question_audio --text-key answer \ + # --prefix voice_assistant + CUDA_VISIBLE_DEVICES=0 python3 local/compute_whisper_fbank.py \ + --num-mel-bins 80 --whisper-fbank True --resample-to-16kHz True --speed-perturb False \ + --out-dir data/fbank_voice_assistant_cosy2 \ + --json-file-path /workspace/slam/VoiceAssistant-430K-vocalnet/VoiceAssistant-430K.json \ + --prefix voice_assistant +fi + +if [ $stage -le 7 ] && [ $stop_stage -ge 7 ]; then + log "stage 7: Compute fbank feature from huggingface" + # CUDA_VISIBLE_DEVICES=1 python3 local/compute_whisper_fbank.py \ + # --num-mel-bins 80 --whisper-fbank True --resample-to-16kHz True --speed-perturb False \ + # --out-dir data/fbank_ultrachat \ + # --huggingface-dataset-path-or-name worstchan/UltraChat-300K-SLAM-Omni \ + # --audio-key question_audio --text-key answer \ + # --prefix ultrachat + CUDA_VISIBLE_DEVICES=1 python3 local/compute_whisper_fbank.py \ + --num-mel-bins 80 --whisper-fbank True --resample-to-16kHz True --speed-perturb False \ + --out-dir data/fbank_ultrachat_cosy2 \ + --json-file-path /workspace/slam/UltraChat-vocalnet/UltraChat.json \ + --prefix ultrachat +fi + +if [ $stage -le 8 ] && [ $stop_stage -ge 8 ]; then + log "stage 8: Compute fbank feature from huggingface" + + CUDA_VISIBLE_DEVICES=1 python3 local/compute_whisper_fbank.py \ + --num-mel-bins 80 --whisper-fbank True --resample-to-16kHz True --speed-perturb False \ + --out-dir data/fbank_gigaspeech \ + --huggingface-dataset-path-or-name speechcolab/gigaspeech \ + --subset test --split test \ + --audio-key audio --text-key text \ + --prefix gigaspeech +fi + +if [ $stage -le 9 ] && [ $stop_stage -ge 9 ]; then + log "stage 9: Compute fbank feature from huggingface" + CUDA_VISIBLE_DEVICES=0 python3 local/compute_whisper_fbank.py \ + --num-mel-bins 80 --whisper-fbank True --resample-to-16kHz True --speed-perturb True \ + --out-dir data/fbank_gigaspeech \ + --huggingface-dataset-path-or-name speechcolab/gigaspeech \ + --subset xl --split train \ + --audio-key audio --text-key text \ + --prefix gigaspeech +fi + + 
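+# Stage 10 below uses the VocalNet-format manifests (--dataset-format vocalnet);
+# pass the stage number twice to run it in isolation, e.g.: bash prepare.sh 10 10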
+ngpu=2 +exp_dir=./qwen_omni/exp_speech2speech_en +if [ $stage -le 10 ] && [ $stop_stage -ge 10 ]; then + log "stage 10: Training Speech2Speech Model" + torchrun --nproc_per_node $ngpu ./qwen_omni/train.py \ + --max-duration 1 \ + --enable-musan False \ + --exp-dir $exp_dir \ + --speech-encoder-path-or-name models/large-v2.pt \ + --llm-path-or-name Qwen/Qwen2.5-0.5B-Instruct \ + --dataset-format vocalnet \ + --manifest-dir data/fbank \ + --deepspeed \ + --deepspeed_config ./qwen_omni/ds_config_zero1.json \ + --use-flash-attn False --bucketing-sampler False \ + --use-lora False --unfreeze-llm False --unfreeze-speech-projector True --enable-speech-output False + # --use-lora True --unfreeze-llm True --unfreeze-speech-projector True --enable-speech-output True +fi diff --git a/egs/speech_llm/SPEECH2SPEECH/debug/train.py b/egs/speech_llm/SPEECH2SPEECH/debug/train.py new file mode 100755 index 000000000..3327ee1f1 --- /dev/null +++ b/egs/speech_llm/SPEECH2SPEECH/debug/train.py @@ -0,0 +1,977 @@ +#!/usr/bin/env python3 +# Copyright 2023 Xiaomi Corp. (authors: Xiaoyu Yang) +# 2024 Yuekai Zhang +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Usage: +# For Chinese dataset, you can use the following command to download the Chinese fine-tuned whisper model. 
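+# (These downloads can be skipped if prepare.sh stage 0 has already fetched the models.)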
+huggingface-cli download --local-dir models/whisper yuekai/icefall_asr_multi-hans-zh_whisper +# Qwen Pretrained model +huggingface-cli download --local-dir models/Qwen2.5-0.5B-Instruct Qwen/Qwen2.5-0.5B-Instruct + +torchrun --nproc_per_node $ngpu ./qwen_omni/train.py \ + --max-duration 50 \ + --enable-musan False \ + --exp-dir $exp_dir \ + --speech-encoder-path-or-name models/whisper/v1.1/whisper-large-v2-multi-hans-zh-epoch-3-avg-10.pt \ + --llm-path-or-name Qwen/Qwen2.5-0.5B-Instruct \ + --manifest-dir data/fbank \ + --deepspeed \ + --deepspeed_config ./qwen_omni/ds_config_zero1.json \ + --use-flash-attn True \ + --use-lora True --unfreeze-llm True --unfreeze-speech-projector True --enable-speech-output True +""" + +import argparse +import copy +import logging +import os +import random +import warnings +from pathlib import Path +from shutil import copyfile +from typing import Any, Dict, List, Optional, Tuple, Union + +import deepspeed +import torch +import torch.multiprocessing as mp +import torch.nn as nn +import transformers +import whisper +from data_module import AsrDataModule +from deepspeed.utils.zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict +from label_smoothing import LabelSmoothingLoss +from lhotse import CutSet, load_manifest +from lhotse.cut import Cut +from lhotse.dataset.sampling.base import CutSampler +from lhotse.utils import fix_random_seed +from model import IGNORE_TOKEN_ID, SPEECH_LLM, EncoderProjector +from peft import LoraConfig, get_peft_model +from torch import Tensor +from torch.utils.tensorboard import SummaryWriter +from transformers import ( + AutoModelForCausalLM, + AutoTokenizer, + Qwen2Config, + Qwen2ForCausalLM, +) +from whisper_encoder_forward_monkey_patch import replace_whisper_encoder_forward + +# from icefall import diagnostics +from utils import get_rank, get_world_size +# from icefall.env import get_env_info +from utils import ( # filter_uneven_sized_batch, + AttributeDict, + MetricsTracker, + setup_logger, + str2bool, +) + +DEFAULT_SPEECH_TOKEN = "" + + +def set_batch_count(model: nn.Module, batch_count: float) -> None: + for module in model.modules(): + if hasattr(module, "batch_count"): + module.batch_count = batch_count + + +def add_model_arguments(parser: argparse.ArgumentParser): + parser.add_argument( + "--remove-whisper-encoder-input-length-restriction", + type=str2bool, + default=True, + help="replace whisper encoder forward method to remove input length restriction", + ) + parser.add_argument( + "--llm-path-or-name", + type=str, + default="/workspace/asr/Qwen1.5-0.5B-Chat", + help="Path or name of the large language model.", + ) + + parser.add_argument( + "--speech-encoder-path-or-name", + type=str, + default="whisper-large-v2", + help="Path or name of the speech encoder.", + ) + + parser.add_argument( + "--encoder-projector-ds-rate", + type=int, + default=8, + help="Downsample rate for the encoder projector.", + ) + parser.add_argument( + "--use-flash-attn", + type=str2bool, + default=True, + help="Whether to use flash attention.", + ) + + parser.add_argument( + "--use-lora", + type=str2bool, + default=False, + help="Whether to use lora to fine-tune llm.", + ) + + parser.add_argument( + "--enable-speech-output", + type=str2bool, + default=False, + help="Whether to enable speech codec output.", + ) + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--tensorboard", + type=str2bool, + default=True, + help="Should various 
information be logged in tensorboard.", + ) + + parser.add_argument( + "--num-epochs", + type=int, + default=10, + help="Number of epochs to train.", + ) + + parser.add_argument( + "--start-epoch", + type=int, + default=1, + help="""Resume training from this epoch. It should be positive. + If larger than 1, it will load checkpoint from + exp-dir/epoch-{start_epoch-1}.pt + """, + ) + + parser.add_argument( + "--exp-dir", + type=str, + default="whisper_qwen/exp", + help="""The experiment dir. + It specifies the directory where all training related + files, e.g., checkpoints, log, etc, are saved + """, + ) + + parser.add_argument( + "--pretrained-model-path", + type=str, + default=None, + help="""The path to the pretrained model if it is not None. Training will + start from this model. e.g. ./wenetspeech/ASR/whisper/exp_large_v2/epoch-4-avg-3.pt + """, + ) + + parser.add_argument( + "--sampler-state-dict-path", + type=str, + default=None, + help="""The path to the sampler state dict if it is not None. Training will start from this sampler state dict. + """, + ) + + parser.add_argument( + "--seed", + type=int, + default=42, + help="The seed for random generators intended for reproducibility", + ) + + parser.add_argument( + "--use-fp16", + type=str2bool, + default=True, + help="Whether to use half precision training.", + ) + + parser.add_argument( + "--unfreeze-llm", + type=str2bool, + default=False, + help="Whether to unfreeze llm during training.", + ) + + parser.add_argument( + "--unfreeze-speech-projector", + type=str2bool, + default=False, + help="Whether to unfreeze speech adaptor during training.", + ) + + parser.add_argument( + "--dataset-format", + type=str, + default="slam_omni", + help="The format of the dataset.", + ) + parser = deepspeed.add_config_arguments(parser) + add_model_arguments(parser) + + return parser + + +def get_params() -> AttributeDict: + """Return a dict containing training parameters. + + All training related parameters that are not passed from the commandline + are saved in the variable `params`. + + Commandline options are merged into `params` after they are parsed, so + you can also access them via `params`. + + Explanation of options saved in `params`: + + - frame_shift_ms: The frame shift in milliseconds. + - allowed_excess_duration_ratio: The allowed excess duration ratio. + - best_train_loss: The best training loss so far. + - best_valid_loss: The best validation loss so far. + - best_train_epoch: The epoch where the best training loss is achieved. + - best_valid_epoch: The epoch where the best validation loss is achieved. + - batch_idx_train: The batch index of the current batch. + - log_interval: Log training stats every `log_interval` batches. + - reset_interval: Reset the stats every `reset_interval` batches. + - valid_interval: Run validation every `valid_interval` batches. + - env_info: The environment information. 
+ """ + params = AttributeDict( + { + "allowed_excess_duration_ratio": 0.1, + "subsampling_factor": 2, + "frame_shift_ms": 10, + "best_train_loss": float("inf"), + "best_valid_loss": float("inf"), + "best_train_epoch": -1, + "best_valid_epoch": -1, + "batch_idx_train": 0, + "log_interval": 50, + "reset_interval": 200, + "valid_interval": 5000, + # "env_info": get_env_info(), + } + ) + + return params + + +def process_batch_slam_omni(batch: dict): + answers = batch["supervisions"]["text"] + questions_with_history = [ + cut.custom["question"] for cut in batch["supervisions"]["cut"] + ] + chat_rounds = [cut.custom["round"] for cut in batch["supervisions"]["cut"]] + answer_cosyvoice_speech_token = [ + cut.custom["answer_cosyvoice_speech_token"] + for cut in batch["supervisions"]["cut"] + ] + last_questions = [ + question.split(": ")[-1].strip() for question in questions_with_history + ] + history_contexts = [ + question.rsplit(":", 1)[0].strip() for question in questions_with_history + ] + + messages = [] + for i, total_round in enumerate(chat_rounds): + message = [] + if total_round > 1: + history_question_answer = history_contexts[i].split("USER:") + history_question_answer = [item for item in history_question_answer if item] + for j in range(total_round - 1): + question_answer = history_question_answer[j].split("ASSISTANT:") + message += [ + {"role": "user", "content": question_answer[0].strip()}, + {"role": "assistant", "content": question_answer[1].strip()}, + ] + message += [ + {"role": "user", "content": f"{DEFAULT_SPEECH_TOKEN}"}, + {"role": "assistant", "content": answers[i]}, + ] + messages.append(message) + return messages, answer_cosyvoice_speech_token + + +def process_batch_vocalnet(batch: dict): + answers = batch["supervisions"]["text"] + answer_cosyvoice_speech_token = [ + cut.custom["speech_token"] for cut in batch["supervisions"]["cut"] + ] + messages = [] + for i in range(len(answers)): + message = [ + {"role": "user", "content": f"{DEFAULT_SPEECH_TOKEN}"}, + {"role": "assistant", "content": answers[i]}, + ] + messages.append(message) + return messages, answer_cosyvoice_speech_token + + +def compute_loss( + params: AttributeDict, + tokenizer: AutoTokenizer, + model: nn.Module, + batch: dict, + is_training: bool, +) -> Tuple[Tensor, MetricsTracker]: + """ + Compute the loss for the given batch. + Args: + params: + It is returned by :func:`get_params`. + tokenizer: + The tokenizer used to encode the text. + model: + The model for training. + batch: + A batch of data. See `lhotse.dataset.K2SpeechRecognitionDataset()` + for the content in it. + is_training: + Whether it is training. + Returns: + Return a tuple of two elements. The first element is the loss tensor. + """ + # For the uneven-sized batch, the total duration after padding would possibly + # cause OOM. Hence, for each batch, which is sorted descendingly by length, + # we simply drop the last few shortest samples, so that the retained total frames + # (after padding) would not exceed `allowed_max_frames`: + # `allowed_max_frames = int(max_frames * (1.0 + allowed_excess_duration_ratio))`, + # where `max_frames = max_duration * 1000 // frame_shift_ms`. + # We set allowed_excess_duration_ratio=0.1. 
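+    # The helper below renders each conversation with a Qwen-style chat template,
+    # so a single round becomes
+    #   <|im_start|>user\n{DEFAULT_SPEECH_TOKEN}<|im_end|>\n<|im_start|>assistant\n{answer}<|im_end|>
+    # Padding positions and everything up to (and including) the assistant header
+    # that follows the speech placeholder are masked to IGNORE_TOKEN_ID, so the
+    # loss is only computed on the answer tokens.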
+ + def preprocess( + messages, + tokenizer: transformers.PreTrainedTokenizer, + ) -> Dict: + """Preprocesses the data for supervised fine-tuning.""" + texts = [] + TEMPLATE = "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content']}}{% if loop.last %}{{ '<|im_end|>'}}{% else %}{{ '<|im_end|>\n' }}{% endif %}{% endfor %}" + for i, msg in enumerate(messages): + texts.append( + tokenizer.apply_chat_template( + msg, + tokenize=True, + chat_template=TEMPLATE, + add_generation_prompt=False, + padding="longest", # FIX me change padding to longest + truncation=False, + ) + ) + if len(texts) != len(messages): + logging.warning(f"Remove too long text, {messages} ") + max_len_texts = max([len(text) for text in texts]) + if tokenizer.padding_side == "right": + texts = [ + text + [tokenizer.pad_token_id] * (max_len_texts - len(text)) + for text in texts + ] + else: + texts = [ + [tokenizer.pad_token_id] * (max_len_texts - len(text)) + text + for text in texts + ] + input_ids = torch.tensor(texts, dtype=torch.int) + + target_ids = input_ids.clone() + target_ids[target_ids == tokenizer.pad_token_id] = IGNORE_TOKEN_ID + # mask all tokens before token_id 151646 with IGNORE_TOKEN_ID + # first get the indices of the tokens + mask_prompt = True + if mask_prompt: + default_speech_token_id = tokenizer.convert_tokens_to_ids( + DEFAULT_SPEECH_TOKEN + ) + mask_indices = torch.where(input_ids == default_speech_token_id) + for i in range(mask_indices[0].size(0)): + row = mask_indices[0][i] + col = mask_indices[1][i] + # + 6 to skip: 'assistant', '\n' 151665, 151645, 198, 151644, 77091, 198 + # WAR: TODO FIXME check qwen3 + target_ids[row, : col + 6] = IGNORE_TOKEN_ID + + attention_mask = input_ids.ne(tokenizer.pad_token_id) + + return input_ids, attention_mask, target_ids + + # max_frames = params.max_duration * 1000 // params.frame_shift_ms + # allowed_max_frames = int(max_frames * (1.0 + params.allowed_excess_duration_ratio)) + # batch = filter_uneven_sized_batch(batch, allowed_max_frames) + + device = next(model.parameters()).device + feature = batch["inputs"] + + assert feature.ndim == 3 + feature = feature.to(device) + feature = feature.transpose(1, 2) # (N, C, T) + + batch_idx_train = params.batch_idx_train + + # WAR: TODO FIXME merge process_batch_slam_omni and process_batch_vocalnet + if params.dataset_format == "slam_omni": + messages, answer_cosyvoice_speech_token = process_batch_slam_omni(batch) + elif params.dataset_format == "vocalnet": + messages, answer_cosyvoice_speech_token = process_batch_vocalnet(batch) + else: + raise ValueError(f"Unknown dataset format: {params.dataset_format}") + + print(f"messages: {messages}") + + input_ids, attention_mask, target_ids = preprocess(messages, tokenizer) + + target_ids = target_ids.type(torch.LongTensor) + input_ids = input_ids.type(torch.LongTensor) + + with torch.set_grad_enabled(is_training): + if not params.enable_speech_output: + loss, acc = model( + fbank=feature, + input_ids=input_ids.to(device), + attention_mask=attention_mask.to(device), + labels=target_ids.to(device), + ) + else: + ( + text_loss, + acc, + codec_loss, + codec_acc, + codec_topk_acc, + ) = model.forward_with_speech_output( + fbank=feature, + input_ids=input_ids.to(device), + attention_mask=attention_mask.to(device), + labels=target_ids.to(device), + speech_codec_ids=answer_cosyvoice_speech_token, + ) + loss = text_loss + codec_loss + assert loss.requires_grad == is_training + + info = MetricsTracker() + with warnings.catch_warnings(): + 
warnings.simplefilter("ignore") + feature_lens = batch["supervisions"]["num_frames"] + info["frames"] = (feature_lens // params.subsampling_factor).sum().item() + + # Note: We use reduction=sum while computing the loss. + info["loss"] = loss.detach().cpu().item() + info["acc"] = ( + acc * info["frames"] + ) # WAR: to avoid normalization by the number of frames + if params.enable_speech_output: + info["codec_acc"] = codec_acc * info["frames"] + info["codec_topk_acc"] = codec_topk_acc * info["frames"] + info["codec_loss"] = codec_loss.detach().cpu().item() + info["text_loss"] = text_loss.detach().cpu().item() + return loss, info + + +def compute_validation_loss( + params: AttributeDict, + tokenizer: whisper.tokenizer.Tokenizer, + model: nn.Module, + valid_dl: torch.utils.data.DataLoader, + world_size: int = 1, +) -> MetricsTracker: + """Run the validation process.""" + model.eval() + + tot_loss = MetricsTracker() + + for batch_idx, batch in enumerate(valid_dl): + with torch.amp.autocast("cuda", enabled=params.use_fp16): + loss, loss_info = compute_loss( + params=params, + tokenizer=tokenizer, + model=model, + batch=batch, + is_training=False, + ) + assert loss.requires_grad is False + tot_loss = tot_loss + loss_info + + if world_size > 1: + tot_loss.reduce(loss.device) + + loss_value = tot_loss["loss"] / tot_loss["frames"] + if loss_value < params.best_valid_loss: + params.best_valid_epoch = params.cur_epoch + params.best_valid_loss = loss_value + exit() + return tot_loss + + +def train_one_epoch( + params: AttributeDict, + tokenizer: AutoTokenizer, + model: nn.Module, + optimizer: torch.optim.Optimizer, + scheduler: torch.optim.lr_scheduler, + train_dl: torch.utils.data.DataLoader, + valid_dl: torch.utils.data.DataLoader, + tb_writer: Optional[SummaryWriter] = None, + world_size: int = 1, + rank: int = 0, +) -> None: + """Train the model for one epoch. + + The training loss from the mean of all frames is saved in + `params.train_loss`. It runs the validation process every + `params.valid_interval` batches. + + Args: + params: + It is returned by :func:`get_params`. + model: + The model for training. + optimizer: + The optimizer we are using. + scheduler: + The learning rate scheduler, we call step() every step. + train_dl: + Dataloader for the training dataset. + valid_dl: + Dataloader for the validation dataset. + scaler: + The scaler used for mix precision training. + model_avg: + The stored model averaged from the start of training. + tb_writer: + Writer to write log messages to tensorboard. + world_size: + Number of nodes in DDP training. If it is 1, DDP is disabled. + rank: + The rank of the node in DDP training. If no DDP is used, it should + be set to 0. 
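+
+    Note:
+        Only model.encoder_projector is explicitly switched to training mode at
+        the start of this function; model.train() is called again after each
+        validation pass inside the batch loop.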
+ """ + model.encoder_projector.train() + + tot_loss = MetricsTracker() + + for batch_idx, batch in enumerate(train_dl): + params.batch_idx_train += 1 + batch_size = len(batch["supervisions"]["text"]) + if batch_idx % params.valid_interval == 0: + logging.info("Computing validation loss") + valid_info = compute_validation_loss( + params=params, + tokenizer=tokenizer, + model=model, + valid_dl=valid_dl, + world_size=world_size, + ) + model.train() + logging.info(f"Epoch {params.cur_epoch}, validation: {valid_info}") + logging.info( + f"Maximum memory allocated so far is {torch.cuda.max_memory_allocated()//1000000}MB" + ) + if tb_writer is not None: + valid_info.write_summary( + tb_writer, "train/valid_", params.batch_idx_train + ) + if batch_idx != 0: + model.save_checkpoint( + save_dir=params.exp_dir, + tag=f"epoch-{params.cur_epoch}-checkpoint-{batch_idx}", + client_state={}, + exclude_frozen_parameters=True, + ) + + if rank == 0: + convert_zero_checkpoint_to_fp32_state_dict( + params.exp_dir, + f"{params.exp_dir}/epoch-{params.cur_epoch}-checkpoint-{batch_idx}.pt", + tag=f"epoch-{params.cur_epoch}-checkpoint-{batch_idx}", + exclude_frozen_parameters=True, + ) + # save sampler state dict into checkpoint + sampler_state_dict = train_dl.sampler.state_dict() + torch.save( + sampler_state_dict, + f"{params.exp_dir}/epoch-{params.cur_epoch}-checkpoint-{batch_idx}-sampler.pt", + ) + os.system( + f"rm -rf {params.exp_dir}/epoch-{params.cur_epoch}-checkpoint-{batch_idx}" + ) + try: + with torch.amp.autocast("cuda", enabled=params.use_fp16): + loss, loss_info = compute_loss( + params=params, + tokenizer=tokenizer, + model=model, + batch=batch, + is_training=True, + ) + # summary stats + tot_loss = (tot_loss * (1 - 1 / params.reset_interval)) + loss_info + + # NOTE: We use reduction==sum and loss is computed over utterances + # in the batch and there is no normalization to it so far. + + # deepspeed's backward() is different from torch's backward() + # in that it does not accept a loss tensor as input. + # It computes the loss internally. + model.backward(loss) + model.step() + + except: # noqa + display_and_save_batch(batch, params=params) + raise + + if batch_idx % params.log_interval == 0: + try: + cur_lr = scheduler.get_last_lr()[0] + except: # noqa + cur_lr = 0.0 + + logging.info( + f"Epoch {params.cur_epoch}, " + f"batch {batch_idx}, loss[{loss_info}], " + f"tot_loss[{tot_loss}], batch size: {batch_size}, " + f"lr: {cur_lr:.2e}, " + ) + + if tb_writer is not None: + tb_writer.add_scalar( + "train/learning_rate", cur_lr, params.batch_idx_train + ) + + loss_info.write_summary( + tb_writer, "train/current_", params.batch_idx_train + ) + tot_loss.write_summary(tb_writer, "train/tot_", params.batch_idx_train) + + loss_value = tot_loss["loss"] / tot_loss["frames"] + params.train_loss = loss_value + if params.train_loss < params.best_train_loss: + params.best_train_epoch = params.cur_epoch + params.best_train_loss = params.train_loss + + +def run(rank, world_size, args): + """ + Args: + rank: + It is a value between 0 and `world_size-1`, which is + passed automatically by `mp.spawn()` in :func:`main`. + The node with rank 0 is responsible for saving checkpoint. + world_size: + Number of GPUs for DDP training. 
+ args: + The return value of get_parser().parse_args() + """ + params = get_params() + params.update(vars(args)) + + fix_random_seed(params.seed) + + setup_logger(f"{params.exp_dir}/log/log-train") + logging.info(params) + + logging.info("About to create model") + + replace_whisper_encoder_forward() + whisper_model = whisper.load_model(params.speech_encoder_path_or_name, "cpu") + speech_encoder = whisper_model.encoder + speech_encoder_dim = whisper_model.dims.n_audio_state + for name, param in speech_encoder.named_parameters(): + param.requires_grad = False + speech_encoder.eval() + + tokenizer = AutoTokenizer.from_pretrained(params.llm_path_or_name) + + if params.use_flash_attn: + attn_implementation = "flash_attention_2" + torch_dtype = torch.float16 + tokenizer.padding_side = "left" + + else: + attn_implementation = "eager" + torch_dtype = torch.float16 + tokenizer.padding_side = "right" + + llm = AutoModelForCausalLM.from_pretrained( + params.llm_path_or_name, + attn_implementation=attn_implementation, + torch_dtype=torch_dtype, + ) + + if not params.unfreeze_llm: + for name, param in llm.named_parameters(): + param.requires_grad = False + llm.eval() + else: + if params.use_lora: + lora_config = LoraConfig( + r=64, + lora_alpha=16, + target_modules=[ + "q_proj", + "k_proj", + "v_proj", + "o_proj", + "up_proj", + "gate_proj", + "down_proj", + ], + lora_dropout=0.05, + task_type="CAUSAL_LM", + ) + llm = get_peft_model(llm, lora_config) + llm.print_trainable_parameters() + + special_tokens_dict = {"additional_special_tokens": [DEFAULT_SPEECH_TOKEN]} + tokenizer.add_special_tokens(special_tokens_dict) + + llm.config.pad_token_id = tokenizer.pad_token_id + llm.config.default_speech_token_id = tokenizer.convert_tokens_to_ids( + DEFAULT_SPEECH_TOKEN + ) + + encoder_projector = EncoderProjector( + speech_encoder_dim, llm.config.hidden_size, params.encoder_projector_ds_rate + ) + if not params.unfreeze_speech_projector: + for name, param in encoder_projector.named_parameters(): + param.requires_grad = False + encoder_projector.eval() + + if params.enable_speech_output: + # Determine attn_implementation and torch_dtype based on use_flash_attn + if params.use_flash_attn: + attn_implementation = "flash_attention_2" + torch_dtype = torch.float16 # Or torch.bfloat16 if needed/supported + else: + attn_implementation = "eager" + torch_dtype = torch.float16 + if params.dataset_format == "slam_omni": + codec_vocab_size = 4096 + 4 + elif params.dataset_format == "vocalnet": + codec_vocab_size = 6561 + 4 + else: + raise ValueError(f"Unknown dataset format: {params.dataset_format}") + # TODO: modify above vocab size or supress_tokens when decoding + config = Qwen2Config( + vocab_size=codec_vocab_size, + hidden_size=1024, + num_hidden_layers=12, + num_attention_heads=16, + num_key_value_heads=16, + intermediate_size=2048, + max_position_embeddings=4096, + ) + + codec_lm = AutoModelForCausalLM.from_config( + config=config, + attn_implementation=attn_implementation, + torch_dtype=torch_dtype, + ) + + codec_lm.resize_token_embeddings(codec_vocab_size) + codec_lm.vocab_size = codec_vocab_size + codec_lm.config.pad_token_id = codec_vocab_size - 1 + codec_lm.config.eos_token_id = codec_vocab_size - 2 + codec_lm.config.bos_token_id = codec_vocab_size - 3 + codec_lm.config.mask_token_id = codec_vocab_size - 4 + else: + codec_lm = None + + model = SPEECH_LLM( + speech_encoder, + llm, + encoder_projector, + codec_lm, + codec_lm_padding_side="left" if params.use_flash_attn else "right", + ) + + if 
params.pretrained_model_path: + checkpoint = torch.load(params.pretrained_model_path, map_location="cpu") + missing_keys, unexpected_keys = model.load_state_dict(checkpoint, strict=False) + + num_param = sum([p.numel() for p in model.parameters()]) + logging.info(f"Number of model parameters: {num_param}") + + logging.info("Trainable parameters (excluding model.eval modules):") + for name, param in model.named_parameters(): + if param.requires_grad: + logging.info(f"{name}: {param.shape}") + + if torch.cuda.is_available(): + device = torch.device("cuda", rank) + else: + device = torch.device("cpu") + logging.info(f"Device: {device}") + model.to(device) + + assert params.deepspeed and world_size > 1 + logging.info("Using DeepSpeed") + model, optimizer, _, scheduler = deepspeed.initialize( + args=params, model=model, model_parameters=model.parameters() + ) + + data_module = AsrDataModule(args) + + def remove_short_and_long_utt(c: Cut): + # Keep only utterances with duration between 1 second and 20 seconds + # + # Caution: There is a reason to select 20.0 here. Please see + # ../local/display_manifest_statistics.py + # + # You should use ../local/display_manifest_statistics.py to get + # an utterance duration distribution for your dataset to select + # the threshold + if c.duration < 1.0 or c.duration > 30.0: + # logging.warning( + # f"Exclude cut with ID {c.id} from training. Duration: {c.duration}" + # ) + return False + codec_len = ( + len(c.custom["answer_cosyvoice_speech_token"]) + if "answer_cosyvoice_speech_token" in c.custom + else len(c.custom["speech_token"]) + ) + if codec_len > 2200: + logging.warning( + f"Exclude cut with ID {c.id} from training. Duration: {c.duration}, lenth: {codec_len}" + ) + return False + return True + + if params.dataset_format == "slam_omni": + train_cuts = data_module.train_cuts() + valid_cuts = data_module.dev_cuts() + elif params.dataset_format == "vocalnet": + train_cuts = data_module.train_cuts_en_vocalnet() + valid_cuts = data_module.valid_cuts_en_vocalnet() + else: + raise ValueError(f"Unknown dataset format: {params.dataset_format}") + + train_cuts = train_cuts.filter(remove_short_and_long_utt) + valid_cuts = valid_cuts.filter(remove_short_and_long_utt) + + sampler_state_dict = None + if params.sampler_state_dict_path: + sampler_state_dict = torch.load(params.sampler_state_dict_path) + sampler_state_dict["max_duration"] = params.max_duration + + train_dl = data_module.train_dataloaders( + train_cuts, sampler_state_dict=sampler_state_dict + ) + + valid_dl = data_module.valid_dataloaders(valid_cuts) + + if args.tensorboard and rank == 0: + tb_writer = SummaryWriter(log_dir=f"{params.exp_dir}/tensorboard") + else: + tb_writer = None + + logging.info(f"start training from epoch {params.start_epoch}") + for epoch in range(params.start_epoch, params.num_epochs + 1): + + fix_random_seed(params.seed + epoch - 1) + train_dl.sampler.set_epoch(epoch - 1) + + if tb_writer is not None: + tb_writer.add_scalar("train/epoch", epoch, params.batch_idx_train) + + params.cur_epoch = epoch + + train_one_epoch( + params=params, + tokenizer=tokenizer, + model=model, + optimizer=optimizer, + scheduler=scheduler, + train_dl=train_dl, + valid_dl=valid_dl, + tb_writer=tb_writer, + world_size=world_size, + rank=rank, + ) + + model.save_checkpoint( + save_dir=params.exp_dir, + tag=f"epoch-{params.cur_epoch}", + client_state={}, + exclude_frozen_parameters=True, + ) + if rank == 0: + convert_zero_checkpoint_to_fp32_state_dict( + params.exp_dir, + 
f"{params.exp_dir}/epoch-{params.cur_epoch}.pt", + tag=f"epoch-{params.cur_epoch}", + exclude_frozen_parameters=True, + ) + # save sampler state dict into checkpoint + sampler_state_dict = train_dl.sampler.state_dict() + torch.save( + sampler_state_dict, + f"{params.exp_dir}/epoch-{params.cur_epoch}-sampler.pt", + ) + + os.system(f"rm -rf {params.exp_dir}/epoch-{params.cur_epoch}") + + logging.info("Done!") + + +def display_and_save_batch( + batch: dict, + params: AttributeDict, +) -> None: + """Display the batch statistics and save the batch into disk. + + Args: + batch: + A batch of data. See `lhotse.dataset.K2SpeechRecognitionDataset()` + for the content in it. + params: + Parameters for training. See :func:`get_params`. + """ + from lhotse.utils import uuid4 + + filename = f"{params.exp_dir}/batch-{uuid4()}.pt" + logging.info(f"Saving batch to {filename}") + torch.save(batch, filename) + + features = batch["inputs"] + + logging.info(f"features shape: {features.shape}") + + +def main(): + parser = get_parser() + AsrDataModule.add_arguments(parser) + args = parser.parse_args() + args.exp_dir = Path(args.exp_dir) + + world_size = get_world_size() + rank = get_rank() + + torch.set_num_threads(1) + torch.set_num_interop_threads(1) + run(rank=rank, world_size=world_size, args=args) + + +if __name__ == "__main__": + main() From b20a0d0e35f4d0c83146c29c14cdc473311a2781 Mon Sep 17 00:00:00 2001 From: root Date: Thu, 8 May 2025 19:21:41 -0700 Subject: [PATCH 33/57] add on the fly feature --- egs/speech_llm/SPEECH2SPEECH/prepare.sh | 8 +- .../SPEECH2SPEECH/qwen_omni/data_module.py | 94 +++++++++++++------ .../SPEECH2SPEECH/qwen_omni/train.py | 5 +- 3 files changed, 71 insertions(+), 36 deletions(-) diff --git a/egs/speech_llm/SPEECH2SPEECH/prepare.sh b/egs/speech_llm/SPEECH2SPEECH/prepare.sh index 58465c448..fcdfdd69f 100644 --- a/egs/speech_llm/SPEECH2SPEECH/prepare.sh +++ b/egs/speech_llm/SPEECH2SPEECH/prepare.sh @@ -174,13 +174,13 @@ if [ $stage -le 9 ] && [ $stop_stage -ge 9 ]; then --prefix gigaspeech fi - -ngpu=2 +# cd /workspace && ln -s /lustre/fsw/general_sa/yuekaiz/s2s slam && cd - +ngpu=4 exp_dir=./qwen_omni/exp_speech2speech_en if [ $stage -le 10 ] && [ $stop_stage -ge 10 ]; then log "stage 10: Training Speech2Speech Model" torchrun --nproc_per_node $ngpu ./qwen_omni/train.py \ - --max-duration 50 \ + --max-duration 150 \ --enable-musan False \ --exp-dir $exp_dir \ --speech-encoder-path-or-name models/large-v2.pt \ @@ -189,6 +189,6 @@ if [ $stage -le 10 ] && [ $stop_stage -ge 10 ]; then --manifest-dir data/fbank \ --deepspeed \ --deepspeed_config ./qwen_omni/ds_config_zero1.json \ - --use-flash-attn True \ + --use-flash-attn True --on-the-fly-feats True \ --use-lora True --unfreeze-llm True --unfreeze-speech-projector True --enable-speech-output True fi diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/data_module.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/data_module.py index 7bd0a174a..b0b039416 100644 --- a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/data_module.py +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/data_module.py @@ -50,7 +50,6 @@ from torch.utils.data import DataLoader from utils import str2bool - class _SeedWorkers: def __init__(self, seed: int): self.seed = seed @@ -149,7 +148,7 @@ class AsrDataModule: group.add_argument( "--num-workers", type=int, - default=2, + default=4, help="The number of training dataloader workers that " "collect the batches.", ) @@ -262,31 +261,35 @@ class AsrDataModule: logging.info("About to create train dataset") train = 
K2SpeechRecognitionDataset( - input_strategy=eval(self.args.input_strategy)(), + input_strategy=OnTheFlyFeatures( + WhisperFbank(WhisperFbankConfig(num_filters=80, device="cuda")) + ) + if self.args.on_the_fly_feats + else eval(self.args.input_strategy)(), cut_transforms=transforms, input_transforms=input_transforms, return_cuts=self.args.return_cuts, ) - if self.args.on_the_fly_feats: - # NOTE: the PerturbSpeed transform should be added only if we - # remove it from data prep stage. - # Add on-the-fly speed perturbation; since originally it would - # have increased epoch size by 3, we will apply prob 2/3 and use - # 3x more epochs. - # Speed perturbation probably should come first before - # concatenation, but in principle the transforms order doesn't have - # to be strict (e.g. could be randomized) - # transforms = [PerturbSpeed(factors=[0.9, 1.1], p=2/3)] + transforms # noqa - # Drop feats to be on the safe side. - train = K2SpeechRecognitionDataset( - cut_transforms=transforms, - input_strategy=OnTheFlyFeatures( - WhisperFbank(WhisperFbankConfig(num_filters=80, device="cuda")) - ), - input_transforms=input_transforms, - return_cuts=self.args.return_cuts, - ) + # if self.args.on_the_fly_feats: + # # NOTE: the PerturbSpeed transform should be added only if we + # # remove it from data prep stage. + # # Add on-the-fly speed perturbation; since originally it would + # # have increased epoch size by 3, we will apply prob 2/3 and use + # # 3x more epochs. + # # Speed perturbation probably should come first before + # # concatenation, but in principle the transforms order doesn't have + # # to be strict (e.g. could be randomized) + # # transforms = [PerturbSpeed(factors=[0.9, 1.1], p=2/3)] + transforms # noqa + # # Drop feats to be on the safe side. + # train = K2SpeechRecognitionDataset( + # cut_transforms=transforms, + # input_strategy=OnTheFlyFeatures( + # WhisperFbank(WhisperFbankConfig(num_filters=80, device="cuda")) + # ), + # input_transforms=input_transforms, + # return_cuts=self.args.return_cuts, + # ) if self.args.bucketing_sampler: logging.info("Using DynamicBucketingSampler.") @@ -322,7 +325,7 @@ class AsrDataModule: sampler=train_sampler, batch_size=None, num_workers=self.args.num_workers, - persistent_workers=True, + persistent_workers=True if self.args.num_workers > 0 else False, pin_memory=True, worker_init_fn=worker_init_fn, ) @@ -345,19 +348,26 @@ class AsrDataModule: else eval(self.args.input_strategy)(), return_cuts=self.args.return_cuts, ) - - valid_sampler = DynamicBucketingSampler( - cuts_valid, - max_duration=self.args.max_duration, - shuffle=False, - ) + if self.args.bucketing_sampler: + valid_sampler = DynamicBucketingSampler( + cuts_valid, + max_duration=self.args.max_duration, + shuffle=False, + ) + else: + valid_sampler = SimpleCutSampler( + cuts_valid, + max_duration=self.args.max_duration, + shuffle=False, + ) logging.info("About to create dev dataloader") + valid_num_workers = 1 valid_dl = DataLoader( validate, sampler=valid_sampler, batch_size=None, - num_workers=2, - persistent_workers=False, + num_workers=valid_num_workers, + persistent_workers=True if valid_num_workers > 0 else False, ) return valid_dl @@ -450,3 +460,25 @@ class AsrDataModule: self.args.manifest_dir / "cuts_voice_assistant.00000.jsonl.gz" ) return VoiceAssistant_cuts + # def train_cuts_en_vocalnet(self) -> CutSet: + # logging.info("About to get train cuts") + # VoiceAssistant_cuts = load_manifest_lazy( + # self.args.manifest_dir / "cuts_debug.jsonl.gz" + # ) + # return VoiceAssistant_cuts + 
+ # @lru_cache() + # def valid_cuts_en_vocalnet(self) -> CutSet: + # logging.info("About to get valid cuts") + # VoiceAssistant_cuts = load_manifest_lazy( + # self.args.manifest_dir / "cuts_debug.jsonl.gz" + # ) + # return VoiceAssistant_cuts + + # @lru_cache() + # def test_cuts_en_vocalnet(self) -> CutSet: + # logging.info("About to get test cuts") + # VoiceAssistant_cuts = load_manifest_lazy( + # self.args.manifest_dir / "cuts_debug.jsonl.gz" + # ) + # return VoiceAssistant_cuts \ No newline at end of file diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py index 0b2642bf0..d23d578c6 100755 --- a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py @@ -81,7 +81,10 @@ from utils import ( # filter_uneven_sized_batch, ) DEFAULT_SPEECH_TOKEN = "" - +try: + torch.multiprocessing.set_start_method('spawn') +except RuntimeError: + pass def set_batch_count(model: nn.Module, batch_count: float) -> None: for module in model.modules(): From 89781b9bb185f307bb692ed5ff7629d28a9248a3 Mon Sep 17 00:00:00 2001 From: root Date: Mon, 12 May 2025 10:06:59 +0000 Subject: [PATCH 34/57] add cosyvoice2 decode --- egs/speech_llm/SPEECH2SPEECH/prepare.sh | 19 ++ .../SPEECH2SPEECH/qwen_omni/data_module.py | 9 +- .../SPEECH2SPEECH/qwen_omni/decode.py | 177 +++++++++++++----- 3 files changed, 155 insertions(+), 50 deletions(-) diff --git a/egs/speech_llm/SPEECH2SPEECH/prepare.sh b/egs/speech_llm/SPEECH2SPEECH/prepare.sh index fcdfdd69f..6d8f54135 100644 --- a/egs/speech_llm/SPEECH2SPEECH/prepare.sh +++ b/egs/speech_llm/SPEECH2SPEECH/prepare.sh @@ -192,3 +192,22 @@ if [ $stage -le 10 ] && [ $stop_stage -ge 10 ]; then --use-flash-attn True --on-the-fly-feats True \ --use-lora True --unfreeze-llm True --unfreeze-speech-projector True --enable-speech-output True fi + + +if [ $stage -le 11 ] && [ $stop_stage -ge 11 ]; then + log "stage 11: Decoding EN, only support batch_size=1 for now." 
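+  # Note: decode.py generates speech for one utterance at a time
+  # ("WAR: only support batch = 1 for now"), so keep --max-duration 1 here.
+  # --token2wav-path must point to a local CosyVoice checkpoint; a path that
+  # contains "CosyVoice2" selects the CosyVoice2 decoder (24 kHz output),
+  # otherwise the original CosyVoice decoder (22.05 kHz) is used.
+  # decode.py writes one wav per cut id into its log directory.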
+ exp_dir=./qwen_omni/exp_speech2speech_en_continue + # cd $exp_dir && ln -s ../../models/qwen-omni-like-speech2speech-belle-1.4M/pytorch_model.bin epoch-999.pt && cd - + python3 ./qwen_omni/decode.py \ + --max-duration 1 \ + --exp-dir $exp_dir \ + --speech-encoder-path-or-name models/large-v2.pt \ + --llm-path-or-name models/Qwen2.5-0.5B-Instruct \ + --epoch 997 --avg 1 \ + --manifest-dir data/fbank \ + --use-flash-attn True \ + --method e2e-epoch4_speech2speech \ + --enable-speech-output True \ + --token2wav-path /workspace/CosyVoice2-0.5B \ + --use-lora True +fi diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/data_module.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/data_module.py index b0b039416..b02c9f4bf 100644 --- a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/data_module.py +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/data_module.py @@ -47,9 +47,9 @@ from lhotse.dataset.input_strategies import ( # noqa F401 For AudioSamples from lhotse.utils import fix_random_seed from speech_dataset import K2SpeechRecognitionDataset from torch.utils.data import DataLoader - from utils import str2bool + class _SeedWorkers: def __init__(self, seed: int): self.seed = seed @@ -457,9 +457,10 @@ class AsrDataModule: def test_cuts_en_vocalnet(self) -> CutSet: logging.info("About to get test cuts") VoiceAssistant_cuts = load_manifest_lazy( - self.args.manifest_dir / "cuts_voice_assistant.00000.jsonl.gz" + self.args.manifest_dir / "cuts_voice_assistant_small.00000.jsonl.gz" ) - return VoiceAssistant_cuts + return {"test": VoiceAssistant_cuts} + # def train_cuts_en_vocalnet(self) -> CutSet: # logging.info("About to get train cuts") # VoiceAssistant_cuts = load_manifest_lazy( @@ -481,4 +482,4 @@ class AsrDataModule: # VoiceAssistant_cuts = load_manifest_lazy( # self.args.manifest_dir / "cuts_debug.jsonl.gz" # ) - # return VoiceAssistant_cuts \ No newline at end of file + # return VoiceAssistant_cuts diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/decode.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/decode.py index e4dccf081..793b32112 100755 --- a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/decode.py +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/decode.py @@ -55,7 +55,8 @@ import torch import torch.nn as nn import transformers import whisper -from cosyvoice.cli.cosyvoice import CosyVoice +from cosyvoice.cli.cosyvoice import CosyVoice, CosyVoice2 +from cosyvoice.utils.file_utils import load_wav from data_module import AsrDataModule from lhotse.cut import Cut from model import SPEECH_LLM, EncoderProjector @@ -75,6 +76,57 @@ from icefall.utils import ( sys.path.append("/workspace/CosyVoice/third_party/Matcha-TTS") +def audio_decode_cosyvoice2( + audio_tokens, prompt_text, prompt_speech_path, codec_decoder +): + """ + Generate audio from tokens with optional tone and prompt embedding. + + Args: + audio_tokens (list): List of audio tokens to be processed. + model_config: Configuration object containing vocab settings. + codec_decoder: Codec decoder for generating audio. + tone_dir (str): The tone directory or setting. + audio_prompt_path (str, optional): Path to the audio prompt file. Required when tone_dir is not "default_tone". + code_layer (int, optional): Number of code layers. Defaults to 1. + num_latency_tokens (int, optional): Number of latency tokens to ignore. Defaults to 0. + speed (float, optional): Speed factor for audio generation. Defaults to 1.0. + + Returns: + torch.Tensor: Generated audio waveform. 
+ """ + prompt_speech_16k = load_wav(prompt_speech_path, 16000) + model_inputs_dict = codec_decoder.frontend.frontend_zero_shot( + "empty", prompt_text, prompt_speech_16k, 24000 + ) + tts_mel, _ = codec_decoder.model.flow.inference( + token=audio_tokens.to(codec_decoder.model.device), + token_len=torch.tensor([audio_tokens.shape[1]], dtype=torch.int32).to( + codec_decoder.model.device + ), + prompt_token=model_inputs_dict["flow_prompt_speech_token"].to( + codec_decoder.model.device + ), + prompt_token_len=torch.tensor( + [model_inputs_dict["flow_prompt_speech_token_len"]], dtype=torch.int32 + ).to(codec_decoder.model.device), + prompt_feat=model_inputs_dict["prompt_speech_feat"].to( + codec_decoder.model.device + ), + prompt_feat_len=model_inputs_dict["prompt_speech_feat_len"].to( + codec_decoder.model.device + ), + embedding=model_inputs_dict["flow_embedding"].to(codec_decoder.model.device), + finalize=True, + ) + + audio_hat, _ = codec_decoder.model.hift.inference( + speech_feat=tts_mel, cache_source=torch.zeros(1, 1, 0) + ) + + return audio_hat + + def audio_decode_cosyvoice(audio_tokens, codec_decoder): """ Generate audio from tokens with optional tone and prompt embedding. @@ -180,7 +232,9 @@ def get_model(params, device): attn_implementation = "eager" torch_dtype = torch.float16 - codec_vocab_size = 4096 + 4 + # TODO: FIX ME + # codec_vocab_size = 4096 + 4 + codec_vocab_size = 6561 + 4 config = Qwen2Config( vocab_size=codec_vocab_size, hidden_size=1024, @@ -346,6 +400,20 @@ def get_parser(): help="The path to the token2wav model", ) + parser.add_argument( + "--prompt_text", + type=str, + default="Romeo and Juliet might be the most famous act of William Shakespeare.", + help="The prompt text", + ) + + parser.add_argument( + "--prompt_speech_path", + type=str, + default="./assets/common_voice_en_2586258.wav", + help="The path to the prompt speech", + ) + add_model_arguments(parser) return parser @@ -437,36 +505,42 @@ def decode_one_batch( 2, ) - chat_rounds = [cut.custom["round"] for cut in batch["supervisions"]["cut"]] + # chat_rounds = [cut.custom["round"] for cut in batch["supervisions"]["cut"]] - questions_with_history = [ - cut.custom["question"] for cut in batch["supervisions"]["cut"] - ] - history_contexts = [ - question.rsplit(":", 1)[0].strip() for question in questions_with_history - ] - last_questions = [ - question.split(": ")[-1].strip() for question in questions_with_history - ] + # questions_with_history = [ + # cut.custom["question"] for cut in batch["supervisions"]["cut"] + # ] + # history_contexts = [ + # question.rsplit(":", 1)[0].strip() for question in questions_with_history + # ] + # last_questions = [ + # question.split(": ")[-1].strip() for question in questions_with_history + # ] + # messages = [] + # for i, total_round in enumerate(chat_rounds): + # message = [] + # if total_round > 1: + # history_question_answer = history_contexts[i].split("USER:") + # history_question_answer = [item for item in history_question_answer if item] + # for j in range(total_round - 1): + # question_answer = history_question_answer[j].split("ASSISTANT:") + # message += [ + # {"role": "user", "content": question_answer[0].strip()}, + # {"role": "assistant", "content": question_answer[1].strip()}, + # ] + # message += [ + # {"role": "user", "content": f"{DEFAULT_SPEECH_TOKEN}"}, + # {"role": "assistant", "content": ""}, + # ] + # print(f"message: {message}, batch_size {len(chat_rounds)}") + # messages.append(message) messages = [] - for i, total_round in enumerate(chat_rounds): - 
message = [] - if total_round > 1: - history_question_answer = history_contexts[i].split("USER:") - history_question_answer = [item for item in history_question_answer if item] - for j in range(total_round - 1): - question_answer = history_question_answer[j].split("ASSISTANT:") - message += [ - {"role": "user", "content": question_answer[0].strip()}, - {"role": "assistant", "content": question_answer[1].strip()}, - ] - message += [ + for i in range(len(batch["supervisions"]["cut"])): + message = [ {"role": "user", "content": f"{DEFAULT_SPEECH_TOKEN}"}, {"role": "assistant", "content": ""}, ] - print(f"message: {message}, batch_size {len(chat_rounds)}") messages.append(message) - input_ids, attention_mask = preprocess(messages, tokenizer) if params.enable_speech_output: generated_ids, generated_speech_output = model.decode_with_speech_output( @@ -478,10 +552,19 @@ def decode_one_batch( ] # WAR: only support batch = 1 for now for cut_id, audio_tokens in zip(cut_ids, generated_speech_output): speech_file_name = params.log_dir / f"{cut_id}.wav" - audio_tokens = [token for token in audio_tokens if token < 4096] + # audio_tokens = [token for token in audio_tokens if token < 4096] audio_tokens = torch.tensor(audio_tokens, dtype=torch.int32).unsqueeze(0) - audio_hat = audio_decode_cosyvoice(audio_tokens, token2wav_model) - sf.write(speech_file_name, audio_hat.squeeze(0).cpu().numpy(), 22050) + if "CosyVoice2" in params.token2wav_path: + audio_hat = audio_decode_cosyvoice2( + audio_tokens, + params.prompt_text, + params.prompt_speech_path, + token2wav_model, + ) + sf.write(speech_file_name, audio_hat.squeeze(0).cpu().numpy(), 24000) + else: + audio_hat = audio_decode_cosyvoice(audio_tokens, token2wav_model) + sf.write(speech_file_name, audio_hat.squeeze(0).cpu().numpy(), 22050) else: generated_ids = model.decode( feature, input_ids.to(device, dtype=torch.long), attention_mask.to(device) @@ -521,18 +604,14 @@ def decode_dataset( results = defaultdict(list) for batch_idx, batch in enumerate(dl): - answers = batch["supervisions"]["text"] - questions_with_history = [ - cut.custom["question"] for cut in batch["supervisions"]["cut"] - ] - answer_cosyvoice_speech_token = [ - cut.custom["answer_cosyvoice_speech_token"] - for cut in batch["supervisions"]["cut"] - ] - texts = [ - question.split(": ")[-1].strip() - for question in questions_with_history - ] + texts = batch["supervisions"]["text"] + # questions_with_history = [ + # cut.custom["question"] for cut in batch["supervisions"]["cut"] + # ] + # texts = [ + # question.split(": ")[-1].strip() + # for question in questions_with_history + # ] cut_ids = [cut.id for cut in batch["supervisions"]["cut"]] hyps_dict = decode_one_batch( @@ -636,9 +715,14 @@ def main(): logging.info(f"device: {device}") model, tokenizer = get_model(params, device) - token2wav_model = CosyVoice( - params.token2wav_path, load_jit=False, load_trt=False, fp16=False - ) + if "CosyVoice2" in params.token2wav_path: + token2wav_model = CosyVoice2( + params.token2wav_path, load_jit=False, load_trt=False, fp16=False + ) + else: + token2wav_model = CosyVoice( + params.token2wav_path, load_jit=False, load_trt=False, fp16=False + ) num_param = sum([p.numel() for p in model.parameters()]) logging.info(f"Number of model parameters: {num_param}") @@ -656,8 +740,9 @@ def main(): return False return True - test_sets_cuts = data_module.test_cuts() - + # TODO: FIX ME + # test_sets_cuts = data_module.test_cuts() + test_sets_cuts = data_module.test_cuts_en_vocalnet() test_sets = test_sets_cuts.keys() 
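+    # test_cuts_en_vocalnet() returns a dict {test_set_name: CutSet}
+    # (currently only {"test": ...}), so one dataloader is built per test set below.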
test_dls = [ data_module.test_dataloaders(test_sets_cuts[cuts_name].filter(remove_long_utt)) From cbf3af31fd2144bea91c66fd303f6f213a6740e7 Mon Sep 17 00:00:00 2001 From: root Date: Tue, 13 May 2025 05:37:11 +0000 Subject: [PATCH 35/57] add voicebench eval --- egs/speech_llm/SPEECH2SPEECH/prepare.sh | 46 ++++ .../SPEECH2SPEECH/qwen_omni/client.py | 142 ++++++++++ .../SPEECH2SPEECH/qwen_omni/data_module.py | 9 + .../SPEECH2SPEECH/qwen_omni/decode_dist.py | 256 ++++++++++++++++++ .../SPEECH2SPEECH/qwen_omni/server.py | 103 +++++++ .../SPEECH2SPEECH/qwen_omni/web_demo.py | 3 +- 6 files changed, 558 insertions(+), 1 deletion(-) create mode 100644 egs/speech_llm/SPEECH2SPEECH/qwen_omni/client.py create mode 100644 egs/speech_llm/SPEECH2SPEECH/qwen_omni/decode_dist.py create mode 100644 egs/speech_llm/SPEECH2SPEECH/qwen_omni/server.py diff --git a/egs/speech_llm/SPEECH2SPEECH/prepare.sh b/egs/speech_llm/SPEECH2SPEECH/prepare.sh index 6d8f54135..25cd79810 100644 --- a/egs/speech_llm/SPEECH2SPEECH/prepare.sh +++ b/egs/speech_llm/SPEECH2SPEECH/prepare.sh @@ -211,3 +211,49 @@ if [ $stage -le 11 ] && [ $stop_stage -ge 11 ]; then --token2wav-path /workspace/CosyVoice2-0.5B \ --use-lora True fi + + +if [ $stage -le 12 ] && [ $stop_stage -ge 12 ]; then + log "stage 12: Decoding EN voicebench" + exp_dir=./qwen_omni/exp_speech2speech_en_continue + torchrun --nproc_per_node=2 \ + ./qwen_omni/decode_dist.py \ + --output-dir $exp_dir/log_voicebench \ + --speech-encoder-path-or-name models/large-v2.pt \ + --llm-path-or-name models/Qwen2.5-0.5B-Instruct \ + --use-flash-attn True \ + --enable-speech-output True \ + --checkpoint-path $exp_dir/epoch-10-checkpoint-40000.pt/pytorch_model.bin \ + --use-lora True --subset-name openbookqa --split-name test +fi + + +if [ $stage -le 13 ] && [ $stop_stage -ge 13 ]; then + log "stage 13: Server" + exp_dir=./qwen_omni/exp_speech2speech_en_continue + python3 ./qwen_omni/server.py \ + --speech-encoder-path-or-name models/large-v2.pt \ + --llm-path-or-name models/Qwen2.5-0.5B-Instruct \ + --checkpoint-path $exp_dir/epoch-10-checkpoint-40000.pt/pytorch_model.bin \ + --use-flash-attn True \ + --enable-speech-output True \ + --use-lora True +fi + +if [ $stage -le 14 ] && [ $stop_stage -ge 14 ]; then + log "stage 14: Client" + # datasets=(alpacaeval wildvoice mmsu advbench bbh ifeval commoneval obqa sd-qa) + datasets=(alpacaeval_full wildvoice mmsu advbench bbh ifeval sd-qa) + for dataset in ${datasets[@]}; do + # sd-qa should use usa split + if [ $dataset == "sd-qa" ]; then + split_name="usa" + else + split_name="test" + fi + echo $dataset $split_name + python3 ./qwen_omni/client.py \ + --subset-name $dataset --split-name $split_name \ + --output-dir test_result + done +fi diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/client.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/client.py new file mode 100644 index 000000000..822d7d709 --- /dev/null +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/client.py @@ -0,0 +1,142 @@ +# client.py +import argparse +import json +import os + +import requests +from datasets import load_dataset +from tqdm import tqdm + + +def get_args(): + parser = argparse.ArgumentParser(description="Speech-to-Text Client") + parser.add_argument( + "--server-url", + type=str, + default="http://localhost:8000", + help="URL of the FastAPI server", + ) + parser.add_argument( + "--dataset-name", + type=str, + default="hlt-lab/voicebench", + help="Hugging Face dataset name", + ) + parser.add_argument( + "--subset-name", + type=str, + default="commoneval", # Adjust as 
needed + help="Dataset subset name", + ) + parser.add_argument( + "--split-name", + type=str, + default="test", # Adjust as needed + help="Dataset split name", + ) + parser.add_argument( + "--output-dir", required=True, type=str, help="Directory to save results" + ) + args = parser.parse_args() + return args + + +def main(): + args = get_args() + os.makedirs(args.output_dir, exist_ok=True) + output_filename = os.path.join( + args.output_dir, + f"{args.subset_name}-{args.split_name}.jsonl", + ) + server_decode_url = f"{args.server_url}/decode" + + print("Loading dataset...") + + dataset = load_dataset( + args.dataset_name, + args.subset_name, + split=args.split_name, + trust_remote_code=True, + ) + + print(f"Dataset loaded with {len(dataset)} samples.") + print(f"Sending requests to {server_decode_url}...") + print(f"Saving results to {output_filename}") + + with open(output_filename, "w", encoding="utf-8") as outfile: + # Iterate directly over the dataset + progress_bar = tqdm(dataset, desc="Processing", unit="samples") + for item in progress_bar: + + audio_info = item.get("audio") + assert ( + audio_info["sampling_rate"] == 16000 + ), f"Sampling rate is {audio_info['sampling_rate']}, not 16khz" + + # Prepare data for JSON serialization and server request + audio_array = audio_info["array"].tolist() # Convert numpy array to list + result_dict = {} + for key in item.keys(): + if key != "audio": + # Ensure other fields are JSON serializable + try: + # Attempt to serialize to catch issues early (optional) + json.dumps(item[key]) + result_dict[key] = item[key] + except (TypeError, OverflowError): + print( + f"Warning: Converting non-serializable key '{key}' to string." + ) + result_dict[key] = str( + item[key] + ) # Convert problematic types to string + + payload = { + "audio": audio_array, + "sampling_rate": 16000, + } + + try: + response = requests.post(server_decode_url, json=payload, timeout=60) + response.raise_for_status() + server_response = response.json() + decoded_text = server_response.get("text", "") + + # Add the response to the result dictionary + result_dict["response"] = decoded_text + print(result_dict) + # Write result to JSONL file + json.dump(result_dict, outfile, ensure_ascii=False) + outfile.write("\n") + + except requests.exceptions.RequestException as e: + print(f"\nError sending request for an item: {e}") + error_entry = result_dict # Use the data prepared so far + error_entry["error"] = str(e) + error_entry["response"] = "" + json.dump(error_entry, outfile, ensure_ascii=False) + outfile.write("\n") + except json.JSONDecodeError: + print("\nError decoding server response for an item.") + error_entry = result_dict + error_entry["error"] = "Invalid JSON response from server" + error_entry["response"] = "" + json.dump(error_entry, outfile, ensure_ascii=False) + outfile.write("\n") + except Exception as e: + print(f"\nUnexpected error processing an item: {e}") + error_entry = result_dict + error_entry["error"] = f"Unexpected error: {str(e)}" + error_entry["response"] = "" + json.dump(error_entry, outfile, ensure_ascii=False) + outfile.write("\n") + + # Progress bar updates automatically by iterating over tqdm(dataset) + + # No need to close progress_bar explicitly when iterating directly + + print("Processing finished.") + + +if __name__ == "__main__": + main() diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/data_module.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/data_module.py index b02c9f4bf..bc75bccd6 100644 --- 
a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/data_module.py +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/data_module.py @@ -461,6 +461,15 @@ class AsrDataModule: ) return {"test": VoiceAssistant_cuts} + def test_cuts_voicebench( + self, + ) -> CutSet: + logging.info("About to get test cuts") + VoiceAssistant_cuts = load_manifest_lazy( + self.args.manifest_dir / "cuts_voice_assistant_small.00000.jsonl.gz" + ) + return {"test": VoiceAssistant_cuts} + # def train_cuts_en_vocalnet(self) -> CutSet: # logging.info("About to get train cuts") # VoiceAssistant_cuts = load_manifest_lazy( diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/decode_dist.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/decode_dist.py new file mode 100644 index 000000000..dd69fce10 --- /dev/null +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/decode_dist.py @@ -0,0 +1,256 @@ +# Copyright (c) 2024 Tsinghua Univ. (authors: Xingchen Song) +# 2025 (authors: Yuekai Zhang) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# Modified from https://github.com/xingchensong/S3Tokenizer/blob/main/s3tokenizer/cli.py +""" Example Usage +split=test_zh +llm_path=f5-tts/exp_zh/checkpoint-805000 +huggingface-cli download --local-dir f5-tts-small-wenetspeech4tts-basic yuekai/f5-tts-semantic-token-small-wenetspeech4tts-basic +model_path=f5-tts-small-wenetspeech4tts-basic/epoch-10-avg-5.pt +huggingface-cli download nvidia/bigvgan_v2_24khz_100band_256x --local-dir ./bigvgan_v2_24khz_100band_256x +vocoder=./bigvgan_v2_24khz_100band_256x +torchrun --nproc_per_node=2 \ + f5-tts/infer_dist.py \ + --output_dir $output_dir \ + --batch_size 1 \ + --num_workers 2 \ + --llm-model-name-or-path $llm_path \ + --flow-matching-model-path $model_path \ + --decoder-dim 768 --nhead 12 --num-decoder-layers 18 \ + --use-cosyvoice-semantic-token True \ + --vocoder-dir $vocoder \ + --split-name $split -top-k 50 -top-p 0.95 -temperature 0.8 \ + --tokenizer-dir Qwen/Qwen2.5-0.5B-Instruct +""" + +import argparse +import json +import os +from pathlib import Path + +import torch +import torch.distributed as dist +import torch.nn.functional as F +import whisper +from datasets import load_dataset +from torch.utils.data import DataLoader, Dataset, DistributedSampler +from tqdm import tqdm +from train import DEFAULT_SPEECH_TOKEN, add_model_arguments +from transformers import AutoTokenizer +from web_demo import get_model +from whisper_encoder_forward_monkey_patch import replace_whisper_encoder_forward + +# https://github.com/FunAudioLLM/CosyVoice/tree/main/third_party +# sys.path.append("/workspace/CosyVoice/third_party/Matcha-TTS") +try: + torch.multiprocessing.set_start_method("spawn") +except RuntimeError: + pass + + +def get_args(): + parser = argparse.ArgumentParser(description="extract speech code") + parser.add_argument( + "--split-name", + type=str, + default="test", + help="huggingface dataset split name", + ) + parser.add_argument( + "--subset-name", + type=str, + default="commoneval", + help="subset name", + ) + parser.add_argument( + "--output-dir", required=True, type=str, 
help="dir to save result" + ) + parser.add_argument( + "--batch-size", + type=int, + default=1, + help="batch size (per-device) for inference", + ) + parser.add_argument( + "--num-workers", type=int, default=2, help="workers for dataloader" + ) + parser.add_argument( + "--prefetch", type=int, default=2, help="prefetch for dataloader" + ) + parser.add_argument( + "--checkpoint-path", + type=str, + default=None, + help="Checkpoint name or path, default to %(default)r", + ) + # parser.add_argument( + # "--top-k", + # type=int, + # default=50, + # help="top k for sampling", + # ) + # parser.add_argument( + # "--top-p", + # type=float, + # default=0.95, + # help="top p for sampling", + # ) + # parser.add_argument( + # "--temperature", + # type=float, + # default=0.8, + # help="temperature for sampling", + # ) + add_model_arguments(parser) + args = parser.parse_args() + return args + + +def init_distributed(): + world_size = int(os.environ.get("WORLD_SIZE", 1)) + local_rank = int(os.environ.get("LOCAL_RANK", 0)) + rank = int(os.environ.get("RANK", 0)) + print( + "Inference on multiple gpus, this gpu {}".format(local_rank) + + ", rank {}, world_size {}".format(rank, world_size) + ) + torch.cuda.set_device(local_rank) + dist.init_process_group("nccl") + return world_size, local_rank, rank + + +def preprocess( + messages, + tokenizer, +): + """Preprocesses the data for supervised fine-tuning.""" + texts = [] + TEMPLATE = "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content']}}{% if loop.last %}{{''}}{% else %}{{ '<|im_end|>\n' }}{% endif %}{% endfor %}" + for i, msg in enumerate(messages): + texts.append( + tokenizer.apply_chat_template( + msg, + tokenize=True, + add_generation_prompt=False, + chat_template=TEMPLATE, + padding="longest", + truncation=False, + ) + ) + max_len_texts = max([len(text) for text in texts]) + if tokenizer.padding_side == "right": + texts = [ + text + [tokenizer.pad_token_id] * (max_len_texts - len(text)) + for text in texts + ] + else: + texts = [ + [tokenizer.pad_token_id] * (max_len_texts - len(text)) + text + for text in texts + ] + + input_ids = torch.tensor(texts, dtype=torch.int) + + attention_mask = input_ids.ne(tokenizer.pad_token_id) + + return input_ids, attention_mask + + +def custom_collate(batch): + assert len(batch) == 1 + audio = batch[0]["audio"] + assert audio["sampling_rate"] == 16000 + result = {"audio": audio["array"]} + for keys in batch[0].keys(): + if keys != "audio": + result[keys] = batch[0][keys] + return result + + +def main(): + args = get_args() + os.makedirs(args.output_dir, exist_ok=True) + + assert torch.cuda.is_available() + world_size, local_rank, rank = init_distributed() + device = torch.device(f"cuda:{local_rank}") + + dataset = load_dataset( + "hlt-lab/voicebench", + args.subset_name, + split=args.split_name, + trust_remote_code=True, + ) + + model, tokenizer = get_model(args) + # tokenizer = AutoTokenizer.from_pretrained(args.llm_path_or_name) + sampler = DistributedSampler(dataset, num_replicas=world_size, rank=rank) + + dataloader = DataLoader( + dataset, + batch_size=args.batch_size, + sampler=sampler, + shuffle=False, + num_workers=args.num_workers, + prefetch_factor=args.prefetch, + collate_fn=custom_collate, + ) + + total_steps = len(dataset) + + if rank == 0: + progress_bar = tqdm(total=total_steps, desc="Processing", unit="wavs") + + message = [ + {"role": "user", "content": f"{DEFAULT_SPEECH_TOKEN}"}, + {"role": "assistant", "content": ""}, + ] + input_ids, attention_mask = 
preprocess([message], tokenizer) + results_jsonl_file = open( + os.path.join( + args.output_dir, + f"results-{args.subset_name}-{args.split_name}-{rank}-audio.jsonl", + ), + "w", + ) + for batch in dataloader: + audio = batch["audio"] + audio = torch.from_numpy(audio).to(device).to(torch.float32) + fbank = whisper.log_mel_spectrogram(audio, device=device) + fbank = fbank.unsqueeze(0) + generated_ids = model.decode( + fbank, input_ids.to(device, dtype=torch.long), attention_mask.to(device) + ) + hyps = tokenizer.batch_decode(generated_ids, skip_special_tokens=True) + + result_dict = {} + for key in batch.keys(): + if key != "audio": + result_dict[key] = batch[key] + result_dict["response"] = hyps[0] + json.dump(result_dict, results_jsonl_file) + results_jsonl_file.write("\n") + + if rank == 0: + progress_bar.update(world_size * args.batch_size) + + if rank == 0: + progress_bar.close() + + dist.barrier() + dist.destroy_process_group() + + +if __name__ == "__main__": + main() diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/server.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/server.py new file mode 100644 index 000000000..2f06b923a --- /dev/null +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/server.py @@ -0,0 +1,103 @@ +# server.py +import argparse +import os +from typing import List + +import torch +import uvicorn +import whisper +from fastapi import FastAPI, HTTPException +from pydantic import BaseModel +from train import DEFAULT_SPEECH_TOKEN, add_model_arguments +from transformers import AutoTokenizer +from web_demo import get_model + + +def get_args(): + parser = argparse.ArgumentParser(description="extract speech code") + parser.add_argument( + "--checkpoint-path", + type=str, + default=None, + help="Checkpoint name or path, default to %(default)r", + ) + add_model_arguments(parser) + args = parser.parse_args() + return args + + +class SpeechRequest(BaseModel): + audio: List[float] # Expecting audio as a list of floats (raw waveform) + sampling_rate: int = 16000 + + +class TextResponse(BaseModel): + text: str + + +def preprocess_prompt(tokenizer): + """Preprocesses the prompt template.""" + texts = [ + tokenizer.apply_chat_template( + message, # Using the hardcoded message + tokenize=True, + add_generation_prompt=False, # Important for generation + chat_template=TEMPLATE, + padding=False, # No padding needed for single prompt + truncation=False, + ) + ] + input_ids = torch.tensor(texts, dtype=torch.long) + attention_mask = torch.ones_like( + input_ids, dtype=torch.bool + ) # Mask is all True for the prompt + return input_ids, attention_mask + + +args = get_args() +model, tokenizer = get_model(args) +app = FastAPI() + +device = torch.device("cuda") +message = [ + {"role": "user", "content": f"{DEFAULT_SPEECH_TOKEN}"}, + {"role": "assistant", "content": ""}, +] +TEMPLATE = "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content']}}{% if loop.last %}{{''}}{% else %}{{ '<|im_end|>\n' }}{% endif %}{% endfor %}" +prompt_input_ids, prompt_attention_mask = preprocess_prompt(tokenizer) +prompt_input_ids = prompt_input_ids.to(device) +prompt_attention_mask = prompt_attention_mask.to(device) + + +@app.post("/decode", response_model=TextResponse) +async def decode_speech(request: SpeechRequest): + """ + Receives audio waveform, processes it, and returns the decoded text. + """ + if request.sampling_rate != 16000: + raise HTTPException( + status_code=400, detail="Only 16kHz sampling rate is supported." 
+ ) + + try: + audio_tensor = torch.tensor(request.audio, dtype=torch.float32).to(device) + fbank = whisper.log_mel_spectrogram(audio_tensor, device=device, n_mels=80) + fbank = fbank.unsqueeze(0) + + with torch.no_grad(): + generated_ids = model.decode(fbank, prompt_input_ids, prompt_attention_mask) + + hyps = tokenizer.batch_decode(generated_ids, skip_special_tokens=True) + + response_text = hyps[0] if hyps else "" + + return TextResponse(text=response_text) + + except Exception as e: + print(f"Error during processing: {e}") + raise HTTPException(status_code=500, detail=f"Internal server error: {e}") + + +if __name__ == "__main__": + print("Starting server...") + uvicorn.run(app, host="0.0.0.0", port=8000) diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/web_demo.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/web_demo.py index e33d2437d..1ad05b0a6 100644 --- a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/web_demo.py +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/web_demo.py @@ -74,7 +74,8 @@ def get_model(params, device="cuda"): speech_encoder_dim, llm.config.hidden_size, params.encoder_projector_ds_rate ) - codec_vocab_size = 4096 + 4 + # codec_vocab_size = 4096 + 4 + codec_vocab_size = 6561 + 4 config = Qwen2Config( vocab_size=codec_vocab_size, hidden_size=1024, From e65725810ce47a2c0b3933235d337ffb0f9f5b67 Mon Sep 17 00:00:00 2001 From: root Date: Tue, 13 May 2025 09:13:12 +0000 Subject: [PATCH 36/57] fix mmsu --- egs/speech_llm/SPEECH2SPEECH/prepare.sh | 3 +-- .../SPEECH2SPEECH/qwen_omni/client.py | 26 ++++++++++++------- 2 files changed, 18 insertions(+), 11 deletions(-) diff --git a/egs/speech_llm/SPEECH2SPEECH/prepare.sh b/egs/speech_llm/SPEECH2SPEECH/prepare.sh index 25cd79810..c974ee88f 100644 --- a/egs/speech_llm/SPEECH2SPEECH/prepare.sh +++ b/egs/speech_llm/SPEECH2SPEECH/prepare.sh @@ -242,8 +242,7 @@ fi if [ $stage -le 14 ] && [ $stop_stage -ge 14 ]; then log "stage 14: Client" - # datasets=(alpacaeval wildvoice mmsu advbench bbh ifeval commoneval obqa sd-qa) - datasets=(alpacaeval_full wildvoice mmsu advbench bbh ifeval sd-qa) + datasets=(alpacaeval wildvoice mmsu advbench bbh ifeval commoneval obqa sd-qa) for dataset in ${datasets[@]}; do # sd-qa should use usa split if [ $dataset == "sd-qa" ]; then diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/client.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/client.py index 822d7d709..05c363979 100644 --- a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/client.py +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/client.py @@ -4,7 +4,7 @@ import json import os import requests -from datasets import load_dataset +from datasets import concatenate_datasets, load_dataset from tqdm import tqdm @@ -31,7 +31,7 @@ def get_args(): parser.add_argument( "--split-name", type=str, - default="test", # Adjust as needed + default=None, # Adjust as needed help="Dataset split name", ) parser.add_argument( @@ -51,13 +51,21 @@ def main(): server_decode_url = f"{args.server_url}/decode" print("Loading dataset...") - - dataset = load_dataset( - args.dataset_name, - args.subset_name, - split=args.split_name, - trust_remote_code=True, - ) + if args.subset_name != "mmsu": + dataset = load_dataset( + args.dataset_name, + args.subset_name, + split=args.split_name, + trust_remote_code=True, + ) + else: + # load all splits and concatenate them + dataset = load_dataset( + args.dataset_name, + args.subset_name, + trust_remote_code=True, + ) + dataset = concatenate_datasets([dataset[subset] for subset in dataset]) print(f"Dataset loaded with {len(dataset)} samples.") print(f"Sending 
requests to {server_decode_url}...") From f81363d3243731653cc94dfc303f04d1b09e31ac Mon Sep 17 00:00:00 2001 From: root Date: Thu, 15 May 2025 14:16:51 +0000 Subject: [PATCH 37/57] add speech continuation pretraining --- egs/speech_llm/SPEECH2SPEECH/prepare.sh | 75 ++++- .../SPEECH2SPEECH/qwen_omni/data_module.py | 306 ++++++++++++++---- .../SPEECH2SPEECH/qwen_omni/requirements.txt | 1 + .../SPEECH2SPEECH/qwen_omni/train.py | 104 +++--- 4 files changed, 391 insertions(+), 95 deletions(-) diff --git a/egs/speech_llm/SPEECH2SPEECH/prepare.sh b/egs/speech_llm/SPEECH2SPEECH/prepare.sh index c974ee88f..fd8070691 100644 --- a/egs/speech_llm/SPEECH2SPEECH/prepare.sh +++ b/egs/speech_llm/SPEECH2SPEECH/prepare.sh @@ -122,7 +122,7 @@ if [ $stage -le 5 ] && [ $stop_stage -ge 5 ]; then fi if [ $stage -le 6 ] && [ $stop_stage -ge 6 ]; then - log "stage 1: Compute fbank feature from huggingface" + log "stage 6: Compute fbank feature from huggingface" # CUDA_VISIBLE_DEVICES=0 python3 local/compute_whisper_fbank.py \ # --num-mel-bins 80 --whisper-fbank True --resample-to-16kHz True --speed-perturb False \ # --out-dir data/fbank_voice_assistant \ @@ -161,10 +161,7 @@ if [ $stage -le 8 ] && [ $stop_stage -ge 8 ]; then --subset test --split test \ --audio-key audio --text-key text \ --prefix gigaspeech -fi -if [ $stage -le 9 ] && [ $stop_stage -ge 9 ]; then - log "stage 9: Compute fbank feature from huggingface" CUDA_VISIBLE_DEVICES=0 python3 local/compute_whisper_fbank.py \ --num-mel-bins 80 --whisper-fbank True --resample-to-16kHz True --speed-perturb True \ --out-dir data/fbank_gigaspeech \ @@ -195,7 +192,7 @@ fi if [ $stage -le 11 ] && [ $stop_stage -ge 11 ]; then - log "stage 11: Decoding EN, only support batch_size=1 for now." + log "stage 11: Decoding EN, val set only support batch_size=1 for now." 
exp_dir=./qwen_omni/exp_speech2speech_en_continue # cd $exp_dir && ln -s ../../models/qwen-omni-like-speech2speech-belle-1.4M/pytorch_model.bin epoch-999.pt && cd - python3 ./qwen_omni/decode.py \ @@ -256,3 +253,71 @@ if [ $stage -le 14 ] && [ $stop_stage -ge 14 ]; then --output-dir test_result done fi + + +if [ $stage -le 15 ] && [ $stop_stage -ge 15 ]; then + log "stage 15: Training Speech2Speech Model, adaptor only" + exp_dir=./qwen_omni/exp_speech2text + ngpu=2 + torchrun --nproc_per_node $ngpu ./qwen_omni/train.py \ + --max-duration 600 \ + --enable-musan False \ + --audio-key audio --text-key continuation \ + --exp-dir $exp_dir \ + --speech-encoder-path-or-name models/large-v2.pt \ + --llm-path-or-name Qwen/Qwen2.5-0.5B-Instruct \ + --on-the-fly-feats True \ + --deepspeed \ + --deepspeed_config ./qwen_omni/ds_config_zero1.json \ + --use-flash-attn True \ + --dataset-format speech_continuation \ + --start-epoch 2 --pretrained-model-path $exp_dir/epoch-1/pytorch_model.bin \ + --use-lora False --unfreeze-llm False --unfreeze-speech-projector True --enable-speech-output False +fi + +if [ $stage -le 16 ] && [ $stop_stage -ge 16 ]; then + log "stage 16: Training Speech2Speech Model, adaptor only" + exp_dir=./qwen_omni/exp_speech2text + ngpu=4 + + latest_checkpoint_step=-1 + # Check if exp_dir exists and is a directory + if [ -d "$exp_dir" ]; then + # List directories matching checkpoint-* and find the one with the largest step number + for checkpoint_dir in $(ls -d $exp_dir/checkpoint-*/ 2>/dev/null | sort -V); do + checkpoint_name=$(basename "$checkpoint_dir") # e.g., checkpoint-1000 + # Extract step number using parameter expansion + current_step=${checkpoint_name#checkpoint-} + # Ensure current_step is a number + if [[ "$current_step" =~ ^[0-9]+$ ]] && [ "$current_step" -gt "$latest_checkpoint_step" ]; then + latest_checkpoint_step=$current_step + fi + done + fi + + train_cmd_args="--max-duration 1200 \ + --enable-musan False \ + --audio-key audio --text-key continuation \ + --exp-dir $exp_dir \ + --speech-encoder-path-or-name models/large-v2.pt \ + --llm-path-or-name Qwen/Qwen2.5-0.5B-Instruct \ + --on-the-fly-feats True \ + --deepspeed \ + --huggingface-dataset-path-or-name /lustre/fsw/general_sa/yuekaiz/s2s \ + --deepspeed_config ./qwen_omni/ds_config_zero1.json \ + --use-flash-attn True \ + --dataset-format speech_continuation \ + --use-lora False --unfreeze-llm False --unfreeze-speech-projector True --enable-speech-output False" + + if [ "$latest_checkpoint_step" -ge 0 ]; then + log "Continuing training from checkpoint-$latest_checkpoint_step" + step=$latest_checkpoint_step + train_cmd_args="$train_cmd_args --pretrained-model-path $exp_dir/checkpoint-${step}/pytorch_model.bin --sampler-state-dict-path $exp_dir/checkpoint-${step}/sampler.pt" + else + log "Starting training from scratch as no checkpoint was found in $exp_dir" + # No pretrained model or sampler state dict needed for the first run + fi + + torchrun --nproc_per_node $ngpu ./qwen_omni/train.py \ + $train_cmd_args +fi diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/data_module.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/data_module.py index bc75bccd6..1f35f9b84 100644 --- a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/data_module.py +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/data_module.py @@ -24,7 +24,7 @@ from pathlib import Path from typing import Any, Dict, Optional import torch -from datasets import load_dataset +from datasets import interleave_datasets, load_dataset from lhotse import ( CutSet, WhisperFbank, @@ 
-36,6 +36,7 @@ from lhotse.dataset import ( # noqa F401 for PrecomputedFeatures CutConcatenate, CutMix, DynamicBucketingSampler, + PerturbSpeed, PrecomputedFeatures, SimpleCutSampler, SpecAugment, @@ -47,7 +48,7 @@ from lhotse.dataset.input_strategies import ( # noqa F401 For AudioSamples from lhotse.utils import fix_random_seed from speech_dataset import K2SpeechRecognitionDataset from torch.utils.data import DataLoader -from utils import str2bool +from utils import get_rank, str2bool class _SeedWorkers: @@ -123,6 +124,14 @@ class AsrDataModule: "extraction. Will drop existing precomputed feature manifests " "if available.", ) + group.add_argument( + "--on-the-fly-speed-perturb", + type=str2bool, + default=True, + help="When enabled, use on-the-fly speed perturbation. " + "Will drop existing precomputed feature manifests " + "if available.", + ) group.add_argument( "--shuffle", type=str2bool, @@ -188,27 +197,27 @@ class AsrDataModule: group.add_argument( "--huggingface-dataset-path-or-name", type=str, - default="/workspace/Belle_1.4M-SLAM-Omni", + default=None, help="The path or name of the Huggingface dataset", ) group.add_argument( "--audio-key", type=str, - default="question_audio", + default="audio", help="The key in the Huggingface dataset containing the audio data", ) group.add_argument( "--text-key", type=str, - default="answer", + default="text", help="The key in the Huggingface dataset containing the text data", ) - group.add_argument( - "--resample-to-16kHz", - type=str2bool, - default=True, - help="Resample audio to 16kHz. Default: False.", - ) + # group.add_argument( + # "--resample-to-16kHz", + # type=str2bool, + # default=True, + # help="Resample audio to 16kHz. Default: False.", + # ) def train_dataloaders( self, @@ -232,6 +241,8 @@ class AsrDataModule: ) else: logging.info("Disable MUSAN") + if self.args.on_the_fly_speed_perturb and self.args.on_the_fly_feats: + transforms = [PerturbSpeed(factors=[0.9, 1.1], p=2 / 3)] + transforms input_transforms = [] if self.args.enable_spec_aug: @@ -260,9 +271,11 @@ class AsrDataModule: logging.info("Disable SpecAugment") logging.info("About to create train dataset") + rank = get_rank() + train = K2SpeechRecognitionDataset( input_strategy=OnTheFlyFeatures( - WhisperFbank(WhisperFbankConfig(num_filters=80, device="cuda")) + WhisperFbank(WhisperFbankConfig(num_filters=80, device=f"cuda:{rank}")) ) if self.args.on_the_fly_feats else eval(self.args.input_strategy)(), @@ -271,26 +284,6 @@ class AsrDataModule: return_cuts=self.args.return_cuts, ) - # if self.args.on_the_fly_feats: - # # NOTE: the PerturbSpeed transform should be added only if we - # # remove it from data prep stage. - # # Add on-the-fly speed perturbation; since originally it would - # # have increased epoch size by 3, we will apply prob 2/3 and use - # # 3x more epochs. - # # Speed perturbation probably should come first before - # # concatenation, but in principle the transforms order doesn't have - # # to be strict (e.g. could be randomized) - # # transforms = [PerturbSpeed(factors=[0.9, 1.1], p=2/3)] + transforms # noqa - # # Drop feats to be on the safe side. 
- # train = K2SpeechRecognitionDataset( - # cut_transforms=transforms, - # input_strategy=OnTheFlyFeatures( - # WhisperFbank(WhisperFbankConfig(num_filters=80, device="cuda")) - # ), - # input_transforms=input_transforms, - # return_cuts=self.args.return_cuts, - # ) - if self.args.bucketing_sampler: logging.info("Using DynamicBucketingSampler.") train_sampler = DynamicBucketingSampler( @@ -298,8 +291,7 @@ class AsrDataModule: max_duration=self.args.max_duration, shuffle=self.args.shuffle, num_buckets=self.args.num_buckets, - buffer_size=self.args.num_buckets * 2000, - shuffle_buffer_size=self.args.num_buckets * 5000, + buffer_size=self.args.num_buckets * 1000, drop_last=self.args.drop_last, ) else: @@ -339,10 +331,10 @@ class AsrDataModule: CutSet for validation. """ logging.info("About to create dev dataset") - + rank = get_rank() validate = K2SpeechRecognitionDataset( input_strategy=OnTheFlyFeatures( - WhisperFbank(WhisperFbankConfig(num_filters=80, device="cuda")) + WhisperFbank(WhisperFbankConfig(num_filters=80, device=f"cuda:{rank}")) ) if self.args.on_the_fly_feats else eval(self.args.input_strategy)(), @@ -470,25 +462,231 @@ class AsrDataModule: ) return {"test": VoiceAssistant_cuts} - # def train_cuts_en_vocalnet(self) -> CutSet: + @lru_cache() + def train_cuts_ultravox(self) -> CutSet: + logging.info("About to get train cuts") + if self.args.huggingface_dataset_path_or_name is not None: + librispeech_path = ( + self.args.huggingface_dataset_path_or_name + "/librispeech_asr" + ) + people_speech_path = ( + self.args.huggingface_dataset_path_or_name + "/peoples_speech" + ) + gigaspeech_path = self.args.huggingface_dataset_path_or_name + "/gigaspeech" + else: + librispeech_path = "fixie-ai/librispeech_asr" + people_speech_path = "fixie-ai/peoples_speech" + gigaspeech_path = "fixie-ai/gigaspeech" + # 148_688 + librispeech_other = load_dataset( + librispeech_path, "other", split="train.500", streaming=True + ) + # 104_014 + librispeech_clean_360 = load_dataset( + librispeech_path, "clean", split="train.360", streaming=True + ) + # 28_539 + librispeech_clean_100 = load_dataset( + librispeech_path, "clean", split="train.100", streaming=True + ) + + # 1_501_271 + people_speech_clean = load_dataset( + people_speech_path, "clean", split="train", streaming=True + ) + # 548_000 + people_speech_dirty_sa = load_dataset( + people_speech_path, "dirty_sa", split="train", streaming=True + ) + + # 8_266_422 + + gigaspeech = load_dataset( + gigaspeech_path, "xl-empty-audio-removed", split="train", streaming=True + ) + + librispeech_clean_100_cuts = CutSet.from_huggingface_dataset( + librispeech_clean_100, + audio_key=self.args.audio_key, + text_key=self.args.text_key, + ) + + librispeech_other_cuts = CutSet.from_huggingface_dataset( + librispeech_other, + audio_key=self.args.audio_key, + text_key=self.args.text_key, + ) + + librispeech_clean_360_cuts = CutSet.from_huggingface_dataset( + librispeech_clean_360, + audio_key=self.args.audio_key, + text_key=self.args.text_key, + ) + + gigaspeech_cuts = CutSet.from_huggingface_dataset( + gigaspeech, audio_key=self.args.audio_key, text_key=self.args.text_key + ) + + people_speech_clean_cuts = CutSet.from_huggingface_dataset( + people_speech_clean, + audio_key=self.args.audio_key, + text_key=self.args.text_key, + ) + + people_speech_dirty_sa_cuts = CutSet.from_huggingface_dataset( + people_speech_dirty_sa, + audio_key=self.args.audio_key, + text_key=self.args.text_key, + ) + + return CutSet.mux( + librispeech_clean_100_cuts, + librispeech_clean_360_cuts, + 
librispeech_other_cuts, + gigaspeech_cuts, + people_speech_clean_cuts, + people_speech_dirty_sa_cuts, + weights=[ + 28539, + 104014, + 148688, + 8266422, + 1501271, + 548000, + ], + ) + + # @lru_cache() + # def train_cuts_ultravox(self) -> CutSet: # logging.info("About to get train cuts") - # VoiceAssistant_cuts = load_manifest_lazy( - # self.args.manifest_dir / "cuts_debug.jsonl.gz" - # ) - # return VoiceAssistant_cuts + # keep_columns = ["audio", "text", "continuation", "id"] + # librispeech_path="fixie-ai/librispeech_asr" + # # 148_688 + # librispeech_other = load_dataset(librispeech_path, 'other', split='train.500', streaming=True) + # # 104_014 + # librispeech_clean_360 = load_dataset(librispeech_path, 'clean', split='train.360', streaming=True) + # # 28_539 + # librispeech_clean_100 = load_dataset(librispeech_path, 'clean', split='train.100', streaming=True) - # @lru_cache() - # def valid_cuts_en_vocalnet(self) -> CutSet: - # logging.info("About to get valid cuts") - # VoiceAssistant_cuts = load_manifest_lazy( - # self.args.manifest_dir / "cuts_debug.jsonl.gz" - # ) - # return VoiceAssistant_cuts + # cols_to_remove = librispeech_clean_100.column_names + # cols_to_remove = [col for col in cols_to_remove if col not in keep_columns] + # librispeech_clean_100 = librispeech_clean_100.remove_columns(cols_to_remove) + # librispeech_clean_360 = librispeech_clean_360.remove_columns(cols_to_remove) + # librispeech_other = librispeech_other.remove_columns(cols_to_remove) + # people_speech_path="fixie-ai/peoples_speech" + # # 1_501_271 + # people_speech_clean = load_dataset(people_speech_path, 'clean', split='train', streaming=True) + # # 548_000 + # people_speech_dirty_sa = load_dataset(people_speech_path, 'dirty_sa', split='train', streaming=True) + # cols_to_remove = people_speech_clean.column_names + # cols_to_remove = [col for col in cols_to_remove if col not in keep_columns] + # people_speech_clean = people_speech_clean.remove_columns(cols_to_remove) + # people_speech_dirty_sa = people_speech_dirty_sa.remove_columns(cols_to_remove) - # @lru_cache() - # def test_cuts_en_vocalnet(self) -> CutSet: - # logging.info("About to get test cuts") - # VoiceAssistant_cuts = load_manifest_lazy( - # self.args.manifest_dir / "cuts_debug.jsonl.gz" + # # 8_266_422 + # gigaspeech_path="fixie-ai/gigaspeech" + # gigaspeech = load_dataset(gigaspeech_path, 'xl-empty-audio-removed', split='train', streaming=True) + # # first rename segment_id to id + # gigaspeech = gigaspeech.rename_column("segment_id", "id") + # cols_to_remove = gigaspeech.column_names + # cols_to_remove = [col for col in cols_to_remove if col not in keep_columns] + # gigaspeech = gigaspeech.remove_columns(cols_to_remove) + + # total_item = 104014 + 28539 + 8266422 + 1501271 + 548000 + 148688 + # final_datasets = interleave_datasets([ + # librispeech_clean_100, + # librispeech_clean_360, + # gigaspeech, + # people_speech_clean, + # people_speech_dirty_sa, + # librispeech_other, + # ], probabilities=[ + # 28539 / total_item, + # 104014 / total_item, + # 8266422 / total_item, + # 1501271 / total_item, + # 548000 / total_item, + # 148688 / total_item, + # ]) + + # train_cuts = CutSet.from_huggingface_dataset( + # final_datasets, audio_key=self.args.audio_key, text_key=self.args.text_key # ) - # return VoiceAssistant_cuts + + # return train_cuts + + @lru_cache() + def valid_cuts_ultravox(self) -> CutSet: + logging.info("About to get valid cuts") + librispeech_path = "fixie-ai/librispeech_asr" + librispeech_clean_valid = load_dataset( + 
librispeech_path, "clean", split="validation", streaming=True + ) + librispeech_clean_valid_cuts = CutSet.from_huggingface_dataset( + librispeech_clean_valid, + audio_key=self.args.audio_key, + text_key=self.args.text_key, + ) + return librispeech_clean_valid_cuts + + @lru_cache() + def train_cuts_librispeech(self) -> CutSet: + logging.info("About to get train cuts") + + # librispeech_path="fixie-ai/librispeech_asr" + librispeech_path = "/workspace/slam/librispeech_asr" + # 148_688 + librispeech_other = load_dataset( + librispeech_path, "other", split="train.500", streaming=True + ) + # 104_014 + librispeech_clean_360 = load_dataset( + librispeech_path, "clean", split="train.360", streaming=True + ) + # 28_539 + librispeech_clean_100 = load_dataset( + librispeech_path, "clean", split="train.100", streaming=True + ) + + librispeech_clean_100_cuts = CutSet.from_huggingface_dataset( + librispeech_clean_100, + audio_key=self.args.audio_key, + text_key=self.args.text_key, + ) + + librispeech_other_cuts = CutSet.from_huggingface_dataset( + librispeech_other, + audio_key=self.args.audio_key, + text_key=self.args.text_key, + ) + + librispeech_clean_360_cuts = CutSet.from_huggingface_dataset( + librispeech_clean_360, + audio_key=self.args.audio_key, + text_key=self.args.text_key, + ) + + return CutSet.mux( + librispeech_clean_100_cuts, + librispeech_clean_360_cuts, + librispeech_other_cuts, + weights=[ + 28539, + 104014, + 148688, + ], + ) + + @lru_cache() + def train_cuts_gigaspeech(self) -> CutSet: + logging.info("About to get train cuts") + gigaspeech_path = "fixie-ai/gigaspeech" + gigaspeech = load_dataset( + gigaspeech_path, "xl-empty-audio-removed", split="train", streaming=True + ) + + gigaspeech_cuts = CutSet.from_huggingface_dataset( + gigaspeech, audio_key=self.args.audio_key, text_key=self.args.text_key + ) + + return gigaspeech_cuts diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/requirements.txt b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/requirements.txt index 2db53f3ff..573e8232d 100644 --- a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/requirements.txt +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/requirements.txt @@ -10,3 +10,4 @@ transformers>=4.37.0 flash-attn peft torchmetrics +triton==3.3.0 # may be violate with openai-whisper diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py index d23d578c6..1ed0204db 100755 --- a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py @@ -68,24 +68,26 @@ from transformers import ( Qwen2Config, Qwen2ForCausalLM, ) -from whisper_encoder_forward_monkey_patch import replace_whisper_encoder_forward -# from icefall import diagnostics -from utils import get_rank, get_world_size # from icefall.env import get_env_info +# from icefall import diagnostics from utils import ( # filter_uneven_sized_batch, AttributeDict, MetricsTracker, + get_rank, + get_world_size, setup_logger, str2bool, ) +from whisper_encoder_forward_monkey_patch import replace_whisper_encoder_forward DEFAULT_SPEECH_TOKEN = "" try: - torch.multiprocessing.set_start_method('spawn') + torch.multiprocessing.set_start_method("spawn") except RuntimeError: pass + def set_batch_count(model: nn.Module, batch_count: float) -> None: for module in model.modules(): if hasattr(module, "batch_count"): @@ -272,7 +274,7 @@ def get_params() -> AttributeDict: "batch_idx_train": 0, "log_interval": 50, "reset_interval": 200, - "valid_interval": 5000, + "valid_interval": 3000, # "env_info": get_env_info(), } 
) @@ -332,6 +334,21 @@ def process_batch_vocalnet(batch: dict): return messages, answer_cosyvoice_speech_token +def process_batch_speech_continuation(batch: dict): + messages = [] + for i in range(len(batch["supervisions"]["text"])): + message = [ + { + "role": "user", + "content": f"Continue the following text using less than 50 words:\n\n{DEFAULT_SPEECH_TOKEN}", + }, + {"role": "assistant", "content": batch["supervisions"]["text"][i]}, + ] + # transcript = batch["supervisions"]["cut"][i].custom["text"] + messages.append(message) + return messages + + def compute_loss( params: AttributeDict, tokenizer: AutoTokenizer, @@ -429,13 +446,13 @@ def compute_loss( feature = feature.to(device) feature = feature.transpose(1, 2) # (N, C, T) - batch_idx_train = params.batch_idx_train - # WAR: TODO FIXME merge process_batch_slam_omni and process_batch_vocalnet if params.dataset_format == "slam_omni": messages, answer_cosyvoice_speech_token = process_batch_slam_omni(batch) elif params.dataset_format == "vocalnet": messages, answer_cosyvoice_speech_token = process_batch_vocalnet(batch) + elif params.dataset_format == "speech_continuation": + messages = process_batch_speech_continuation(batch) else: raise ValueError(f"Unknown dataset format: {params.dataset_format}") @@ -566,8 +583,11 @@ def train_one_epoch( The rank of the node in DDP training. If no DDP is used, it should be set to 0. """ - model.encoder_projector.train() - + # model.encoder_projector.train() + model.train() + model.encoder.eval() + if not params.unfreeze_llm: + model.llm.eval() tot_loss = MetricsTracker() for batch_idx, batch in enumerate(train_dl): @@ -583,6 +603,9 @@ def train_one_epoch( world_size=world_size, ) model.train() + model.encoder.eval() + if not params.unfreeze_llm: + model.llm.eval() logging.info(f"Epoch {params.cur_epoch}, validation: {valid_info}") logging.info( f"Maximum memory allocated so far is {torch.cuda.max_memory_allocated()//1000000}MB" @@ -594,7 +617,7 @@ def train_one_epoch( if batch_idx != 0: model.save_checkpoint( save_dir=params.exp_dir, - tag=f"epoch-{params.cur_epoch}-checkpoint-{batch_idx}", + tag=f"zero-checkpoint-{params.batch_idx_train}", client_state={}, exclude_frozen_parameters=True, ) @@ -602,18 +625,18 @@ def train_one_epoch( if rank == 0: convert_zero_checkpoint_to_fp32_state_dict( params.exp_dir, - f"{params.exp_dir}/epoch-{params.cur_epoch}-checkpoint-{batch_idx}.pt", - tag=f"epoch-{params.cur_epoch}-checkpoint-{batch_idx}", + f"{params.exp_dir}/checkpoint-{params.batch_idx_train}", + tag=f"zero-checkpoint-{params.batch_idx_train}", exclude_frozen_parameters=True, ) # save sampler state dict into checkpoint sampler_state_dict = train_dl.sampler.state_dict() torch.save( sampler_state_dict, - f"{params.exp_dir}/epoch-{params.cur_epoch}-checkpoint-{batch_idx}-sampler.pt", + f"{params.exp_dir}/checkpoint-{params.batch_idx_train}/sampler.pt", ) os.system( - f"rm -rf {params.exp_dir}/epoch-{params.cur_epoch}-checkpoint-{batch_idx}" + f"rm -rf {params.exp_dir}/zero-checkpoint-{params.batch_idx_train}" ) try: with torch.amp.autocast("cuda", enabled=params.use_fp16): @@ -687,9 +710,9 @@ def run(rank, world_size, args): fix_random_seed(params.seed) - setup_logger(f"{params.exp_dir}/log/log-train") + if rank == 0: + setup_logger(f"{params.exp_dir}/log/log-train") logging.info(params) - logging.info("About to create model") replace_whisper_encoder_forward() @@ -698,7 +721,6 @@ def run(rank, world_size, args): speech_encoder_dim = whisper_model.dims.n_audio_state for name, param in 
speech_encoder.named_parameters(): param.requires_grad = False - speech_encoder.eval() tokenizer = AutoTokenizer.from_pretrained(params.llm_path_or_name) @@ -721,7 +743,7 @@ def run(rank, world_size, args): if not params.unfreeze_llm: for name, param in llm.named_parameters(): param.requires_grad = False - llm.eval() + else: if params.use_lora: lora_config = LoraConfig( @@ -809,6 +831,9 @@ def run(rank, world_size, args): if params.pretrained_model_path: checkpoint = torch.load(params.pretrained_model_path, map_location="cpu") missing_keys, unexpected_keys = model.load_state_dict(checkpoint, strict=False) + # set params.batch_idx_train according to the checkpoint name + if "checkpoint-" in params.pretrained_model_path: + params.batch_idx_train = int(params.pretrained_model_path.split("-")[-1]) num_param = sum([p.numel() for p in model.parameters()]) logging.info(f"Number of model parameters: {num_param}") @@ -842,21 +867,22 @@ def run(rank, world_size, args): # You should use ../local/display_manifest_statistics.py to get # an utterance duration distribution for your dataset to select # the threshold - if c.duration < 1.0 or c.duration > 30.0: - # logging.warning( - # f"Exclude cut with ID {c.id} from training. Duration: {c.duration}" - # ) - return False - codec_len = ( - len(c.custom["answer_cosyvoice_speech_token"]) - if "answer_cosyvoice_speech_token" in c.custom - else len(c.custom["speech_token"]) - ) - if codec_len > 2200: + if c.duration < 1.0 or c.duration > 29.5: logging.warning( - f"Exclude cut with ID {c.id} from training. Duration: {c.duration}, lenth: {codec_len}" + f"Exclude cut with ID {c.id} from training. Duration: {c.duration}" ) return False + if "speech_token" in c.custom or "answer_cosyvoice_speech_token" in c.custom: + codec_len = ( + len(c.custom["answer_cosyvoice_speech_token"]) + if "answer_cosyvoice_speech_token" in c.custom + else len(c.custom["speech_token"]) + ) + if codec_len > 2200: + logging.warning( + f"Exclude cut with ID {c.id} from training. 
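For the --use-lora branch above, a minimal sketch of how the LLM is wrapped with peft; the rank, alpha, dropout and target modules shown here are illustrative values, not necessarily the ones this recipe uses:

    from peft import LoraConfig, get_peft_model
    from transformers import AutoModelForCausalLM

    llm = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct")
    lora_config = LoraConfig(
        r=64,                      # illustrative rank
        lora_alpha=16,
        lora_dropout=0.05,
        target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
        task_type="CAUSAL_LM",
    )
    # Only the injected low-rank matrices stay trainable after wrapping.
    llm = get_peft_model(llm, lora_config)
    llm.print_trainable_parameters()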
Duration: {c.duration}, lenth: {codec_len}" + ) + return False return True if params.dataset_format == "slam_omni": @@ -865,6 +891,11 @@ def run(rank, world_size, args): elif params.dataset_format == "vocalnet": train_cuts = data_module.train_cuts_en_vocalnet() valid_cuts = data_module.valid_cuts_en_vocalnet() + elif params.dataset_format == "speech_continuation": + # train_cuts = data_module.train_cuts_ultravox() + # train_cuts = data_module.train_cuts_gigaspeech() + train_cuts = data_module.train_cuts_librispeech() + valid_cuts = data_module.valid_cuts_ultravox() else: raise ValueError(f"Unknown dataset format: {params.dataset_format}") @@ -879,7 +910,7 @@ def run(rank, world_size, args): train_dl = data_module.train_dataloaders( train_cuts, sampler_state_dict=sampler_state_dict ) - + # train_dl = data_module.valid_dataloaders(train_cuts) valid_dl = data_module.valid_dataloaders(valid_cuts) if args.tensorboard and rank == 0: @@ -913,25 +944,25 @@ def run(rank, world_size, args): model.save_checkpoint( save_dir=params.exp_dir, - tag=f"epoch-{params.cur_epoch}", + tag=f"zero-epoch-{params.cur_epoch}", client_state={}, exclude_frozen_parameters=True, ) if rank == 0: convert_zero_checkpoint_to_fp32_state_dict( params.exp_dir, - f"{params.exp_dir}/epoch-{params.cur_epoch}.pt", - tag=f"epoch-{params.cur_epoch}", + f"{params.exp_dir}/epoch-{params.cur_epoch}", + tag=f"zero-epoch-{params.cur_epoch}", exclude_frozen_parameters=True, ) # save sampler state dict into checkpoint sampler_state_dict = train_dl.sampler.state_dict() torch.save( sampler_state_dict, - f"{params.exp_dir}/epoch-{params.cur_epoch}-sampler.pt", + f"{params.exp_dir}/epoch-{params.cur_epoch}/sampler.pt", ) - os.system(f"rm -rf {params.exp_dir}/epoch-{params.cur_epoch}") + os.system(f"rm -rf {params.exp_dir}/zero-epoch-{params.cur_epoch}") logging.info("Done!") @@ -971,6 +1002,7 @@ def main(): torch.set_num_threads(1) torch.set_num_interop_threads(1) + warnings.filterwarnings("ignore", category=FutureWarning) run(rank=rank, world_size=world_size, args=args) From bfb4ebeb8393037f60b0d10e3c9d8a8fe6b389a5 Mon Sep 17 00:00:00 2001 From: root Date: Thu, 15 May 2025 14:32:49 +0000 Subject: [PATCH 38/57] remove triton --- egs/speech_llm/SPEECH2SPEECH/qwen_omni/requirements.txt | 2 +- egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/requirements.txt b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/requirements.txt index 573e8232d..85e975175 100644 --- a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/requirements.txt +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/requirements.txt @@ -10,4 +10,4 @@ transformers>=4.37.0 flash-attn peft torchmetrics -triton==3.3.0 # may be violate with openai-whisper +# triton==3.3.0 # may be violate with openai-whisper diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py index 1ed0204db..ba9e97577 100755 --- a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py @@ -867,7 +867,7 @@ def run(rank, world_size, args): # You should use ../local/display_manifest_statistics.py to get # an utterance duration distribution for your dataset to select # the threshold - if c.duration < 1.0 or c.duration > 29.5: + if c.duration < 1.0 or c.duration > 29.0: logging.warning( f"Exclude cut with ID {c.id} from training. 
Duration: {c.duration}" ) From 0e8c1db4d0be15d126147f6a0334b168989d24a7 Mon Sep 17 00:00:00 2001 From: root Date: Thu, 15 May 2025 22:45:04 -0700 Subject: [PATCH 39/57] fix speed perturb issue --- egs/speech_llm/SPEECH2SPEECH/qwen_omni/data_module.py | 7 ++++--- egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py | 6 +++--- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/data_module.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/data_module.py index 1f35f9b84..72f26a803 100644 --- a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/data_module.py +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/data_module.py @@ -632,9 +632,10 @@ class AsrDataModule: @lru_cache() def train_cuts_librispeech(self) -> CutSet: logging.info("About to get train cuts") - - # librispeech_path="fixie-ai/librispeech_asr" - librispeech_path = "/workspace/slam/librispeech_asr" + if self.args.huggingface_dataset_path_or_name is not None: + librispeech_path = self.args.huggingface_dataset_path_or_name + "/librispeech_asr" + else: + librispeech_path = "fixie-ai/librispeech_asr" # 148_688 librispeech_other = load_dataset( librispeech_path, "other", split="train.500", streaming=True diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py index ba9e97577..c523c92a5 100755 --- a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py @@ -867,7 +867,7 @@ def run(rank, world_size, args): # You should use ../local/display_manifest_statistics.py to get # an utterance duration distribution for your dataset to select # the threshold - if c.duration < 1.0 or c.duration > 29.0: + if c.duration < 1.0 or c.duration > 25.0: logging.warning( f"Exclude cut with ID {c.id} from training. 
Duration: {c.duration}" ) @@ -892,9 +892,9 @@ def run(rank, world_size, args): train_cuts = data_module.train_cuts_en_vocalnet() valid_cuts = data_module.valid_cuts_en_vocalnet() elif params.dataset_format == "speech_continuation": - # train_cuts = data_module.train_cuts_ultravox() + train_cuts = data_module.train_cuts_ultravox() # train_cuts = data_module.train_cuts_gigaspeech() - train_cuts = data_module.train_cuts_librispeech() + # train_cuts = data_module.train_cuts_librispeech() valid_cuts = data_module.valid_cuts_ultravox() else: raise ValueError(f"Unknown dataset format: {params.dataset_format}") From e52581e69bde23af025aa26f55bd74c6c9e23843 Mon Sep 17 00:00:00 2001 From: root Date: Fri, 16 May 2025 00:02:12 -0700 Subject: [PATCH 40/57] support local_rank for multi-node --- .../SPEECH2SPEECH/qwen_omni/data_module.py | 6 +++--- egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py | 13 +++++++------ egs/speech_llm/SPEECH2SPEECH/qwen_omni/utils.py | 8 ++++++++ 3 files changed, 18 insertions(+), 9 deletions(-) diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/data_module.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/data_module.py index 72f26a803..da337791a 100644 --- a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/data_module.py +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/data_module.py @@ -48,7 +48,7 @@ from lhotse.dataset.input_strategies import ( # noqa F401 For AudioSamples from lhotse.utils import fix_random_seed from speech_dataset import K2SpeechRecognitionDataset from torch.utils.data import DataLoader -from utils import get_rank, str2bool +from utils import get_local_rank, str2bool class _SeedWorkers: @@ -271,7 +271,7 @@ class AsrDataModule: logging.info("Disable SpecAugment") logging.info("About to create train dataset") - rank = get_rank() + rank = get_local_rank() train = K2SpeechRecognitionDataset( input_strategy=OnTheFlyFeatures( @@ -331,7 +331,7 @@ class AsrDataModule: CutSet for validation. """ logging.info("About to create dev dataset") - rank = get_rank() + rank = get_local_rank() validate = K2SpeechRecognitionDataset( input_strategy=OnTheFlyFeatures( WhisperFbank(WhisperFbankConfig(num_filters=80, device=f"cuda:{rank}")) diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py index c523c92a5..a11ae4b76 100755 --- a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py @@ -75,6 +75,7 @@ from utils import ( # filter_uneven_sized_batch, AttributeDict, MetricsTracker, get_rank, + get_local_rank, get_world_size, setup_logger, str2bool, @@ -274,7 +275,7 @@ def get_params() -> AttributeDict: "batch_idx_train": 0, "log_interval": 50, "reset_interval": 200, - "valid_interval": 3000, + "valid_interval": 1000, # "env_info": get_env_info(), } ) @@ -844,7 +845,7 @@ def run(rank, world_size, args): logging.info(f"{name}: {param.shape}") if torch.cuda.is_available(): - device = torch.device("cuda", rank) + device = torch.device("cuda", get_local_rank()) else: device = torch.device("cpu") logging.info(f"Device: {device}") @@ -867,10 +868,10 @@ def run(rank, world_size, args): # You should use ../local/display_manifest_statistics.py to get # an utterance duration distribution for your dataset to select # the threshold - if c.duration < 1.0 or c.duration > 25.0: - logging.warning( - f"Exclude cut with ID {c.id} from training. Duration: {c.duration}" - ) + if c.duration < 0.8 or c.duration > 20.0: + # logging.warning( + # f"Exclude cut with ID {c.id} from training. 
Duration: {c.duration}" + # ) return False if "speech_token" in c.custom or "answer_cosyvoice_speech_token" in c.custom: codec_len = ( diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/utils.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/utils.py index fe65a8042..7c6f6c0a6 100644 --- a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/utils.py +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/utils.py @@ -38,6 +38,14 @@ def get_rank(): else: return 0 +def get_local_rank(): + if "LOCAL_RANK" in os.environ: + return int(os.environ["LOCAL_RANK"]) + elif dist.is_available() and dist.is_initialized(): + return dist.get_local_rank() + else: + return 0 + def str2bool(v): """Used in argparse.ArgumentParser.add_argument to indicate that a type is a bool type and user can enter From 4a294303499cae15357bbbcb2108baa6ddb7e420 Mon Sep 17 00:00:00 2001 From: root Date: Mon, 19 May 2025 01:31:21 +0000 Subject: [PATCH 41/57] add loss type --- egs/speech_llm/SPEECH2SPEECH/prepare.sh | 74 ++++- .../SPEECH2SPEECH/qwen_omni/model.py | 66 ++++ .../SPEECH2SPEECH/qwen_omni/server.py | 23 +- .../SPEECH2SPEECH/qwen_omni/train.py | 287 +++++++++++++----- 4 files changed, 367 insertions(+), 83 deletions(-) diff --git a/egs/speech_llm/SPEECH2SPEECH/prepare.sh b/egs/speech_llm/SPEECH2SPEECH/prepare.sh index fd8070691..e92e90a2f 100644 --- a/egs/speech_llm/SPEECH2SPEECH/prepare.sh +++ b/egs/speech_llm/SPEECH2SPEECH/prepare.sh @@ -239,7 +239,8 @@ fi if [ $stage -le 14 ] && [ $stop_stage -ge 14 ]; then log "stage 14: Client" - datasets=(alpacaeval wildvoice mmsu advbench bbh ifeval commoneval obqa sd-qa) + datasets=(alpacaeval_full wildvoice mmsu advbench bbh ifeval commoneval openbookqa sd-qa) + datasets=(openbookqa commoneval) for dataset in ${datasets[@]}; do # sd-qa should use usa split if [ $dataset == "sd-qa" ]; then @@ -250,17 +251,16 @@ if [ $stage -le 14 ] && [ $stop_stage -ge 14 ]; then echo $dataset $split_name python3 ./qwen_omni/client.py \ --subset-name $dataset --split-name $split_name \ - --output-dir test_result + --output-dir result_adapter_librispeech_kl_div_qa_template done fi - if [ $stage -le 15 ] && [ $stop_stage -ge 15 ]; then log "stage 15: Training Speech2Speech Model, adaptor only" exp_dir=./qwen_omni/exp_speech2text ngpu=2 torchrun --nproc_per_node $ngpu ./qwen_omni/train.py \ - --max-duration 600 \ + --max-duration 700 \ --enable-musan False \ --audio-key audio --text-key continuation \ --exp-dir $exp_dir \ @@ -271,7 +271,7 @@ if [ $stage -le 15 ] && [ $stop_stage -ge 15 ]; then --deepspeed_config ./qwen_omni/ds_config_zero1.json \ --use-flash-attn True \ --dataset-format speech_continuation \ - --start-epoch 2 --pretrained-model-path $exp_dir/epoch-1/pytorch_model.bin \ + --start-epoch 4 --pretrained-model-path $exp_dir/epoch-3/pytorch_model.bin \ --use-lora False --unfreeze-llm False --unfreeze-speech-projector True --enable-speech-output False fi @@ -321,3 +321,67 @@ if [ $stage -le 16 ] && [ $stop_stage -ge 16 ]; then torchrun --nproc_per_node $ngpu ./qwen_omni/train.py \ $train_cmd_args fi + + +if [ $stage -le 17 ] && [ $stop_stage -ge 17 ]; then + log "stage 17: Server for adapter only speech continuation" + exp_dir=./qwen_omni/exp_speech2text + python3 ./qwen_omni/server.py \ + --speech-encoder-path-or-name models/large-v2.pt \ + --llm-path-or-name models/Qwen2.5-0.5B-Instruct \ + --checkpoint-path $exp_dir/epoch-6/pytorch_model.bin \ + --use-flash-attn True \ + --enable-speech-output False \ + --use-lora False --prompt-template continuation +fi + +if [ $stage -le 18 ] && [ $stop_stage -ge 18 ]; then + 
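get_local_rank() above is needed because, on multi-node jobs, the CUDA device index has to come from the per-node LOCAL_RANK rather than the global RANK. A small sketch of the convention (RANK, LOCAL_RANK and WORLD_SIZE are the environment variables exported by torchrun):

    import os
    import torch

    global_rank = int(os.environ.get("RANK", 0))       # unique across all nodes
    local_rank = int(os.environ.get("LOCAL_RANK", 0))  # unique only within one node
    world_size = int(os.environ.get("WORLD_SIZE", 1))

    # Selecting the GPU by global rank would request non-existent device ids on the
    # second node of a multi-node job, so the local rank is used instead.
    if torch.cuda.is_available():
        device = torch.device("cuda", local_rank)
    else:
        device = torch.device("cpu")
    print(f"rank {global_rank}/{world_size} -> {device}")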
log "stage 18: Training kl-div Speech2Speech Model, adaptor only" + exp_dir=./qwen_omni/exp_speech2text_kl + ngpu=2 + torchrun --nproc_per_node $ngpu ./qwen_omni/train.py \ + --max-duration 700 \ + --enable-musan False \ + --audio-key audio --text-key continuation \ + --exp-dir $exp_dir \ + --speech-encoder-path-or-name models/large-v2.pt \ + --llm-path-or-name Qwen/Qwen2.5-0.5B-Instruct \ + --on-the-fly-feats True \ + --deepspeed \ + --deepspeed_config ./qwen_omni/ds_config_zero1.json \ + --use-flash-attn True \ + --dataset-format speech_continuation \ + --loss-type kl_div --dataset librispeech \ + --pretrained-model-path $exp_dir/checkpoint-1001/pytorch_model.bin --sampler-state-dict-path $exp_dir/checkpoint-1001/sampler.pt \ + --use-lora False --unfreeze-llm False --unfreeze-speech-projector True --enable-speech-output False +fi + +if [ $stage -le 19 ] && [ $stop_stage -ge 19 ]; then + log "stage 19: Server for kl loss" + exp_dir=./qwen_omni/exp_speech2text_kl + python3 ./qwen_omni/server.py \ + --speech-encoder-path-or-name models/large-v2.pt \ + --llm-path-or-name models/Qwen2.5-0.5B-Instruct \ + --checkpoint-path $exp_dir/epoch-10/pytorch_model.bin \ + --use-flash-attn True \ + --enable-speech-output False \ + --use-lora False --prompt-template qa +fi + +if [ $stage -le 20 ] && [ $stop_stage -ge 20 ]; then + log "stage 20: Training Speech2Speech Model, adaptor + lora, second stage" + exp_dir=./qwen_omni/exp_speech2text_kl_llm + pretrained_dir=./qwen_omni/exp_speech2text_kl + ngpu=2 + torchrun --nproc_per_node $ngpu ./qwen_omni/train.py \ + --max-duration 200 \ + --enable-musan False \ + --exp-dir $exp_dir \ + --speech-encoder-path-or-name models/large-v2.pt \ + --llm-path-or-name Qwen/Qwen2.5-0.5B-Instruct \ + --deepspeed \ + --deepspeed_config ./qwen_omni/ds_config_zero1.json \ + --use-flash-attn True \ + --pretrained-model-path $pretrained_dir/epoch-10/pytorch_model.bin \ + --use-lora True --unfreeze-llm True --unfreeze-speech-projector True --enable-speech-output False --dataset-format vocalnet +fi diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/model.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/model.py index a0efbd319..97484486d 100644 --- a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/model.py +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/model.py @@ -64,6 +64,8 @@ class SPEECH_LLM(nn.Module): encoder_projector: nn.Module, codec_lm: nn.Module = None, codec_lm_padding_side: str = "left", + teacher_llm: nn.Module = None, + kl_temperature: float = 2.0, ): super().__init__() self.encoder = encoder @@ -92,6 +94,9 @@ class SPEECH_LLM(nn.Module): multidim_average="global", ignore_index=IGNORE_TOKEN_ID, ) + if teacher_llm is not None: + self.teacher_llm = teacher_llm + self.kl_temperature = kl_temperature def _merge_input_ids_with_speech_features( self, speech_features, inputs_embeds, input_ids, attention_mask, labels=None @@ -256,6 +261,67 @@ class SPEECH_LLM(nn.Module): ) return model_outputs.loss, acc + def forward_kl_div( + self, + fbank: torch.Tensor = None, + input_ids: torch.LongTensor = None, + attention_mask: torch.Tensor = None, + labels: torch.LongTensor = None, + teacher_input_ids: torch.LongTensor = None, + teacher_attention_mask: torch.Tensor = None, + teacher_labels: torch.LongTensor = None, + ): + encoder_outs = self.encoder(fbank) + + speech_features = self.encoder_projector(encoder_outs) + + inputs_embeds = self.llm.get_input_embeddings()(input_ids) + + ( + inputs_embeds, + attention_mask, + labels, + _, + ) = self._merge_input_ids_with_speech_features( + speech_features, 
inputs_embeds, input_ids, attention_mask, labels + ) + + model_outputs = self.llm( + inputs_embeds=inputs_embeds, attention_mask=attention_mask, labels=labels + ) + + teacher_outputs = self.teacher_llm( + input_ids=teacher_input_ids, + attention_mask=teacher_attention_mask, + ) + + kl_loss = torch.nn.functional.kl_div( + torch.nn.functional.log_softmax( + model_outputs.logits[labels != -100] / self.kl_temperature, + dim=-1, + ), + torch.nn.functional.softmax( + teacher_outputs.logits[teacher_labels != -100] / self.kl_temperature, + dim=-1, + ), + reduction="batchmean", + ) + + with torch.no_grad(): + preds = torch.argmax(model_outputs.logits, -1) + teacher_preds = torch.argmax(teacher_outputs.logits, -1) + acc = compute_accuracy( + preds.detach()[:, :-1], + labels.detach()[:, 1:], + ignore_label=IGNORE_TOKEN_ID, + ) + acc_teacher = compute_accuracy( + teacher_preds.detach()[:, :-1], + teacher_labels.detach()[:, 1:], + ignore_label=IGNORE_TOKEN_ID, + ) + return kl_loss, acc, acc_teacher + def forward_with_speech_output( self, fbank: torch.Tensor = None, diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/server.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/server.py index 2f06b923a..3c9122a09 100644 --- a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/server.py +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/server.py @@ -21,6 +21,12 @@ def get_args(): default=None, help="Checkpoint name or path, default to %(default)r", ) + parser.add_argument( + "--prompt-template", + type=str, + default=None, + help="Prompt template", + ) add_model_arguments(parser) args = parser.parse_args() return args @@ -59,8 +65,23 @@ model, tokenizer = get_model(args) app = FastAPI() device = torch.device("cuda") +if args.prompt_template is None: + template = f"{DEFAULT_SPEECH_TOKEN}" +elif args.prompt_template == "qa": + template = f"Answer the following question:\n\n{DEFAULT_SPEECH_TOKEN}" +elif args.prompt_template == "continuation": + template = f"Continue the following text using less than 50 words:\n\n{DEFAULT_SPEECH_TOKEN}" +elif args.prompt_template == "asr": + template = ( + f"Repeat the following text, without any explanation: {DEFAULT_SPEECH_TOKEN}" + ) +elif args.prompt_template == "mt": + template = f"Please translate the text to Chinese. 
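The forward_kl_div objective above aligns the student's next-token distribution (speech input) with the teacher's (text input) only at answer positions. A self-contained sketch of that loss on dummy tensors; the batch, sequence and vocabulary sizes are made up, and a single mask is reused for brevity where the real code masks student and teacher logits with their own label tensors:

    import torch
    import torch.nn.functional as F

    kl_temperature = 2.0
    vocab = 32
    student_logits = torch.randn(2, 7, vocab)   # (batch, time, vocab)
    teacher_logits = torch.randn(2, 7, vocab)
    labels = torch.randint(0, vocab, (2, 7))
    labels[:, :3] = -100                        # prompt positions carry no loss

    mask = labels != -100                       # keep only answer positions
    kl_loss = F.kl_div(
        F.log_softmax(student_logits[mask] / kl_temperature, dim=-1),
        F.softmax(teacher_logits[mask] / kl_temperature, dim=-1),
        reduction="batchmean",
    )
    print(kl_loss)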
Your response should only include the Chinese translation, without any additional words:\n\n{DEFAULT_SPEECH_TOKEN}" +else: + raise ValueError(f"Invalid prompt template: {args.prompt_template}") +print("Using template:", template) message = [ - {"role": "user", "content": f"{DEFAULT_SPEECH_TOKEN}"}, + {"role": "user", "content": template}, {"role": "assistant", "content": ""}, ] TEMPLATE = "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content']}}{% if loop.last %}{{''}}{% else %}{{ '<|im_end|>\n' }}{% endif %}{% endfor %}" diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py index a11ae4b76..81aac84e5 100755 --- a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py @@ -74,8 +74,8 @@ from transformers import ( from utils import ( # filter_uneven_sized_batch, AttributeDict, MetricsTracker, - get_rank, get_local_rank, + get_rank, get_world_size, setup_logger, str2bool, @@ -234,6 +234,21 @@ def get_parser(): default="slam_omni", help="The format of the dataset.", ) + + parser.add_argument( + "--dataset", + type=str, + default="multi_en", + help="The name of the dataset.", + ) + + parser.add_argument( + "--loss-type", + type=str, + default="ce", + help="The type of loss to use.", + ) + parser = deepspeed.add_config_arguments(parser) add_model_arguments(parser) @@ -335,6 +350,22 @@ def process_batch_vocalnet(batch: dict): return messages, answer_cosyvoice_speech_token +def process_batch_text_vocalnet(batch: dict): + pass + answers = batch["supervisions"]["text"] + answer_cosyvoice_speech_token = [ + cut.custom["speech_token"] for cut in batch["supervisions"]["cut"] + ] + messages = [] + for i in range(len(answers)): + message = [ + {"role": "user", "content": f"{DEFAULT_SPEECH_TOKEN}"}, + {"role": "assistant", "content": answers[i]}, + ] + messages.append(message) + return messages, answer_cosyvoice_speech_token + + def process_batch_speech_continuation(batch: dict): messages = [] for i in range(len(batch["supervisions"]["text"])): @@ -350,6 +381,131 @@ def process_batch_speech_continuation(batch: dict): return messages +def process_batch_text_continuation(batch: dict): + messages = [] + for i in range(len(batch["supervisions"]["text"])): + transcript = batch["supervisions"]["cut"][i].custom["text"] + message = [ + { + "role": "user", + "content": f"Continue the following text using less than 50 words:\n\n{transcript}{DEFAULT_SPEECH_TOKEN}", + }, + {"role": "assistant", "content": batch["supervisions"]["text"][i]}, + ] + messages.append(message) + return messages + + +def preprocess( + messages, + tokenizer: transformers.PreTrainedTokenizer, +) -> Dict: + """Preprocesses the data for supervised fine-tuning.""" + texts = [] + TEMPLATE = "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content']}}{% if loop.last %}{{ '<|im_end|>'}}{% else %}{{ '<|im_end|>\n' }}{% endif %}{% endfor %}" + for i, msg in enumerate(messages): + texts.append( + tokenizer.apply_chat_template( + msg, + tokenize=True, + chat_template=TEMPLATE, + add_generation_prompt=False, + padding="longest", # FIX me change padding to longest + truncation=False, + ) + ) + if len(texts) != len(messages): + logging.warning(f"Remove too long text, {messages} ") + max_len_texts = max([len(text) for text in texts]) + if tokenizer.padding_side == "right": + texts = [ + text + [tokenizer.pad_token_id] * (max_len_texts - len(text)) + for text in texts + ] + else: + 
texts = [ + [tokenizer.pad_token_id] * (max_len_texts - len(text)) + text + for text in texts + ] + input_ids = torch.tensor(texts, dtype=torch.int) + + target_ids = input_ids.clone() + target_ids[target_ids == tokenizer.pad_token_id] = IGNORE_TOKEN_ID + # mask all tokens before token_id 151646 with IGNORE_TOKEN_ID + # first get the indices of the tokens + mask_prompt = True + if mask_prompt: + default_speech_token_id = tokenizer.convert_tokens_to_ids(DEFAULT_SPEECH_TOKEN) + mask_indices = torch.where(input_ids == default_speech_token_id) + for i in range(mask_indices[0].size(0)): + row = mask_indices[0][i] + col = mask_indices[1][i] + # + 6 to skip: 'assistant', '\n' 151665, 151645, 198, 151644, 77091, 198 + # WAR: TODO FIXME check qwen3 + target_ids[row, : col + 6] = IGNORE_TOKEN_ID + attention_mask = input_ids.ne(tokenizer.pad_token_id) + return input_ids, attention_mask, target_ids + + +def preprocess_teacher( + messages, + tokenizer: transformers.PreTrainedTokenizer, +) -> Dict: + """Preprocesses the data for supervised fine-tuning.""" + texts = [] + TEMPLATE = "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content']}}{% if loop.last %}{{ '<|im_end|>'}}{% else %}{{ '<|im_end|>\n' }}{% endif %}{% endfor %}" + for i, msg in enumerate(messages): + texts.append( + tokenizer.apply_chat_template( + msg, + tokenize=True, + chat_template=TEMPLATE, + add_generation_prompt=False, + padding="longest", # FIX me change padding to longest + truncation=False, + ) + ) + if len(texts) != len(messages): + logging.warning(f"Remove too long text, {messages} ") + max_len_texts = max([len(text) for text in texts]) + if tokenizer.padding_side == "right": + texts = [ + text + [tokenizer.pad_token_id] * (max_len_texts - len(text)) + for text in texts + ] + else: + texts = [ + [tokenizer.pad_token_id] * (max_len_texts - len(text)) + text + for text in texts + ] + input_ids = torch.tensor(texts, dtype=torch.int) + + target_ids = input_ids.clone() + target_ids[target_ids == tokenizer.pad_token_id] = IGNORE_TOKEN_ID + # mask all tokens before token_id with IGNORE_TOKEN_ID + # first get the indices of the tokens + mask_prompt = True + if mask_prompt: + default_speech_token_id = tokenizer.convert_tokens_to_ids(DEFAULT_SPEECH_TOKEN) + mask_indices = torch.where(input_ids == default_speech_token_id) + for i in range(mask_indices[0].size(0)): + row = mask_indices[0][i] + col = mask_indices[1][i] + # + 2 to skip: 'assistant', '\n' + # WAR: TODO FIXME check qwen3 + # THIS IS THE ONLY DIFFERENCE FROM preprocess + target_ids[row, : col + 6] = IGNORE_TOKEN_ID + target_ids[row, col] = default_speech_token_id + # remove default_speech_token_id from target_ids and input_ids + batch_size = target_ids.size(0) + + target_ids = target_ids[target_ids != default_speech_token_id].view(batch_size, -1) + input_ids = input_ids[input_ids != default_speech_token_id].view(batch_size, -1) + + attention_mask = input_ids.ne(tokenizer.pad_token_id) + return input_ids, attention_mask, target_ids + + def compute_loss( params: AttributeDict, tokenizer: AutoTokenizer, @@ -374,72 +530,6 @@ def compute_loss( Returns: Return a tuple of two elements. The first element is the loss tensor. """ - # For the uneven-sized batch, the total duration after padding would possibly - # cause OOM. 
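Both preprocess() and preprocess_teacher() above build the label tensor by masking everything up to the start of the assistant reply. A toy sketch of that rule with made-up token ids (the real placeholder id comes from the tokenizer, and the "+ 6" offset skips the chat-template tokens that sit between the speech placeholder and the first answer token):

    import torch

    IGNORE_TOKEN_ID = -100
    PAD_ID = 0
    SPEECH_TOKEN_ID = 151646   # illustrative id for the speech placeholder

    input_ids = torch.tensor([[11, 12, SPEECH_TOKEN_ID, 21, 22, 23, 24, 25, 31, 32, 33]])
    labels = input_ids.clone()
    labels[labels == PAD_ID] = IGNORE_TOKEN_ID

    rows, cols = torch.where(input_ids == SPEECH_TOKEN_ID)
    for r, c in zip(rows.tolist(), cols.tolist()):
        # Mask the prompt and template tokens; only the trailing positions (the
        # assistant's answer in this toy example) keep their labels.
        labels[r, : c + 6] = IGNORE_TOKEN_ID

    attention_mask = input_ids.ne(PAD_ID)
    print(labels)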
Hence, for each batch, which is sorted descendingly by length, - # we simply drop the last few shortest samples, so that the retained total frames - # (after padding) would not exceed `allowed_max_frames`: - # `allowed_max_frames = int(max_frames * (1.0 + allowed_excess_duration_ratio))`, - # where `max_frames = max_duration * 1000 // frame_shift_ms`. - # We set allowed_excess_duration_ratio=0.1. - - def preprocess( - messages, - tokenizer: transformers.PreTrainedTokenizer, - ) -> Dict: - """Preprocesses the data for supervised fine-tuning.""" - texts = [] - TEMPLATE = "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content']}}{% if loop.last %}{{ '<|im_end|>'}}{% else %}{{ '<|im_end|>\n' }}{% endif %}{% endfor %}" - for i, msg in enumerate(messages): - texts.append( - tokenizer.apply_chat_template( - msg, - tokenize=True, - chat_template=TEMPLATE, - add_generation_prompt=False, - padding="longest", # FIX me change padding to longest - truncation=False, - ) - ) - if len(texts) != len(messages): - logging.warning(f"Remove too long text, {messages} ") - max_len_texts = max([len(text) for text in texts]) - if tokenizer.padding_side == "right": - texts = [ - text + [tokenizer.pad_token_id] * (max_len_texts - len(text)) - for text in texts - ] - else: - texts = [ - [tokenizer.pad_token_id] * (max_len_texts - len(text)) + text - for text in texts - ] - input_ids = torch.tensor(texts, dtype=torch.int) - - target_ids = input_ids.clone() - target_ids[target_ids == tokenizer.pad_token_id] = IGNORE_TOKEN_ID - # mask all tokens before token_id 151646 with IGNORE_TOKEN_ID - # first get the indices of the tokens - mask_prompt = True - if mask_prompt: - default_speech_token_id = tokenizer.convert_tokens_to_ids( - DEFAULT_SPEECH_TOKEN - ) - mask_indices = torch.where(input_ids == default_speech_token_id) - for i in range(mask_indices[0].size(0)): - row = mask_indices[0][i] - col = mask_indices[1][i] - # + 6 to skip: 'assistant', '\n' 151665, 151645, 198, 151644, 77091, 198 - # WAR: TODO FIXME check qwen3 - target_ids[row, : col + 6] = IGNORE_TOKEN_ID - - attention_mask = input_ids.ne(tokenizer.pad_token_id) - - return input_ids, attention_mask, target_ids - - # max_frames = params.max_duration * 1000 // params.frame_shift_ms - # allowed_max_frames = int(max_frames * (1.0 + params.allowed_excess_duration_ratio)) - # batch = filter_uneven_sized_batch(batch, allowed_max_frames) - device = next(model.parameters()).device feature = batch["inputs"] @@ -452,8 +542,12 @@ def compute_loss( messages, answer_cosyvoice_speech_token = process_batch_slam_omni(batch) elif params.dataset_format == "vocalnet": messages, answer_cosyvoice_speech_token = process_batch_vocalnet(batch) + if params.loss_type == "kl_div": + messages_text = process_batch_text_vocalnet(batch) elif params.dataset_format == "speech_continuation": messages = process_batch_speech_continuation(batch) + if params.loss_type == "kl_div": + messages_text = process_batch_text_continuation(batch) else: raise ValueError(f"Unknown dataset format: {params.dataset_format}") @@ -464,12 +558,30 @@ def compute_loss( with torch.set_grad_enabled(is_training): if not params.enable_speech_output: - loss, acc = model( - fbank=feature, - input_ids=input_ids.to(device), - attention_mask=attention_mask.to(device), - labels=target_ids.to(device), - ) + if params.loss_type == "ce": + loss, acc = model( + fbank=feature, + input_ids=input_ids.to(device), + attention_mask=attention_mask.to(device), + labels=target_ids.to(device), + ) + elif 
params.loss_type == "kl_div": + ( + teacher_input_ids, + teacher_attention_mask, + teacher_target_ids, + ) = preprocess_teacher(messages_text, tokenizer) + loss, acc, acc_teacher = model.forward_kl_div( + fbank=feature, + input_ids=input_ids.to(device), + attention_mask=attention_mask.to(device), + labels=target_ids.to(device), + teacher_input_ids=teacher_input_ids.to(device), + teacher_attention_mask=teacher_attention_mask.to(device), + teacher_labels=teacher_target_ids.to(device), + ) + else: + raise ValueError(f"Unknown loss type: {params.loss_type}") else: ( text_loss, @@ -498,6 +610,8 @@ def compute_loss( info["acc"] = ( acc * info["frames"] ) # WAR: to avoid normalization by the number of frames + if params.loss_type == "kl_div": + info["acc_teacher"] = acc_teacher * info["frames"] if params.enable_speech_output: info["codec_acc"] = codec_acc * info["frames"] info["codec_topk_acc"] = codec_topk_acc * info["frames"] @@ -820,6 +934,17 @@ def run(rank, world_size, args): codec_lm.config.mask_token_id = codec_vocab_size - 4 else: codec_lm = None + if params.loss_type == "kl_div": + teacher_llm = AutoModelForCausalLM.from_pretrained( + params.llm_path_or_name, + attn_implementation=attn_implementation, + torch_dtype=torch_dtype, + ) + for name, param in teacher_llm.named_parameters(): + param.requires_grad = False + teacher_llm.eval() + else: + teacher_llm = None model = SPEECH_LLM( speech_encoder, @@ -827,6 +952,7 @@ def run(rank, world_size, args): encoder_projector, codec_lm, codec_lm_padding_side="left" if params.use_flash_attn else "right", + teacher_llm=teacher_llm, ) if params.pretrained_model_path: @@ -834,7 +960,9 @@ def run(rank, world_size, args): missing_keys, unexpected_keys = model.load_state_dict(checkpoint, strict=False) # set params.batch_idx_train according to the checkpoint name if "checkpoint-" in params.pretrained_model_path: - params.batch_idx_train = int(params.pretrained_model_path.split("-")[-1]) + params.batch_idx_train = int( + params.pretrained_model_path.split("-")[-1].split("/")[0] + ) num_param = sum([p.numel() for p in model.parameters()]) logging.info(f"Number of model parameters: {num_param}") @@ -893,9 +1021,14 @@ def run(rank, world_size, args): train_cuts = data_module.train_cuts_en_vocalnet() valid_cuts = data_module.valid_cuts_en_vocalnet() elif params.dataset_format == "speech_continuation": - train_cuts = data_module.train_cuts_ultravox() - # train_cuts = data_module.train_cuts_gigaspeech() - # train_cuts = data_module.train_cuts_librispeech() + if params.dataset == "multi_en": + train_cuts = data_module.train_cuts_ultravox() + elif params.dataset == "librispeech": + train_cuts = data_module.train_cuts_librispeech() + elif params.dataset == "gigaspeech": + train_cuts = data_module.train_cuts_gigaspeech() + else: + raise ValueError(f"Unknown dataset: {params.dataset}") valid_cuts = data_module.valid_cuts_ultravox() else: raise ValueError(f"Unknown dataset format: {params.dataset_format}") From 50fc1aba607b0c1fd50f5ab41763cd12115fa0e1 Mon Sep 17 00:00:00 2001 From: root Date: Sun, 18 May 2025 18:47:22 -0700 Subject: [PATCH 42/57] add multi-node --- egs/speech_llm/SPEECH2SPEECH/prepare.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/egs/speech_llm/SPEECH2SPEECH/prepare.sh b/egs/speech_llm/SPEECH2SPEECH/prepare.sh index e92e90a2f..b86288c5f 100644 --- a/egs/speech_llm/SPEECH2SPEECH/prepare.sh +++ b/egs/speech_llm/SPEECH2SPEECH/prepare.sh @@ -295,7 +295,7 @@ if [ $stage -le 16 ] && [ $stop_stage -ge 16 ]; then done fi - 
train_cmd_args="--max-duration 1200 \ + train_cmd_args="--max-duration 800 \ --enable-musan False \ --audio-key audio --text-key continuation \ --exp-dir $exp_dir \ @@ -318,7 +318,7 @@ if [ $stage -le 16 ] && [ $stop_stage -ge 16 ]; then # No pretrained model or sampler state dict needed for the first run fi - torchrun --nproc_per_node $ngpu ./qwen_omni/train.py \ + torchrun --nproc_per_node $ngpu --nnodes $SLURM_JOB_NUM_NODES --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d --rdzv_id $SLURM_JOBID ./qwen_omni/train.py \ $train_cmd_args fi From 9cdd393f4338d63d81b6d40f00258b3fefcfcda0 Mon Sep 17 00:00:00 2001 From: root Date: Tue, 20 May 2025 07:48:49 +0000 Subject: [PATCH 43/57] add server url --- egs/speech_llm/SPEECH2SPEECH/qwen_omni/server.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/server.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/server.py index 3c9122a09..f0da7f905 100644 --- a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/server.py +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/server.py @@ -27,6 +27,12 @@ def get_args(): default=None, help="Prompt template", ) + parser.add_argument( + "--port", + type=int, + default=8001, + help="Port number", + ) add_model_arguments(parser) args = parser.parse_args() return args @@ -61,6 +67,7 @@ def preprocess_prompt(tokenizer): args = get_args() +print(f"Using port: {args.port}") model, tokenizer = get_model(args) app = FastAPI() @@ -121,4 +128,4 @@ async def decode_speech(request: SpeechRequest): if __name__ == "__main__": print("Starting server...") - uvicorn.run(app, host="0.0.0.0", port=8000) + uvicorn.run(app, host="0.0.0.0", port=args.port) From ca84aff5d618586578601d68db88514a9617d853 Mon Sep 17 00:00:00 2001 From: root Date: Tue, 20 May 2025 00:52:09 -0700 Subject: [PATCH 44/57] remove cosyvoice lib --- egs/speech_llm/SPEECH2SPEECH/qwen_omni/web_demo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/web_demo.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/web_demo.py index 1ad05b0a6..562079044 100644 --- a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/web_demo.py +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/web_demo.py @@ -10,7 +10,7 @@ import sherpa_onnx import soundfile as sf import torch import whisper -from cosyvoice.cli.cosyvoice import CosyVoice +#from cosyvoice.cli.cosyvoice import CosyVoice from gradio_client import utils as client_utils from model import SPEECH_LLM, EncoderProjector from peft import LoraConfig, get_peft_model From 7aa6c80ddb7a634fc36cbc36173005a24896be12 Mon Sep 17 00:00:00 2001 From: root Date: Wed, 21 May 2025 21:54:59 -0700 Subject: [PATCH 45/57] add multi gpu processing --- egs/speech_llm/SPEECH2SPEECH/prepare.sh | 90 ++++++++++++++----- .../SPEECH2SPEECH/qwen_omni/client.py | 10 ++- .../SPEECH2SPEECH/qwen_omni/train.py | 17 +++- 3 files changed, 93 insertions(+), 24 deletions(-) diff --git a/egs/speech_llm/SPEECH2SPEECH/prepare.sh b/egs/speech_llm/SPEECH2SPEECH/prepare.sh index b86288c5f..98c6ced9b 100644 --- a/egs/speech_llm/SPEECH2SPEECH/prepare.sh +++ b/egs/speech_llm/SPEECH2SPEECH/prepare.sh @@ -239,20 +239,57 @@ fi if [ $stage -le 14 ] && [ $stop_stage -ge 14 ]; then log "stage 14: Client" - datasets=(alpacaeval_full wildvoice mmsu advbench bbh ifeval commoneval openbookqa sd-qa) - datasets=(openbookqa commoneval) - for dataset in ${datasets[@]}; do - # sd-qa should use usa split - if [ $dataset == "sd-qa" ]; then - split_name="usa" - else - split_name="test" - fi - echo $dataset 
$split_name - python3 ./qwen_omni/client.py \ - --subset-name $dataset --split-name $split_name \ - --output-dir result_adapter_librispeech_kl_div_qa_template + exp_dir=./qwen_omni/exp_speech2text_first_libri_continuation_second_ce + # The final assignment of datasets in the original script is used here: + # (alpacaeval_full wildvoice mmsu advbench bbh ifeval commoneval openbookqa sd-qa) + declare -a target_datasets=("alpacaeval_full" "wildvoice" "ifeval" "commoneval" "openbookqa" "sd-qa" "advbench" "bbh" "mmsu") + declare -a target_datasets=("openbookqa" "ifeval" "sd-qa" "commoneval" "alpacaeval_full") + + NUM_CLIENT_JOBS=4 # Number of parallel client jobs + BASE_PORT=8000 # Base port for servers + + log "Starting $NUM_CLIENT_JOBS parallel client jobs to process ${#target_datasets[@]} datasets." + + for job_id in $(seq 0 $(($NUM_CLIENT_JOBS - 1))) + do + ( # Start a subshell for backgrounding this client job's tasks + current_port=$(expr $BASE_PORT + $job_id) + log "Client Job $job_id: Initializing. Will connect to port $current_port." + + processed_count_for_this_job=0 + # Iterate over all datasets using their indices + for i in "${!target_datasets[@]}"; do + # Assign dataset to job_id in a round-robin fashion + if [ $(($i % $NUM_CLIENT_JOBS)) -eq $job_id ]; then + dataset="${target_datasets[$i]}" + + # local split_name # Determine split_name based on dataset + if [ "$dataset" == "sd-qa" ]; then + split_name="usa" + else + split_name="test" + fi + + log "Client Job $job_id (Port $current_port): Processing dataset '$dataset' (split '$split_name')" + python3 ./qwen_omni/client.py \ + --subset-name "$dataset" \ + --split-name "$split_name" \ + --output-dir "$exp_dir/results" \ + --port "$current_port" # Assuming client.py accepts --port + + if [ $? -ne 0 ]; then + log "Client Job $job_id (Port $current_port): ERROR processing dataset '$dataset'." + fi + processed_count_for_this_job=$(($processed_count_for_this_job + 1)) + fi + done + log "Client Job $job_id (Port $current_port): Finished. Processed $processed_count_for_this_job datasets." + ) & # Run this client job's subshell in the background done + + log "All client jobs launched. Waiting for completion..." + wait # Wait for all backgrounded client jobs to complete + log "All client jobs have completed." 
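The stage-14 loop above fans the evaluation subsets out over NUM_CLIENT_JOBS background clients, each talking to its own server on BASE_PORT + job_id, with datasets assigned round-robin by index. The same arithmetic as a small Python sketch:

    datasets = ["alpacaeval_full", "wildvoice", "advbench", "bbh", "mmsu"]
    NUM_CLIENT_JOBS = 4
    BASE_PORT = 8000

    for job_id in range(NUM_CLIENT_JOBS):
        port = BASE_PORT + job_id
        assigned = [d for i, d in enumerate(datasets) if i % NUM_CLIENT_JOBS == job_id]
        print(f"client job {job_id} -> port {port}, datasets: {assigned}")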
fi if [ $stage -le 15 ] && [ $stop_stage -ge 15 ]; then @@ -324,15 +361,26 @@ fi if [ $stage -le 17 ] && [ $stop_stage -ge 17 ]; then + # pip install gradio sherpa-onnx log "stage 17: Server for adapter only speech continuation" - exp_dir=./qwen_omni/exp_speech2text - python3 ./qwen_omni/server.py \ - --speech-encoder-path-or-name models/large-v2.pt \ - --llm-path-or-name models/Qwen2.5-0.5B-Instruct \ - --checkpoint-path $exp_dir/epoch-6/pytorch_model.bin \ - --use-flash-attn True \ - --enable-speech-output False \ - --use-lora False --prompt-template continuation + exp_dir=./qwen_omni/exp_speech2text_first_libri_continuation_second_ce + + N_GPUS=4 # Define the number of GPUs/processes you want to launch + + for id in $(seq 0 $(($N_GPUS - 1))) + do + log "Launching server on GPU $id with port $(expr 8000 + $id)" + CUDA_VISIBLE_DEVICES=$id python3 ./qwen_omni/server.py \ + --speech-encoder-path-or-name models/large-v2.pt \ + --llm-path-or-name models/Qwen2.5-0.5B-Instruct \ + --checkpoint-path $exp_dir/epoch-10/pytorch_model.bin \ + --use-flash-attn True \ + --enable-speech-output False \ + --port $(expr 8000 + $id) \ + --use-lora True & + done + + wait # Wait for all background processes to complete fi if [ $stage -le 18 ] && [ $stop_stage -ge 18 ]; then diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/client.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/client.py index 05c363979..7dc279e48 100644 --- a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/client.py +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/client.py @@ -13,9 +13,15 @@ def get_args(): parser.add_argument( "--server-url", type=str, - default="http://localhost:8000", + default="http://localhost", help="URL of the FastAPI server", ) + parser.add_argument( + "--port", + type=int, + default=8000, + help="Port of the FastAPI server", + ) parser.add_argument( "--dataset-name", type=str, @@ -48,7 +54,7 @@ def main(): args.output_dir, f"{args.subset_name}-{args.split_name}.jsonl", ) - server_decode_url = f"{args.server_url}/decode" + server_decode_url = f"{args.server_url}:{args.port}/decode" print("Loading dataset...") if args.subset_name != "mmsu": diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py index 81aac84e5..d5a2f7cf9 100755 --- a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py @@ -380,6 +380,19 @@ def process_batch_speech_continuation(batch: dict): messages.append(message) return messages +def process_batch_asr(batch: dict): + messages = [] + for i in range(len(batch["supervisions"]["text"])): + transcript = batch["supervisions"]["cut"][i].custom["text"] + message = [ + { + "role": "user", + "content": f"Transcribe the following audio into text:\n\n{DEFAULT_SPEECH_TOKEN}", + }, + {"role": "assistant", "content": transcript}, + ] + messages.append(message) + return messages def process_batch_text_continuation(batch: dict): messages = [] @@ -548,6 +561,8 @@ def compute_loss( messages = process_batch_speech_continuation(batch) if params.loss_type == "kl_div": messages_text = process_batch_text_continuation(batch) + elif params.dataset_format == "asr": + messages = process_batch_asr(batch) else: raise ValueError(f"Unknown dataset format: {params.dataset_format}") @@ -1020,7 +1035,7 @@ def run(rank, world_size, args): elif params.dataset_format == "vocalnet": train_cuts = data_module.train_cuts_en_vocalnet() valid_cuts = data_module.valid_cuts_en_vocalnet() - elif params.dataset_format == "speech_continuation": + elif 
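Across process_batch_speech_continuation, process_batch_asr and the server's --prompt-template options, the task is selected purely by the user-turn prompt wrapped around the speech placeholder. A compact sketch of those templates (DEFAULT_SPEECH_TOKEN stands for the placeholder defined in train.py; its concrete value is set there, the string below is illustrative):

    DEFAULT_SPEECH_TOKEN = "<speech>"   # illustrative placeholder value

    def build_message(task: str, answer: str) -> list:
        prompts = {
            "qa": f"Answer the following question:\n\n{DEFAULT_SPEECH_TOKEN}",
            "continuation": f"Continue the following text using less than 50 words:\n\n{DEFAULT_SPEECH_TOKEN}",
            "asr": f"Transcribe the following audio into text:\n\n{DEFAULT_SPEECH_TOKEN}",
        }
        return [
            {"role": "user", "content": prompts[task]},
            {"role": "assistant", "content": answer},
        ]

    print(build_message("asr", "hello world"))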
params.dataset_format == "speech_continuation" or params.dataset_format == "asr": if params.dataset == "multi_en": train_cuts = data_module.train_cuts_ultravox() elif params.dataset == "librispeech": From 7a12d88d6c35ab9994fbf937ecd19d86b38403b5 Mon Sep 17 00:00:00 2001 From: root Date: Wed, 21 May 2025 22:18:57 -0700 Subject: [PATCH 46/57] update --- egs/speech_llm/SPEECH2SPEECH/prepare.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/egs/speech_llm/SPEECH2SPEECH/prepare.sh b/egs/speech_llm/SPEECH2SPEECH/prepare.sh index 98c6ced9b..74176fdf2 100644 --- a/egs/speech_llm/SPEECH2SPEECH/prepare.sh +++ b/egs/speech_llm/SPEECH2SPEECH/prepare.sh @@ -244,6 +244,7 @@ if [ $stage -le 14 ] && [ $stop_stage -ge 14 ]; then # (alpacaeval_full wildvoice mmsu advbench bbh ifeval commoneval openbookqa sd-qa) declare -a target_datasets=("alpacaeval_full" "wildvoice" "ifeval" "commoneval" "openbookqa" "sd-qa" "advbench" "bbh" "mmsu") declare -a target_datasets=("openbookqa" "ifeval" "sd-qa" "commoneval" "alpacaeval_full") + declare -a target_datasets=("alpacaeval_full" "wildvoice" "advbench" "bbh" "mmsu") NUM_CLIENT_JOBS=4 # Number of parallel client jobs BASE_PORT=8000 # Base port for servers @@ -364,6 +365,7 @@ if [ $stage -le 17 ] && [ $stop_stage -ge 17 ]; then # pip install gradio sherpa-onnx log "stage 17: Server for adapter only speech continuation" exp_dir=./qwen_omni/exp_speech2text_first_libri_continuation_second_ce + # exp_dir=./qwen_omni/exp_speech2text_first_asr_second_ce N_GPUS=4 # Define the number of GPUs/processes you want to launch From 9fff18edecee2f471b281d5b225b70f72ca658db Mon Sep 17 00:00:00 2001 From: root Date: Thu, 22 May 2025 19:14:52 -0700 Subject: [PATCH 47/57] refactor code --- .../SPEECH2SPEECH/debug/data_module.py | 480 --------- egs/speech_llm/SPEECH2SPEECH/debug/model.py | 795 -------------- egs/speech_llm/SPEECH2SPEECH/debug/prepare.sh | 195 ---- egs/speech_llm/SPEECH2SPEECH/debug/train.py | 977 ------------------ egs/speech_llm/SPEECH2SPEECH/prepare.sh | 7 +- .../SPEECH2SPEECH/qwen_omni/data_module.py | 112 +- .../SPEECH2SPEECH/qwen_omni/decode.py | 2 +- .../SPEECH2SPEECH/qwen_omni/requirements.txt | 2 + .../SPEECH2SPEECH/qwen_omni/speech_dataset.py | 175 ---- .../SPEECH2SPEECH/qwen_omni/train.py | 267 +++-- .../SPEECH2SPEECH/qwen_omni/utils.py | 3 +- 11 files changed, 141 insertions(+), 2874 deletions(-) delete mode 100644 egs/speech_llm/SPEECH2SPEECH/debug/data_module.py delete mode 100644 egs/speech_llm/SPEECH2SPEECH/debug/model.py delete mode 100644 egs/speech_llm/SPEECH2SPEECH/debug/prepare.sh delete mode 100755 egs/speech_llm/SPEECH2SPEECH/debug/train.py delete mode 100644 egs/speech_llm/SPEECH2SPEECH/qwen_omni/speech_dataset.py diff --git a/egs/speech_llm/SPEECH2SPEECH/debug/data_module.py b/egs/speech_llm/SPEECH2SPEECH/debug/data_module.py deleted file mode 100644 index 5a7c04b6d..000000000 --- a/egs/speech_llm/SPEECH2SPEECH/debug/data_module.py +++ /dev/null @@ -1,480 +0,0 @@ -# Copyright 2021 Piotr Żelasko -# Copyright 2022 Xiaomi Corporation (Author: Mingshuang Luo) -# -# See ../../../../LICENSE for clarification regarding multiple authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import argparse -import inspect -import logging -from functools import lru_cache -from pathlib import Path -from typing import Any, Dict, Optional - -import torch -from datasets import load_dataset -from lhotse import ( - CutSet, - WhisperFbank, - WhisperFbankConfig, - load_manifest, - load_manifest_lazy, -) -from lhotse.dataset import ( # noqa F401 for PrecomputedFeatures - CutConcatenate, - CutMix, - DynamicBucketingSampler, - PrecomputedFeatures, - SimpleCutSampler, - SpecAugment, -) -from lhotse.dataset.input_strategies import ( # noqa F401 For AudioSamples - AudioSamples, - OnTheFlyFeatures, -) -from lhotse.utils import fix_random_seed -from speech_dataset import K2SpeechRecognitionDataset -from torch.utils.data import DataLoader - -from utils import str2bool - - -class _SeedWorkers: - def __init__(self, seed: int): - self.seed = seed - - def __call__(self, worker_id: int): - fix_random_seed(self.seed + worker_id) - - -class AsrDataModule: - """ - DataModule for k2 ASR experiments. - It assumes there is always one train and valid dataloader, - but there can be multiple test dataloaders (e.g. LibriSpeech test-clean - and test-other). - - It contains all the common data pipeline modules used in ASR - experiments, e.g.: - - dynamic batch size, - - bucketing samplers, - - cut concatenation, - - augmentation, - - on-the-fly feature extraction - - This class should be derived for specific corpora used in ASR tasks. - """ - - def __init__(self, args: argparse.Namespace): - self.args = args - - @classmethod - def add_arguments(cls, parser: argparse.ArgumentParser): - group = parser.add_argument_group( - title="ASR data related options", - description="These options are used for the preparation of " - "PyTorch DataLoaders from Lhotse CutSet's -- they control the " - "effective batch sizes, sampling strategies, applied data " - "augmentations, etc.", - ) - group.add_argument( - "--manifest-dir", - type=Path, - default=Path("data/fbank"), - help="Path to directory with train/valid/test cuts.", - ) - group.add_argument( - "--max-duration", - type=int, - default=300.0, - help="Maximum pooled recordings duration (seconds) in a " - "single batch. You can reduce it if it causes CUDA OOM.", - ) - group.add_argument( - "--bucketing-sampler", - type=str2bool, - default=True, - help="When enabled, the batches will come from buckets of " - "similar duration (saves padding frames).", - ) - group.add_argument( - "--num-buckets", - type=int, - default=30, - help="The number of buckets for the DynamicBucketingSampler" - "(you might want to increase it for larger datasets).", - ) - group.add_argument( - "--on-the-fly-feats", - type=str2bool, - default=False, - help="When enabled, use on-the-fly cut mixing and feature " - "extraction. Will drop existing precomputed feature manifests " - "if available.", - ) - group.add_argument( - "--shuffle", - type=str2bool, - default=True, - help="When enabled (=default), the examples will be " - "shuffled for each epoch.", - ) - group.add_argument( - "--drop-last", - type=str2bool, - default=True, - help="Whether to drop last batch. 
Used by sampler.", - ) - group.add_argument( - "--return-cuts", - type=str2bool, - default=True, - help="When enabled, each batch will have the " - "field: batch['supervisions']['cut'] with the cuts that " - "were used to construct it.", - ) - - group.add_argument( - "--num-workers", - type=int, - default=2, - help="The number of training dataloader workers that " - "collect the batches.", - ) - - group.add_argument( - "--enable-spec-aug", - type=str2bool, - default=True, - help="When enabled, use SpecAugment for training dataset.", - ) - - group.add_argument( - "--spec-aug-time-warp-factor", - type=int, - default=80, - help="Used only when --enable-spec-aug is True. " - "It specifies the factor for time warping in SpecAugment. " - "Larger values mean more warping. " - "A value less than 1 means to disable time warp.", - ) - - group.add_argument( - "--enable-musan", - type=str2bool, - default=True, - help="When enabled, select noise from MUSAN and mix it" - "with training dataset. ", - ) - - group.add_argument( - "--input-strategy", - type=str, - default="PrecomputedFeatures", - help="AudioSamples or PrecomputedFeatures", - ) - - group.add_argument( - "--huggingface-dataset-path-or-name", - type=str, - default="/workspace/Belle_1.4M-SLAM-Omni", - help="The path or name of the Huggingface dataset", - ) - group.add_argument( - "--audio-key", - type=str, - default="question_audio", - help="The key in the Huggingface dataset containing the audio data", - ) - group.add_argument( - "--text-key", - type=str, - default="answer", - help="The key in the Huggingface dataset containing the text data", - ) - group.add_argument( - "--resample-to-16kHz", - type=str2bool, - default=True, - help="Resample audio to 16kHz. Default: False.", - ) - - def train_dataloaders( - self, - cuts_train: CutSet, - sampler_state_dict: Optional[Dict[str, Any]] = None, - ) -> DataLoader: - """ - Args: - cuts_train: - CutSet for training. - sampler_state_dict: - The state dict for the training sampler. - """ - transforms = [] - if self.args.enable_musan: - logging.info("Enable MUSAN") - logging.info("About to get Musan cuts") - cuts_musan = load_manifest(self.args.manifest_dir / "musan_cuts.jsonl.gz") - transforms.append( - CutMix(cuts=cuts_musan, p=0.5, snr=(10, 20), preserve_id=True) - ) - else: - logging.info("Disable MUSAN") - - input_transforms = [] - if self.args.enable_spec_aug: - logging.info("Enable SpecAugment") - logging.info(f"Time warp factor: {self.args.spec_aug_time_warp_factor}") - # Set the value of num_frame_masks according to Lhotse's version. - # In different Lhotse's versions, the default of num_frame_masks is - # different. - num_frame_masks = 10 - num_frame_masks_parameter = inspect.signature( - SpecAugment.__init__ - ).parameters["num_frame_masks"] - if num_frame_masks_parameter.default == 1: - num_frame_masks = 2 - logging.info(f"Num frame mask: {num_frame_masks}") - input_transforms.append( - SpecAugment( - time_warp_factor=self.args.spec_aug_time_warp_factor, - num_frame_masks=num_frame_masks, - features_mask_size=27, - num_feature_masks=2, - frames_mask_size=100, - ) - ) - else: - logging.info("Disable SpecAugment") - - logging.info("About to create train dataset") - train = K2SpeechRecognitionDataset( - input_strategy=eval(self.args.input_strategy)(), - cut_transforms=transforms, - input_transforms=input_transforms, - return_cuts=self.args.return_cuts, - ) - - if self.args.on_the_fly_feats: - # NOTE: the PerturbSpeed transform should be added only if we - # remove it from data prep stage. 
- # Add on-the-fly speed perturbation; since originally it would - # have increased epoch size by 3, we will apply prob 2/3 and use - # 3x more epochs. - # Speed perturbation probably should come first before - # concatenation, but in principle the transforms order doesn't have - # to be strict (e.g. could be randomized) - # transforms = [PerturbSpeed(factors=[0.9, 1.1], p=2/3)] + transforms # noqa - # Drop feats to be on the safe side. - train = K2SpeechRecognitionDataset( - cut_transforms=transforms, - input_strategy=OnTheFlyFeatures( - WhisperFbank(WhisperFbankConfig(num_filters=80, device="cuda")) - ), - input_transforms=input_transforms, - return_cuts=self.args.return_cuts, - ) - - if self.args.bucketing_sampler: - logging.info("Using DynamicBucketingSampler.") - train_sampler = DynamicBucketingSampler( - cuts_train, - max_duration=self.args.max_duration, - shuffle=self.args.shuffle, - num_buckets=self.args.num_buckets, - buffer_size=self.args.num_buckets * 2000, - shuffle_buffer_size=self.args.num_buckets * 5000, - drop_last=self.args.drop_last, - ) - else: - logging.info("Using SimpleCutSampler.") - train_sampler = SimpleCutSampler( - cuts_train, - max_duration=self.args.max_duration, - shuffle=self.args.shuffle, - ) - logging.info("About to create train dataloader") - - if sampler_state_dict is not None: - logging.info("Loading sampler state dict") - train_sampler.load_state_dict(sampler_state_dict) - - # 'seed' is derived from the current random state, which will have - # previously been set in the main process. - seed = torch.randint(0, 100000, ()).item() - worker_init_fn = _SeedWorkers(seed) - - train_dl = DataLoader( - train, - sampler=train_sampler, - batch_size=None, - num_workers=self.args.num_workers, - persistent_workers=True, - pin_memory=True, - worker_init_fn=worker_init_fn, - ) - - return train_dl - - def valid_dataloaders(self, cuts_valid: CutSet) -> DataLoader: - """ - Args: - cuts_valid: - CutSet for validation. 
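        Editor's note, a minimal resume sketch (the checkpoint path is
        hypothetical; the manifest name matches train_cuts below) showing how
        the sampler_state_dict handling in train_dataloaders above lets a run
        continue mid-epoch:

            import torch
            from lhotse import load_manifest_lazy
            from lhotse.dataset import DynamicBucketingSampler

            cuts = load_manifest_lazy("data/fbank/cuts_belle_train.jsonl.gz")
            sampler = DynamicBucketingSampler(
                cuts, max_duration=300, shuffle=True, num_buckets=30, drop_last=True
            )
            sampler.load_state_dict(torch.load("exp/sampler-checkpoint.pt"))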
- """ - logging.info("About to create dev dataset") - - validate = K2SpeechRecognitionDataset( - input_strategy=OnTheFlyFeatures( - WhisperFbank(WhisperFbankConfig(num_filters=80, device="cuda")) - ) - if self.args.on_the_fly_feats - else eval(self.args.input_strategy)(), - return_cuts=self.args.return_cuts, - ) - if self.args.bucketing_sampler: - valid_sampler = DynamicBucketingSampler( - cuts_valid, - max_duration=self.args.max_duration, - shuffle=False, - ) - else: - valid_sampler = SimpleCutSampler( - cuts_valid, - max_duration=self.args.max_duration, - shuffle=False, - ) - logging.info("About to create dev dataloader") - valid_dl = DataLoader( - validate, - sampler=valid_sampler, - batch_size=None, - num_workers=2, - persistent_workers=False, - ) - - return valid_dl - - def test_dataloaders(self, cuts: CutSet) -> DataLoader: - logging.debug("About to create test dataset") - test = K2SpeechRecognitionDataset( - input_strategy=OnTheFlyFeatures( - WhisperFbank(WhisperFbankConfig(num_filters=80, device="cpu")) - ) - if self.args.on_the_fly_feats - else eval(self.args.input_strategy)(), - return_cuts=self.args.return_cuts, - ) - sampler = DynamicBucketingSampler( - cuts, - max_duration=self.args.max_duration, - shuffle=False, - ) - logging.debug("About to create test dataloader") - test_dl = DataLoader( - test, - batch_size=None, - sampler=sampler, - num_workers=self.args.num_workers, - ) - return test_dl - - @lru_cache() - def test_cuts(self) -> CutSet: - logging.info("About to get test cuts") - if self.args.on_the_fly_feats: - pass - else: - return { - "test": load_manifest_lazy( - self.args.manifest_dir / "cuts_belle_test.jsonl.gz" - ) - } - - @lru_cache() - def dev_cuts(self) -> CutSet: - logging.info("About to get test cuts") - if self.args.on_the_fly_feats: - pass - else: - return load_manifest_lazy( - self.args.manifest_dir / "cuts_belle_test.jsonl.gz" - ) - - @lru_cache() - def train_cuts(self) -> CutSet: - logging.info("About to get train cuts") - slam_omni_zh_cuts = load_manifest_lazy( - self.args.manifest_dir / "cuts_belle_train.jsonl.gz" - ) - return slam_omni_zh_cuts - - # @lru_cache() - # def train_cuts_en_vocalnet(self) -> CutSet: - # logging.info("About to get train cuts") - # VoiceAssistant_cuts = load_manifest_lazy( - # self.args.manifest_dir / "cuts_voice_assistant_00001-00049.jsonl.gz" - # ) - # ultrachat_cuts = load_manifest_lazy( - # self.args.manifest_dir / "cuts_ultrachat_train.jsonl.gz" - # ) - # return CutSet.mux( - # VoiceAssistant_cuts, - # ultrachat_cuts, - # weights=[ - # len(VoiceAssistant_cuts), - # len(ultrachat_cuts), - # ], - # ) - - # valid cuts_voice_assistant.00000.jsonl.gz - # @lru_cache() - # def valid_cuts_en_vocalnet(self) -> CutSet: - # logging.info("About to get valid cuts") - # VoiceAssistant_cuts = load_manifest_lazy( - # self.args.manifest_dir / "cuts_voice_assistant.00000.jsonl.gz" - # ) - # return VoiceAssistant_cuts - - # @lru_cache() - # def test_cuts_en_vocalnet(self) -> CutSet: - # logging.info("About to get test cuts") - # VoiceAssistant_cuts = load_manifest_lazy( - # self.args.manifest_dir / "cuts_voice_assistant.00000.jsonl.gz" - # ) - # return VoiceAssistant_cuts - def train_cuts_en_vocalnet(self) -> CutSet: - logging.info("About to get train cuts") - VoiceAssistant_cuts = load_manifest_lazy( - self.args.manifest_dir / "cuts_debug.jsonl.gz" - ) - return VoiceAssistant_cuts - - @lru_cache() - def valid_cuts_en_vocalnet(self) -> CutSet: - logging.info("About to get valid cuts") - VoiceAssistant_cuts = load_manifest_lazy( - 
self.args.manifest_dir / "cuts_debug.jsonl.gz" - ) - return VoiceAssistant_cuts - - @lru_cache() - def test_cuts_en_vocalnet(self) -> CutSet: - logging.info("About to get test cuts") - VoiceAssistant_cuts = load_manifest_lazy( - self.args.manifest_dir / "cuts_debug.jsonl.gz" - ) - return VoiceAssistant_cuts \ No newline at end of file diff --git a/egs/speech_llm/SPEECH2SPEECH/debug/model.py b/egs/speech_llm/SPEECH2SPEECH/debug/model.py deleted file mode 100644 index dfeb94956..000000000 --- a/egs/speech_llm/SPEECH2SPEECH/debug/model.py +++ /dev/null @@ -1,795 +0,0 @@ -from typing import List, Tuple - -import torch -from torch import nn -from torchmetrics.classification import MulticlassAccuracy -from transformers.trainer_pt_utils import LabelSmoother - -IGNORE_TOKEN_ID = LabelSmoother.ignore_index -import logging -from utils import get_rank - -class EncoderProjector(nn.Module): - """ - The encoder projector module. It is used to project the encoder outputs to the same dimension as the language model. - Modified from https://github.com/X-LANCE/SLAM-LLM/blob/main/src/slam_llm/models/projector.py. - Args: - encoder_dim (:obj:`int`): The dimension of the encoder outputs. - llm_dim (:obj:`int`): The dimension of the language model. - downsample_rate (:obj:`int`, `optional`, defaults to 5): The downsample rate to use. - """ - - def __init__(self, encoder_dim, llm_dim, downsample_rate=5): - super().__init__() - self.downsample_rate = downsample_rate - self.linear1 = nn.Linear(encoder_dim * self.downsample_rate, llm_dim) - self.relu = nn.ReLU() - self.linear2 = nn.Linear(llm_dim, llm_dim) - - def forward(self, x): - - batch_size, seq_len, feat_dim = x.size() - num_frames_to_discard = seq_len % self.downsample_rate - if num_frames_to_discard > 0: - x = x[:, :-num_frames_to_discard, :] - seq_len = x.size(1) - - x = x.contiguous() - x = x.view( - batch_size, seq_len // self.downsample_rate, feat_dim * self.downsample_rate - ) - - x = self.linear1(x) - x = self.relu(x) - x = self.linear2(x) - return x - - -class SPEECH_LLM(nn.Module): - """ - The Speech-to-Text model. It consists of an encoder, a language model and an encoder projector. - The encoder is used to extract speech features from the input speech signal. - The encoder projector is used to project the encoder outputs to the same dimension as the language model. - The language model is used to generate the text from the speech features. - Args: - encoder (:obj:`nn.Module`): The encoder module. - llm (:obj:`nn.Module`): The language model module. - encoder_projector (:obj:`nn.Module`): The encoder projector module. 
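    Editor's note, a toy shape check for the EncoderProjector above (the widths
    1280 and 896 are only examples, e.g. a Whisper large encoder feeding a small
    Qwen2.5 model): with downsample_rate=5, 1500 encoder frames are regrouped
    into 300 frames before projection.

        import torch
        proj = EncoderProjector(encoder_dim=1280, llm_dim=896, downsample_rate=5)
        x = torch.randn(2, 1500, 1280)   # (batch, encoder frames, encoder dim)
        print(proj(x).shape)             # torch.Size([2, 300, 896])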
- """ - - def __init__( - self, - encoder: nn.Module, - llm: nn.Module, - encoder_projector: nn.Module, - codec_lm: nn.Module = None, - codec_lm_padding_side: str = "left", - ): - super().__init__() - self.encoder = encoder - self.llm = llm - self.encoder_projector = encoder_projector - self.codec_lm = codec_lm - if self.codec_lm: - self.speech_token_projector = nn.Linear( - self.llm.config.hidden_size + self.llm.config.hidden_size, - self.codec_lm.config.hidden_size, - ) - self.codec_lm_head = nn.Linear( - self.codec_lm.config.hidden_size, self.codec_lm.config.vocab_size - ) - self.speech_token_projector = self.speech_token_projector.to( - dtype=torch.float16 - ) - self.codec_lm_head = self.codec_lm_head.to(dtype=torch.float16) - self.loss_fct = torch.nn.CrossEntropyLoss() - self.codec_lm_padding_side = codec_lm_padding_side - - self.audio_accuracy_metric = MulticlassAccuracy( - self.codec_lm.vocab_size, - top_k=10, - average="micro", - multidim_average="global", - ignore_index=IGNORE_TOKEN_ID, - ) - - def _merge_input_ids_with_speech_features( - self, speech_features, inputs_embeds, input_ids, attention_mask, labels=None - ): - """ - Merge the speech features with the input_ids and attention_mask. This is done by replacing the speech tokens - with the speech features and padding the input_ids to the maximum length of the speech features. - Modified from https://github.com/huggingface/transformers/blob/main/src/transformers/models/llava/modeling_llava.py#L277. - Args: - speech_features (:obj:`torch.Tensor`): The speech features to merge with the input_ids. - inputs_embeds (:obj:`torch.Tensor`): The embeddings of the input_ids. - input_ids (:obj:`torch.Tensor`): The input ids to merge. - attention_mask (:obj:`torch.Tensor`): The attention mask to merge. - labels (:obj:`torch.Tensor`, `optional`): The labels to merge. - Returns: - :obj:`Tuple(torch.Tensor)`: The merged embeddings, attention mask, labels and position ids. - """ - num_speechs, speech_len, embed_dim = speech_features.shape - batch_size, sequence_length = input_ids.shape - left_padding = not torch.sum( - input_ids[:, -1] == torch.tensor(self.llm.config.pad_token_id) - ) - # 1. Create a mask to know where special speech tokens are - special_speech_token_mask = input_ids == self.llm.config.default_speech_token_id - num_special_speech_tokens = torch.sum(special_speech_token_mask, dim=-1) - # Compute the maximum embed dimension - max_embed_dim = ( - num_special_speech_tokens.max() * (speech_len - 1) - ) + sequence_length - batch_indices, non_speech_indices = torch.where( - input_ids != self.llm.config.default_speech_token_id - ) - - # 2. Compute the positions where text should be written - # Calculate new positions for text tokens in merged speech-text sequence. - # `special_speech_token_mask` identifies speech tokens. Each speech token will be replaced by `nb_text_tokens_per_speechs - 1` text tokens. - # `torch.cumsum` computes how each speech token shifts subsequent text token positions. - # - 1 to adjust for zero-based indexing, as `cumsum` inherently increases indices by one. - new_token_positions = ( - torch.cumsum((special_speech_token_mask * (speech_len - 1) + 1), -1) - 1 - ) - nb_speech_pad = max_embed_dim - 1 - new_token_positions[:, -1] - if left_padding: - new_token_positions += nb_speech_pad[:, None] # offset for left padding - text_to_overwrite = new_token_positions[batch_indices, non_speech_indices] - - # 3. 
Create the full embedding, already padded to the maximum position - final_embedding = torch.zeros( - batch_size, - max_embed_dim, - embed_dim, - dtype=inputs_embeds.dtype, - device=inputs_embeds.device, - ) - final_attention_mask = torch.zeros( - batch_size, - max_embed_dim, - dtype=attention_mask.dtype, - device=inputs_embeds.device, - ) - if labels is not None: - final_labels = torch.full( - (batch_size, max_embed_dim), - IGNORE_TOKEN_ID, - dtype=input_ids.dtype, - device=input_ids.device, - ) - # In case the Vision model or the Language model has been offloaded to CPU, we need to manually - # set the corresponding tensors into their correct target device. - target_device = inputs_embeds.device - batch_indices, non_speech_indices, text_to_overwrite = ( - batch_indices.to(target_device), - non_speech_indices.to(target_device), - text_to_overwrite.to(target_device), - ) - attention_mask = attention_mask.to(target_device) - - # 4. Fill the embeddings based on the mask. If we have ["hey" "", "how", "are"] - # we need to index copy on [0, 577, 578, 579] for the text and [1:576] for the speech features - final_embedding[batch_indices, text_to_overwrite] = inputs_embeds[ - batch_indices, non_speech_indices - ] - final_attention_mask[batch_indices, text_to_overwrite] = attention_mask[ - batch_indices, non_speech_indices - ] - if labels is not None: - final_labels[batch_indices, text_to_overwrite] = labels[ - batch_indices, non_speech_indices - ] - - # 5. Fill the embeddings corresponding to the speechs. Anything that is not `text_positions` needs filling (#29835) - speech_to_overwrite = torch.full( - (batch_size, max_embed_dim), - True, - dtype=torch.bool, - device=inputs_embeds.device, - ) - speech_to_overwrite[batch_indices, text_to_overwrite] = False - speech_to_overwrite &= speech_to_overwrite.cumsum(-1) - 1 >= nb_speech_pad[ - :, None - ].to(target_device) - - if speech_to_overwrite.sum() != speech_features.shape[:-1].numel(): - raise ValueError( - f"The input provided to the model are wrong. The number of speech tokens is {torch.sum(special_speech_token_mask)} while" - f" the number of speech given to the model is {num_speechs}. This prevents correct indexing and breaks batch generation." - ) - - final_embedding[speech_to_overwrite] = ( - speech_features.contiguous().reshape(-1, embed_dim).to(target_device) - ) - final_attention_mask |= speech_to_overwrite - position_ids = (final_attention_mask.cumsum(-1) - 1).masked_fill_( - (final_attention_mask == 0), 1 - ) - - # 6. Mask out the embedding at padding positions, as we later use the past_key_value value to determine the non-attended tokens. 
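        # (Editor's sketch with toy values, before step 6 continues below: the
        # position-id rule computed in step 5 is "cumulative sum of the attention
        # mask minus one, with padded slots forced to 1".)
        toy_mask = torch.tensor([[0, 0, 1, 1, 1],
                                 [1, 1, 1, 1, 1]])
        toy_position_ids = (toy_mask.cumsum(-1) - 1).masked_fill_(toy_mask == 0, 1)
        # -> tensor([[1, 1, 0, 1, 2],
        #            [0, 1, 2, 3, 4]])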
- batch_indices, pad_indices = torch.where( - input_ids == self.llm.config.pad_token_id - ) - indices_to_mask = new_token_positions[batch_indices, pad_indices] - - final_embedding[batch_indices, indices_to_mask] = 0 - - if labels is None: - final_labels = None - - return final_embedding, final_attention_mask, final_labels, position_ids - - def forward( - self, - fbank: torch.Tensor = None, - input_ids: torch.LongTensor = None, - attention_mask: torch.Tensor = None, - labels: torch.LongTensor = None, - ): - encoder_outs = self.encoder(fbank) - - speech_features = self.encoder_projector(encoder_outs) - - inputs_embeds = self.llm.get_input_embeddings()(input_ids) - - ( - inputs_embeds, - attention_mask, - labels, - _, - ) = self._merge_input_ids_with_speech_features( - speech_features, inputs_embeds, input_ids, attention_mask, labels - ) - - rank = get_rank() - print(f"Current rank: {rank}, input_ids: {input_ids.shape}, input_ids: {input_ids}") - print(f"Current rank: {rank}, input_embeds: {inputs_embeds.shape}, input_embeds: {inputs_embeds}") - print(f"Current rank: {rank}, attention_mask: {attention_mask.shape}, attention_mask: {attention_mask}") - print(f"Current rank: {rank}, labels: {labels.shape}, labels: {labels}") - model_outputs = self.llm( - inputs_embeds=inputs_embeds, - attention_mask=attention_mask, - labels=labels, - output_hidden_states=True, - ) - print(f"Current rank: {rank}, model_outputs: {model_outputs}") - - with torch.no_grad(): - preds = torch.argmax(model_outputs.logits, -1) - acc = compute_accuracy( - preds.detach()[:, :-1], - labels.detach()[:, 1:], - ignore_label=IGNORE_TOKEN_ID, - ) - return model_outputs.loss, acc - - def forward_with_speech_output( - self, - fbank: torch.Tensor = None, - input_ids: torch.LongTensor = None, - attention_mask: torch.Tensor = None, - labels: torch.LongTensor = None, - speech_codec_ids: torch.LongTensor = None, - ): - encoder_outs = self.encoder(fbank) - - speech_features = self.encoder_projector(encoder_outs) - - inputs_embeds = self.llm.get_input_embeddings()(input_ids) - - ( - inputs_embeds, - attention_mask, - labels, - _, - ) = self._merge_input_ids_with_speech_features( - speech_features, inputs_embeds, input_ids, attention_mask, labels - ) - input_seq_len = attention_mask.sum(dim=1) # shape, B - ( - text_label_start_index_list, - text_input_start_index_list, - input_question_len_list, - ) = ([], [], []) - for i in range(labels.shape[0]): - input_embeds_valid_index = torch.where(attention_mask[i] != 0)[0] - input_embeds_start_index = input_embeds_valid_index[0] - text_labels_valid_index = torch.where(labels[i] != IGNORE_TOKEN_ID)[0] - text_labels_start_index = text_labels_valid_index[0] - - assert ( - input_seq_len[i] - == input_embeds_valid_index[-1] - input_embeds_start_index + 1 - ), f"input_seq_len: {input_seq_len[i]}, input_embeds_valid_index: {input_embeds_valid_index}, input_embeds_start_index: {input_embeds_start_index}" - assert ( - input_embeds_valid_index[-1] == text_labels_valid_index[-1] - ), f"input_embeds_valid_index: {input_embeds_valid_index}, text_labels_valid_index: {text_labels_valid_index}" - input_question_len = text_labels_start_index - input_embeds_start_index - assert ( - input_question_len - + text_labels_valid_index[-1] - - text_labels_start_index - + 1 - == input_seq_len[i] - ) - text_label_start_index_list.append(text_labels_start_index) - text_input_start_index_list.append(input_embeds_start_index) - input_question_len_list.append(input_question_len) - - rank = get_rank() - print(f"Current rank: 
{rank}, input_ids: {input_ids.shape}, input_ids: {input_ids}") - print(f"Current rank: {rank}, input_embeds: {inputs_embeds.shape}, input_embeds: {inputs_embeds}") - print(f"Current rank: {rank}, attention_mask: {attention_mask.shape}, attention_mask: {attention_mask}") - print(f"Current rank: {rank}, labels: {labels.shape}, labels: {labels}") - model_outputs = self.llm( - inputs_embeds=inputs_embeds, - attention_mask=attention_mask, - labels=labels, - output_hidden_states=True, - ) - print(f"Current rank: {rank}, model_outputs: {model_outputs}") - text_loss = model_outputs.loss - delay_step = 1 - # prepare codec lm inputs - audio_codes_lens = [ - len(x) + input_question_len_list[i] + delay_step + 1 - for i, x in enumerate(speech_codec_ids) - ] - max_len_speech_codec = max(audio_codes_lens) - - if self.codec_lm_padding_side == "right": - audio_codes = [ - [self.codec_lm.config.mask_token_id] - * (input_question_len_list[i] + delay_step) - + [self.codec_lm.config.bos_token_id] - + x - + [self.codec_lm.config.pad_token_id] - * (max_len_speech_codec - audio_codes_lens[i]) - for i, x in enumerate(speech_codec_ids) - ] - audio_labels = [ - [self.codec_lm.config.pad_token_id] - * (input_question_len_list[i] + delay_step) - + x - + [self.codec_lm.config.eos_token_id] - + [self.codec_lm.config.pad_token_id] - * (max_len_speech_codec - audio_codes_lens[i]) - for i, x in enumerate(speech_codec_ids) - ] - elif self.codec_lm_padding_side == "left": - audio_codes = [ - [self.codec_lm.config.pad_token_id] - * (max_len_speech_codec - audio_codes_lens[i]) - + [self.codec_lm.config.mask_token_id] - * (input_question_len_list[i] + delay_step) - + [self.codec_lm.config.bos_token_id] - + x - for i, x in enumerate(speech_codec_ids) - ] - audio_labels = [ - [self.codec_lm.config.pad_token_id] - * (max_len_speech_codec - audio_codes_lens[i]) - + [self.codec_lm.config.pad_token_id] - * (input_question_len_list[i] + delay_step) - + x - + [self.codec_lm.config.eos_token_id] - for i, x in enumerate(speech_codec_ids) - ] - audio_codes = torch.tensor( - audio_codes, dtype=torch.int64, device=input_ids.device - ) - audio_labels = torch.tensor( - audio_labels, dtype=torch.int64, device=input_ids.device - ) - - audio_attention_mask = audio_codes.ne(self.codec_lm.config.pad_token_id) - audio_embeddings = self.codec_lm.get_input_embeddings()(audio_codes) - - text_last_hidden_lists, text_embeds_list, text_input_embeds_list = [], [], [] - for i in range(len(text_label_start_index_list)): - text_last_hidden = model_outputs.hidden_states[-1][ - i, - text_input_start_index_list[i] : text_input_start_index_list[i] - + input_seq_len[i] - - 1, - ] - print(233336666666, text_last_hidden, text_last_hidden.shape) - text_last_hidden_lists.append(text_last_hidden) - text_embed = inputs_embeds[ - i, - text_input_start_index_list[i] - + 1 : text_input_start_index_list[i] - + input_seq_len[i], - ] # exclude bos - text_embeds_list.append(text_embed) - - text_input_embeds = torch.cat( - [ - text_last_hidden, - text_embed, - ], - dim=-1, - ) # shape, T, D1 + D2 - text_input_embeds = self.speech_token_projector( - text_input_embeds - ) # shape, T, D_codec - text_input_embeds_list.append(text_input_embeds) - - for i in range(audio_embeddings.shape[0]): - text_input_embeds = text_input_embeds_list[i] - if self.codec_lm_padding_side == "right": - audio_embeddings[i, : text_input_embeds.shape[0]] += text_input_embeds - elif self.codec_lm_padding_side == "left": - start_idx = torch.where( - audio_codes[i] == self.codec_lm.config.mask_token_id - 
)[0][0] - start_idx_re_compute = torch.where(audio_attention_mask[i] != 0)[0][0] - assert ( - start_idx == start_idx_re_compute - ), f"start_idx: {start_idx}, start_idx_re_compute: {start_idx_re_compute}" - if text_input_embeds.shape[0] > audio_embeddings.shape[1] - start_idx: - text_input_embeds = text_input_embeds[ - : audio_embeddings.shape[1] - start_idx - ] - logging.warning( - f"Truncate text_input_embeds: {text_input_embeds.shape} to {audio_embeddings.shape[1] - start_idx}" - ) - audio_embeddings[ - i, start_idx : start_idx + text_input_embeds.shape[0] - ] += text_input_embeds - - speech_outputs = self.codec_lm( - attention_mask=audio_attention_mask, - inputs_embeds=audio_embeddings, - return_dict=True, - output_hidden_states=True, - ) - last_hidden_state = speech_outputs.hidden_states[-1].clone() - - audio_logits = self.codec_lm_head(last_hidden_state) # shape, B, T, vocab_size - audio_logits = audio_logits.contiguous().view( - -1, self.codec_lm.config.vocab_size - ) - audio_labels = audio_labels.contiguous().view(-1) - audio_labels = audio_labels.masked_fill( - audio_labels == self.codec_lm.config.pad_token_id, IGNORE_TOKEN_ID - ) - codec_loss = self.loss_fct(audio_logits, audio_labels) - audio_preds = torch.argmax(audio_logits, -1) - - with torch.no_grad(): - preds = torch.argmax(model_outputs.logits, -1) - print(23333444444, preds) - print(233335555555, labels) - acc = compute_accuracy( - preds.detach()[:, :-1], - labels.detach()[:, 1:], - ignore_label=IGNORE_TOKEN_ID, - ) - audio_acc = compute_accuracy( - audio_preds.detach(), - audio_labels.detach(), - ignore_label=IGNORE_TOKEN_ID, - ) - audio_topk_acc = self.audio_accuracy_metric( - audio_logits.detach(), audio_labels.detach() - ).item() - - return text_loss, acc, codec_loss, audio_acc, audio_topk_acc - - def decode( - self, - fbank: torch.Tensor = None, - input_ids: torch.LongTensor = None, - attention_mask: torch.Tensor = None, - **kwargs, - ): - - encoder_outs = self.encoder(fbank) - speech_features = self.encoder_projector(encoder_outs) - speech_features = speech_features.to(torch.float16) - inputs_embeds = self.llm.get_input_embeddings()(input_ids) - ( - inputs_embeds, - attention_mask, - _, - _, - ) = self._merge_input_ids_with_speech_features( - speech_features, inputs_embeds, input_ids, attention_mask - ) - generated_ids = self.llm.generate( - inputs_embeds=inputs_embeds, - attention_mask=attention_mask, - max_new_tokens=kwargs.get("max_new_tokens", 1024), - num_beams=kwargs.get("num_beams", 1), - do_sample=kwargs.get("do_sample", True), - min_length=kwargs.get("min_length", 1), - top_p=kwargs.get("top_p", 0.5), - top_k=kwargs.get("top_k", 20), - repetition_penalty=kwargs.get("repetition_penalty", 1.1), - temperature=kwargs.get("temperature", 0.7), - bos_token_id=self.llm.config.bos_token_id, - eos_token_id=self.llm.config.eos_token_id, - pad_token_id=self.llm.config.pad_token_id, - ) - - return generated_ids - - def decode_with_speech_output( - self, - fbank: torch.Tensor = None, - input_ids: torch.LongTensor = None, # Prompt input_ids - attention_mask: torch.Tensor = None, # Prompt attention_mask - max_text_new_tokens: int = 1024, - max_speech_new_tokens: int = 2048, # Max length for speech tokens - llm_kwargs: dict = None, # Kwargs for text LLM generate - codec_lm_kwargs: dict = None, # Kwargs for codec LM (e.g., temperature for sampling) - NOT IMPLEMENTED YET - ) -> Tuple[torch.LongTensor, List[List[int]]]: - """ - Generates text and corresponding speech tokens using the revised logic. 
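        Editor's aside, a tiny sketch (token ids made up) of the codec-label
        handling used in forward_with_speech_output above: padded codec
        positions are remapped to IGNORE_TOKEN_ID (-100) so CrossEntropyLoss
        skips them.

            import torch
            pad_id = 4099                     # e.g. codec_vocab_size - 1
            labels = torch.tensor([11, 7, pad_id, pad_id])
            labels = labels.masked_fill(labels == pad_id, IGNORE_TOKEN_ID)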
- - Args: - fbank: Input audio features. - input_ids: Input token IDs for the text prompt. - attention_mask: Attention mask for the text prompt. - max_text_new_tokens: Max new tokens for text generation. - max_speech_new_tokens: Max new tokens for speech generation. - llm_kwargs: Additional arguments for self.llm.generate. - codec_lm_kwargs: Additional arguments for self.codec_lm.generate. - - Returns: - Tuple[torch.LongTensor, List[List[int]]]: - - generated_text_ids: Tensor of generated text token IDs (including prompt). - - generated_speech_tokens: List of lists, where each inner list contains - the generated speech codec tokens for a batch item. - """ - assert fbank.shape[0] == 1, "Batch size must be 1 for speech generation." - if ( - not self.codec_lm - or not self.speech_token_projector - or not self.codec_lm_head - ): - raise ValueError( - "codec_lm and associated layers must be initialized to generate speech output." - ) - - device = next(self.parameters()).device # Use model's device - batch_size = fbank.shape[0] - - # --- 1. Prepare Prompt Embeddings --- - encoder_outs = self.encoder(fbank) - speech_features = self.encoder_projector(encoder_outs) - speech_features = speech_features.to(self.llm.dtype) # Ensure matching dtype - - prompt_embeds = self.llm.get_input_embeddings()(input_ids) - - # Merge speech features with prompt embeddings - ( - merged_prompt_inputs_embeds, - merged_prompt_attention_mask, - _, - _, - ) = self._merge_input_ids_with_speech_features( - speech_features, prompt_embeds, input_ids, attention_mask - ) - - # --- 2. Generate Text using LLM --- - # Use merged embeds/mask as input to generate - # Ensure kwargs passed are suitable for llm.generate - # Note: Using default generation params from `decode` if not provided in kwargs - final_llm_kwargs = { - "bos_token_id": self.llm.config.bos_token_id, - "eos_token_id": self.llm.config.eos_token_id, - "pad_token_id": self.llm.config.pad_token_id, - "num_beams": 1, - "do_sample": True, # Typically false for S2ST/S2TT tasks unless exploration needed - "top_p": 0.5, - "top_k": 20, - "repetition_penalty": 1.1, - "temperature": 0.7, - **(llm_kwargs or {}), # User-provided kwargs override defaults - } - - text_outputs = self.llm.generate( - inputs_embeds=merged_prompt_inputs_embeds, - attention_mask=merged_prompt_attention_mask, - max_new_tokens=max_text_new_tokens, - return_dict_in_generate=True, - output_hidden_states=True, - **final_llm_kwargs, - ) - delay_step = 1 - generated_text_ids = text_outputs.sequences # [B, S_full] - eos_token_id = self.llm.config.eos_token_id - eos_token_embedding = self.llm.get_input_embeddings()( - torch.tensor([[eos_token_id]], device=device) - ) - assert ( - generated_text_ids[0, -1] == eos_token_id - ), f"Last token is not EOS: {generated_text_ids[0, -1]} != {eos_token_id}" - thinker_token_embeds_org = [ - token_hidden_states[0].to(self.llm.device) - for token_hidden_states in text_outputs.hidden_states - ] - - first_thinker_token_embed = torch.cat( - [ - thinker_token_embeds_org[0][:, 1:], - thinker_token_embeds_org[1], - ], - dim=1, - ) - - thinker_token_embeds = ( - [first_thinker_token_embed] - + thinker_token_embeds_org[2:] - + [eos_token_embedding] - ) - thinker_hidden_states = [ - token_hidden_states[-1].to(self.llm.device) - for token_hidden_states in text_outputs.hidden_states - ] - - thinker_reply_part = [ - torch.cat( - [ - thinker_hidden_state, - thinker_token_embed, - ], - dim=-1, - ) - for thinker_hidden_state, thinker_token_embed in zip( - thinker_hidden_states[1:], 
thinker_token_embeds[1:] - ) - ] - thinker_reply_part = torch.cat(thinker_reply_part, dim=1) - # thinker_prompt_part = thinker_hidden_states[0] + thinker_token_embeds[0] - thinker_prompt_part = torch.cat( - [ - thinker_hidden_states[0], - thinker_token_embeds[0], - ], - dim=-1, - ) - - thinker_prompt_part = self.speech_token_projector(thinker_prompt_part) - thinker_reply_part = self.speech_token_projector(thinker_reply_part) - - thinker_prompt_part_seq_len = thinker_prompt_part.shape[1] - talker_input_ids = torch.full( - (batch_size, thinker_prompt_part_seq_len + delay_step + 1), - self.codec_lm.config.mask_token_id, - dtype=torch.long, - device=self.llm.device, - ) - talker_input_ids[:, -1] = self.codec_lm.config.bos_token_id - talker_inputs_embeds = self.codec_lm.get_input_embeddings()(talker_input_ids) - thinker_input_embeds = torch.cat( - [ - thinker_prompt_part, - thinker_reply_part[:, : delay_step + 1, :], - ], - dim=1, - ) - talker_inputs_embeds += thinker_input_embeds - thinker_reply_part = thinker_reply_part[:, delay_step + 1 :, :] - - past_key_values = None - - generated_speech_tokens_list = [] - next_token_ids = None - - for t in range(max_speech_new_tokens): - if t > 0: - talker_inputs_embeds = self.codec_lm.get_input_embeddings()( - next_token_ids - ) - if thinker_reply_part.shape[1] > 0: - talker_inputs_embeds += thinker_reply_part[:, :1, :] - thinker_reply_part = thinker_reply_part[:, 1:, :] - - codec_outputs = self.codec_lm( - inputs_embeds=talker_inputs_embeds, - past_key_values=past_key_values, - use_cache=True, - return_dict=True, - output_hidden_states=True, - ) - last_token_hidden_state = codec_outputs.hidden_states[-1][:, -1, :] - next_token_logits = self.codec_lm_head(last_token_hidden_state) - - next_token_ids = topk_sampling( - next_token_logits, - ) - if next_token_ids[0, 0] == self.codec_lm.config.eos_token_id: - break - - past_key_values = codec_outputs.past_key_values # Update KV cache - generated_speech_tokens_list.append( - next_token_ids.squeeze(1).cpu().tolist()[0] - ) - - return generated_text_ids, generated_speech_tokens_list - - -def compute_accuracy(pad_outputs, pad_targets, ignore_label): - """Calculate accuracy. - Copied from https://github.com/X-LANCE/SLAM-LLM/blob/main/src/slam_llm/utils/metric.py - Args: - pad_outputs (LongTensor): Prediction tensors (B, Lmax). - pad_targets (LongTensor): Target label tensors (B, Lmax). - ignore_label (int): Ignore label id. - - Returns: - float: Accuracy value (0.0 - 1.0). 
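    Example (editor's toy values; positions labelled with the ignore id are
    excluded from both numerator and denominator):

        >>> preds = torch.tensor([[1, 2, 3, 4]])
        >>> targets = torch.tensor([[1, 2, 9, -100]])
        >>> compute_accuracy(preds, targets, ignore_label=-100)
        tensor(0.6667)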
- - """ - mask = pad_targets != ignore_label - numerator = torch.sum( - pad_outputs.masked_select(mask) == pad_targets.masked_select(mask) - ) - denominator = torch.sum(mask) - return numerator.float() / denominator.float() - - -def topk_sampling( - logits, - top_k=50, - top_p=0.95, - temperature=0.8, -): - if temperature != 1.0: - logits = logits / temperature - # Top-p/top-k filtering - logits_filtered = top_k_top_p_filtering( - logits.clone(), top_k=top_k, top_p=top_p, min_tokens_to_keep=2 - ) - # Sample - probs = torch.nn.functional.softmax(logits_filtered, dim=-1) - tokens = torch.multinomial(probs, num_samples=1) - - return tokens - - -# https://github.com/microsoft/unilm/blob/master/xtune/src/transformers/modeling_utils.py -def top_k_top_p_filtering( - logits, top_k=20, top_p=0.5, filter_value=-float("Inf"), min_tokens_to_keep=1 -): - """Filter a distribution of logits using top-k and/or nucleus (top-p) filtering - Args: - logits: logits distribution shape (batch size, vocabulary size) - if top_k > 0: keep only top k tokens with highest probability (top-k filtering). - if top_p < 1.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering). - Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751) - Make sure we keep at least min_tokens_to_keep per batch example in the output - From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317 - """ - if top_k > 0: - top_k = min(max(top_k, min_tokens_to_keep), logits.size(-1)) # Safety check - # Remove all tokens with a probability less than the last token of the top-k - indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None] - logits[indices_to_remove] = filter_value - - if top_p < 1.0: - sorted_logits, sorted_indices = torch.sort(logits, descending=True) - cumulative_probs = torch.cumsum( - torch.nn.functional.softmax(sorted_logits, dim=-1), dim=-1 - ) - - # Remove tokens with cumulative probability above the threshold (token with 0 are kept) - sorted_indices_to_remove = cumulative_probs > top_p - if min_tokens_to_keep > 1: - # Keep at least min_tokens_to_keep (set to min_tokens_to_keep-1 because we add the first one below) - sorted_indices_to_remove[..., :min_tokens_to_keep] = 0 - # Shift the indices to the right to keep also the first token above the threshold - sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone() - sorted_indices_to_remove[..., 0] = 0 - - # scatter sorted tensors to original indexing - indices_to_remove = sorted_indices_to_remove.scatter( - 1, sorted_indices, sorted_indices_to_remove - ) - logits[indices_to_remove] = filter_value - return logits diff --git a/egs/speech_llm/SPEECH2SPEECH/debug/prepare.sh b/egs/speech_llm/SPEECH2SPEECH/debug/prepare.sh deleted file mode 100644 index aa3d34e9d..000000000 --- a/egs/speech_llm/SPEECH2SPEECH/debug/prepare.sh +++ /dev/null @@ -1,195 +0,0 @@ -#!/usr/bin/env bash - -# fix segmentation fault reported in https://github.com/k2-fsa/icefall/issues/674 -export PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python - -export PYTHONPATH=$PYTHONPATH:/workspace/icefall - -set -eou pipefail - -stage=$1 -stop_stage=$2 -# All files generated by this script are saved in "data". -# You can safely remove "data" and rerun this script to regenerate it. 
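# (Editor's note, a Python sketch for the top_k_top_p_filtering helper in the
#  deleted debug/model.py above; toy logits, where top_k=2 keeps only the two
#  largest values and everything else becomes -inf:
#      logits = torch.tensor([[2.0, 1.0, 0.5, -1.0]])
#      top_k_top_p_filtering(logits.clone(), top_k=2, top_p=1.0)
#      -> tensor([[2., 1., -inf, -inf]])
#  so the softmax + multinomial draw in topk_sampling can only pick the
#  surviving tokens.)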
-mkdir -p data - -log() { - # This function is from espnet - local fname=${BASH_SOURCE[1]##*/} - echo -e "$(date '+%Y-%m-%d %H:%M:%S') (${fname}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*" -} - - -if [ $stage -le 0 ] && [ $stop_stage -ge 0 ]; then - log "stage 0: Clone CosyVoice repo and install requirements inside the container" - # docker: ghcr.io/swivid/f5-tts:main - pip install k2==1.24.4.dev20241030+cuda12.4.torch2.4.0 -f https://k2-fsa.github.io/k2/cuda.html - git clone --recursive https://github.com/FunAudioLLM/CosyVoice.git /workspace/CosyVoice - cd /workspace/CosyVoice - # If you failed to clone submodule due to network failures, please run following command until success - git submodule update --init --recursive - pip install -r qwen_omni/requirements.txt - pip install -r qwen_omni/requirements-cosyvoice.txt - - # For Chinese only dataset, you can use the following command to download the Chinese fine-tuned whisper model. - huggingface-cli download --local-dir models/whisper yuekai/icefall_asr_multi-hans-zh_whisper - # Cosyvoice pretrained model for speech token2wav module - huggingface-cli download --local-dir models/CosyVoice-300M-SFT FunAudioLLM/CosyVoice-300M-SFT - # Qwen Pretrained model - huggingface-cli download --local-dir models/Qwen2.5-0.5B-Instruct Qwen/Qwen2.5-0.5B-Instruct - # Qwen-Omni like speech2speech model trained on worstchan/Belle_1.4M-SLAM-Omni - huggingface-cli download --local-dir models/qwen-omni-like-speech2speech-belle-1.4M yuekai/qwen-omni-like-speech2speech-belle-1.4M - - # For Gradio demo, we follow https://arxiv.org/abs/2412.15649 to use ASR model to decode the history speech as context. - pip install sherpa-onnx - model_path=local/sherpa-onnx-paraformer-zh-2023-09-14 - if [ ! -d $model_path ]; then - wget -nc https://github.com/k2-fsa/sherpa-onnx/releases/download/asr-models/sherpa-onnx-paraformer-zh-2023-09-14.tar.bz2 - tar xvf sherpa-onnx-paraformer-zh-2023-09-14.tar.bz2 -C local - fi -fi -export PYTHONPATH=$PYTHONPATH:/workspace/CosyVoice - -if [ $stage -le 1 ] && [ $stop_stage -ge 1 ]; then - log "stage 1: Compute fbank feature from huggingface" - python3 local/compute_whisper_fbank.py \ - --num-mel-bins 80 --whisper-fbank True --resample-to-16kHz True --speed-perturb False \ - --out-dir data/fbank_test \ - --huggingface-dataset-path-or-name /workspace/Belle_1.4M-SLAM-Omni \ - --audio-key question_audio --text-key answer \ - --prefix belle -fi - -if [ $stage -le 2 ] && [ $stop_stage -ge 2 ]; then - log "Stage 2: Combine features" - manifest_dir=data/fbank - if [ ! 
-f $manifest_dir/cuts_belle_00001-01600.jsonl.gz ]; then - mv $manifest_dir/cuts_belle.00000.jsonl.gz ./ - # exclude cust_belle_00000.jsonl.gz for valid and test set - pieces=$(find $manifest_dir -name "cuts_belle.*.jsonl.gz" | sort) - echo $pieces | wc - lhotse combine $pieces data/fbank/cuts_belle_00001-01600.jsonl.gz - mv ./cuts_belle.00000.jsonl.gz $manifest_dir # put it back - cd $manifest_dir && ln -s cuts_belle_00001-01600.jsonl.gz cuts_belle_train.jsonl.gz - ln -s cuts_belle.00000.jsonl.gz cuts_belle_test.jsonl.gz && cd - - fi -fi - -ngpu=8 -exp_dir=./qwen_omni/exp_speech2speech -if [ $stage -le 3 ] && [ $stop_stage -ge 3 ]; then - log "stage 3: Training Speech2Speech Model" - torchrun --nproc_per_node $ngpu ./qwen_omni/train.py \ - --max-duration 50 \ - --enable-musan False \ - --exp-dir $exp_dir \ - --speech-encoder-path-or-name models/whisper/v1.1/whisper-large-v2-multi-hans-zh-epoch-3-avg-10.pt \ - --llm-path-or-name Qwen/Qwen2.5-0.5B-Instruct \ - --manifest-dir data/fbank \ - --deepspeed \ - --deepspeed_config ./qwen_omni/ds_config_zero1.json \ - --use-flash-attn True \ - --use-lora True --unfreeze-llm True --unfreeze-speech-projector True --enable-speech-output True -fi - -if [ $stage -le 4 ] && [ $stop_stage -ge 4 ]; then - log "stage 4: Decoding, only support batch_size=1 for now." - cd $exp_dir && ln -s ../../models/qwen-omni-like-speech2speech-belle-1.4M/pytorch_model.bin epoch-999.pt && cd - - python3 ./qwen_omni/decode.py \ - --max-duration 1 \ - --exp-dir $exp_dir \ - --speech-encoder-path-or-name models/whisper/v1.1/whisper-large-v2-multi-hans-zh-epoch-3-avg-10.pt \ - --llm-path-or-name models/Qwen2.5-0.5B-Instruct \ - --epoch 999 --avg 1 \ - --manifest-dir data/fbank \ - --use-flash-attn True \ - --method e2e-epoch10_speech2speech \ - --enable-speech-output True \ - --token2wav-path models/CosyVoice-300M-SFT \ - --use-lora True -fi - -if [ $stage -le 5 ] && [ $stop_stage -ge 5 ]; then - log "stage 5: Gradio Demo" - python3 ./qwen_omni/web_demo.py \ - --speech-encoder-path-or-name models/whisper/v1.1/whisper-large-v2-multi-hans-zh-epoch-3-avg-10.pt \ - --llm-path-or-name models/Qwen2.5-0.5B-Instruct \ - --checkpoint-path $exp_dir/epoch-999.pt \ - --use-flash-attn True \ - --enable-speech-output True \ - --asr-model-dir local/sherpa-onnx-paraformer-zh-2023-09-14 \ - --use-lora True --token2wav-path /workspace/CosyVoice-300M-SFT --share -fi - -if [ $stage -le 6 ] && [ $stop_stage -ge 6 ]; then - log "stage 1: Compute fbank feature from huggingface" - # CUDA_VISIBLE_DEVICES=0 python3 local/compute_whisper_fbank.py \ - # --num-mel-bins 80 --whisper-fbank True --resample-to-16kHz True --speed-perturb False \ - # --out-dir data/fbank_voice_assistant \ - # --huggingface-dataset-path-or-name worstchan/VoiceAssistant-400K-SLAM-Omni \ - # --audio-key question_audio --text-key answer \ - # --prefix voice_assistant - CUDA_VISIBLE_DEVICES=0 python3 local/compute_whisper_fbank.py \ - --num-mel-bins 80 --whisper-fbank True --resample-to-16kHz True --speed-perturb False \ - --out-dir data/fbank_voice_assistant_cosy2 \ - --json-file-path /workspace/slam/VoiceAssistant-430K-vocalnet/VoiceAssistant-430K.json \ - --prefix voice_assistant -fi - -if [ $stage -le 7 ] && [ $stop_stage -ge 7 ]; then - log "stage 7: Compute fbank feature from huggingface" - # CUDA_VISIBLE_DEVICES=1 python3 local/compute_whisper_fbank.py \ - # --num-mel-bins 80 --whisper-fbank True --resample-to-16kHz True --speed-perturb False \ - # --out-dir data/fbank_ultrachat \ - # --huggingface-dataset-path-or-name 
worstchan/UltraChat-300K-SLAM-Omni \ - # --audio-key question_audio --text-key answer \ - # --prefix ultrachat - CUDA_VISIBLE_DEVICES=1 python3 local/compute_whisper_fbank.py \ - --num-mel-bins 80 --whisper-fbank True --resample-to-16kHz True --speed-perturb False \ - --out-dir data/fbank_ultrachat_cosy2 \ - --json-file-path /workspace/slam/UltraChat-vocalnet/UltraChat.json \ - --prefix ultrachat -fi - -if [ $stage -le 8 ] && [ $stop_stage -ge 8 ]; then - log "stage 8: Compute fbank feature from huggingface" - - CUDA_VISIBLE_DEVICES=1 python3 local/compute_whisper_fbank.py \ - --num-mel-bins 80 --whisper-fbank True --resample-to-16kHz True --speed-perturb False \ - --out-dir data/fbank_gigaspeech \ - --huggingface-dataset-path-or-name speechcolab/gigaspeech \ - --subset test --split test \ - --audio-key audio --text-key text \ - --prefix gigaspeech -fi - -if [ $stage -le 9 ] && [ $stop_stage -ge 9 ]; then - log "stage 9: Compute fbank feature from huggingface" - CUDA_VISIBLE_DEVICES=0 python3 local/compute_whisper_fbank.py \ - --num-mel-bins 80 --whisper-fbank True --resample-to-16kHz True --speed-perturb True \ - --out-dir data/fbank_gigaspeech \ - --huggingface-dataset-path-or-name speechcolab/gigaspeech \ - --subset xl --split train \ - --audio-key audio --text-key text \ - --prefix gigaspeech -fi - - -ngpu=2 -exp_dir=./qwen_omni/exp_speech2speech_en -if [ $stage -le 10 ] && [ $stop_stage -ge 10 ]; then - log "stage 10: Training Speech2Speech Model" - torchrun --nproc_per_node $ngpu ./qwen_omni/train.py \ - --max-duration 1 \ - --enable-musan False \ - --exp-dir $exp_dir \ - --speech-encoder-path-or-name models/large-v2.pt \ - --llm-path-or-name Qwen/Qwen2.5-0.5B-Instruct \ - --dataset-format vocalnet \ - --manifest-dir data/fbank \ - --deepspeed \ - --deepspeed_config ./qwen_omni/ds_config_zero1.json \ - --use-flash-attn False --bucketing-sampler False \ - --use-lora False --unfreeze-llm False --unfreeze-speech-projector True --enable-speech-output False - # --use-lora True --unfreeze-llm True --unfreeze-speech-projector True --enable-speech-output True -fi diff --git a/egs/speech_llm/SPEECH2SPEECH/debug/train.py b/egs/speech_llm/SPEECH2SPEECH/debug/train.py deleted file mode 100755 index 3327ee1f1..000000000 --- a/egs/speech_llm/SPEECH2SPEECH/debug/train.py +++ /dev/null @@ -1,977 +0,0 @@ -#!/usr/bin/env python3 -# Copyright 2023 Xiaomi Corp. (authors: Xiaoyu Yang) -# 2024 Yuekai Zhang -# -# See ../../../../LICENSE for clarification regarding multiple authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Usage: -# For Chinese dataset, you can use the following command to download the Chinese fine-tuned whisper model. 
-huggingface-cli download --local-dir models/whisper yuekai/icefall_asr_multi-hans-zh_whisper -# Qwen Pretrained model -huggingface-cli download --local-dir models/Qwen2.5-0.5B-Instruct Qwen/Qwen2.5-0.5B-Instruct - -torchrun --nproc_per_node $ngpu ./qwen_omni/train.py \ - --max-duration 50 \ - --enable-musan False \ - --exp-dir $exp_dir \ - --speech-encoder-path-or-name models/whisper/v1.1/whisper-large-v2-multi-hans-zh-epoch-3-avg-10.pt \ - --llm-path-or-name Qwen/Qwen2.5-0.5B-Instruct \ - --manifest-dir data/fbank \ - --deepspeed \ - --deepspeed_config ./qwen_omni/ds_config_zero1.json \ - --use-flash-attn True \ - --use-lora True --unfreeze-llm True --unfreeze-speech-projector True --enable-speech-output True -""" - -import argparse -import copy -import logging -import os -import random -import warnings -from pathlib import Path -from shutil import copyfile -from typing import Any, Dict, List, Optional, Tuple, Union - -import deepspeed -import torch -import torch.multiprocessing as mp -import torch.nn as nn -import transformers -import whisper -from data_module import AsrDataModule -from deepspeed.utils.zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict -from label_smoothing import LabelSmoothingLoss -from lhotse import CutSet, load_manifest -from lhotse.cut import Cut -from lhotse.dataset.sampling.base import CutSampler -from lhotse.utils import fix_random_seed -from model import IGNORE_TOKEN_ID, SPEECH_LLM, EncoderProjector -from peft import LoraConfig, get_peft_model -from torch import Tensor -from torch.utils.tensorboard import SummaryWriter -from transformers import ( - AutoModelForCausalLM, - AutoTokenizer, - Qwen2Config, - Qwen2ForCausalLM, -) -from whisper_encoder_forward_monkey_patch import replace_whisper_encoder_forward - -# from icefall import diagnostics -from utils import get_rank, get_world_size -# from icefall.env import get_env_info -from utils import ( # filter_uneven_sized_batch, - AttributeDict, - MetricsTracker, - setup_logger, - str2bool, -) - -DEFAULT_SPEECH_TOKEN = "" - - -def set_batch_count(model: nn.Module, batch_count: float) -> None: - for module in model.modules(): - if hasattr(module, "batch_count"): - module.batch_count = batch_count - - -def add_model_arguments(parser: argparse.ArgumentParser): - parser.add_argument( - "--remove-whisper-encoder-input-length-restriction", - type=str2bool, - default=True, - help="replace whisper encoder forward method to remove input length restriction", - ) - parser.add_argument( - "--llm-path-or-name", - type=str, - default="/workspace/asr/Qwen1.5-0.5B-Chat", - help="Path or name of the large language model.", - ) - - parser.add_argument( - "--speech-encoder-path-or-name", - type=str, - default="whisper-large-v2", - help="Path or name of the speech encoder.", - ) - - parser.add_argument( - "--encoder-projector-ds-rate", - type=int, - default=8, - help="Downsample rate for the encoder projector.", - ) - parser.add_argument( - "--use-flash-attn", - type=str2bool, - default=True, - help="Whether to use flash attention.", - ) - - parser.add_argument( - "--use-lora", - type=str2bool, - default=False, - help="Whether to use lora to fine-tune llm.", - ) - - parser.add_argument( - "--enable-speech-output", - type=str2bool, - default=False, - help="Whether to enable speech codec output.", - ) - - -def get_parser(): - parser = argparse.ArgumentParser( - formatter_class=argparse.ArgumentDefaultsHelpFormatter - ) - - parser.add_argument( - "--tensorboard", - type=str2bool, - default=True, - help="Should various 
information be logged in tensorboard.", - ) - - parser.add_argument( - "--num-epochs", - type=int, - default=10, - help="Number of epochs to train.", - ) - - parser.add_argument( - "--start-epoch", - type=int, - default=1, - help="""Resume training from this epoch. It should be positive. - If larger than 1, it will load checkpoint from - exp-dir/epoch-{start_epoch-1}.pt - """, - ) - - parser.add_argument( - "--exp-dir", - type=str, - default="whisper_qwen/exp", - help="""The experiment dir. - It specifies the directory where all training related - files, e.g., checkpoints, log, etc, are saved - """, - ) - - parser.add_argument( - "--pretrained-model-path", - type=str, - default=None, - help="""The path to the pretrained model if it is not None. Training will - start from this model. e.g. ./wenetspeech/ASR/whisper/exp_large_v2/epoch-4-avg-3.pt - """, - ) - - parser.add_argument( - "--sampler-state-dict-path", - type=str, - default=None, - help="""The path to the sampler state dict if it is not None. Training will start from this sampler state dict. - """, - ) - - parser.add_argument( - "--seed", - type=int, - default=42, - help="The seed for random generators intended for reproducibility", - ) - - parser.add_argument( - "--use-fp16", - type=str2bool, - default=True, - help="Whether to use half precision training.", - ) - - parser.add_argument( - "--unfreeze-llm", - type=str2bool, - default=False, - help="Whether to unfreeze llm during training.", - ) - - parser.add_argument( - "--unfreeze-speech-projector", - type=str2bool, - default=False, - help="Whether to unfreeze speech adaptor during training.", - ) - - parser.add_argument( - "--dataset-format", - type=str, - default="slam_omni", - help="The format of the dataset.", - ) - parser = deepspeed.add_config_arguments(parser) - add_model_arguments(parser) - - return parser - - -def get_params() -> AttributeDict: - """Return a dict containing training parameters. - - All training related parameters that are not passed from the commandline - are saved in the variable `params`. - - Commandline options are merged into `params` after they are parsed, so - you can also access them via `params`. - - Explanation of options saved in `params`: - - - frame_shift_ms: The frame shift in milliseconds. - - allowed_excess_duration_ratio: The allowed excess duration ratio. - - best_train_loss: The best training loss so far. - - best_valid_loss: The best validation loss so far. - - best_train_epoch: The epoch where the best training loss is achieved. - - best_valid_epoch: The epoch where the best validation loss is achieved. - - batch_idx_train: The batch index of the current batch. - - log_interval: Log training stats every `log_interval` batches. - - reset_interval: Reset the stats every `reset_interval` batches. - - valid_interval: Run validation every `valid_interval` batches. - - env_info: The environment information. 
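    Editor's note, a short usage sketch (values are the defaults defined in this
    file; attribute access works because AttributeDict is a dict subclass):

        params = get_params()
        params.update(vars(get_parser().parse_args()))
        params.valid_interval   # 5000 -> run validation every 5000 batches
        params.use_fp16         # True -> train with mixed precision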
- """ - params = AttributeDict( - { - "allowed_excess_duration_ratio": 0.1, - "subsampling_factor": 2, - "frame_shift_ms": 10, - "best_train_loss": float("inf"), - "best_valid_loss": float("inf"), - "best_train_epoch": -1, - "best_valid_epoch": -1, - "batch_idx_train": 0, - "log_interval": 50, - "reset_interval": 200, - "valid_interval": 5000, - # "env_info": get_env_info(), - } - ) - - return params - - -def process_batch_slam_omni(batch: dict): - answers = batch["supervisions"]["text"] - questions_with_history = [ - cut.custom["question"] for cut in batch["supervisions"]["cut"] - ] - chat_rounds = [cut.custom["round"] for cut in batch["supervisions"]["cut"]] - answer_cosyvoice_speech_token = [ - cut.custom["answer_cosyvoice_speech_token"] - for cut in batch["supervisions"]["cut"] - ] - last_questions = [ - question.split(": ")[-1].strip() for question in questions_with_history - ] - history_contexts = [ - question.rsplit(":", 1)[0].strip() for question in questions_with_history - ] - - messages = [] - for i, total_round in enumerate(chat_rounds): - message = [] - if total_round > 1: - history_question_answer = history_contexts[i].split("USER:") - history_question_answer = [item for item in history_question_answer if item] - for j in range(total_round - 1): - question_answer = history_question_answer[j].split("ASSISTANT:") - message += [ - {"role": "user", "content": question_answer[0].strip()}, - {"role": "assistant", "content": question_answer[1].strip()}, - ] - message += [ - {"role": "user", "content": f"{DEFAULT_SPEECH_TOKEN}"}, - {"role": "assistant", "content": answers[i]}, - ] - messages.append(message) - return messages, answer_cosyvoice_speech_token - - -def process_batch_vocalnet(batch: dict): - answers = batch["supervisions"]["text"] - answer_cosyvoice_speech_token = [ - cut.custom["speech_token"] for cut in batch["supervisions"]["cut"] - ] - messages = [] - for i in range(len(answers)): - message = [ - {"role": "user", "content": f"{DEFAULT_SPEECH_TOKEN}"}, - {"role": "assistant", "content": answers[i]}, - ] - messages.append(message) - return messages, answer_cosyvoice_speech_token - - -def compute_loss( - params: AttributeDict, - tokenizer: AutoTokenizer, - model: nn.Module, - batch: dict, - is_training: bool, -) -> Tuple[Tensor, MetricsTracker]: - """ - Compute the loss for the given batch. - Args: - params: - It is returned by :func:`get_params`. - tokenizer: - The tokenizer used to encode the text. - model: - The model for training. - batch: - A batch of data. See `lhotse.dataset.K2SpeechRecognitionDataset()` - for the content in it. - is_training: - Whether it is training. - Returns: - Return a tuple of two elements. The first element is the loss tensor. - """ - # For the uneven-sized batch, the total duration after padding would possibly - # cause OOM. Hence, for each batch, which is sorted descendingly by length, - # we simply drop the last few shortest samples, so that the retained total frames - # (after padding) would not exceed `allowed_max_frames`: - # `allowed_max_frames = int(max_frames * (1.0 + allowed_excess_duration_ratio))`, - # where `max_frames = max_duration * 1000 // frame_shift_ms`. - # We set allowed_excess_duration_ratio=0.1. 
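    # (Editor's sketch with made-up strings: the `messages` layout produced by
    # process_batch_slam_omni / process_batch_vocalnet above and consumed by the
    # `preprocess` helper below. The current user turn carries only the speech
    # placeholder token; earlier rounds, if any, are plain text.)
    example_messages = [[
        {"role": "user", "content": "an earlier question"},
        {"role": "assistant", "content": "an earlier answer"},
        {"role": "user", "content": DEFAULT_SPEECH_TOKEN},
        {"role": "assistant", "content": "the current answer text"},
    ]]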
- - def preprocess( - messages, - tokenizer: transformers.PreTrainedTokenizer, - ) -> Dict: - """Preprocesses the data for supervised fine-tuning.""" - texts = [] - TEMPLATE = "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content']}}{% if loop.last %}{{ '<|im_end|>'}}{% else %}{{ '<|im_end|>\n' }}{% endif %}{% endfor %}" - for i, msg in enumerate(messages): - texts.append( - tokenizer.apply_chat_template( - msg, - tokenize=True, - chat_template=TEMPLATE, - add_generation_prompt=False, - padding="longest", # FIX me change padding to longest - truncation=False, - ) - ) - if len(texts) != len(messages): - logging.warning(f"Remove too long text, {messages} ") - max_len_texts = max([len(text) for text in texts]) - if tokenizer.padding_side == "right": - texts = [ - text + [tokenizer.pad_token_id] * (max_len_texts - len(text)) - for text in texts - ] - else: - texts = [ - [tokenizer.pad_token_id] * (max_len_texts - len(text)) + text - for text in texts - ] - input_ids = torch.tensor(texts, dtype=torch.int) - - target_ids = input_ids.clone() - target_ids[target_ids == tokenizer.pad_token_id] = IGNORE_TOKEN_ID - # mask all tokens before token_id 151646 with IGNORE_TOKEN_ID - # first get the indices of the tokens - mask_prompt = True - if mask_prompt: - default_speech_token_id = tokenizer.convert_tokens_to_ids( - DEFAULT_SPEECH_TOKEN - ) - mask_indices = torch.where(input_ids == default_speech_token_id) - for i in range(mask_indices[0].size(0)): - row = mask_indices[0][i] - col = mask_indices[1][i] - # + 6 to skip: 'assistant', '\n' 151665, 151645, 198, 151644, 77091, 198 - # WAR: TODO FIXME check qwen3 - target_ids[row, : col + 6] = IGNORE_TOKEN_ID - - attention_mask = input_ids.ne(tokenizer.pad_token_id) - - return input_ids, attention_mask, target_ids - - # max_frames = params.max_duration * 1000 // params.frame_shift_ms - # allowed_max_frames = int(max_frames * (1.0 + params.allowed_excess_duration_ratio)) - # batch = filter_uneven_sized_batch(batch, allowed_max_frames) - - device = next(model.parameters()).device - feature = batch["inputs"] - - assert feature.ndim == 3 - feature = feature.to(device) - feature = feature.transpose(1, 2) # (N, C, T) - - batch_idx_train = params.batch_idx_train - - # WAR: TODO FIXME merge process_batch_slam_omni and process_batch_vocalnet - if params.dataset_format == "slam_omni": - messages, answer_cosyvoice_speech_token = process_batch_slam_omni(batch) - elif params.dataset_format == "vocalnet": - messages, answer_cosyvoice_speech_token = process_batch_vocalnet(batch) - else: - raise ValueError(f"Unknown dataset format: {params.dataset_format}") - - print(f"messages: {messages}") - - input_ids, attention_mask, target_ids = preprocess(messages, tokenizer) - - target_ids = target_ids.type(torch.LongTensor) - input_ids = input_ids.type(torch.LongTensor) - - with torch.set_grad_enabled(is_training): - if not params.enable_speech_output: - loss, acc = model( - fbank=feature, - input_ids=input_ids.to(device), - attention_mask=attention_mask.to(device), - labels=target_ids.to(device), - ) - else: - ( - text_loss, - acc, - codec_loss, - codec_acc, - codec_topk_acc, - ) = model.forward_with_speech_output( - fbank=feature, - input_ids=input_ids.to(device), - attention_mask=attention_mask.to(device), - labels=target_ids.to(device), - speech_codec_ids=answer_cosyvoice_speech_token, - ) - loss = text_loss + codec_loss - assert loss.requires_grad == is_training - - info = MetricsTracker() - with warnings.catch_warnings(): - 
warnings.simplefilter("ignore") - feature_lens = batch["supervisions"]["num_frames"] - info["frames"] = (feature_lens // params.subsampling_factor).sum().item() - - # Note: We use reduction=sum while computing the loss. - info["loss"] = loss.detach().cpu().item() - info["acc"] = ( - acc * info["frames"] - ) # WAR: to avoid normalization by the number of frames - if params.enable_speech_output: - info["codec_acc"] = codec_acc * info["frames"] - info["codec_topk_acc"] = codec_topk_acc * info["frames"] - info["codec_loss"] = codec_loss.detach().cpu().item() - info["text_loss"] = text_loss.detach().cpu().item() - return loss, info - - -def compute_validation_loss( - params: AttributeDict, - tokenizer: whisper.tokenizer.Tokenizer, - model: nn.Module, - valid_dl: torch.utils.data.DataLoader, - world_size: int = 1, -) -> MetricsTracker: - """Run the validation process.""" - model.eval() - - tot_loss = MetricsTracker() - - for batch_idx, batch in enumerate(valid_dl): - with torch.amp.autocast("cuda", enabled=params.use_fp16): - loss, loss_info = compute_loss( - params=params, - tokenizer=tokenizer, - model=model, - batch=batch, - is_training=False, - ) - assert loss.requires_grad is False - tot_loss = tot_loss + loss_info - - if world_size > 1: - tot_loss.reduce(loss.device) - - loss_value = tot_loss["loss"] / tot_loss["frames"] - if loss_value < params.best_valid_loss: - params.best_valid_epoch = params.cur_epoch - params.best_valid_loss = loss_value - exit() - return tot_loss - - -def train_one_epoch( - params: AttributeDict, - tokenizer: AutoTokenizer, - model: nn.Module, - optimizer: torch.optim.Optimizer, - scheduler: torch.optim.lr_scheduler, - train_dl: torch.utils.data.DataLoader, - valid_dl: torch.utils.data.DataLoader, - tb_writer: Optional[SummaryWriter] = None, - world_size: int = 1, - rank: int = 0, -) -> None: - """Train the model for one epoch. - - The training loss from the mean of all frames is saved in - `params.train_loss`. It runs the validation process every - `params.valid_interval` batches. - - Args: - params: - It is returned by :func:`get_params`. - model: - The model for training. - optimizer: - The optimizer we are using. - scheduler: - The learning rate scheduler, we call step() every step. - train_dl: - Dataloader for the training dataset. - valid_dl: - Dataloader for the validation dataset. - scaler: - The scaler used for mix precision training. - model_avg: - The stored model averaged from the start of training. - tb_writer: - Writer to write log messages to tensorboard. - world_size: - Number of nodes in DDP training. If it is 1, DDP is disabled. - rank: - The rank of the node in DDP training. If no DDP is used, it should - be set to 0. 
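    Editor's note: as in compute_loss above, losses are accumulated with
    reduction=sum into a MetricsTracker and only normalized by the frame count
    when reported, e.g.

        loss_value = tot_loss["loss"] / tot_loss["frames"]
        # e.g. a summed loss of 36.0 over 120 frames is reported as 0.3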
- """ - model.encoder_projector.train() - - tot_loss = MetricsTracker() - - for batch_idx, batch in enumerate(train_dl): - params.batch_idx_train += 1 - batch_size = len(batch["supervisions"]["text"]) - if batch_idx % params.valid_interval == 0: - logging.info("Computing validation loss") - valid_info = compute_validation_loss( - params=params, - tokenizer=tokenizer, - model=model, - valid_dl=valid_dl, - world_size=world_size, - ) - model.train() - logging.info(f"Epoch {params.cur_epoch}, validation: {valid_info}") - logging.info( - f"Maximum memory allocated so far is {torch.cuda.max_memory_allocated()//1000000}MB" - ) - if tb_writer is not None: - valid_info.write_summary( - tb_writer, "train/valid_", params.batch_idx_train - ) - if batch_idx != 0: - model.save_checkpoint( - save_dir=params.exp_dir, - tag=f"epoch-{params.cur_epoch}-checkpoint-{batch_idx}", - client_state={}, - exclude_frozen_parameters=True, - ) - - if rank == 0: - convert_zero_checkpoint_to_fp32_state_dict( - params.exp_dir, - f"{params.exp_dir}/epoch-{params.cur_epoch}-checkpoint-{batch_idx}.pt", - tag=f"epoch-{params.cur_epoch}-checkpoint-{batch_idx}", - exclude_frozen_parameters=True, - ) - # save sampler state dict into checkpoint - sampler_state_dict = train_dl.sampler.state_dict() - torch.save( - sampler_state_dict, - f"{params.exp_dir}/epoch-{params.cur_epoch}-checkpoint-{batch_idx}-sampler.pt", - ) - os.system( - f"rm -rf {params.exp_dir}/epoch-{params.cur_epoch}-checkpoint-{batch_idx}" - ) - try: - with torch.amp.autocast("cuda", enabled=params.use_fp16): - loss, loss_info = compute_loss( - params=params, - tokenizer=tokenizer, - model=model, - batch=batch, - is_training=True, - ) - # summary stats - tot_loss = (tot_loss * (1 - 1 / params.reset_interval)) + loss_info - - # NOTE: We use reduction==sum and loss is computed over utterances - # in the batch and there is no normalization to it so far. - - # deepspeed's backward() is different from torch's backward() - # in that it does not accept a loss tensor as input. - # It computes the loss internally. - model.backward(loss) - model.step() - - except: # noqa - display_and_save_batch(batch, params=params) - raise - - if batch_idx % params.log_interval == 0: - try: - cur_lr = scheduler.get_last_lr()[0] - except: # noqa - cur_lr = 0.0 - - logging.info( - f"Epoch {params.cur_epoch}, " - f"batch {batch_idx}, loss[{loss_info}], " - f"tot_loss[{tot_loss}], batch size: {batch_size}, " - f"lr: {cur_lr:.2e}, " - ) - - if tb_writer is not None: - tb_writer.add_scalar( - "train/learning_rate", cur_lr, params.batch_idx_train - ) - - loss_info.write_summary( - tb_writer, "train/current_", params.batch_idx_train - ) - tot_loss.write_summary(tb_writer, "train/tot_", params.batch_idx_train) - - loss_value = tot_loss["loss"] / tot_loss["frames"] - params.train_loss = loss_value - if params.train_loss < params.best_train_loss: - params.best_train_epoch = params.cur_epoch - params.best_train_loss = params.train_loss - - -def run(rank, world_size, args): - """ - Args: - rank: - It is a value between 0 and `world_size-1`, which is - passed automatically by `mp.spawn()` in :func:`main`. - The node with rank 0 is responsible for saving checkpoint. - world_size: - Number of GPUs for DDP training. 
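The checkpoint block above writes a sharded DeepSpeed (ZeRO) checkpoint from every rank, consolidates it into a single fp32 `.pt` file on rank 0, and then deletes the sharded directory. A condensed sketch of that flow, assuming the usual DeepSpeed import path and placeholder paths:

```python
# Sketch of the DeepSpeed checkpoint flow; `model_engine` is the object
# returned by deepspeed.initialize(), and exp_dir/tag are placeholders.
import shutil
from deepspeed.utils.zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict

def save_consolidated(model_engine, exp_dir: str, tag: str, rank: int) -> None:
    # Every rank writes its ZeRO shard under f"{exp_dir}/{tag}".
    model_engine.save_checkpoint(
        save_dir=exp_dir, tag=tag, client_state={}, exclude_frozen_parameters=True
    )
    if rank == 0:
        # Merge the shards into a single fp32 state dict at f"{exp_dir}/{tag}.pt".
        convert_zero_checkpoint_to_fp32_state_dict(
            exp_dir, f"{exp_dir}/{tag}.pt", tag=tag, exclude_frozen_parameters=True
        )
        # The sharded directory is redundant once the merged file exists.
        shutil.rmtree(f"{exp_dir}/{tag}", ignore_errors=True)
```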
- args: - The return value of get_parser().parse_args() - """ - params = get_params() - params.update(vars(args)) - - fix_random_seed(params.seed) - - setup_logger(f"{params.exp_dir}/log/log-train") - logging.info(params) - - logging.info("About to create model") - - replace_whisper_encoder_forward() - whisper_model = whisper.load_model(params.speech_encoder_path_or_name, "cpu") - speech_encoder = whisper_model.encoder - speech_encoder_dim = whisper_model.dims.n_audio_state - for name, param in speech_encoder.named_parameters(): - param.requires_grad = False - speech_encoder.eval() - - tokenizer = AutoTokenizer.from_pretrained(params.llm_path_or_name) - - if params.use_flash_attn: - attn_implementation = "flash_attention_2" - torch_dtype = torch.float16 - tokenizer.padding_side = "left" - - else: - attn_implementation = "eager" - torch_dtype = torch.float16 - tokenizer.padding_side = "right" - - llm = AutoModelForCausalLM.from_pretrained( - params.llm_path_or_name, - attn_implementation=attn_implementation, - torch_dtype=torch_dtype, - ) - - if not params.unfreeze_llm: - for name, param in llm.named_parameters(): - param.requires_grad = False - llm.eval() - else: - if params.use_lora: - lora_config = LoraConfig( - r=64, - lora_alpha=16, - target_modules=[ - "q_proj", - "k_proj", - "v_proj", - "o_proj", - "up_proj", - "gate_proj", - "down_proj", - ], - lora_dropout=0.05, - task_type="CAUSAL_LM", - ) - llm = get_peft_model(llm, lora_config) - llm.print_trainable_parameters() - - special_tokens_dict = {"additional_special_tokens": [DEFAULT_SPEECH_TOKEN]} - tokenizer.add_special_tokens(special_tokens_dict) - - llm.config.pad_token_id = tokenizer.pad_token_id - llm.config.default_speech_token_id = tokenizer.convert_tokens_to_ids( - DEFAULT_SPEECH_TOKEN - ) - - encoder_projector = EncoderProjector( - speech_encoder_dim, llm.config.hidden_size, params.encoder_projector_ds_rate - ) - if not params.unfreeze_speech_projector: - for name, param in encoder_projector.named_parameters(): - param.requires_grad = False - encoder_projector.eval() - - if params.enable_speech_output: - # Determine attn_implementation and torch_dtype based on use_flash_attn - if params.use_flash_attn: - attn_implementation = "flash_attention_2" - torch_dtype = torch.float16 # Or torch.bfloat16 if needed/supported - else: - attn_implementation = "eager" - torch_dtype = torch.float16 - if params.dataset_format == "slam_omni": - codec_vocab_size = 4096 + 4 - elif params.dataset_format == "vocalnet": - codec_vocab_size = 6561 + 4 - else: - raise ValueError(f"Unknown dataset format: {params.dataset_format}") - # TODO: modify above vocab size or supress_tokens when decoding - config = Qwen2Config( - vocab_size=codec_vocab_size, - hidden_size=1024, - num_hidden_layers=12, - num_attention_heads=16, - num_key_value_heads=16, - intermediate_size=2048, - max_position_embeddings=4096, - ) - - codec_lm = AutoModelForCausalLM.from_config( - config=config, - attn_implementation=attn_implementation, - torch_dtype=torch_dtype, - ) - - codec_lm.resize_token_embeddings(codec_vocab_size) - codec_lm.vocab_size = codec_vocab_size - codec_lm.config.pad_token_id = codec_vocab_size - 1 - codec_lm.config.eos_token_id = codec_vocab_size - 2 - codec_lm.config.bos_token_id = codec_vocab_size - 3 - codec_lm.config.mask_token_id = codec_vocab_size - 4 - else: - codec_lm = None - - model = SPEECH_LLM( - speech_encoder, - llm, - encoder_projector, - codec_lm, - codec_lm_padding_side="left" if params.use_flash_attn else "right", - ) - - if 
params.pretrained_model_path: - checkpoint = torch.load(params.pretrained_model_path, map_location="cpu") - missing_keys, unexpected_keys = model.load_state_dict(checkpoint, strict=False) - - num_param = sum([p.numel() for p in model.parameters()]) - logging.info(f"Number of model parameters: {num_param}") - - logging.info("Trainable parameters (excluding model.eval modules):") - for name, param in model.named_parameters(): - if param.requires_grad: - logging.info(f"{name}: {param.shape}") - - if torch.cuda.is_available(): - device = torch.device("cuda", rank) - else: - device = torch.device("cpu") - logging.info(f"Device: {device}") - model.to(device) - - assert params.deepspeed and world_size > 1 - logging.info("Using DeepSpeed") - model, optimizer, _, scheduler = deepspeed.initialize( - args=params, model=model, model_parameters=model.parameters() - ) - - data_module = AsrDataModule(args) - - def remove_short_and_long_utt(c: Cut): - # Keep only utterances with duration between 1 second and 20 seconds - # - # Caution: There is a reason to select 20.0 here. Please see - # ../local/display_manifest_statistics.py - # - # You should use ../local/display_manifest_statistics.py to get - # an utterance duration distribution for your dataset to select - # the threshold - if c.duration < 1.0 or c.duration > 30.0: - # logging.warning( - # f"Exclude cut with ID {c.id} from training. Duration: {c.duration}" - # ) - return False - codec_len = ( - len(c.custom["answer_cosyvoice_speech_token"]) - if "answer_cosyvoice_speech_token" in c.custom - else len(c.custom["speech_token"]) - ) - if codec_len > 2200: - logging.warning( - f"Exclude cut with ID {c.id} from training. Duration: {c.duration}, lenth: {codec_len}" - ) - return False - return True - - if params.dataset_format == "slam_omni": - train_cuts = data_module.train_cuts() - valid_cuts = data_module.dev_cuts() - elif params.dataset_format == "vocalnet": - train_cuts = data_module.train_cuts_en_vocalnet() - valid_cuts = data_module.valid_cuts_en_vocalnet() - else: - raise ValueError(f"Unknown dataset format: {params.dataset_format}") - - train_cuts = train_cuts.filter(remove_short_and_long_utt) - valid_cuts = valid_cuts.filter(remove_short_and_long_utt) - - sampler_state_dict = None - if params.sampler_state_dict_path: - sampler_state_dict = torch.load(params.sampler_state_dict_path) - sampler_state_dict["max_duration"] = params.max_duration - - train_dl = data_module.train_dataloaders( - train_cuts, sampler_state_dict=sampler_state_dict - ) - - valid_dl = data_module.valid_dataloaders(valid_cuts) - - if args.tensorboard and rank == 0: - tb_writer = SummaryWriter(log_dir=f"{params.exp_dir}/tensorboard") - else: - tb_writer = None - - logging.info(f"start training from epoch {params.start_epoch}") - for epoch in range(params.start_epoch, params.num_epochs + 1): - - fix_random_seed(params.seed + epoch - 1) - train_dl.sampler.set_epoch(epoch - 1) - - if tb_writer is not None: - tb_writer.add_scalar("train/epoch", epoch, params.batch_idx_train) - - params.cur_epoch = epoch - - train_one_epoch( - params=params, - tokenizer=tokenizer, - model=model, - optimizer=optimizer, - scheduler=scheduler, - train_dl=train_dl, - valid_dl=valid_dl, - tb_writer=tb_writer, - world_size=world_size, - rank=rank, - ) - - model.save_checkpoint( - save_dir=params.exp_dir, - tag=f"epoch-{params.cur_epoch}", - client_state={}, - exclude_frozen_parameters=True, - ) - if rank == 0: - convert_zero_checkpoint_to_fp32_state_dict( - params.exp_dir, - 
f"{params.exp_dir}/epoch-{params.cur_epoch}.pt", - tag=f"epoch-{params.cur_epoch}", - exclude_frozen_parameters=True, - ) - # save sampler state dict into checkpoint - sampler_state_dict = train_dl.sampler.state_dict() - torch.save( - sampler_state_dict, - f"{params.exp_dir}/epoch-{params.cur_epoch}-sampler.pt", - ) - - os.system(f"rm -rf {params.exp_dir}/epoch-{params.cur_epoch}") - - logging.info("Done!") - - -def display_and_save_batch( - batch: dict, - params: AttributeDict, -) -> None: - """Display the batch statistics and save the batch into disk. - - Args: - batch: - A batch of data. See `lhotse.dataset.K2SpeechRecognitionDataset()` - for the content in it. - params: - Parameters for training. See :func:`get_params`. - """ - from lhotse.utils import uuid4 - - filename = f"{params.exp_dir}/batch-{uuid4()}.pt" - logging.info(f"Saving batch to {filename}") - torch.save(batch, filename) - - features = batch["inputs"] - - logging.info(f"features shape: {features.shape}") - - -def main(): - parser = get_parser() - AsrDataModule.add_arguments(parser) - args = parser.parse_args() - args.exp_dir = Path(args.exp_dir) - - world_size = get_world_size() - rank = get_rank() - - torch.set_num_threads(1) - torch.set_num_interop_threads(1) - run(rank=rank, world_size=world_size, args=args) - - -if __name__ == "__main__": - main() diff --git a/egs/speech_llm/SPEECH2SPEECH/prepare.sh b/egs/speech_llm/SPEECH2SPEECH/prepare.sh index 74176fdf2..4ee6976da 100644 --- a/egs/speech_llm/SPEECH2SPEECH/prepare.sh +++ b/egs/speech_llm/SPEECH2SPEECH/prepare.sh @@ -240,11 +240,11 @@ fi if [ $stage -le 14 ] && [ $stop_stage -ge 14 ]; then log "stage 14: Client" exp_dir=./qwen_omni/exp_speech2text_first_libri_continuation_second_ce + exp_dir=./qwen_omni/exp_speech2text_first_asr_second_ce + exp_dir=./qwen_omni/exp_speech2text_first_multi_en_continuation_second_qa # The final assignment of datasets in the original script is used here: # (alpacaeval_full wildvoice mmsu advbench bbh ifeval commoneval openbookqa sd-qa) declare -a target_datasets=("alpacaeval_full" "wildvoice" "ifeval" "commoneval" "openbookqa" "sd-qa" "advbench" "bbh" "mmsu") - declare -a target_datasets=("openbookqa" "ifeval" "sd-qa" "commoneval" "alpacaeval_full") - declare -a target_datasets=("alpacaeval_full" "wildvoice" "advbench" "bbh" "mmsu") NUM_CLIENT_JOBS=4 # Number of parallel client jobs BASE_PORT=8000 # Base port for servers @@ -365,7 +365,8 @@ if [ $stage -le 17 ] && [ $stop_stage -ge 17 ]; then # pip install gradio sherpa-onnx log "stage 17: Server for adapter only speech continuation" exp_dir=./qwen_omni/exp_speech2text_first_libri_continuation_second_ce - # exp_dir=./qwen_omni/exp_speech2text_first_asr_second_ce + exp_dir=./qwen_omni/exp_speech2text_first_asr_second_ce + exp_dir=./qwen_omni/exp_speech2text_first_multi_en_continuation_second_qa N_GPUS=4 # Define the number of GPUs/processes you want to launch diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/data_module.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/data_module.py index da337791a..acdfb4f2c 100644 --- a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/data_module.py +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/data_module.py @@ -36,6 +36,7 @@ from lhotse.dataset import ( # noqa F401 for PrecomputedFeatures CutConcatenate, CutMix, DynamicBucketingSampler, + K2SpeechRecognitionDataset, PerturbSpeed, PrecomputedFeatures, SimpleCutSampler, @@ -46,7 +47,6 @@ from lhotse.dataset.input_strategies import ( # noqa F401 For AudioSamples OnTheFlyFeatures, ) from lhotse.utils import 
fix_random_seed -from speech_dataset import K2SpeechRecognitionDataset from torch.utils.data import DataLoader from utils import get_local_rank, str2bool @@ -203,21 +203,15 @@ class AsrDataModule: group.add_argument( "--audio-key", type=str, - default="audio", + default=None, help="The key in the Huggingface dataset containing the audio data", ) group.add_argument( "--text-key", type=str, - default="text", + default=None, help="The key in the Huggingface dataset containing the text data", ) - # group.add_argument( - # "--resample-to-16kHz", - # type=str2bool, - # default=True, - # help="Resample audio to 16kHz. Default: False.", - # ) def train_dataloaders( self, @@ -389,29 +383,21 @@ class AsrDataModule: return test_dl @lru_cache() - def test_cuts(self) -> CutSet: + def test_cuts_belle(self) -> CutSet: logging.info("About to get test cuts") - if self.args.on_the_fly_feats: - pass - else: - return { - "test": load_manifest_lazy( - self.args.manifest_dir / "cuts_belle_test.jsonl.gz" - ) - } - - @lru_cache() - def dev_cuts(self) -> CutSet: - logging.info("About to get test cuts") - if self.args.on_the_fly_feats: - pass - else: - return load_manifest_lazy( + return { + "test": load_manifest_lazy( self.args.manifest_dir / "cuts_belle_test.jsonl.gz" ) - + } @lru_cache() - def train_cuts(self) -> CutSet: + def dev_cuts_belle(self) -> CutSet: + logging.info("About to get test cuts") + return load_manifest_lazy( + self.args.manifest_dir / "cuts_belle_test.jsonl.gz" + ) + @lru_cache() + def train_cuts_belle(self) -> CutSet: logging.info("About to get train cuts") slam_omni_zh_cuts = load_manifest_lazy( self.args.manifest_dir / "cuts_belle_train.jsonl.gz" @@ -435,8 +421,6 @@ class AsrDataModule: len(ultrachat_cuts), ], ) - - # valid cuts_voice_assistant.00000.jsonl.gz @lru_cache() def valid_cuts_en_vocalnet(self) -> CutSet: logging.info("About to get valid cuts") @@ -453,15 +437,6 @@ class AsrDataModule: ) return {"test": VoiceAssistant_cuts} - def test_cuts_voicebench( - self, - ) -> CutSet: - logging.info("About to get test cuts") - VoiceAssistant_cuts = load_manifest_lazy( - self.args.manifest_dir / "cuts_voice_assistant_small.00000.jsonl.gz" - ) - return {"test": VoiceAssistant_cuts} - @lru_cache() def train_cuts_ultravox(self) -> CutSet: logging.info("About to get train cuts") @@ -556,65 +531,6 @@ class AsrDataModule: ], ) - # @lru_cache() - # def train_cuts_ultravox(self) -> CutSet: - # logging.info("About to get train cuts") - # keep_columns = ["audio", "text", "continuation", "id"] - # librispeech_path="fixie-ai/librispeech_asr" - # # 148_688 - # librispeech_other = load_dataset(librispeech_path, 'other', split='train.500', streaming=True) - # # 104_014 - # librispeech_clean_360 = load_dataset(librispeech_path, 'clean', split='train.360', streaming=True) - # # 28_539 - # librispeech_clean_100 = load_dataset(librispeech_path, 'clean', split='train.100', streaming=True) - - # cols_to_remove = librispeech_clean_100.column_names - # cols_to_remove = [col for col in cols_to_remove if col not in keep_columns] - # librispeech_clean_100 = librispeech_clean_100.remove_columns(cols_to_remove) - # librispeech_clean_360 = librispeech_clean_360.remove_columns(cols_to_remove) - # librispeech_other = librispeech_other.remove_columns(cols_to_remove) - # people_speech_path="fixie-ai/peoples_speech" - # # 1_501_271 - # people_speech_clean = load_dataset(people_speech_path, 'clean', split='train', streaming=True) - # # 548_000 - # people_speech_dirty_sa = load_dataset(people_speech_path, 'dirty_sa', 
split='train', streaming=True) - # cols_to_remove = people_speech_clean.column_names - # cols_to_remove = [col for col in cols_to_remove if col not in keep_columns] - # people_speech_clean = people_speech_clean.remove_columns(cols_to_remove) - # people_speech_dirty_sa = people_speech_dirty_sa.remove_columns(cols_to_remove) - - # # 8_266_422 - # gigaspeech_path="fixie-ai/gigaspeech" - # gigaspeech = load_dataset(gigaspeech_path, 'xl-empty-audio-removed', split='train', streaming=True) - # # first rename segment_id to id - # gigaspeech = gigaspeech.rename_column("segment_id", "id") - # cols_to_remove = gigaspeech.column_names - # cols_to_remove = [col for col in cols_to_remove if col not in keep_columns] - # gigaspeech = gigaspeech.remove_columns(cols_to_remove) - - # total_item = 104014 + 28539 + 8266422 + 1501271 + 548000 + 148688 - # final_datasets = interleave_datasets([ - # librispeech_clean_100, - # librispeech_clean_360, - # gigaspeech, - # people_speech_clean, - # people_speech_dirty_sa, - # librispeech_other, - # ], probabilities=[ - # 28539 / total_item, - # 104014 / total_item, - # 8266422 / total_item, - # 1501271 / total_item, - # 548000 / total_item, - # 148688 / total_item, - # ]) - - # train_cuts = CutSet.from_huggingface_dataset( - # final_datasets, audio_key=self.args.audio_key, text_key=self.args.text_key - # ) - - # return train_cuts - @lru_cache() def valid_cuts_ultravox(self) -> CutSet: logging.info("About to get valid cuts") diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/decode.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/decode.py index 793b32112..1bf2c6d9f 100755 --- a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/decode.py +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/decode.py @@ -741,7 +741,7 @@ def main(): return True # TODO: FIX ME - # test_sets_cuts = data_module.test_cuts() + # test_sets_cuts = data_module.test_cuts_belle() test_sets_cuts = data_module.test_cuts_en_vocalnet() test_sets = test_sets_cuts.keys() test_dls = [ diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/requirements.txt b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/requirements.txt index 85e975175..ce14647fc 100644 --- a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/requirements.txt +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/requirements.txt @@ -11,3 +11,5 @@ flash-attn peft torchmetrics # triton==3.3.0 # may be violate with openai-whisper +gradio +sherpa-onnx \ No newline at end of file diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/speech_dataset.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/speech_dataset.py deleted file mode 100644 index 43a4efb5a..000000000 --- a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/speech_dataset.py +++ /dev/null @@ -1,175 +0,0 @@ -from typing import Callable, Dict, List, Union - -import torch -from lhotse import validate -from lhotse.cut import CutSet -from lhotse.dataset.input_strategies import BatchIO, PrecomputedFeatures -from lhotse.utils import compute_num_frames, ifnone -from lhotse.workarounds import Hdf5MemoryIssueFix -from torch.utils.data.dataloader import DataLoader, default_collate - - -class K2SpeechRecognitionDataset(torch.utils.data.Dataset): - """ - The PyTorch Dataset for the speech recognition task using k2 library. - - This dataset expects to be queried with lists of cut IDs, - for which it loads features and automatically collates/batches them. - - To use it with a PyTorch DataLoader, set ``batch_size=None`` - and provide a :class:`SimpleCutSampler` sampler. - - Each item in this dataset is a dict of: - - .. 
code-block:: - - { - 'inputs': float tensor with shape determined by :attr:`input_strategy`: - - single-channel: - - features: (B, T, F) - - audio: (B, T) - - multi-channel: currently not supported - 'supervisions': [ - { - 'sequence_idx': Tensor[int] of shape (S,) - 'text': List[str] of len S - - # For feature input strategies - 'start_frame': Tensor[int] of shape (S,) - 'num_frames': Tensor[int] of shape (S,) - - # For audio input strategies - 'start_sample': Tensor[int] of shape (S,) - 'num_samples': Tensor[int] of shape (S,) - - # Optionally, when return_cuts=True - 'cut': List[AnyCut] of len S - } - ] - } - - Dimension symbols legend: - * ``B`` - batch size (number of Cuts) - * ``S`` - number of supervision segments (greater or equal to B, as each Cut may have multiple supervisions) - * ``T`` - number of frames of the longest Cut - * ``F`` - number of features - - The 'sequence_idx' field is the index of the Cut used to create the example in the Dataset. - """ - - def __init__( - self, - return_cuts: bool = False, - cut_transforms: List[Callable[[CutSet], CutSet]] = None, - input_transforms: List[Callable[[torch.Tensor], torch.Tensor]] = None, - input_strategy: BatchIO = PrecomputedFeatures(), - ): - """ - k2 ASR IterableDataset constructor. - - :param return_cuts: When ``True``, will additionally return a "cut" field in each batch with the Cut - objects used to create that batch. - :param cut_transforms: A list of transforms to be applied on each sampled batch, - before converting cuts to an input representation (audio/features). - Examples: cut concatenation, noise cuts mixing, etc. - :param input_transforms: A list of transforms to be applied on each sampled batch, - after the cuts are converted to audio/features. - Examples: normalization, SpecAugment, etc. - :param input_strategy: Converts cuts into a collated batch of audio/features. - By default, reads pre-computed features from disk. - """ - super().__init__() - # Initialize the fields - self.return_cuts = return_cuts - self.cut_transforms = ifnone(cut_transforms, []) - self.input_transforms = ifnone(input_transforms, []) - self.input_strategy = input_strategy - - # This attribute is a workaround to constantly growing HDF5 memory - # throughout the epoch. It regularly closes open file handles to - # reset the internal HDF5 caches. - self.hdf5_fix = Hdf5MemoryIssueFix(reset_interval=100) - - def __getitem__(self, cuts: CutSet) -> Dict[str, Union[torch.Tensor, List[str]]]: - """ - Return a new batch, with the batch size automatically determined using the constraints - of max_duration and max_cuts. - """ - validate_for_asr(cuts) - - self.hdf5_fix.update() - - # Sort the cuts by duration so that the first one determines the batch time dimensions. - cuts = cuts.sort_by_duration(ascending=False) - - # Optional CutSet transforms - e.g. padding, or speed perturbation that adjusts - # the supervision boundaries. - for tnfm in self.cut_transforms: - cuts = tnfm(cuts) - - # Sort the cuts again after transforms - cuts = cuts.sort_by_duration(ascending=False) - - # Get a tensor with batched feature matrices, shape (B, T, F) - # Collation performs auto-padding, if necessary. - input_tpl = self.input_strategy(cuts) - if len(input_tpl) == 3: - # An input strategy with fault tolerant audio reading mode. - # "cuts" may be a subset of the original "cuts" variable, - # that only has cuts for which we succesfully read the audio. 
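A dataset in this style is indexed with whole `CutSet` mini-batches rather than integer indices, so it is paired with a Lhotse sampler and `batch_size=None` on the `DataLoader`. A minimal usage sketch, assuming precomputed features and a placeholder manifest path:

```python
# Usage sketch for a map-style Lhotse ASR dataset; the manifest path is a placeholder.
from lhotse import load_manifest_lazy
from lhotse.dataset import DynamicBucketingSampler, K2SpeechRecognitionDataset
from torch.utils.data import DataLoader

cuts = load_manifest_lazy("data/fbank/cuts_train.jsonl.gz")
sampler = DynamicBucketingSampler(cuts, max_duration=200.0, shuffle=True)
dataset = K2SpeechRecognitionDataset(return_cuts=True)

# batch_size=None because the sampler already yields whole CutSet mini-batches.
dl = DataLoader(dataset, sampler=sampler, batch_size=None, num_workers=2)

for batch in dl:
    feats = batch["inputs"]                 # (B, T, F) padded feature matrices
    texts = batch["supervisions"]["text"]   # list of transcripts
    break
```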
- inputs, _, cuts = input_tpl - else: - inputs, _ = input_tpl - - # Get a dict of tensors that encode the positional information about supervisions - # in the batch of feature matrices. The tensors are named "sequence_idx", - # "start_frame/sample" and "num_frames/samples". - supervision_intervals = self.input_strategy.supervision_intervals(cuts) - - # Apply all available transforms on the inputs, i.e. either audio or features. - # This could be feature extraction, global MVN, SpecAugment, etc. - segments = torch.stack(list(supervision_intervals.values()), dim=1) - for tnfm in self.input_transforms: - inputs = tnfm(inputs, supervision_segments=segments) - - batch = { - "inputs": inputs, - "supervisions": default_collate( - [ - { - "text": supervision.text, - } - for sequence_idx, cut in enumerate(cuts) - for supervision in cut.supervisions - ] - ), - } - # Update the 'supervisions' field with sequence_idx and start/num frames/samples - batch["supervisions"].update(supervision_intervals) - if self.return_cuts: - batch["supervisions"]["cut"] = [ - cut for cut in cuts for sup in cut.supervisions - ] - - return batch - - -def validate_for_asr(cuts: CutSet) -> None: - validate(cuts) - tol = 2e-3 # 1ms - for cut in cuts: - for supervision in cut.supervisions: - assert supervision.start >= -tol, ( - f"Supervisions starting before the cut are not supported for ASR" - f" (sup id: {supervision.id}, cut id: {cut.id})" - ) - - # Supervision start time is relative to Cut ... - # https://lhotse.readthedocs.io/en/v0.10_e/cuts.html - # - # 'supervision.end' is end of supervision inside the Cut - assert supervision.end <= cut.duration + tol, ( - f"Supervisions ending after the cut " - f"are not supported for ASR" - f" (sup id: {supervision.id}, cut id: {cut.id})" - ) diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py index d5a2f7cf9..e65cc7829 100755 --- a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py @@ -89,12 +89,6 @@ except RuntimeError: pass -def set_batch_count(model: nn.Module, batch_count: float) -> None: - for module in model.modules(): - if hasattr(module, "batch_count"): - module.batch_count = batch_count - - def add_model_arguments(parser: argparse.ArgumentParser): parser.add_argument( "--remove-whisper-encoder-input-length-restriction", @@ -143,6 +137,13 @@ def add_model_arguments(parser: argparse.ArgumentParser): help="Whether to enable speech codec output.", ) + parser.add_argument( + "--speech-tokenizer-type", + type=str, + default="cosyvoice2", + help="The type of the speech tokenizer. cosyvoice2: 6561, cosyvoice1: 4096", + ) + def get_parser(): parser = argparse.ArgumentParser( @@ -229,10 +230,10 @@ def get_parser(): ) parser.add_argument( - "--dataset-format", + "--prompt-template", type=str, - default="slam_omni", - help="The format of the dataset.", + default="speech_qa", + help="The prompt template to use.", ) parser.add_argument( @@ -291,123 +292,89 @@ def get_params() -> AttributeDict: "log_interval": 50, "reset_interval": 200, "valid_interval": 1000, - # "env_info": get_env_info(), } ) return params -def process_batch_slam_omni(batch: dict): +def extract_text_and_speech_token( + batch: dict, + prompt_template: str, + enable_speech_output: bool +) -> Tuple[List[Dict[str, str]], Optional[List[Any]]]: + """ + Extracts messages and speech tokens from a batch based on the dataset format. + Uses the global DEFAULT_SPEECH_TOKEN. 
+ """ + messages = [] + speech_tokens = None # Initialize as None + if enable_speech_output: + if "answer_cosyvoice_speech_token" in batch["supervisions"]["cut"][0].custom: + assert "speech_token" not in batch["supervisions"]["cut"][0].custom + speech_tokens = [ + cut.custom["answer_cosyvoice_speech_token"] + for cut in batch["supervisions"]["cut"] + ] + elif "speech_token" in batch["supervisions"]["cut"][0].custom: + speech_tokens = [ + cut.custom["speech_token"] for cut in batch["supervisions"]["cut"] + ] + else: + raise ValueError("Unknown speech token type") answers = batch["supervisions"]["text"] - questions_with_history = [ - cut.custom["question"] for cut in batch["supervisions"]["cut"] - ] - chat_rounds = [cut.custom["round"] for cut in batch["supervisions"]["cut"]] - answer_cosyvoice_speech_token = [ - cut.custom["answer_cosyvoice_speech_token"] - for cut in batch["supervisions"]["cut"] - ] - last_questions = [ - question.split(": ")[-1].strip() for question in questions_with_history - ] - history_contexts = [ - question.rsplit(":", 1)[0].strip() for question in questions_with_history - ] + batch_size = len(answers) - messages = [] - for i, total_round in enumerate(chat_rounds): - message = [] - if total_round > 1: - history_question_answer = history_contexts[i].split("USER:") - history_question_answer = [item for item in history_question_answer if item] - for j in range(total_round - 1): - question_answer = history_question_answer[j].split("ASSISTANT:") - message += [ - {"role": "user", "content": question_answer[0].strip()}, - {"role": "assistant", "content": question_answer[1].strip()}, - ] - message += [ - {"role": "user", "content": f"{DEFAULT_SPEECH_TOKEN}"}, - {"role": "assistant", "content": answers[i]}, - ] - messages.append(message) - return messages, answer_cosyvoice_speech_token + if prompt_template == "speech_qa": + for i in range(batch_size): + message_list_item = [] + if 'round' in batch["supervisions"]["cut"][i].custom: + # slam_omni format dataset + current_question_with_history = batch["supervisions"]["cut"][i].custom["question"] + total_round = batch["supervisions"]["cut"][i].custom["round"] + history_context = current_question_with_history.rsplit(":", 1)[0].strip() + if total_round > 1: + history_question_answer = history_context.split("USER:") + history_question_answer = [item for item in history_question_answer if item] + for j in range(total_round - 1): + question_answer = history_question_answer[j].split("ASSISTANT:") + message_list_item += [ + {"role": "user", "content": question_answer[0].strip()}, + {"role": "assistant", "content": question_answer[1].strip()}, + ] + message_list_item += [ + {"role": "user", "content": f"{DEFAULT_SPEECH_TOKEN}"}, + {"role": "assistant", "content": answers[i]}, + ] + messages.append(message_list_item) + elif prompt_template == "speech_continuation": + # speech_tokens remains None + for i in range(batch_size): + message_list_item = [ + { + "role": "user", + "content": f"Continue the following text using less than 50 words:\\n\\n{DEFAULT_SPEECH_TOKEN}", + }, + {"role": "assistant", "content": answers[i]}, + ] + messages.append(message_list_item) -def process_batch_vocalnet(batch: dict): - answers = batch["supervisions"]["text"] - answer_cosyvoice_speech_token = [ - cut.custom["speech_token"] for cut in batch["supervisions"]["cut"] - ] - messages = [] - for i in range(len(answers)): - message = [ - {"role": "user", "content": f"{DEFAULT_SPEECH_TOKEN}"}, - {"role": "assistant", "content": answers[i]}, - ] - 
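The codec targets are carried in `cut.custom`, under `answer_cosyvoice_speech_token` for slam_omni-style manifests or `speech_token` for VocalNet-style ones, exactly as read above. A small sketch of that lookup on a collated batch:

```python
# Sketch of the cut.custom lookup; key names are taken from the manifests above.
from typing import Any, Dict, List, Optional

def get_speech_tokens(batch: Dict[str, Any]) -> Optional[List[List[int]]]:
    """Return one codec-token list per cut, or None if the batch carries none."""
    cuts = batch["supervisions"]["cut"]
    custom = cuts[0].custom or {}
    if "answer_cosyvoice_speech_token" in custom:   # slam_omni-style data
        key = "answer_cosyvoice_speech_token"
    elif "speech_token" in custom:                  # VocalNet-style data
        key = "speech_token"
    else:
        return None
    return [cut.custom[key] for cut in cuts]
```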
messages.append(message) - return messages, answer_cosyvoice_speech_token - - -def process_batch_text_vocalnet(batch: dict): - pass - answers = batch["supervisions"]["text"] - answer_cosyvoice_speech_token = [ - cut.custom["speech_token"] for cut in batch["supervisions"]["cut"] - ] - messages = [] - for i in range(len(answers)): - message = [ - {"role": "user", "content": f"{DEFAULT_SPEECH_TOKEN}"}, - {"role": "assistant", "content": answers[i]}, - ] - messages.append(message) - return messages, answer_cosyvoice_speech_token - - -def process_batch_speech_continuation(batch: dict): - messages = [] - for i in range(len(batch["supervisions"]["text"])): - message = [ - { - "role": "user", - "content": f"Continue the following text using less than 50 words:\n\n{DEFAULT_SPEECH_TOKEN}", - }, - {"role": "assistant", "content": batch["supervisions"]["text"][i]}, - ] - # transcript = batch["supervisions"]["cut"][i].custom["text"] - messages.append(message) - return messages - -def process_batch_asr(batch: dict): - messages = [] - for i in range(len(batch["supervisions"]["text"])): - transcript = batch["supervisions"]["cut"][i].custom["text"] - message = [ - { - "role": "user", - "content": f"Transcribe the following audio into text:\n\n{DEFAULT_SPEECH_TOKEN}", - }, - {"role": "assistant", "content": transcript}, - ] - messages.append(message) - return messages - -def process_batch_text_continuation(batch: dict): - messages = [] - for i in range(len(batch["supervisions"]["text"])): - transcript = batch["supervisions"]["cut"][i].custom["text"] - message = [ - { - "role": "user", - "content": f"Continue the following text using less than 50 words:\n\n{transcript}{DEFAULT_SPEECH_TOKEN}", - }, - {"role": "assistant", "content": batch["supervisions"]["text"][i]}, - ] - messages.append(message) - return messages + elif prompt_template == "asr": + # speech_tokens remains None + for i in range(batch_size): + message_list_item = [ + { + "role": "user", + "content": f"Transcribe the following audio into text:\\n\\n{DEFAULT_SPEECH_TOKEN}", + }, + {"role": "assistant", "content": answers[i]}, + ] + messages.append(message_list_item) + else: + raise ValueError(f"Unknown prompt template: {prompt_template}") + return messages, speech_tokens def preprocess( messages, @@ -459,6 +426,19 @@ def preprocess( attention_mask = input_ids.ne(tokenizer.pad_token_id) return input_ids, attention_mask, target_ids +def process_batch_text_continuation(batch: dict): + messages = [] + for i in range(len(batch["supervisions"]["text"])): + transcript = batch["supervisions"]["cut"][i].custom["text"] + message = [ + { + "role": "user", + "content": f"Continue the following text using less than 50 words:\n\n{transcript}{DEFAULT_SPEECH_TOKEN}", + }, + {"role": "assistant", "content": batch["supervisions"]["text"][i]}, + ] + messages.append(message) + return messages def preprocess_teacher( messages, @@ -551,20 +531,9 @@ def compute_loss( feature = feature.transpose(1, 2) # (N, C, T) # WAR: TODO FIXME merge process_batch_slam_omni and process_batch_vocalnet - if params.dataset_format == "slam_omni": - messages, answer_cosyvoice_speech_token = process_batch_slam_omni(batch) - elif params.dataset_format == "vocalnet": - messages, answer_cosyvoice_speech_token = process_batch_vocalnet(batch) - if params.loss_type == "kl_div": - messages_text = process_batch_text_vocalnet(batch) - elif params.dataset_format == "speech_continuation": - messages = process_batch_speech_continuation(batch) - if params.loss_type == "kl_div": - messages_text = 
process_batch_text_continuation(batch) - elif params.dataset_format == "asr": - messages = process_batch_asr(batch) - else: - raise ValueError(f"Unknown dataset format: {params.dataset_format}") + messages, answer_cosyvoice_speech_token = extract_text_and_speech_token( + batch, params.prompt_template, params.enable_speech_output + ) input_ids, attention_mask, target_ids = preprocess(messages, tokenizer) @@ -581,6 +550,8 @@ def compute_loss( labels=target_ids.to(device), ) elif params.loss_type == "kl_div": + assert params.prompt_template == "speech_continuation" + messages_text = process_batch_text_continuation(batch) ( teacher_input_ids, teacher_attention_mask, @@ -598,6 +569,7 @@ def compute_loss( else: raise ValueError(f"Unknown loss type: {params.loss_type}") else: + assert params.loss_type == "ce" ( text_loss, acc, @@ -918,13 +890,13 @@ def run(rank, world_size, args): else: attn_implementation = "eager" torch_dtype = torch.float16 - if params.dataset_format == "slam_omni": - codec_vocab_size = 4096 + 4 - elif params.dataset_format == "vocalnet": + if params.speech_tokenizer_type == "cosyvoice2": codec_vocab_size = 6561 + 4 + elif params.speech_tokenizer_type == "cosyvoice1": + codec_vocab_size = 4096 + 4 else: - raise ValueError(f"Unknown dataset format: {params.dataset_format}") - # TODO: modify above vocab size or supress_tokens when decoding + raise ValueError(f"Unknown speech tokenizer type: {params.speech_tokenizer_type}") + config = Qwen2Config( vocab_size=codec_vocab_size, hidden_size=1024, @@ -1029,24 +1001,23 @@ def run(rank, world_size, args): return False return True - if params.dataset_format == "slam_omni": - train_cuts = data_module.train_cuts() - valid_cuts = data_module.dev_cuts() - elif params.dataset_format == "vocalnet": + if params.dataset == "slam_omni_belle": + train_cuts = data_module.train_cuts_belle() + valid_cuts = data_module.dev_cuts_belle() + elif params.dataset == "vocalnet_ultrachat_voiceassistant": train_cuts = data_module.train_cuts_en_vocalnet() valid_cuts = data_module.valid_cuts_en_vocalnet() - elif params.dataset_format == "speech_continuation" or params.dataset_format == "asr": - if params.dataset == "multi_en": - train_cuts = data_module.train_cuts_ultravox() - elif params.dataset == "librispeech": - train_cuts = data_module.train_cuts_librispeech() - elif params.dataset == "gigaspeech": - train_cuts = data_module.train_cuts_gigaspeech() - else: - raise ValueError(f"Unknown dataset: {params.dataset}") + elif params.dataset == "ultravox_multi_en": + train_cuts = data_module.train_cuts_ultravox() + valid_cuts = data_module.valid_cuts_ultravox() + elif params.dataset == "librispeech": + train_cuts = data_module.train_cuts_librispeech() + valid_cuts = data_module.valid_cuts_ultravox() + elif params.dataset == "gigaspeech": + train_cuts = data_module.train_cuts_gigaspeech() valid_cuts = data_module.valid_cuts_ultravox() else: - raise ValueError(f"Unknown dataset format: {params.dataset_format}") + raise ValueError(f"Unknown dataset: {params.dataset}") train_cuts = train_cuts.filter(remove_short_and_long_utt) valid_cuts = valid_cuts.filter(remove_short_and_long_utt) diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/utils.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/utils.py index 7c6f6c0a6..f245712a8 100644 --- a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/utils.py +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/utils.py @@ -8,11 +8,10 @@ import random import re import subprocess from collections import defaultdict -# from contextlib import contextmanager 
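The codec head reserves the four highest ids of the speech-token vocabulary for pad/eos/bos/mask, so the vocabulary becomes the tokenizer size plus 4 (6561 + 4 for a CosyVoice 2 tokenizer, 4096 + 4 for CosyVoice 1). A standalone sketch of instantiating such a small Qwen2-style codec LM with the sizes used above; it mirrors the configuration but is not a drop-in replacement for the training script:

```python
# Sketch of the small codec language model configured in the training script.
import torch
from transformers import AutoModelForCausalLM, Qwen2Config

codec_vocab_size = 6561 + 4        # CosyVoice 2 tokens + 4 reserved control ids
config = Qwen2Config(
    vocab_size=codec_vocab_size,
    hidden_size=1024,
    num_hidden_layers=12,
    num_attention_heads=16,
    num_key_value_heads=16,
    intermediate_size=2048,
    max_position_embeddings=4096,
)
codec_lm = AutoModelForCausalLM.from_config(config, torch_dtype=torch.float16)

# The reserved ids sit at the top of the vocabulary.
codec_lm.config.pad_token_id = codec_vocab_size - 1
codec_lm.config.eos_token_id = codec_vocab_size - 2
codec_lm.config.bos_token_id = codec_vocab_size - 3
codec_lm.config.mask_token_id = codec_vocab_size - 4
```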
from dataclasses import dataclass from datetime import datetime from pathlib import Path -# from shutil import copyfile + from typing import Dict, Iterable, List, Optional, TextIO, Tuple, Union import torch From dd858f0cd1942513d20d187f5deb8498d655bf94 Mon Sep 17 00:00:00 2001 From: root Date: Thu, 22 May 2025 23:16:33 -0700 Subject: [PATCH 48/57] support instruct s2s --- .../SPEECH2SPEECH/qwen_omni/data_module.py | 185 ++++++++++++++++-- .../SPEECH2SPEECH/qwen_omni/train.py | 154 ++++++++------- 2 files changed, 250 insertions(+), 89 deletions(-) diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/data_module.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/data_module.py index acdfb4f2c..a52f84b0c 100644 --- a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/data_module.py +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/data_module.py @@ -413,6 +413,8 @@ class AsrDataModule: ultrachat_cuts = load_manifest_lazy( self.args.manifest_dir / "cuts_ultrachat_train.jsonl.gz" ) + VoiceAssistant_cuts = VoiceAssistant_cuts.resample(16000) + ultrachat_cuts = ultrachat_cuts.resample(16000) return CutSet.mux( VoiceAssistant_cuts, ultrachat_cuts, @@ -427,6 +429,7 @@ class AsrDataModule: VoiceAssistant_cuts = load_manifest_lazy( self.args.manifest_dir / "cuts_voice_assistant.00000.jsonl.gz" ) + VoiceAssistant_cuts = VoiceAssistant_cuts.resample(16000) return VoiceAssistant_cuts @lru_cache() @@ -435,6 +438,7 @@ class AsrDataModule: VoiceAssistant_cuts = load_manifest_lazy( self.args.manifest_dir / "cuts_voice_assistant_small.00000.jsonl.gz" ) + VoiceAssistant_cuts = VoiceAssistant_cuts.resample(16000) return {"test": VoiceAssistant_cuts} @lru_cache() @@ -482,36 +486,36 @@ class AsrDataModule: librispeech_clean_100_cuts = CutSet.from_huggingface_dataset( librispeech_clean_100, - audio_key=self.args.audio_key, - text_key=self.args.text_key, + audio_key="audio", + text_key="text", ) librispeech_other_cuts = CutSet.from_huggingface_dataset( librispeech_other, - audio_key=self.args.audio_key, - text_key=self.args.text_key, + audio_key="audio", + text_key="text", ) librispeech_clean_360_cuts = CutSet.from_huggingface_dataset( librispeech_clean_360, - audio_key=self.args.audio_key, - text_key=self.args.text_key, + audio_key="audio", + text_key="text", ) gigaspeech_cuts = CutSet.from_huggingface_dataset( - gigaspeech, audio_key=self.args.audio_key, text_key=self.args.text_key + gigaspeech, audio_key="audio", text_key="text" ) people_speech_clean_cuts = CutSet.from_huggingface_dataset( people_speech_clean, - audio_key=self.args.audio_key, - text_key=self.args.text_key, + audio_key="audio", + text_key="text", ) people_speech_dirty_sa_cuts = CutSet.from_huggingface_dataset( people_speech_dirty_sa, - audio_key=self.args.audio_key, - text_key=self.args.text_key, + audio_key="audio", + text_key="text", ) return CutSet.mux( @@ -540,8 +544,8 @@ class AsrDataModule: ) librispeech_clean_valid_cuts = CutSet.from_huggingface_dataset( librispeech_clean_valid, - audio_key=self.args.audio_key, - text_key=self.args.text_key, + audio_key="audio", + text_key="text", ) return librispeech_clean_valid_cuts @@ -567,20 +571,20 @@ class AsrDataModule: librispeech_clean_100_cuts = CutSet.from_huggingface_dataset( librispeech_clean_100, - audio_key=self.args.audio_key, - text_key=self.args.text_key, + audio_key="audio", + text_key="text", ) librispeech_other_cuts = CutSet.from_huggingface_dataset( librispeech_other, - audio_key=self.args.audio_key, - text_key=self.args.text_key, + audio_key="audio", + text_key="text", ) librispeech_clean_360_cuts = 
CutSet.from_huggingface_dataset( librispeech_clean_360, - audio_key=self.args.audio_key, - text_key=self.args.text_key, + audio_key="audio", + text_key="text", ) return CutSet.mux( @@ -603,7 +607,148 @@ class AsrDataModule: ) gigaspeech_cuts = CutSet.from_huggingface_dataset( - gigaspeech, audio_key=self.args.audio_key, text_key=self.args.text_key + gigaspeech, audio_key="audio", text_key="text" ) return gigaspeech_cuts + + @lru_cache() + def train_cuts_instruct_s2s(self) -> CutSet: + logging.info("About to get train cuts") + if self.args.huggingface_dataset_path_or_name is not None: + data_path = self.args.huggingface_dataset_path_or_name + "/InstructS2S-200K" + else: + data_path = "yuekai/InstructS2S-200K" + # 148_688 + instruct_s2s_train = load_dataset( + data_path, split="train", streaming=True + ) + + instruct_s2s_train_cuts = CutSet.from_huggingface_dataset( + instruct_s2s_train, + audio_key="question_audio", + text_key="answer", + ) + + instruct_s2s_train_cuts = instruct_s2s_train_cuts.resample(16000) + + return instruct_s2s_train_cuts + + @lru_cache() + def train_cuts_en_speech2speech(self) -> CutSet: + logging.info("About to get train cuts") + VoiceAssistant_cuts = load_manifest_lazy( + self.args.manifest_dir / "cuts_voice_assistant_00001-00049.jsonl.gz" + ) + ultrachat_cuts = load_manifest_lazy( + self.args.manifest_dir / "cuts_ultrachat_train.jsonl.gz" + ) + + if self.args.huggingface_dataset_path_or_name is not None: + data_path = self.args.huggingface_dataset_path_or_name + "/InstructS2S-200K" + else: + data_path = "yuekai/InstructS2S-200K" + # 148_688 + instruct_s2s_train = load_dataset( + data_path, split="train", streaming=True + ) + + instruct_s2s_train_cuts = CutSet.from_huggingface_dataset( + instruct_s2s_train, + audio_key="question_audio", + text_key="answer", + ) + + instruct_s2s_train_cuts = instruct_s2s_train_cuts.resample(16000) + + + return CutSet.mux( + VoiceAssistant_cuts, + ultrachat_cuts, + instruct_s2s_train_cuts, + weights=[ + len(VoiceAssistant_cuts), + len(ultrachat_cuts), + 423_000, + ], + ) + + @lru_cache() + def train_cuts_en_speech2speech_librispeech(self) -> CutSet: + logging.info("About to get train cuts") + VoiceAssistant_cuts = load_manifest_lazy( + self.args.manifest_dir / "cuts_voice_assistant_00001-00049.jsonl.gz" + ) + ultrachat_cuts = load_manifest_lazy( + self.args.manifest_dir / "cuts_ultrachat_train.jsonl.gz" + ) + + if self.args.huggingface_dataset_path_or_name is not None: + data_path = self.args.huggingface_dataset_path_or_name + "/InstructS2S-200K" + else: + data_path = "yuekai/InstructS2S-200K" + # 148_688 + instruct_s2s_train = load_dataset( + data_path, split="train", streaming=True + ) + + instruct_s2s_train_cuts = CutSet.from_huggingface_dataset( + instruct_s2s_train, + audio_key="question_audio", + text_key="answer", + ) + + instruct_s2s_train_cuts = instruct_s2s_train_cuts.resample(16000) + + if self.args.huggingface_dataset_path_or_name is not None: + librispeech_path = self.args.huggingface_dataset_path_or_name + "/librispeech_asr" + else: + librispeech_path = "fixie-ai/librispeech_asr" + # 148_688 + librispeech_other = load_dataset( + librispeech_path, "other", split="train.500", streaming=True + ) + # 104_014 + librispeech_clean_360 = load_dataset( + librispeech_path, "clean", split="train.360", streaming=True + ) + # 28_539 + librispeech_clean_100 = load_dataset( + librispeech_path, "clean", split="train.100", streaming=True + ) + + librispeech_clean_100_cuts = CutSet.from_huggingface_dataset( + librispeech_clean_100, + 
audio_key="audio", + text_key="text", + ) + + librispeech_other_cuts = CutSet.from_huggingface_dataset( + librispeech_other, + audio_key="audio", + text_key="text", + ) + + librispeech_clean_360_cuts = CutSet.from_huggingface_dataset( + librispeech_clean_360, + audio_key="audio", + text_key="text", + ) + + + return CutSet.mux( + librispeech_other_cuts, + VoiceAssistant_cuts, + ultrachat_cuts, + librispeech_clean_360_cuts, + instruct_s2s_train_cuts, + librispeech_clean_100_cuts, + weights=[ + 148688, + len(VoiceAssistant_cuts), + len(ultrachat_cuts), + 104014, + 423_000, + 28539, + ], + ) \ No newline at end of file diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py index e65cc7829..9554d85e4 100755 --- a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py @@ -193,6 +193,13 @@ def get_parser(): """, ) + parser.add_argument( + "--last-stage-model-path", + type=str, + default=None, + help="""The path to the last stage model if it is not None. Training will start from this model. + """, + ) parser.add_argument( "--sampler-state-dict-path", type=str, @@ -229,13 +236,6 @@ def get_parser(): help="Whether to unfreeze speech adaptor during training.", ) - parser.add_argument( - "--prompt-template", - type=str, - default="speech_qa", - help="The prompt template to use.", - ) - parser.add_argument( "--dataset", type=str, @@ -300,7 +300,6 @@ def get_params() -> AttributeDict: def extract_text_and_speech_token( batch: dict, - prompt_template: str, enable_speech_output: bool ) -> Tuple[List[Dict[str, str]], Optional[List[Any]]]: """ @@ -325,54 +324,54 @@ def extract_text_and_speech_token( answers = batch["supervisions"]["text"] batch_size = len(answers) - if prompt_template == "speech_qa": - for i in range(batch_size): - message_list_item = [] - if 'round' in batch["supervisions"]["cut"][i].custom: - # slam_omni format dataset - current_question_with_history = batch["supervisions"]["cut"][i].custom["question"] - total_round = batch["supervisions"]["cut"][i].custom["round"] - history_context = current_question_with_history.rsplit(":", 1)[0].strip() - if total_round > 1: - history_question_answer = history_context.split("USER:") - history_question_answer = [item for item in history_question_answer if item] - for j in range(total_round - 1): - question_answer = history_question_answer[j].split("ASSISTANT:") - message_list_item += [ - {"role": "user", "content": question_answer[0].strip()}, - {"role": "assistant", "content": question_answer[1].strip()}, - ] - message_list_item += [ - {"role": "user", "content": f"{DEFAULT_SPEECH_TOKEN}"}, - {"role": "assistant", "content": answers[i]}, - ] - messages.append(message_list_item) + prompt_template_dict = { + "speech_qa": f"{DEFAULT_SPEECH_TOKEN}", + "speech_continuation": f"Continue the following text using less than 50 words:\\n\\n{DEFAULT_SPEECH_TOKEN}", + "asr": f"Transcribe the following audio into text:\\n\\n{DEFAULT_SPEECH_TOKEN}", + } - elif prompt_template == "speech_continuation": - # speech_tokens remains None - for i in range(batch_size): - message_list_item = [ - { - "role": "user", - "content": f"Continue the following text using less than 50 words:\\n\\n{DEFAULT_SPEECH_TOKEN}", - }, - {"role": "assistant", "content": answers[i]}, - ] - messages.append(message_list_item) + for i in range(batch_size): + # Initialize prompt_template with the original default. 
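`CutSet.mux` interleaves several lazily loaded sources in proportion to the given weights; `len()` works for local manifests, while streaming HuggingFace sources of unknown size get an approximate hard-coded count, as in the mixtures above. A reduced two-source sketch of the same pattern (manifest paths are placeholders):

```python
# Reduced sketch of weighted CutSet muxing; manifest paths are placeholders.
from lhotse import CutSet, load_manifest_lazy

local_cuts = load_manifest_lazy("data/fbank/cuts_voice_assistant.jsonl.gz")
other_cuts = load_manifest_lazy("data/fbank/cuts_ultrachat_train.jsonl.gz")

mixed = CutSet.mux(
    local_cuts,
    other_cuts,
    # Relative sampling proportions: a real length for the first source and an
    # approximate example count standing in for a streaming source.
    weights=[len(local_cuts), 423_000],
)

for cut in mixed:
    print(cut.id)
    break
```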
+ # The 'prompt_template' argument to the function seems unused if we determine it here. + # For now, I will proceed assuming the internal logic dictates the template. + # If the function argument `prompt_template` was meant to be the default, this logic would need adjustment. + current_prompt_template = "speech_qa" # Default value for prompt_template for the current item + target = answers[i] + message_list_item = [] + + custom_data = batch["supervisions"]["cut"][i].custom - elif prompt_template == "asr": - # speech_tokens remains None - for i in range(batch_size): - message_list_item = [ - { - "role": "user", - "content": f"Transcribe the following audio into text:\\n\\n{DEFAULT_SPEECH_TOKEN}", - }, - {"role": "assistant", "content": answers[i]}, - ] - messages.append(message_list_item) - else: - raise ValueError(f"Unknown prompt template: {prompt_template}") + if 'round' in custom_data: + # slam_omni format dataset + # For 'round' type, the current interaction's user prompt will use current_prompt_template ("speech_qa") + current_question_with_history = custom_data["question"] + total_round = custom_data["round"] + history_context = current_question_with_history.rsplit(":", 1)[0].strip() + if total_round > 1: + history_question_answer = history_context.split("USER:") + history_question_answer = [item for item in history_question_answer if item] + for j in range(total_round - 1): + question_answer = history_question_answer[j].split("ASSISTANT:") + message_list_item += [ + {"role": "user", "content": question_answer[0].strip()}, + {"role": "assistant", "content": question_answer[1].strip()}, + ] + elif 'continuation' in custom_data: + # see https://huggingface.co/datasets/fixie-ai/librispeech_asr + ASR_PROBABILITY = 0.3 + if random.random() < ASR_PROBABILITY: + current_prompt_template = "asr" + else: + current_prompt_template = "speech_continuation" + target = custom_data["continuation"] + else: + # single-round, speech2speech conversation data + pass + message_list_item += [ + {"role": "user", "content": prompt_template_dict[current_prompt_template]}, + {"role": "assistant", "content": target}, + ] + messages.append(message_list_item) return messages, speech_tokens @@ -428,14 +427,17 @@ def preprocess( def process_batch_text_continuation(batch: dict): messages = [] - for i in range(len(batch["supervisions"]["text"])): - transcript = batch["supervisions"]["cut"][i].custom["text"] + transcripts = batch["supervisions"]["text"] + continuations = [ + cut.custom["continuation"] for cut in batch["supervisions"]["cut"] + ] + for i in range(len(transcripts)): message = [ { "role": "user", - "content": f"Continue the following text using less than 50 words:\n\n{transcript}{DEFAULT_SPEECH_TOKEN}", + "content": f"Continue the following text using less than 50 words:\n\n{transcripts[i]}{DEFAULT_SPEECH_TOKEN}", }, - {"role": "assistant", "content": batch["supervisions"]["text"][i]}, + {"role": "assistant", "content": continuations[i]}, ] messages.append(message) return messages @@ -532,7 +534,7 @@ def compute_loss( # WAR: TODO FIXME merge process_batch_slam_omni and process_batch_vocalnet messages, answer_cosyvoice_speech_token = extract_text_and_speech_token( - batch, params.prompt_template, params.enable_speech_output + batch, params.enable_speech_output ) input_ids, attention_mask, target_ids = preprocess(messages, tokenizer) @@ -550,7 +552,6 @@ def compute_loss( labels=target_ids.to(device), ) elif params.loss_type == "kl_div": - assert params.prompt_template == "speech_continuation" messages_text 
= process_batch_text_continuation(batch) ( teacher_input_ids, @@ -942,15 +943,18 @@ def run(rank, world_size, args): teacher_llm=teacher_llm, ) - if params.pretrained_model_path: - checkpoint = torch.load(params.pretrained_model_path, map_location="cpu") - missing_keys, unexpected_keys = model.load_state_dict(checkpoint, strict=False) - # set params.batch_idx_train according to the checkpoint name - if "checkpoint-" in params.pretrained_model_path: - params.batch_idx_train = int( - params.pretrained_model_path.split("-")[-1].split("/")[0] - ) - + if params.pretrained_model_path or params.last_stage_model_path: + if params.pretrained_model_path is None: + checkpoint = torch.load(params.last_stage_model_path, map_location="cpu") + missing_keys, unexpected_keys = model.load_state_dict(checkpoint, strict=False) + else: + checkpoint = torch.load(params.pretrained_model_path, map_location="cpu") + missing_keys, unexpected_keys = model.load_state_dict(checkpoint, strict=False) + # set params.batch_idx_train according to the checkpoint name + if "checkpoint-" in params.pretrained_model_path: + params.batch_idx_train = int( + params.pretrained_model_path.split("-")[-1].split("/")[0] + ) num_param = sum([p.numel() for p in model.parameters()]) logging.info(f"Number of model parameters: {num_param}") @@ -999,6 +1003,12 @@ def run(rank, world_size, args): f"Exclude cut with ID {c.id} from training. Duration: {c.duration}, lenth: {codec_len}" ) return False + if "question" in c.custom: + if len(c.custom["question"]) > 1200: + # logging.warning( + # f"Exclude cut with ID {c.id} from training. question length: {len(c.custom['question'])}" + # ) + return False return True if params.dataset == "slam_omni_belle": @@ -1007,6 +1017,12 @@ def run(rank, world_size, args): elif params.dataset == "vocalnet_ultrachat_voiceassistant": train_cuts = data_module.train_cuts_en_vocalnet() valid_cuts = data_module.valid_cuts_en_vocalnet() + elif params.dataset == "vocalnet_ultrachat_voiceassistant_instruct_s2s": + train_cuts = data_module.train_cuts_en_speech2speech() + valid_cuts = data_module.valid_cuts_en_vocalnet() + elif params.dataset == "vocalnet_ultrachat_voiceassistant_instruct_s2s_librispeech": + train_cuts = data_module.train_cuts_en_speech2speech_librispeech() + valid_cuts = data_module.valid_cuts_en_vocalnet() elif params.dataset == "ultravox_multi_en": train_cuts = data_module.train_cuts_ultravox() valid_cuts = data_module.valid_cuts_ultravox() From e6e1f3fa4f70c7c7299d2e3074134085cbb9d78e Mon Sep 17 00:00:00 2001 From: root Date: Fri, 23 May 2025 01:53:05 -0700 Subject: [PATCH 49/57] add tts stage --- .../SPEECH2SPEECH/qwen_omni/data_module.py | 65 ++++++++++++++++++- .../SPEECH2SPEECH/qwen_omni/train.py | 3 + 2 files changed, 65 insertions(+), 3 deletions(-) diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/data_module.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/data_module.py index a52f84b0c..457c3e107 100644 --- a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/data_module.py +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/data_module.py @@ -24,7 +24,7 @@ from pathlib import Path from typing import Any, Dict, Optional import torch -from datasets import interleave_datasets, load_dataset +from datasets import interleave_datasets, load_dataset, Audio, Features, Value, Sequence from lhotse import ( CutSet, WhisperFbank, @@ -49,7 +49,9 @@ from lhotse.dataset.input_strategies import ( # noqa F401 For AudioSamples from lhotse.utils import fix_random_seed from torch.utils.data import DataLoader from utils import 
get_local_rank, str2bool - +import io +import wave +import random class _SeedWorkers: def __init__(self, seed: int): @@ -751,4 +753,61 @@ class AsrDataModule: 423_000, 28539, ], - ) \ No newline at end of file + ) + + @lru_cache() + def train_cuts_emilia_en(self) -> CutSet: + logging.info("About to get train cuts") + data_path = "/lustre/fsw/general_sa/yuekaiz/s2s" + "/emilia_en" + # if self.args.huggingface_dataset_path_or_name is not None: + # data_path = self.args.huggingface_dataset_path_or_name + "/emilia_en" + # else: + # data_path = "yuekai/emilia_en" + + emilia_en_data = load_dataset( + data_path, split="train", streaming=True + ) + + def update_wav_path(example): + sampling_rate = 16000 # From current_features + duration = 1 # seconds, arbitrary duration for random audio + num_channels = 1 # mono + sample_width = 2 # 2 bytes = 16-bit audio + + num_frames = int(duration * sampling_rate) + + # Generate random bytes for the PCM data part + # This will be random noise, but structurally valid for a WAV file + pcm_data = bytes([random.randint(0, 255) for _ in range(num_frames * num_channels * sample_width)]) + + # Create a WAV file in memory + audio_buffer = io.BytesIO() + with wave.open(audio_buffer, 'wb') as wf: + wf.setnchannels(num_channels) + wf.setsampwidth(sample_width) + wf.setframerate(sampling_rate) + wf.writeframes(pcm_data) # writeframes expects bytes + + example["wav"] = audio_buffer.getvalue() + return example + + emilia_en_data = emilia_en_data.map(update_wav_path) + current_features = Features({ + 'id': Value('string'), + 'text': Value('string'), + 'duration': Value('float'), + 'language': Value('string'), + 'dnsmos': Value('float'), + 'speech_token': Sequence(Value('int32')), + 'wav': Audio(sampling_rate=16000) + + }) + emilia_en_data = emilia_en_data.rename_column("code", "speech_token") + emilia_en_data = emilia_en_data.cast(current_features) + + emilia_en_train_cuts = CutSet.from_huggingface_dataset( + emilia_en_data, # Adjusted from instruct_s2s_train + audio_key="wav", + text_key="text", + ) + return emilia_en_train_cuts \ No newline at end of file diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py index 9554d85e4..87b8315f1 100755 --- a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py @@ -1032,6 +1032,9 @@ def run(rank, world_size, args): elif params.dataset == "gigaspeech": train_cuts = data_module.train_cuts_gigaspeech() valid_cuts = data_module.valid_cuts_ultravox() + elif params.dataset == "emilia_en": + train_cuts = data_module.train_cuts_emilia_en() + valid_cuts = data_module.valid_cuts_emilia_en() else: raise ValueError(f"Unknown dataset: {params.dataset}") From 39700d5c94716ec7cbfe5823e61ac59a64577076 Mon Sep 17 00:00:00 2001 From: root Date: Mon, 26 May 2025 19:53:16 -0700 Subject: [PATCH 50/57] refactor train to reuse code --- .../SPEECH2SPEECH/qwen_omni/model.py | 29 +- .../SPEECH2SPEECH/qwen_omni/train.py | 135 +++-- .../SPEECH2SPEECH/qwen_omni/train_tts.py | 552 ++++++++++++++++++ 3 files changed, 632 insertions(+), 84 deletions(-) create mode 100755 egs/speech_llm/SPEECH2SPEECH/qwen_omni/train_tts.py diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/model.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/model.py index 97484486d..5ba3c1a7c 100644 --- a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/model.py +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/model.py @@ -59,9 +59,9 @@ class SPEECH_LLM(nn.Module): def __init__( self, - encoder: nn.Module, - 
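The `update_wav_path` mapping above fabricates a structurally valid WAV payload so that manifests which only need text and pre-extracted speech tokens can still pass through a 16 kHz `Audio` feature. A standalone sketch of building such an in-memory WAV with the standard library:

```python
# Standalone sketch: a 1-second, 16 kHz, 16-bit mono WAV of random samples,
# built entirely in memory (the audio content is noise by construction).
import io
import os
import wave

def random_wav_bytes(sampling_rate: int = 16000, duration_s: float = 1.0) -> bytes:
    num_channels, sample_width = 1, 2                 # mono, 16-bit PCM
    num_frames = int(duration_s * sampling_rate)
    pcm_data = os.urandom(num_frames * num_channels * sample_width)

    buffer = io.BytesIO()
    with wave.open(buffer, "wb") as wf:
        wf.setnchannels(num_channels)
        wf.setsampwidth(sample_width)
        wf.setframerate(sampling_rate)
        wf.writeframes(pcm_data)
    return buffer.getvalue()

wav_bytes = random_wav_bytes()
print(len(wav_bytes))   # 44-byte RIFF header + 32000 bytes of PCM
```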
llm: nn.Module, - encoder_projector: nn.Module, + encoder: nn.Module = None, + llm: nn.Module = None, + encoder_projector: nn.Module = None, codec_lm: nn.Module = None, codec_lm_padding_side: str = "left", teacher_llm: nn.Module = None, @@ -330,20 +330,19 @@ class SPEECH_LLM(nn.Module): labels: torch.LongTensor = None, speech_codec_ids: torch.LongTensor = None, ): - encoder_outs = self.encoder(fbank) - - speech_features = self.encoder_projector(encoder_outs) - inputs_embeds = self.llm.get_input_embeddings()(input_ids) + if fbank is not None: + encoder_outs = self.encoder(fbank) + speech_features = self.encoder_projector(encoder_outs) + ( + inputs_embeds, + attention_mask, + labels, + _, + ) = self._merge_input_ids_with_speech_features( + speech_features, inputs_embeds, input_ids, attention_mask, labels + ) - ( - inputs_embeds, - attention_mask, - labels, - _, - ) = self._merge_input_ids_with_speech_features( - speech_features, inputs_embeds, input_ids, attention_mask, labels - ) input_seq_len = attention_mask.sum(dim=1) # shape, B ( text_label_start_index_list, diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py index 87b8315f1..f1b25d3e6 100755 --- a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py @@ -69,8 +69,6 @@ from transformers import ( Qwen2ForCausalLM, ) -# from icefall.env import get_env_info -# from icefall import diagnostics from utils import ( # filter_uneven_sized_batch, AttributeDict, MetricsTracker, @@ -137,6 +135,13 @@ def add_model_arguments(parser: argparse.ArgumentParser): help="Whether to enable speech codec output.", ) + parser.add_argument( + "--enable-speech-input", + type=str2bool, + default=True, + help="Whether to enable speech fbank input.", + ) + parser.add_argument( "--speech-tokenizer-type", type=str, @@ -145,11 +150,7 @@ def add_model_arguments(parser: argparse.ArgumentParser): ) -def get_parser(): - parser = argparse.ArgumentParser( - formatter_class=argparse.ArgumentDefaultsHelpFormatter - ) - +def add_training_arguments(parser: argparse.ArgumentParser): parser.add_argument( "--tensorboard", type=str2bool, @@ -243,6 +244,12 @@ def get_parser(): help="The name of the dataset.", ) + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + parser.add_argument( "--loss-type", type=str, @@ -252,7 +259,7 @@ def get_parser(): parser = deepspeed.add_config_arguments(parser) add_model_arguments(parser) - + add_training_arguments(parser) return parser @@ -532,7 +539,6 @@ def compute_loss( feature = feature.to(device) feature = feature.transpose(1, 2) # (N, C, T) - # WAR: TODO FIXME merge process_batch_slam_omni and process_batch_vocalnet messages, answer_cosyvoice_speech_token = extract_text_and_speech_token( batch, params.enable_speech_output ) @@ -686,9 +692,9 @@ def train_one_epoch( The rank of the node in DDP training. If no DDP is used, it should be set to 0. 
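        A quick sanity check that the freeze flags used below behave as
        expected (a sketch, not part of the recipe; `model` is the argument
        above):

            trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
            frozen = sum(p.numel() for p in model.parameters() if not p.requires_grad)
            logging.info(f"trainable params: {trainable}, frozen params: {frozen}")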
""" - # model.encoder_projector.train() model.train() - model.encoder.eval() + if params.enable_speech_input: + model.encoder.eval() if not params.unfreeze_llm: model.llm.eval() tot_loss = MetricsTracker() @@ -706,7 +712,8 @@ def train_one_epoch( world_size=world_size, ) model.train() - model.encoder.eval() + if params.enable_speech_input: + model.encoder.eval() if not params.unfreeze_llm: model.llm.eval() logging.info(f"Epoch {params.cur_epoch}, validation: {valid_info}") @@ -796,36 +803,11 @@ def train_one_epoch( params.best_train_loss = params.train_loss -def run(rank, world_size, args): - """ - Args: - rank: - It is a value between 0 and `world_size-1`, which is - passed automatically by `mp.spawn()` in :func:`main`. - The node with rank 0 is responsible for saving checkpoint. - world_size: - Number of GPUs for DDP training. - args: - The return value of get_parser().parse_args() - """ - params = get_params() - params.update(vars(args)) - - fix_random_seed(params.seed) - - if rank == 0: - setup_logger(f"{params.exp_dir}/log/log-train") - logging.info(params) - logging.info("About to create model") - - replace_whisper_encoder_forward() - whisper_model = whisper.load_model(params.speech_encoder_path_or_name, "cpu") - speech_encoder = whisper_model.encoder - speech_encoder_dim = whisper_model.dims.n_audio_state - for name, param in speech_encoder.named_parameters(): - param.requires_grad = False - +def get_model(params): + """Load and prepare the speech-to-speech model.""" tokenizer = AutoTokenizer.from_pretrained(params.llm_path_or_name) + special_tokens_dict = {"additional_special_tokens": [DEFAULT_SPEECH_TOKEN]} + tokenizer.add_special_tokens(special_tokens_dict) if params.use_flash_attn: attn_implementation = "flash_attention_2" @@ -842,11 +824,9 @@ def run(rank, world_size, args): attn_implementation=attn_implementation, torch_dtype=torch_dtype, ) - if not params.unfreeze_llm: for name, param in llm.named_parameters(): param.requires_grad = False - else: if params.use_lora: lora_config = LoraConfig( @@ -867,21 +847,29 @@ def run(rank, world_size, args): llm = get_peft_model(llm, lora_config) llm.print_trainable_parameters() - special_tokens_dict = {"additional_special_tokens": [DEFAULT_SPEECH_TOKEN]} - tokenizer.add_special_tokens(special_tokens_dict) - llm.config.pad_token_id = tokenizer.pad_token_id llm.config.default_speech_token_id = tokenizer.convert_tokens_to_ids( DEFAULT_SPEECH_TOKEN ) - encoder_projector = EncoderProjector( - speech_encoder_dim, llm.config.hidden_size, params.encoder_projector_ds_rate - ) - if not params.unfreeze_speech_projector: - for name, param in encoder_projector.named_parameters(): + if params.enable_speech_input: + if params.remove_whisper_encoder_input_length_restriction: + replace_whisper_encoder_forward() + whisper_model = whisper.load_model(params.speech_encoder_path_or_name, "cpu") + speech_encoder = whisper_model.encoder + speech_encoder_dim = whisper_model.dims.n_audio_state + for name, param in speech_encoder.named_parameters(): param.requires_grad = False - encoder_projector.eval() + encoder_projector = EncoderProjector( + speech_encoder_dim, llm.config.hidden_size, params.encoder_projector_ds_rate + ) + if not params.unfreeze_speech_projector: + for name, param in encoder_projector.named_parameters(): + param.requires_grad = False + encoder_projector.eval() + else: + speech_encoder = None + encoder_projector = None if params.enable_speech_output: # Determine attn_implementation and torch_dtype based on use_flash_attn @@ -922,17 +910,6 @@ def 
run(rank, world_size, args): codec_lm.config.mask_token_id = codec_vocab_size - 4 else: codec_lm = None - if params.loss_type == "kl_div": - teacher_llm = AutoModelForCausalLM.from_pretrained( - params.llm_path_or_name, - attn_implementation=attn_implementation, - torch_dtype=torch_dtype, - ) - for name, param in teacher_llm.named_parameters(): - param.requires_grad = False - teacher_llm.eval() - else: - teacher_llm = None model = SPEECH_LLM( speech_encoder, @@ -940,9 +917,7 @@ def run(rank, world_size, args): encoder_projector, codec_lm, codec_lm_padding_side="left" if params.use_flash_attn else "right", - teacher_llm=teacher_llm, ) - if params.pretrained_model_path or params.last_stage_model_path: if params.pretrained_model_path is None: checkpoint = torch.load(params.last_stage_model_path, map_location="cpu") @@ -963,6 +938,32 @@ def run(rank, world_size, args): if param.requires_grad: logging.info(f"{name}: {param.shape}") + return model, tokenizer + +def run(rank, world_size, args): + """ + Args: + rank: + It is a value between 0 and `world_size-1`, which is + passed automatically by `mp.spawn()` in :func:`main`. + The node with rank 0 is responsible for saving checkpoint. + world_size: + Number of GPUs for DDP training. + args: + The return value of get_parser().parse_args() + """ + params = get_params() + params.update(vars(args)) + + fix_random_seed(params.seed) + + if rank == 0: + setup_logger(f"{params.exp_dir}/log/log-train") + logging.info(params) + logging.info("About to create model") + + model, tokenizer = get_model(params) + if torch.cuda.is_available(): device = torch.device("cuda", get_local_rank()) else: @@ -1032,9 +1033,6 @@ def run(rank, world_size, args): elif params.dataset == "gigaspeech": train_cuts = data_module.train_cuts_gigaspeech() valid_cuts = data_module.valid_cuts_ultravox() - elif params.dataset == "emilia_en": - train_cuts = data_module.train_cuts_emilia_en() - valid_cuts = data_module.valid_cuts_emilia_en() else: raise ValueError(f"Unknown dataset: {params.dataset}") @@ -1049,7 +1047,6 @@ def run(rank, world_size, args): train_dl = data_module.train_dataloaders( train_cuts, sampler_state_dict=sampler_state_dict ) - # train_dl = data_module.valid_dataloaders(train_cuts) valid_dl = data_module.valid_dataloaders(valid_cuts) if args.tensorboard and rank == 0: diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train_tts.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train_tts.py new file mode 100755 index 000000000..8fd6609a4 --- /dev/null +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train_tts.py @@ -0,0 +1,552 @@ +#!/usr/bin/env python3 +# Copyright 2023 Xiaomi Corp. (authors: Xiaoyu Yang) +# 2024 Yuekai Zhang +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Usage: +# For Chinese dataset, you can use the following command to download the Chinese fine-tuned whisper model. 
+huggingface-cli download --local-dir models/whisper yuekai/icefall_asr_multi-hans-zh_whisper +# Qwen Pretrained model +huggingface-cli download --local-dir models/Qwen2.5-0.5B-Instruct Qwen/Qwen2.5-0.5B-Instruct + +torchrun --nproc_per_node $ngpu ./qwen_omni/train.py \ + --max-duration 50 \ + --enable-musan False \ + --exp-dir $exp_dir \ + --speech-encoder-path-or-name models/whisper/v1.1/whisper-large-v2-multi-hans-zh-epoch-3-avg-10.pt \ + --llm-path-or-name Qwen/Qwen2.5-0.5B-Instruct \ + --manifest-dir data/fbank \ + --deepspeed \ + --deepspeed_config ./qwen_omni/ds_config_zero1.json \ + --use-flash-attn True \ + --use-lora True --unfreeze-llm True --unfreeze-speech-projector True --enable-speech-output True +""" + +import argparse +import copy +import logging +import os +import random +import warnings +from pathlib import Path +from shutil import copyfile +from typing import Any, Dict, List, Optional, Tuple, Union + +import deepspeed +import torch +import torch.multiprocessing as mp +import torch.nn as nn +import transformers + +from deepspeed.utils.zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict +from label_smoothing import LabelSmoothingLoss + +from lhotse.utils import fix_random_seed +from model import IGNORE_TOKEN_ID, SPEECH_LLM +from peft import LoraConfig, get_peft_model +from torch import Tensor +from torch.utils.tensorboard import SummaryWriter +from transformers import ( + AutoModelForCausalLM, + AutoTokenizer, + Qwen2Config, + Qwen2ForCausalLM, +) +from torchdata.stateful_dataloader import StatefulDataLoader +from torch.utils.data import DistributedSampler, DataLoader + +from train import add_model_arguments, add_training_arguments, get_params, compute_validation_loss, get_model, display_and_save_batch +from utils import ( # filter_uneven_sized_batch, + AttributeDict, + MetricsTracker, + get_local_rank, + get_rank, + get_world_size, + setup_logger, + str2bool, +) + +DEFAULT_SPEECH_TOKEN = "" +try: + torch.multiprocessing.set_start_method("spawn") +except RuntimeError: + pass + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + # parser.add_argument( + # "--loss-type", + # type=str, + # default="ce", + # help="The type of loss to use.", + # ) + + parser = deepspeed.add_config_arguments(parser) + add_model_arguments(parser) + add_training_arguments(parser) + return parser + +def preprocess( + messages, + tokenizer: transformers.PreTrainedTokenizer, +) -> Dict: + """Preprocesses the data for supervised fine-tuning.""" + texts = [] + TEMPLATE = "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content']}}{% if loop.last %}{{ '<|im_end|>'}}{% else %}{{ '<|im_end|>\n' }}{% endif %}{% endfor %}" + for i, msg in enumerate(messages): + texts.append( + tokenizer.apply_chat_template( + msg, + tokenize=True, + chat_template=TEMPLATE, + add_generation_prompt=False, + padding="longest", # FIX me change padding to longest + truncation=False, + ) + ) + if len(texts) != len(messages): + logging.warning(f"Remove too long text, {messages} ") + max_len_texts = max([len(text) for text in texts]) + if tokenizer.padding_side == "right": + texts = [ + text + [tokenizer.pad_token_id] * (max_len_texts - len(text)) + for text in texts + ] + else: + texts = [ + [tokenizer.pad_token_id] * (max_len_texts - len(text)) + text + for text in texts + ] + input_ids = torch.tensor(texts, dtype=torch.int) + + target_ids = input_ids.clone() + target_ids[target_ids == tokenizer.pad_token_id] = 
IGNORE_TOKEN_ID + # mask all tokens before token_id with IGNORE_TOKEN_ID + # first get the indices of the tokens + mask_prompt = True + if mask_prompt: + default_speech_token_id = tokenizer.convert_tokens_to_ids(DEFAULT_SPEECH_TOKEN) + mask_indices = torch.where(input_ids == default_speech_token_id) + for i in range(mask_indices[0].size(0)): + row = mask_indices[0][i] + col = mask_indices[1][i] + # + 2 to skip: 'assistant', '\n' + # WAR: TODO FIXME check qwen3 + # THIS IS THE ONLY DIFFERENCE FROM preprocess + target_ids[row, : col + 6] = IGNORE_TOKEN_ID + target_ids[row, col] = default_speech_token_id + # remove default_speech_token_id from target_ids and input_ids + batch_size = target_ids.size(0) + + target_ids = target_ids[target_ids != default_speech_token_id].view(batch_size, -1) + input_ids = input_ids[input_ids != default_speech_token_id].view(batch_size, -1) + + attention_mask = input_ids.ne(tokenizer.pad_token_id) + return input_ids, attention_mask, target_ids + +def data_collator(batch, tokenizer, cut_off_len=2048): + speech_tokens, messages, durations, ids, lang, dnsmos = [], [], [], [], [], [] + for i, item in enumerate(batch): + speech_tokens.append(item["code"]) + message_list_item = [] + message_list_item += [ + {"role": "user", "content": f"Generate a speech from the following text:\n\n{item['text']}{DEFAULT_SPEECH_TOKEN}"}, + {"role": "assistant", "content": item["text"]}, + ] + messages.append(message_list_item) + durations.append(item["duration"]) + ids.append(item["id"]) + lang.append(item["language"]) + dnsmos.append(item["dnsmos"]) + + input_ids, attention_mask, target_ids = preprocess(messages, tokenizer) + target_ids = target_ids.type(torch.LongTensor) + input_ids = input_ids.type(torch.LongTensor) + + return { + "speech_tokens": speech_tokens, + "input_ids": input_ids, + "attention_mask": attention_mask, + "target_ids": target_ids, + "durations": durations, + "ids": ids, + "lang": lang, + "dnsmos": dnsmos, + } + +def compute_loss( + params: AttributeDict, + tokenizer: AutoTokenizer, + model: nn.Module, + batch: dict, + is_training: bool, +) -> Tuple[Tensor, MetricsTracker]: + """ + Compute the loss for the given batch. + Args: + params: + It is returned by :func:`get_params`. + tokenizer: + The tokenizer used to encode the text. + model: + The model for training. + batch: + A batch of data. See `lhotse.dataset.K2SpeechRecognitionDataset()` + for the content in it. + is_training: + Whether it is training. + Returns: + Return a tuple of two elements. The first element is the loss tensor. + """ + device = next(model.parameters()).device + input_ids, attention_mask, target_ids, answer_cosyvoice_speech_token = batch["input_ids"], batch["attention_mask"], batch["target_ids"], batch["speech_tokens"] + + with torch.set_grad_enabled(is_training): + ( + text_loss, + acc, + codec_loss, + codec_acc, + codec_topk_acc, + ) = model.forward_with_speech_output( + input_ids=input_ids.to(device), + attention_mask=attention_mask.to(device), + labels=target_ids.to(device), + speech_codec_ids=answer_cosyvoice_speech_token, + ) + loss = text_loss + codec_loss + assert loss.requires_grad == is_training + + info = MetricsTracker() + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + feature_lens = batch["supervisions"]["num_frames"] + info["frames"] = (feature_lens // params.subsampling_factor).sum().item() + + # Note: We use reduction=sum while computing the loss. 
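        # MetricsTracker normalises tracked values by the "frames" entry when it
        # is printed, so the accuracy-style metrics below are pre-multiplied by
        # info["frames"]; the later division cancels out and they are logged as
        # plain averages, while the summed losses are stored unscaled.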
+ info["loss"] = loss.detach().cpu().item() + info["acc"] = ( + acc * info["frames"] + ) # WAR: to avoid normalization by the number of frames + + info["codec_acc"] = codec_acc * info["frames"] + info["codec_topk_acc"] = codec_topk_acc * info["frames"] + info["codec_loss"] = codec_loss.detach().cpu().item() + info["text_loss"] = text_loss.detach().cpu().item() + return loss, info + + +def train_one_epoch( + params: AttributeDict, + tokenizer: AutoTokenizer, + model: nn.Module, + optimizer: torch.optim.Optimizer, + scheduler: torch.optim.lr_scheduler, + train_dl: torch.utils.data.DataLoader, + valid_dl: torch.utils.data.DataLoader, + tb_writer: Optional[SummaryWriter] = None, + world_size: int = 1, + rank: int = 0, +) -> None: + """Train the model for one epoch. + + The training loss from the mean of all frames is saved in + `params.train_loss`. It runs the validation process every + `params.valid_interval` batches. + + Args: + params: + It is returned by :func:`get_params`. + model: + The model for training. + optimizer: + The optimizer we are using. + scheduler: + The learning rate scheduler, we call step() every step. + train_dl: + Dataloader for the training dataset. + valid_dl: + Dataloader for the validation dataset. + scaler: + The scaler used for mix precision training. + model_avg: + The stored model averaged from the start of training. + tb_writer: + Writer to write log messages to tensorboard. + world_size: + Number of nodes in DDP training. If it is 1, DDP is disabled. + rank: + The rank of the node in DDP training. If no DDP is used, it should + be set to 0. + """ + model.train() + model.encoder.eval() + if not params.unfreeze_llm: + model.llm.eval() + tot_loss = MetricsTracker() + + for batch_idx, batch in enumerate(train_dl): + params.batch_idx_train += 1 + batch_size = len(batch["supervisions"]["text"]) + if batch_idx % params.valid_interval == 0: + logging.info("Computing validation loss") + valid_info = compute_validation_loss( + params=params, + tokenizer=tokenizer, + model=model, + valid_dl=valid_dl, + world_size=world_size, + ) + model.train() + model.encoder.eval() + if not params.unfreeze_llm: + model.llm.eval() + logging.info(f"Epoch {params.cur_epoch}, validation: {valid_info}") + logging.info( + f"Maximum memory allocated so far is {torch.cuda.max_memory_allocated()//1000000}MB" + ) + if tb_writer is not None: + valid_info.write_summary( + tb_writer, "train/valid_", params.batch_idx_train + ) + if batch_idx != 0: + model.save_checkpoint( + save_dir=params.exp_dir, + tag=f"zero-checkpoint-{params.batch_idx_train}", + client_state={}, + exclude_frozen_parameters=True, + ) + + if rank == 0: + convert_zero_checkpoint_to_fp32_state_dict( + params.exp_dir, + f"{params.exp_dir}/checkpoint-{params.batch_idx_train}", + tag=f"zero-checkpoint-{params.batch_idx_train}", + exclude_frozen_parameters=True, + ) + # save sampler state dict into checkpoint + # sampler_state_dict = train_dl.sampler.state_dict() + sampler_state_dict = train_dl.state_dict() + torch.save( + sampler_state_dict, + f"{params.exp_dir}/checkpoint-{params.batch_idx_train}/sampler.pt", + ) + os.system( + f"rm -rf {params.exp_dir}/zero-checkpoint-{params.batch_idx_train}" + ) + try: + with torch.amp.autocast("cuda", enabled=params.use_fp16): + loss, loss_info = compute_loss( + params=params, + tokenizer=tokenizer, + model=model, + batch=batch, + is_training=True, + ) + # summary stats + tot_loss = (tot_loss * (1 - 1 / params.reset_interval)) + loss_info + + # NOTE: We use reduction==sum and loss is computed 
over utterances + # in the batch and there is no normalization to it so far. + + # deepspeed's backward() is different from torch's backward() + # in that it does not accept a loss tensor as input. + # It computes the loss internally. + model.backward(loss) + model.step() + + except: # noqa + display_and_save_batch(batch, params=params) + raise + + if batch_idx % params.log_interval == 0: + try: + cur_lr = scheduler.get_last_lr()[0] + except: # noqa + cur_lr = 0.0 + + logging.info( + f"Epoch {params.cur_epoch}, " + f"batch {batch_idx}, loss[{loss_info}], " + f"tot_loss[{tot_loss}], batch size: {batch_size}, " + f"lr: {cur_lr:.2e}, " + ) + + if tb_writer is not None: + tb_writer.add_scalar( + "train/learning_rate", cur_lr, params.batch_idx_train + ) + + loss_info.write_summary( + tb_writer, "train/current_", params.batch_idx_train + ) + tot_loss.write_summary(tb_writer, "train/tot_", params.batch_idx_train) + + loss_value = tot_loss["loss"] / tot_loss["frames"] + params.train_loss = loss_value + if params.train_loss < params.best_train_loss: + params.best_train_epoch = params.cur_epoch + params.best_train_loss = params.train_loss + + + +def run(rank, world_size, args): + """ + Args: + rank: + It is a value between 0 and `world_size-1`, which is + passed automatically by `mp.spawn()` in :func:`main`. + The node with rank 0 is responsible for saving checkpoint. + world_size: + Number of GPUs for DDP training. + args: + The return value of get_parser().parse_args() + """ + params = get_params() + params.update(vars(args)) + + fix_random_seed(params.seed) + + if rank == 0: + setup_logger(f"{params.exp_dir}/log/log-train") + logging.info(params) + logging.info("About to create model") + + model, tokenizer = get_model(params) + + if torch.cuda.is_available(): + device = torch.device("cuda", get_local_rank()) + else: + device = torch.device("cpu") + logging.info(f"Device: {device}") + model.to(device) + + assert params.deepspeed and world_size > 1 + logging.info("Using DeepSpeed") + model, optimizer, _, scheduler = deepspeed.initialize( + args=params, model=model, model_parameters=model.parameters() + ) + + sampler_state_dict = None + if params.sampler_state_dict_path: + sampler_state_dict = torch.load(params.sampler_state_dict_path) + + data_path = "/lustre/fsw/general_sa/yuekaiz/s2s" + "/emilia_en" + ds = load_dataset(data_path, split="train") + train_test_split = dataset.train_test_split(test_size=1000, seed=42) + train_dataset, eval_dataset = train_test_split["train"], train_test_split["test"] + + sampler = DistributedSampler(train_dataset, num_replicas=world_size, rank=rank) + train_dl = StatefulDataLoader( + train_dataset, + batch_size=2, + sampler=sampler, + shuffle=False, + num_workers=1, + prefetch_factor=1, + collate_fn=lambda features: data_collator( + features, tokenizer + ), + ) + train_dl.load_state_dict(sampler_state_dict) + valid_sampler = DistributedSampler(eval_dataset, num_replicas=world_size, rank=rank) + valid_dl = DataLoader( + eval_dataset, + batch_size=2, + sampler=valid_sampler, + shuffle=False, + num_workers=1, + prefetch_factor=1, + collate_fn=lambda features: data_collator( + features + ), + ) + + if args.tensorboard and rank == 0: + tb_writer = SummaryWriter(log_dir=f"{params.exp_dir}/tensorboard") + else: + tb_writer = None + + logging.info(f"start training from epoch {params.start_epoch}") + for epoch in range(params.start_epoch, params.num_epochs + 1): + + fix_random_seed(params.seed + epoch - 1) + train_dl.sampler.set_epoch(epoch - 1) + + if tb_writer is not None: 
+ tb_writer.add_scalar("train/epoch", epoch, params.batch_idx_train) + + params.cur_epoch = epoch + + train_one_epoch( + params=params, + tokenizer=tokenizer, + model=model, + optimizer=optimizer, + scheduler=scheduler, + train_dl=train_dl, + valid_dl=valid_dl, + tb_writer=tb_writer, + world_size=world_size, + rank=rank, + ) + + model.save_checkpoint( + save_dir=params.exp_dir, + tag=f"zero-epoch-{params.cur_epoch}", + client_state={}, + exclude_frozen_parameters=True, + ) + if rank == 0: + convert_zero_checkpoint_to_fp32_state_dict( + params.exp_dir, + f"{params.exp_dir}/epoch-{params.cur_epoch}", + tag=f"zero-epoch-{params.cur_epoch}", + exclude_frozen_parameters=True, + ) + # save sampler state dict into checkpoint + # sampler_state_dict = train_dl.sampler.state_dict() + sampler_state_dict = train_dl.state_dict() + torch.save( + sampler_state_dict, + f"{params.exp_dir}/epoch-{params.cur_epoch}/sampler.pt", + ) + + os.system(f"rm -rf {params.exp_dir}/zero-epoch-{params.cur_epoch}") + + logging.info("Done!") + + +def main(): + parser = get_parser() + args = parser.parse_args() + args.exp_dir = Path(args.exp_dir) + + world_size = get_world_size() + rank = get_rank() + + torch.set_num_threads(1) + torch.set_num_interop_threads(1) + warnings.filterwarnings("ignore", category=FutureWarning) + run(rank=rank, world_size=world_size, args=args) + + +if __name__ == "__main__": + main() From 1281d7a515b0bfbdd7e7e4e926f4d0c644af1448 Mon Sep 17 00:00:00 2001 From: root Date: Tue, 27 May 2025 00:18:23 -0700 Subject: [PATCH 51/57] add tts training --- .../SPEECH2SPEECH/qwen_omni/train_tts.py | 120 ++++++++++-------- 1 file changed, 70 insertions(+), 50 deletions(-) diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train_tts.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train_tts.py index 8fd6609a4..38132e71e 100755 --- a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train_tts.py +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train_tts.py @@ -50,6 +50,7 @@ import torch import torch.multiprocessing as mp import torch.nn as nn import transformers +from datasets import load_dataset from deepspeed.utils.zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict from label_smoothing import LabelSmoothingLoss @@ -68,7 +69,7 @@ from transformers import ( from torchdata.stateful_dataloader import StatefulDataLoader from torch.utils.data import DistributedSampler, DataLoader -from train import add_model_arguments, add_training_arguments, get_params, compute_validation_loss, get_model, display_and_save_batch +from train import add_model_arguments, add_training_arguments, get_params, get_model from utils import ( # filter_uneven_sized_batch, AttributeDict, MetricsTracker, @@ -91,12 +92,12 @@ def get_parser(): formatter_class=argparse.ArgumentDefaultsHelpFormatter ) - # parser.add_argument( - # "--loss-type", - # type=str, - # default="ce", - # help="The type of loss to use.", - # ) + parser.add_argument( + "--batch-size", + type=int, + default=16, + help="The batch size to use.", + ) parser = deepspeed.add_config_arguments(parser) add_model_arguments(parser) @@ -161,7 +162,7 @@ def preprocess( attention_mask = input_ids.ne(tokenizer.pad_token_id) return input_ids, attention_mask, target_ids -def data_collator(batch, tokenizer, cut_off_len=2048): +def data_collator(batch): speech_tokens, messages, durations, ids, lang, dnsmos = [], [], [], [], [], [] for i, item in enumerate(batch): speech_tokens.append(item["code"]) @@ -176,21 +177,15 @@ def data_collator(batch, tokenizer, cut_off_len=2048): 
lang.append(item["language"]) dnsmos.append(item["dnsmos"]) - input_ids, attention_mask, target_ids = preprocess(messages, tokenizer) - target_ids = target_ids.type(torch.LongTensor) - input_ids = input_ids.type(torch.LongTensor) - return { "speech_tokens": speech_tokens, - "input_ids": input_ids, - "attention_mask": attention_mask, - "target_ids": target_ids, + "messages": messages, "durations": durations, "ids": ids, "lang": lang, "dnsmos": dnsmos, } - + def compute_loss( params: AttributeDict, tokenizer: AutoTokenizer, @@ -216,7 +211,10 @@ def compute_loss( Return a tuple of two elements. The first element is the loss tensor. """ device = next(model.parameters()).device - input_ids, attention_mask, target_ids, answer_cosyvoice_speech_token = batch["input_ids"], batch["attention_mask"], batch["target_ids"], batch["speech_tokens"] + messages, answer_cosyvoice_speech_token = batch["messages"], batch["speech_tokens"] + input_ids, attention_mask, target_ids = preprocess(messages, tokenizer) + target_ids = target_ids.type(torch.LongTensor) + input_ids = input_ids.type(torch.LongTensor) with torch.set_grad_enabled(is_training): ( @@ -235,24 +233,51 @@ def compute_loss( assert loss.requires_grad == is_training info = MetricsTracker() - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - feature_lens = batch["supervisions"]["num_frames"] - info["frames"] = (feature_lens // params.subsampling_factor).sum().item() - + info["frames"] = len(messages) # Note: We use reduction=sum while computing the loss. + info["acc"] = acc * len(messages) + info["codec_acc"] = codec_acc * len(messages) + info["codec_topk_acc"] = codec_topk_acc * len(messages) info["loss"] = loss.detach().cpu().item() - info["acc"] = ( - acc * info["frames"] - ) # WAR: to avoid normalization by the number of frames - - info["codec_acc"] = codec_acc * info["frames"] - info["codec_topk_acc"] = codec_topk_acc * info["frames"] info["codec_loss"] = codec_loss.detach().cpu().item() info["text_loss"] = text_loss.detach().cpu().item() return loss, info +def compute_validation_loss( + params: AttributeDict, + tokenizer: AutoTokenizer, + model: nn.Module, + valid_dl: torch.utils.data.DataLoader, + world_size: int = 1, +) -> MetricsTracker: + """Run the validation process.""" + model.eval() + tot_loss = MetricsTracker() + + for batch_idx, batch in enumerate(valid_dl): + with torch.amp.autocast("cuda", enabled=params.use_fp16): + loss, loss_info = compute_loss( + params=params, + tokenizer=tokenizer, + model=model, + batch=batch, + is_training=False, + ) + assert loss.requires_grad is False + tot_loss = tot_loss + loss_info + + # FIX ME + if world_size > 1: + tot_loss.reduce(loss.device) + + loss_value = tot_loss["loss"] + if loss_value < params.best_valid_loss: + params.best_valid_epoch = params.cur_epoch + params.best_valid_loss = loss_value + + return tot_loss + def train_one_epoch( params: AttributeDict, tokenizer: AutoTokenizer, @@ -297,14 +322,14 @@ def train_one_epoch( be set to 0. 
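        For reference, a toy input for the data_collator above (field names
        match the emilia_en parquet columns referenced elsewhere in this
        recipe; the values here are invented):

            item = {
                "id": "utt_00000001",
                "text": "hello world",
                "code": [123, 456, 789],   # CosyVoice2 speech-token ids
                "duration": 1.2,
                "language": "en",
                "dnsmos": 3.4,
            }
            batch = data_collator([item])
            # batch carries speech_tokens, messages, durations, ids, lang and
            # dnsmos; tokenisation into input_ids/target_ids happens later in
            # compute_loss via preprocess(messages, tokenizer).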
""" model.train() - model.encoder.eval() + # model.encoder.eval() if not params.unfreeze_llm: model.llm.eval() tot_loss = MetricsTracker() for batch_idx, batch in enumerate(train_dl): params.batch_idx_train += 1 - batch_size = len(batch["supervisions"]["text"]) + batch_size = len(batch["durations"]) if batch_idx % params.valid_interval == 0: logging.info("Computing validation loss") valid_info = compute_validation_loss( @@ -315,7 +340,7 @@ def train_one_epoch( world_size=world_size, ) model.train() - model.encoder.eval() + # model.encoder.eval() if not params.unfreeze_llm: model.llm.eval() logging.info(f"Epoch {params.cur_epoch}, validation: {valid_info}") @@ -373,7 +398,6 @@ def train_one_epoch( model.step() except: # noqa - display_and_save_batch(batch, params=params) raise if batch_idx % params.log_interval == 0: @@ -399,7 +423,7 @@ def train_one_epoch( ) tot_loss.write_summary(tb_writer, "train/tot_", params.batch_idx_train) - loss_value = tot_loss["loss"] / tot_loss["frames"] + loss_value = tot_loss["loss"] params.train_loss = loss_value if params.train_loss < params.best_train_loss: params.best_train_epoch = params.cur_epoch @@ -421,6 +445,7 @@ def run(rank, world_size, args): """ params = get_params() params.update(vars(args)) + params.valid_interval = 2000 fix_random_seed(params.seed) @@ -428,9 +453,7 @@ def run(rank, world_size, args): setup_logger(f"{params.exp_dir}/log/log-train") logging.info(params) logging.info("About to create model") - model, tokenizer = get_model(params) - if torch.cuda.is_available(): device = torch.device("cuda", get_local_rank()) else: @@ -447,36 +470,34 @@ def run(rank, world_size, args): sampler_state_dict = None if params.sampler_state_dict_path: sampler_state_dict = torch.load(params.sampler_state_dict_path) - - data_path = "/lustre/fsw/general_sa/yuekaiz/s2s" + "/emilia_en" - ds = load_dataset(data_path, split="train") - train_test_split = dataset.train_test_split(test_size=1000, seed=42) + # print(params.dataset) + ds = load_dataset(params.dataset, split="train") + # shuffle the dataset + ds = ds.shuffle(seed=42) + train_test_split = ds.train_test_split(test_size=1000, seed=42) train_dataset, eval_dataset = train_test_split["train"], train_test_split["test"] + # train_dataset, eval_dataset = train_test_split["test"], train_test_split["test"] sampler = DistributedSampler(train_dataset, num_replicas=world_size, rank=rank) train_dl = StatefulDataLoader( train_dataset, - batch_size=2, + batch_size=params.batch_size, sampler=sampler, shuffle=False, - num_workers=1, - prefetch_factor=1, - collate_fn=lambda features: data_collator( - features, tokenizer - ), + num_workers=4, + prefetch_factor=2, + collate_fn=data_collator ) train_dl.load_state_dict(sampler_state_dict) valid_sampler = DistributedSampler(eval_dataset, num_replicas=world_size, rank=rank) valid_dl = DataLoader( eval_dataset, - batch_size=2, + batch_size=params.batch_size, sampler=valid_sampler, shuffle=False, num_workers=1, prefetch_factor=1, - collate_fn=lambda features: data_collator( - features - ), + collate_fn=data_collator ) if args.tensorboard and rank == 0: @@ -533,7 +554,6 @@ def run(rank, world_size, args): logging.info("Done!") - def main(): parser = get_parser() args = parser.parse_args() From 5a7c72cb4747e224cbabcdd851f1f3bf0b3f82a2 Mon Sep 17 00:00:00 2001 From: root Date: Tue, 27 May 2025 02:12:22 -0700 Subject: [PATCH 52/57] add tts task decode --- egs/speech_llm/SPEECH2SPEECH/exp.sh | 233 ++++++++++++++ egs/speech_llm/SPEECH2SPEECH/prepare.sh | 10 +- 
.../SPEECH2SPEECH/qwen_omni/decode.py | 6 +- .../SPEECH2SPEECH/qwen_omni/decode_tts.py | 294 ++++++++++++++++++ .../SPEECH2SPEECH/qwen_omni/model.py | 46 ++- 5 files changed, 558 insertions(+), 31 deletions(-) create mode 100644 egs/speech_llm/SPEECH2SPEECH/exp.sh create mode 100755 egs/speech_llm/SPEECH2SPEECH/qwen_omni/decode_tts.py diff --git a/egs/speech_llm/SPEECH2SPEECH/exp.sh b/egs/speech_llm/SPEECH2SPEECH/exp.sh new file mode 100644 index 000000000..2e8085fe7 --- /dev/null +++ b/egs/speech_llm/SPEECH2SPEECH/exp.sh @@ -0,0 +1,233 @@ +#!/usr/bin/env bash + +# fix segmentation fault reported in https://github.com/k2-fsa/icefall/issues/674 +export PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python + + +set -eou pipefail + +stage=$1 +stop_stage=$2 + + +log() { + # This function is from espnet + local fname=${BASH_SOURCE[1]##*/} + echo -e "$(date '+%Y-%m-%d %H:%M:%S') (${fname}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*" +} + +if [ $stage -le 17 ] && [ $stop_stage -ge 17 ]; then + echo "cd /workspace && ln -s /lustre/fsw/general_sa/yuekaiz/s2s slam && cd -" + if [ ! -L "/workspace/slam" ]; then + cd /workspace && ln -s /lustre/fsw/general_sa/yuekaiz/s2s slam && cd - + fi + log "stage 17: Training Speech2Speech Model, full parameters" + exp_dir=./qwen_omni/exp_speech2text_first_multi_en_continuation_second_three_s2s + pretrained_dir=./qwen_omni/exp_speech2text + ngpu=4 + + latest_checkpoint_step=-1 + # Check if exp_dir exists and is a directory + if [ -d "$exp_dir" ]; then + # List directories matching checkpoint-* and find the one with the largest step number + for checkpoint_dir in $(ls -d $exp_dir/checkpoint-*/ 2>/dev/null | sort -V); do + checkpoint_name=$(basename "$checkpoint_dir") # e.g., checkpoint-1000 + # Extract step number using parameter expansion + current_step=${checkpoint_name#checkpoint-} + # Ensure current_step is a number + if [[ "$current_step" =~ ^[0-9]+$ ]] && [ "$current_step" -gt "$latest_checkpoint_step" ]; then + latest_checkpoint_step=$current_step + fi + done + fi + + train_cmd_args="--max-duration 200 \ + --enable-musan False \ + --exp-dir $exp_dir \ + --last-stage-model-path $pretrained_dir/checkpoint-58548/pytorch_model.bin \ + --speech-encoder-path-or-name models/large-v2.pt \ + --llm-path-or-name models/Qwen2.5-0.5B-Instruct \ + --on-the-fly-feats True --on-the-fly-speed-perturb False\ + --deepspeed \ + --huggingface-dataset-path-or-name /lustre/fsw/general_sa/yuekaiz/s2s \ + --deepspeed_config ./qwen_omni/ds_config_zero1.json \ + --use-flash-attn True --on-the-fly-feats True \ + --dataset vocalnet_ultrachat_voiceassistant_instruct_s2s --num-epochs 10 \ + --use-lora True --unfreeze-llm True --unfreeze-speech-projector True --enable-speech-output False" + + if [ "$latest_checkpoint_step" -ge 0 ]; then + log "Continuing training from checkpoint-$latest_checkpoint_step" + step=$latest_checkpoint_step + train_cmd_args="$train_cmd_args --pretrained-model-path $exp_dir/checkpoint-${step}/pytorch_model.bin --sampler-state-dict-path $exp_dir/checkpoint-${step}/sampler.pt" + else + log "Starting training from scratch as no checkpoint was found in $exp_dir" + # No pretrained model or sampler state dict needed for the first run + fi + + torchrun --nproc_per_node $ngpu --nnodes $SLURM_JOB_NUM_NODES --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d --rdzv_id $SLURM_JOBID ./qwen_omni/train.py \ + $train_cmd_args +fi + +if [ $stage -le 18 ] && [ $stop_stage -ge 18 ]; then + echo "cd /workspace && ln -s /lustre/fsw/general_sa/yuekaiz/s2s slam && cd -" + # check if the 
link exists, if not exist, create it + if [ ! -L "/workspace/slam" ]; then + cd /workspace && ln -s /lustre/fsw/general_sa/yuekaiz/s2s slam && cd - + fi + log "stage 17: Training Speech2Speech Model, full parameters" + exp_dir=./qwen_omni/exp_speech2text_first_multi_en_continuation_second_three_s2s_librispeech + pretrained_dir=./qwen_omni/exp_speech2text + ngpu=4 + + latest_checkpoint_step=-1 + # Check if exp_dir exists and is a directory + if [ -d "$exp_dir" ]; then + # List directories matching checkpoint-* and find the one with the largest step number + for checkpoint_dir in $(ls -d $exp_dir/checkpoint-*/ 2>/dev/null | sort -V); do + checkpoint_name=$(basename "$checkpoint_dir") # e.g., checkpoint-1000 + # Extract step number using parameter expansion + current_step=${checkpoint_name#checkpoint-} + # Ensure current_step is a number + if [[ "$current_step" =~ ^[0-9]+$ ]] && [ "$current_step" -gt "$latest_checkpoint_step" ]; then + latest_checkpoint_step=$current_step + fi + done + fi + + train_cmd_args="--max-duration 200 \ + --enable-musan False \ + --exp-dir $exp_dir \ + --last-stage-model-path $pretrained_dir/checkpoint-58548/pytorch_model.bin \ + --speech-encoder-path-or-name models/large-v2.pt \ + --llm-path-or-name models/Qwen2.5-0.5B-Instruct \ + --on-the-fly-feats True --on-the-fly-speed-perturb False\ + --deepspeed \ + --huggingface-dataset-path-or-name /lustre/fsw/general_sa/yuekaiz/s2s \ + --deepspeed_config ./qwen_omni/ds_config_zero1.json \ + --use-flash-attn True --on-the-fly-feats True \ + --dataset vocalnet_ultrachat_voiceassistant_instruct_s2s_librispeech --num-epochs 10 \ + --use-lora True --unfreeze-llm True --unfreeze-speech-projector True --enable-speech-output False" + + if [ "$latest_checkpoint_step" -ge 0 ]; then + log "Continuing training from checkpoint-$latest_checkpoint_step" + step=$latest_checkpoint_step + train_cmd_args="$train_cmd_args --pretrained-model-path $exp_dir/checkpoint-${step}/pytorch_model.bin --sampler-state-dict-path $exp_dir/checkpoint-${step}/sampler.pt" + else + log "Starting training from scratch as no checkpoint was found in $exp_dir" + # No pretrained model or sampler state dict needed for the first run + fi + + torchrun --nproc_per_node $ngpu --nnodes $SLURM_JOB_NUM_NODES --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d --rdzv_id $SLURM_JOBID ./qwen_omni/train.py \ + $train_cmd_args +fi + +export HF_HOME="/lustre/fsw/general_sa/yuekaiz/.cache/huggingface" +if [ $stage -le 19 ] && [ $stop_stage -ge 19 ]; then + log "stage 19: Training TTS Model" + exp_dir=./qwen_omni/exp_tts + pretrained_dir=./qwen_omni/exp_speech2text + ngpu=4 + + latest_checkpoint_step=-1 + # Check if exp_dir exists and is a directory + if [ -d "$exp_dir" ]; then + # List directories matching checkpoint-* and find the one with the largest step number + for checkpoint_dir in $(ls -d $exp_dir/checkpoint-*/ 2>/dev/null | sort -V); do + checkpoint_name=$(basename "$checkpoint_dir") # e.g., checkpoint-1000 + # Extract step number using parameter expansion + current_step=${checkpoint_name#checkpoint-} + # Ensure current_step is a number + if [[ "$current_step" =~ ^[0-9]+$ ]] && [ "$current_step" -gt "$latest_checkpoint_step" ]; then + latest_checkpoint_step=$current_step + fi + done + fi + + train_cmd_args="--batch-size 64 \ + --exp-dir $exp_dir \ + --last-stage-model-path $pretrained_dir/checkpoint-58548/pytorch_model.bin \ + --llm-path-or-name models/Qwen2.5-0.5B-Instruct \ + --enable-speech-input False \ + --deepspeed \ + --dataset 
/lustre/fsw/general_sa/yuekaiz/s2s/emilia_en \ + --deepspeed_config ./qwen_omni/ds_config_zero1.json \ + --use-flash-attn True \ + --num-epochs 2 \ + --use-lora False --unfreeze-llm False --enable-speech-output True" + + if [ "$latest_checkpoint_step" -ge 0 ]; then + log "Continuing training from checkpoint-$latest_checkpoint_step" + step=$latest_checkpoint_step + train_cmd_args="$train_cmd_args --pretrained-model-path $exp_dir/checkpoint-${step}/pytorch_model.bin --sampler-state-dict-path $exp_dir/checkpoint-${step}/sampler.pt" + else + log "Starting training from scratch as no checkpoint was found in $exp_dir" + # No pretrained model or sampler state dict needed for the first run + fi + + torchrun --nproc_per_node $ngpu --nnodes $SLURM_JOB_NUM_NODES --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d --rdzv_id $SLURM_JOBID ./qwen_omni/train_tts.py \ + $train_cmd_args +fi + + +if [ $stage -le 20 ] && [ $stop_stage -ge 20 ]; then + log "stage 20: Training TTS Model" + echo "cd /workspace && ln -s /lustre/fsw/general_sa/yuekaiz/s2s slam && cd -" + if [ ! -L "/workspace/slam" ]; then + cd /workspace && ln -s /lustre/fsw/general_sa/yuekaiz/s2s slam && cd - + fi + exp_dir=./qwen_omni/exp_test + ngpu=4 + + latest_checkpoint_step=-1 + # Check if exp_dir exists and is a directory + if [ -d "$exp_dir" ]; then + # List directories matching checkpoint-* and find the one with the largest step number + for checkpoint_dir in $(ls -d $exp_dir/checkpoint-*/ 2>/dev/null | sort -V); do + checkpoint_name=$(basename "$checkpoint_dir") # e.g., checkpoint-1000 + # Extract step number using parameter expansion + current_step=${checkpoint_name#checkpoint-} + # Ensure current_step is a number + if [[ "$current_step" =~ ^[0-9]+$ ]] && [ "$current_step" -gt "$latest_checkpoint_step" ]; then + latest_checkpoint_step=$current_step + fi + done + fi + + train_cmd_args="--max-duration 150 \ + --enable-musan False \ + --exp-dir $exp_dir \ + --speech-encoder-path-or-name models/large-v2.pt \ + --llm-path-or-name Qwen/Qwen2.5-0.5B-Instruct \ + --dataset vocalnet_ultrachat_voiceassistant \ + --manifest-dir data/fbank \ + --deepspeed \ + --deepspeed_config ./qwen_omni/ds_config_zero1.json \ + --use-flash-attn True --on-the-fly-feats True \ + --use-lora True --unfreeze-llm True --unfreeze-speech-projector True --enable-speech-output True" + + if [ "$latest_checkpoint_step" -ge 0 ]; then + log "Continuing training from checkpoint-$latest_checkpoint_step" + step=$latest_checkpoint_step + train_cmd_args="$train_cmd_args --pretrained-model-path $exp_dir/checkpoint-${step}/pytorch_model.bin --sampler-state-dict-path $exp_dir/checkpoint-${step}/sampler.pt" + else + log "Starting training from scratch as no checkpoint was found in $exp_dir" + # No pretrained model or sampler state dict needed for the first run + fi + + torchrun --nproc_per_node $ngpu --nnodes $SLURM_JOB_NUM_NODES --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d --rdzv_id $SLURM_JOBID ./qwen_omni/train.py \ + $train_cmd_args +fi + +if [ $stage -le 21 ] && [ $stop_stage -ge 21 ]; then + log "stage 21: TTS Decoding Test Set" + exp_dir=./qwen_omni/exp_tts + torchrun --nproc_per_node=4 python3 ./qwen_omni/decode_tts.py \ + --exp-dir $exp_dir \ + --speech-encoder-path-or-name models/large-v2.pt \ + --llm-path-or-name models/Qwen2.5-0.5B-Instruct \ + --pretrained-model-path $exp_dir/checkpoint-32001/pytorch_model.bin \ + --use-flash-attn True \ + --enable-speech-output True \ + --token2wav-path /lustre/fsw/general_sa/yuekaiz/s2s/CosyVoice2-0.5B \ + 
--use-lora True +fi \ No newline at end of file diff --git a/egs/speech_llm/SPEECH2SPEECH/prepare.sh b/egs/speech_llm/SPEECH2SPEECH/prepare.sh index 4ee6976da..a75cd33ff 100644 --- a/egs/speech_llm/SPEECH2SPEECH/prepare.sh +++ b/egs/speech_llm/SPEECH2SPEECH/prepare.sh @@ -242,9 +242,13 @@ if [ $stage -le 14 ] && [ $stop_stage -ge 14 ]; then exp_dir=./qwen_omni/exp_speech2text_first_libri_continuation_second_ce exp_dir=./qwen_omni/exp_speech2text_first_asr_second_ce exp_dir=./qwen_omni/exp_speech2text_first_multi_en_continuation_second_qa + exp_dir=./qwen_omni/exp_speech2text_first_multi_en_continuation_second_three_s2s_librispeech + # exp_dir=./qwen_omni/exp_speech2text_first_multi_en_continuation_second_three_s2s # The final assignment of datasets in the original script is used here: # (alpacaeval_full wildvoice mmsu advbench bbh ifeval commoneval openbookqa sd-qa) declare -a target_datasets=("alpacaeval_full" "wildvoice" "ifeval" "commoneval" "openbookqa" "sd-qa" "advbench" "bbh" "mmsu") + declare -a target_datasets=("alpacaeval_full" "wildvoice" "ifeval" "commoneval" "openbookqa" "sd-qa" "advbench" "bbh") + declare -a target_datasets=("mmsu") NUM_CLIENT_JOBS=4 # Number of parallel client jobs BASE_PORT=8000 # Base port for servers @@ -367,6 +371,8 @@ if [ $stage -le 17 ] && [ $stop_stage -ge 17 ]; then exp_dir=./qwen_omni/exp_speech2text_first_libri_continuation_second_ce exp_dir=./qwen_omni/exp_speech2text_first_asr_second_ce exp_dir=./qwen_omni/exp_speech2text_first_multi_en_continuation_second_qa + exp_dir=./qwen_omni/exp_speech2text_first_multi_en_continuation_second_three_s2s_librispeech + exp_dir=./qwen_omni/exp_speech2text_first_multi_en_continuation_second_three_s2s N_GPUS=4 # Define the number of GPUs/processes you want to launch @@ -376,10 +382,10 @@ if [ $stage -le 17 ] && [ $stop_stage -ge 17 ]; then CUDA_VISIBLE_DEVICES=$id python3 ./qwen_omni/server.py \ --speech-encoder-path-or-name models/large-v2.pt \ --llm-path-or-name models/Qwen2.5-0.5B-Instruct \ - --checkpoint-path $exp_dir/epoch-10/pytorch_model.bin \ + --checkpoint-path $exp_dir/checkpoint-55276/pytorch_model.bin \ --use-flash-attn True \ --enable-speech-output False \ - --port $(expr 8000 + $id) \ + --port $(expr 18000 + $id) \ --use-lora True & done diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/decode.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/decode.py index 1bf2c6d9f..43f6e95b3 100755 --- a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/decode.py +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/decode.py @@ -77,7 +77,7 @@ sys.path.append("/workspace/CosyVoice/third_party/Matcha-TTS") def audio_decode_cosyvoice2( - audio_tokens, prompt_text, prompt_speech_path, codec_decoder + audio_tokens, prompt_text, prompt_speech_16k, codec_decoder ): """ Generate audio from tokens with optional tone and prompt embedding. @@ -95,7 +95,6 @@ def audio_decode_cosyvoice2( Returns: torch.Tensor: Generated audio waveform. 
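    Example (a sketch; the checkpoint path, prompt wav, its transcript and the
    token list are placeholders):

        token2wav = CosyVoice2(
            "/workspace/CosyVoice2-0.5B", load_jit=False, load_trt=False, fp16=False
        )
        prompt_speech_16k = load_wav("prompt.wav", 16000)
        tokens = torch.tensor(codec_token_ids, dtype=torch.int32).unsqueeze(0)
        wav = audio_decode_cosyvoice2(
            tokens, "transcript of prompt.wav", prompt_speech_16k, token2wav
        )
        sf.write("output.wav", wav.squeeze(0).cpu().numpy(), 24000)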
""" - prompt_speech_16k = load_wav(prompt_speech_path, 16000) model_inputs_dict = codec_decoder.frontend.frontend_zero_shot( "empty", prompt_text, prompt_speech_16k, 24000 ) @@ -555,10 +554,11 @@ def decode_one_batch( # audio_tokens = [token for token in audio_tokens if token < 4096] audio_tokens = torch.tensor(audio_tokens, dtype=torch.int32).unsqueeze(0) if "CosyVoice2" in params.token2wav_path: + prompt_speech_16k = load_wav(params.prompt_speech_path, 16000) audio_hat = audio_decode_cosyvoice2( audio_tokens, params.prompt_text, - params.prompt_speech_path, + prompt_speech_16k, token2wav_model, ) sf.write(speech_file_name, audio_hat.squeeze(0).cpu().numpy(), 24000) diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/decode_tts.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/decode_tts.py new file mode 100755 index 000000000..3dcb9d7fe --- /dev/null +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/decode_tts.py @@ -0,0 +1,294 @@ +#!/usr/bin/env python3 +# Copyright 2023 Xiaomi Corp. (authors: Xiaoyu Yang) +# 2024 Yuekai Zhang +# +# See ../../../../LICENSE for clarification regarding multiple authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Usage: +# For Chinese dataset, you can use the following command to download the Chinese fine-tuned whisper model. 
+huggingface-cli download --local-dir models/whisper yuekai/icefall_asr_multi-hans-zh_whisper +# Qwen Pretrained model +huggingface-cli download --local-dir models/Qwen2.5-0.5B-Instruct Qwen/Qwen2.5-0.5B-Instruct + +torchrun --nproc_per_node $ngpu ./qwen_omni/train.py \ + --max-duration 50 \ + --enable-musan False \ + --exp-dir $exp_dir \ + --speech-encoder-path-or-name models/whisper/v1.1/whisper-large-v2-multi-hans-zh-epoch-3-avg-10.pt \ + --llm-path-or-name Qwen/Qwen2.5-0.5B-Instruct \ + --manifest-dir data/fbank \ + --deepspeed \ + --deepspeed_config ./qwen_omni/ds_config_zero1.json \ + --use-flash-attn True \ + --use-lora True --unfreeze-llm True --unfreeze-speech-projector True --enable-speech-output True +""" + +import argparse +import copy +import logging +import os +import random +import warnings +from pathlib import Path +from shutil import copyfile +from typing import Any, Dict, List, Optional, Tuple, Union + +import torch +import torch.multiprocessing as mp +import torch.nn as nn +import transformers +from datasets import load_dataset + +from deepspeed.utils.zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict +from label_smoothing import LabelSmoothingLoss + +from lhotse.utils import fix_random_seed +from model import IGNORE_TOKEN_ID, SPEECH_LLM +from peft import LoraConfig, get_peft_model +from torch import Tensor +from torch.utils.tensorboard import SummaryWriter +from transformers import ( + AutoModelForCausalLM, + AutoTokenizer, + Qwen2Config, + Qwen2ForCausalLM, +) +from torchdata.stateful_dataloader import StatefulDataLoader +from torch.utils.data import DistributedSampler, DataLoader + +from train import add_model_arguments, add_training_arguments, get_params, get_model +from decode import audio_decode_cosyvoice2 +from utils import ( # filter_uneven_sized_batch, + AttributeDict, + MetricsTracker, + get_local_rank, + get_rank, + get_world_size, + setup_logger, + str2bool, +) +from cosyvoice.cli.cosyvoice import CosyVoice2 +sys.path.append("/lustre/fsw/general_sa/yuekaiz/s2s/CosyVoice/third_party/Matcha-TTS") + +DEFAULT_SPEECH_TOKEN = "" +try: + torch.multiprocessing.set_start_method("spawn") +except RuntimeError: + pass + + +def get_parser(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "--batch-size", + type=int, + default=1, + help="The batch size to use.", + ) + + parser.add_argument( + "--split-name", + type=str, + default="test_en", + choices=["wenetspeech4tts", "test_zh", "test_en", "test_hard"], + help="huggingface dataset split name", + ) + parser.add_argument( + "--token2wav-path", + type=str, + default="/workspace/CosyVoice-300M-SFT", + help="The path to the token2wav model", + ) + + add_model_arguments(parser) + + return parser + +def preprocess( + messages, + tokenizer: transformers.PreTrainedTokenizer, +) -> Dict: + """Preprocesses the data for supervised fine-tuning.""" + texts = [] + TEMPLATE = "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content']}}{% if loop.last %}{{ '<|im_end|>'}}{% else %}{{ '<|im_end|>\n' }}{% endif %}{% endfor %}" + for i, msg in enumerate(messages): + texts.append( + tokenizer.apply_chat_template( + msg, + tokenize=True, + chat_template=TEMPLATE, + add_generation_prompt=False, + padding="longest", # FIX me change padding to longest + truncation=False, + ) + ) + if len(texts) != len(messages): + logging.warning(f"Remove too long text, {messages} ") + max_len_texts = max([len(text) for text in texts]) + if 
tokenizer.padding_side == "right": + texts = [ + text + [tokenizer.pad_token_id] * (max_len_texts - len(text)) + for text in texts + ] + else: + texts = [ + [tokenizer.pad_token_id] * (max_len_texts - len(text)) + text + for text in texts + ] + input_ids = torch.tensor(texts, dtype=torch.int) + + target_ids = input_ids.clone() + target_ids[target_ids == tokenizer.pad_token_id] = IGNORE_TOKEN_ID + # mask all tokens before token_id with IGNORE_TOKEN_ID + # first get the indices of the tokens + mask_prompt = True + if mask_prompt: + default_speech_token_id = tokenizer.convert_tokens_to_ids(DEFAULT_SPEECH_TOKEN) + mask_indices = torch.where(input_ids == default_speech_token_id) + for i in range(mask_indices[0].size(0)): + row = mask_indices[0][i] + col = mask_indices[1][i] + # + 2 to skip: 'assistant', '\n' + # WAR: TODO FIXME check qwen3 + # THIS IS THE ONLY DIFFERENCE FROM preprocess + target_ids[row, : col + 6] = IGNORE_TOKEN_ID + target_ids[row, col] = default_speech_token_id + # remove default_speech_token_id from target_ids and input_ids + batch_size = target_ids.size(0) + + target_ids = target_ids[target_ids != default_speech_token_id].view(batch_size, -1) + input_ids = input_ids[input_ids != default_speech_token_id].view(batch_size, -1) + + attention_mask = input_ids.ne(tokenizer.pad_token_id) + return input_ids, attention_mask, target_ids + +def data_collator(batch): + prompt_texts, prompt_speech_16k, messages, ids = [], [], [], [] + for i, item in enumerate(batch): + # speech_tokens.append(item["prompt_audio_cosy2_tokens"]) + message_list_item = [] + message_list_item += [ + {"role": "user", "content": f"Generate a speech from the following text:\n\n{item['target_text']}{DEFAULT_SPEECH_TOKEN}"}, + {"role": "assistant", "content": ""}, + ] + messages.append(message_list_item) + + ids.append(item["id"]) + prompt_texts.append(item["prompt_text"]) + prompt_speech_16k.append(item["prompt_audio"]) + print(item["prompt_audio"], 233333333333333333) + + + return { + "prompt_texts": prompt_texts, + "prompt_speech_16k": prompt_speech_16k, + "messages": messages, + "ids": ids, + } + +def run(rank, world_size, args): + """ + Args: + rank: + It is a value between 0 and `world_size-1`, which is + passed automatically by `mp.spawn()` in :func:`main`. + The node with rank 0 is responsible for saving checkpoint. + world_size: + Number of GPUs for DDP training. 
+ args: + The return value of get_parser().parse_args() + """ + params = get_params() + params.update(vars(args)) + params.log_dir = Path(params.exp_dir) / f"log-results-wav" + params.log_dir.mkdir(parents=True, exist_ok=True) + + fix_random_seed(params.seed) + + if rank == 0: + setup_logger(f"{params.exp_dir}/log/log-decode-tts") + logging.info(params) + logging.info("About to create model") + model, tokenizer = get_model(params) + if torch.cuda.is_available(): + device = torch.device("cuda", get_local_rank()) + else: + device = torch.device("cpu") + logging.info(f"Device: {device}") + model.to(device) + + assert params.deepspeed and world_size > 1 + logging.info("Using DeepSpeed") + + dataset = load_dataset("yuekai/seed_tts_cosy2", split=params.split_name) + + sampler = DistributedSampler(dataset, num_replicas=world_size, rank=rank) + data_loader = DataLoader( + dataset, + batch_size=params.batch_size, + sampler=sampler, + shuffle=False, + num_workers=1, + prefetch_factor=1, + collate_fn=data_collator + ) + token2wav_model = CosyVoice2( + params.token2wav_path, load_jit=False, load_trt=False, fp16=False + ) + for batch in data_loader: + messages = batch["messages"] + prompt_texts = batch["prompt_texts"] + prompt_speech_16k = batch["prompt_speech_16k"] + ids = batch["ids"] + input_ids, attention_mask, _ = preprocess(messages, tokenizer) + generated_ids, generated_speech_output = model.decode_with_speech_output( + None, input_ids.to(device, dtype=torch.long), attention_mask.to(device) + ) + generated_speech_output = [ + generated_speech_output + ] # WAR: only support batch = 1 for now + for cut_id, audio_tokens, prompt_text, prompt_speech in zip(ids, generated_speech_output, prompt_texts, prompt_speech_16k): + speech_file_name = params.log_dir / f"{cut_id}.wav" + audio_tokens = torch.tensor(audio_tokens, dtype=torch.int32).unsqueeze(0) + if "CosyVoice2" in params.token2wav_path: + audio_hat = audio_decode_cosyvoice2( + audio_tokens, + prompt_text, + prompt_speech, + token2wav_model, + ) + sf.write(speech_file_name, audio_hat.squeeze(0).cpu().numpy(), 24000) + + logging.info("Done!") + +def main(): + parser = get_parser() + args = parser.parse_args() + args.exp_dir = Path(args.exp_dir) + + world_size = get_world_size() + rank = get_rank() + + torch.set_num_threads(1) + torch.set_num_interop_threads(1) + warnings.filterwarnings("ignore", category=FutureWarning) + run(rank=rank, world_size=world_size, args=args) + + +if __name__ == "__main__": + main() diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/model.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/model.py index 5ba3c1a7c..3def803b5 100644 --- a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/model.py +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/model.py @@ -479,12 +479,12 @@ class SPEECH_LLM(nn.Module): start_idx == start_idx_re_compute ), f"start_idx: {start_idx}, start_idx_re_compute: {start_idx_re_compute}" if text_input_embeds.shape[0] > audio_embeddings.shape[1] - start_idx: - text_input_embeds = text_input_embeds[ - : audio_embeddings.shape[1] - start_idx - ] logging.warning( f"Truncate text_input_embeds: {text_input_embeds.shape} to {audio_embeddings.shape[1] - start_idx}" ) + text_input_embeds = text_input_embeds[ + : audio_embeddings.shape[1] - start_idx + ] audio_embeddings[ i, start_idx : start_idx + text_input_embeds.shape[0] ] += text_input_embeds @@ -592,35 +592,29 @@ class SPEECH_LLM(nn.Module): - generated_speech_tokens: List of lists, where each inner list contains the generated speech codec tokens for a batch item. 
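        Call sketch (mirrors decode.py and decode_tts.py; batch size must be 1,
        and the return-value names here are illustrative):

            # with an audio prompt (speech-to-speech decoding)
            text_ids, codec_tokens = model.decode_with_speech_output(fbank, input_ids, attention_mask)
            # text-only prompt (TTS stage): pass None for fbank
            text_ids, codec_tokens = model.decode_with_speech_output(None, input_ids, attention_mask)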
""" - assert fbank.shape[0] == 1, "Batch size must be 1 for speech generation." - if ( - not self.codec_lm - or not self.speech_token_projector - or not self.codec_lm_head - ): - raise ValueError( - "codec_lm and associated layers must be initialized to generate speech output." - ) + batch_size = input_ids.shape[0] + assert batch_size == 1, "Batch size must be 1 for speech generation." device = next(self.parameters()).device # Use model's device - batch_size = fbank.shape[0] - - # --- 1. Prepare Prompt Embeddings --- - encoder_outs = self.encoder(fbank) - speech_features = self.encoder_projector(encoder_outs) - speech_features = speech_features.to(self.llm.dtype) # Ensure matching dtype prompt_embeds = self.llm.get_input_embeddings()(input_ids) # Merge speech features with prompt embeddings - ( - merged_prompt_inputs_embeds, - merged_prompt_attention_mask, - _, - _, - ) = self._merge_input_ids_with_speech_features( - speech_features, prompt_embeds, input_ids, attention_mask - ) + if fbank is not None: + encoder_outs = self.encoder(fbank) + speech_features = self.encoder_projector(encoder_outs) + speech_features = speech_features.to(self.llm.dtype) # Ensure matching dtype + ( + merged_prompt_inputs_embeds, + merged_prompt_attention_mask, + _, + _, + ) = self._merge_input_ids_with_speech_features( + speech_features, prompt_embeds, input_ids, attention_mask + ) + else: + merged_prompt_inputs_embeds = prompt_embeds + merged_prompt_attention_mask = attention_mask # --- 2. Generate Text using LLM --- # Use merged embeds/mask as input to generate From 49256fa91751868cc2ea022e12efbc4a161ae99b Mon Sep 17 00:00:00 2001 From: root Date: Wed, 28 May 2025 02:34:07 +0000 Subject: [PATCH 53/57] fix tts stage decode --- egs/speech_llm/SPEECH2SPEECH/exp.sh | 12 +- .../SPEECH2SPEECH/qwen_omni/decode.py | 15 +- .../SPEECH2SPEECH/qwen_omni/decode_tts.py | 64 ++++-- .../SPEECH2SPEECH/qwen_omni/utils.py | 206 +++++++++++++++++- 4 files changed, 252 insertions(+), 45 deletions(-) diff --git a/egs/speech_llm/SPEECH2SPEECH/exp.sh b/egs/speech_llm/SPEECH2SPEECH/exp.sh index 2e8085fe7..03461b97b 100644 --- a/egs/speech_llm/SPEECH2SPEECH/exp.sh +++ b/egs/speech_llm/SPEECH2SPEECH/exp.sh @@ -2,8 +2,8 @@ # fix segmentation fault reported in https://github.com/k2-fsa/icefall/issues/674 export PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python - - +export PYTHONPATH=$PYTHONPATH:/workspace/CosyVoice +export HF_HOME="/lustre/fsw/general_sa/yuekaiz/.cache/huggingface" set -eou pipefail stage=$1 @@ -121,7 +121,6 @@ if [ $stage -le 18 ] && [ $stop_stage -ge 18 ]; then $train_cmd_args fi -export HF_HOME="/lustre/fsw/general_sa/yuekaiz/.cache/huggingface" if [ $stage -le 19 ] && [ $stop_stage -ge 19 ]; then log "stage 19: Training TTS Model" exp_dir=./qwen_omni/exp_tts @@ -218,16 +217,17 @@ if [ $stage -le 20 ] && [ $stop_stage -ge 20 ]; then $train_cmd_args fi + if [ $stage -le 21 ] && [ $stop_stage -ge 21 ]; then log "stage 21: TTS Decoding Test Set" exp_dir=./qwen_omni/exp_tts - torchrun --nproc_per_node=4 python3 ./qwen_omni/decode_tts.py \ + torchrun --nproc_per_node=2 ./qwen_omni/decode_tts.py \ --exp-dir $exp_dir \ --speech-encoder-path-or-name models/large-v2.pt \ --llm-path-or-name models/Qwen2.5-0.5B-Instruct \ --pretrained-model-path $exp_dir/checkpoint-32001/pytorch_model.bin \ --use-flash-attn True \ --enable-speech-output True \ - --token2wav-path /lustre/fsw/general_sa/yuekaiz/s2s/CosyVoice2-0.5B \ + --token2wav-path /workspace/CosyVoice2-0.5B \ --use-lora True -fi \ No newline at end of file +fi diff --git 
a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/decode.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/decode.py index 43f6e95b3..8e915cf26 100755 --- a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/decode.py +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/decode.py @@ -63,16 +63,9 @@ from model import SPEECH_LLM, EncoderProjector from peft import LoraConfig, get_peft_model from train import DEFAULT_SPEECH_TOKEN, add_model_arguments from transformers import AutoModelForCausalLM, AutoTokenizer, Qwen2Config +from utils import AttributeDict, setup_logger, store_transcripts, write_error_stats from whisper_encoder_forward_monkey_patch import replace_whisper_encoder_forward -from icefall.env import get_env_info -from icefall.utils import ( - AttributeDict, - setup_logger, - store_transcripts, - write_error_stats, -) - sys.path.append("/workspace/CosyVoice/third_party/Matcha-TTS") @@ -418,11 +411,7 @@ def get_parser(): def get_params() -> AttributeDict: - params = AttributeDict( - { - "env_info": get_env_info(), - } - ) + params = AttributeDict({}) return params diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/decode_tts.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/decode_tts.py index 3dcb9d7fe..c9383232c 100755 --- a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/decode_tts.py +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/decode_tts.py @@ -40,36 +40,34 @@ import copy import logging import os import random +import sys import warnings from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union +import soundfile as sf import torch import torch.multiprocessing as mp import torch.nn as nn import transformers -from datasets import load_dataset - -from deepspeed.utils.zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict +from cosyvoice.cli.cosyvoice import CosyVoice2 +from datasets import Audio, load_dataset +from decode import audio_decode_cosyvoice2 from label_smoothing import LabelSmoothingLoss - from lhotse.utils import fix_random_seed from model import IGNORE_TOKEN_ID, SPEECH_LLM from peft import LoraConfig, get_peft_model from torch import Tensor +from torch.utils.data import DataLoader, DistributedSampler from torch.utils.tensorboard import SummaryWriter +from train import add_model_arguments, add_training_arguments, get_model, get_params from transformers import ( AutoModelForCausalLM, AutoTokenizer, Qwen2Config, Qwen2ForCausalLM, ) -from torchdata.stateful_dataloader import StatefulDataLoader -from torch.utils.data import DistributedSampler, DataLoader - -from train import add_model_arguments, add_training_arguments, get_params, get_model -from decode import audio_decode_cosyvoice2 from utils import ( # filter_uneven_sized_batch, AttributeDict, MetricsTracker, @@ -79,9 +77,9 @@ from utils import ( # filter_uneven_sized_batch, setup_logger, str2bool, ) -from cosyvoice.cli.cosyvoice import CosyVoice2 -sys.path.append("/lustre/fsw/general_sa/yuekaiz/s2s/CosyVoice/third_party/Matcha-TTS") +# sys.path.append("/lustre/fsw/general_sa/yuekaiz/s2s/CosyVoice/third_party/Matcha-TTS") +sys.path.append("/workspace/CosyVoice/third_party/Matcha-TTS") DEFAULT_SPEECH_TOKEN = "" try: torch.multiprocessing.set_start_method("spawn") @@ -116,9 +114,11 @@ def get_parser(): ) add_model_arguments(parser) + add_training_arguments(parser) return parser - + + def preprocess( messages, tokenizer: transformers.PreTrainedTokenizer, @@ -177,30 +177,41 @@ def preprocess( attention_mask = input_ids.ne(tokenizer.pad_token_id) return input_ids, attention_mask, target_ids + def 
data_collator(batch): - prompt_texts, prompt_speech_16k, messages, ids = [], [], [], [] + prompt_texts, prompt_speech_16k, messages, ids, target_texts = [], [], [], [], [] for i, item in enumerate(batch): # speech_tokens.append(item["prompt_audio_cosy2_tokens"]) message_list_item = [] message_list_item += [ - {"role": "user", "content": f"Generate a speech from the following text:\n\n{item['target_text']}{DEFAULT_SPEECH_TOKEN}"}, + { + "role": "user", + "content": f"Generate a speech from the following text:\n\n{item['target_text']}{DEFAULT_SPEECH_TOKEN}", + }, {"role": "assistant", "content": ""}, ] messages.append(message_list_item) + target_texts.append(item["target_text"]) ids.append(item["id"]) prompt_texts.append(item["prompt_text"]) - prompt_speech_16k.append(item["prompt_audio"]) - print(item["prompt_audio"], 233333333333333333) + speech_org = item["prompt_audio"] + speech_org = torch.tensor(speech_org["array"], dtype=torch.float32).unsqueeze(0) + speech_org = speech_org.mean(dim=0, keepdim=True) + prompt_speech_16k.append(speech_org) + + # resample to 16k return { "prompt_texts": prompt_texts, + "target_texts": target_texts, "prompt_speech_16k": prompt_speech_16k, "messages": messages, "ids": ids, } + def run(rank, world_size, args): """ Args: @@ -215,7 +226,7 @@ def run(rank, world_size, args): """ params = get_params() params.update(vars(args)) - params.log_dir = Path(params.exp_dir) / f"log-results-wav" + params.log_dir = Path(params.exp_dir) / "log-results-wav" params.log_dir.mkdir(parents=True, exist_ok=True) fix_random_seed(params.seed) @@ -232,11 +243,9 @@ def run(rank, world_size, args): logging.info(f"Device: {device}") model.to(device) - assert params.deepspeed and world_size > 1 - logging.info("Using DeepSpeed") - dataset = load_dataset("yuekai/seed_tts_cosy2", split=params.split_name) - + dataset = dataset.cast_column("prompt_audio", Audio(sampling_rate=16000)) + sampler = DistributedSampler(dataset, num_replicas=world_size, rank=rank) data_loader = DataLoader( dataset, @@ -245,7 +254,7 @@ def run(rank, world_size, args): shuffle=False, num_workers=1, prefetch_factor=1, - collate_fn=data_collator + collate_fn=data_collator, ) token2wav_model = CosyVoice2( params.token2wav_path, load_jit=False, load_trt=False, fp16=False @@ -254,6 +263,7 @@ def run(rank, world_size, args): messages = batch["messages"] prompt_texts = batch["prompt_texts"] prompt_speech_16k = batch["prompt_speech_16k"] + target_texts = batch["target_texts"] ids = batch["ids"] input_ids, attention_mask, _ = preprocess(messages, tokenizer) generated_ids, generated_speech_output = model.decode_with_speech_output( @@ -262,8 +272,13 @@ def run(rank, world_size, args): generated_speech_output = [ generated_speech_output ] # WAR: only support batch = 1 for now - for cut_id, audio_tokens, prompt_text, prompt_speech in zip(ids, generated_speech_output, prompt_texts, prompt_speech_16k): + for cut_id, audio_tokens, prompt_text, prompt_speech, target_text in zip( + ids, generated_speech_output, prompt_texts, prompt_speech_16k, target_texts + ): speech_file_name = params.log_dir / f"{cut_id}.wav" + # save target_text to file + with open(params.log_dir / f"{cut_id}.txt", "w") as f: + f.write(f"{target_text}\n") audio_tokens = torch.tensor(audio_tokens, dtype=torch.int32).unsqueeze(0) if "CosyVoice2" in params.token2wav_path: audio_hat = audio_decode_cosyvoice2( @@ -276,6 +291,7 @@ def run(rank, world_size, args): logging.info("Done!") + def main(): parser = get_parser() args = parser.parse_args() @@ -285,7 +301,7 @@ 
def main(): rank = get_rank() torch.set_num_threads(1) - torch.set_num_interop_threads(1) + # torch.set_num_interop_threads(1) warnings.filterwarnings("ignore", category=FutureWarning) run(rank=rank, world_size=world_size, args=args) diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/utils.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/utils.py index f245712a8..0ebaa6eb4 100644 --- a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/utils.py +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/utils.py @@ -11,15 +11,16 @@ from collections import defaultdict from dataclasses import dataclass from datetime import datetime from pathlib import Path - from typing import Dict, Iterable, List, Optional, TextIO, Tuple, Union +import kaldialign import torch import torch.distributed as dist from torch.utils.tensorboard import SummaryWriter Pathlike = Union[str, Path] + def get_world_size(): if "WORLD_SIZE" in os.environ: return int(os.environ["WORLD_SIZE"]) @@ -37,6 +38,7 @@ def get_rank(): else: return 0 + def get_local_rank(): if "LOCAL_RANK" in os.environ: return int(os.environ["LOCAL_RANK"]) @@ -45,6 +47,7 @@ def get_local_rank(): else: return 0 + def str2bool(v): """Used in argparse.ArgumentParser.add_argument to indicate that a type is a bool type and user can enter @@ -63,6 +66,7 @@ def str2bool(v): else: raise argparse.ArgumentTypeError("Boolean value expected.") + class AttributeDict(dict): def __getattr__(self, key): if key in self: @@ -87,6 +91,7 @@ class AttributeDict(dict): tmp[k] = v return json.dumps(tmp, indent=indent, sort_keys=True) + def setup_logger( log_filename: Pathlike, log_level: str = "info", @@ -139,6 +144,7 @@ def setup_logger( console.setFormatter(logging.Formatter(formatter)) logging.getLogger("").addHandler(console) + class MetricsTracker(collections.defaultdict): def __init__(self): # Passing the type 'int' to the base-class constructor @@ -228,4 +234,200 @@ class MetricsTracker(collections.defaultdict): batch_idx: The current batch index, used as the x-axis of the plot. """ for k, v in self.norm_items(): - tb_writer.add_scalar(prefix + k, v, batch_idx) \ No newline at end of file + tb_writer.add_scalar(prefix + k, v, batch_idx) + + +def store_transcripts( + filename: Pathlike, texts: Iterable[Tuple[str, str, str]], char_level: bool = False +) -> None: + """Save predicted results and reference transcripts to a file. + + Args: + filename: + File to save the results to. + texts: + An iterable of tuples. The first element is the cur_id, the second is + the reference transcript and the third element is the predicted result. + If it is a multi-talker ASR system, the ref and hyp may also be lists of + strings. + Returns: + Return None. + """ + with open(filename, "w", encoding="utf8") as f: + for cut_id, ref, hyp in texts: + if char_level: + ref = list("".join(ref)) + hyp = list("".join(hyp)) + print(f"{cut_id}:\tref={ref}", file=f) + print(f"{cut_id}:\thyp={hyp}", file=f) + + +def write_error_stats( + f: TextIO, + test_set_name: str, + results: List[Tuple[str, str]], + enable_log: bool = True, + compute_CER: bool = False, + sclite_mode: bool = False, +) -> float: + """Write statistics based on predicted results and reference transcripts. + + It will write the following to the given file: + + - WER + - number of insertions, deletions, substitutions, corrects and total + reference words. For example:: + + Errors: 23 insertions, 57 deletions, 212 substitutions, over 2606 + reference words (2337 correct) + + - The difference between the reference transcript and predicted result. 
+ An instance is given below:: + + THE ASSOCIATION OF (EDISON->ADDISON) ILLUMINATING COMPANIES + + The above example shows that the reference word is `EDISON`, + but it is predicted to `ADDISON` (a substitution error). + + Another example is:: + + FOR THE FIRST DAY (SIR->*) I THINK + + The reference word `SIR` is missing in the predicted + results (a deletion error). + results: + An iterable of tuples. The first element is the cut_id, the second is + the reference transcript and the third element is the predicted result. + enable_log: + If True, also print detailed WER to the console. + Otherwise, it is written only to the given file. + Returns: + Return None. + """ + subs: Dict[Tuple[str, str], int] = defaultdict(int) + ins: Dict[str, int] = defaultdict(int) + dels: Dict[str, int] = defaultdict(int) + + # `words` stores counts per word, as follows: + # corr, ref_sub, hyp_sub, ins, dels + words: Dict[str, List[int]] = defaultdict(lambda: [0, 0, 0, 0, 0]) + num_corr = 0 + ERR = "*" + + if compute_CER: + for i, res in enumerate(results): + cut_id, ref, hyp = res + ref = list("".join(ref)) + hyp = list("".join(hyp)) + results[i] = (cut_id, ref, hyp) + + for cut_id, ref, hyp in results: + ali = kaldialign.align(ref, hyp, ERR, sclite_mode=sclite_mode) + for ref_word, hyp_word in ali: + if ref_word == ERR: + ins[hyp_word] += 1 + words[hyp_word][3] += 1 + elif hyp_word == ERR: + dels[ref_word] += 1 + words[ref_word][4] += 1 + elif hyp_word != ref_word: + subs[(ref_word, hyp_word)] += 1 + words[ref_word][1] += 1 + words[hyp_word][2] += 1 + else: + words[ref_word][0] += 1 + num_corr += 1 + ref_len = sum([len(r) for _, r, _ in results]) + sub_errs = sum(subs.values()) + ins_errs = sum(ins.values()) + del_errs = sum(dels.values()) + tot_errs = sub_errs + ins_errs + del_errs + tot_err_rate = "%.2f" % (100.0 * tot_errs / ref_len) + + if enable_log: + logging.info( + f"[{test_set_name}] %WER {tot_errs / ref_len:.2%} " + f"[{tot_errs} / {ref_len}, {ins_errs} ins, " + f"{del_errs} del, {sub_errs} sub ]" + ) + + print(f"%WER = {tot_err_rate}", file=f) + print( + f"Errors: {ins_errs} insertions, {del_errs} deletions, " + f"{sub_errs} substitutions, over {ref_len} reference " + f"words ({num_corr} correct)", + file=f, + ) + print( + "Search below for sections starting with PER-UTT DETAILS:, " + "SUBSTITUTIONS:, DELETIONS:, INSERTIONS:, PER-WORD STATS:", + file=f, + ) + + print("", file=f) + print("PER-UTT DETAILS: corr or (ref->hyp) ", file=f) + for cut_id, ref, hyp in results: + ali = kaldialign.align(ref, hyp, ERR) + combine_successive_errors = True + if combine_successive_errors: + ali = [[[x], [y]] for x, y in ali] + for i in range(len(ali) - 1): + if ali[i][0] != ali[i][1] and ali[i + 1][0] != ali[i + 1][1]: + ali[i + 1][0] = ali[i][0] + ali[i + 1][0] + ali[i + 1][1] = ali[i][1] + ali[i + 1][1] + ali[i] = [[], []] + ali = [ + [ + list(filter(lambda a: a != ERR, x)), + list(filter(lambda a: a != ERR, y)), + ] + for x, y in ali + ] + ali = list(filter(lambda x: x != [[], []], ali)) + ali = [ + [ + ERR if x == [] else " ".join(x), + ERR if y == [] else " ".join(y), + ] + for x, y in ali + ] + + print( + f"{cut_id}:\t" + + " ".join( + ( + ref_word if ref_word == hyp_word else f"({ref_word}->{hyp_word})" + for ref_word, hyp_word in ali + ) + ), + file=f, + ) + + print("", file=f) + print("SUBSTITUTIONS: count ref -> hyp", file=f) + + for count, (ref, hyp) in sorted([(v, k) for k, v in subs.items()], reverse=True): + print(f"{count} {ref} -> {hyp}", file=f) + + print("", file=f) + print("DELETIONS: count 
ref", file=f) + for count, ref in sorted([(v, k) for k, v in dels.items()], reverse=True): + print(f"{count} {ref}", file=f) + + print("", file=f) + print("INSERTIONS: count hyp", file=f) + for count, hyp in sorted([(v, k) for k, v in ins.items()], reverse=True): + print(f"{count} {hyp}", file=f) + + print("", file=f) + print("PER-WORD STATS: word corr tot_errs count_in_ref count_in_hyp", file=f) + for _, word, counts in sorted( + [(sum(v[1:]), k, v) for k, v in words.items()], reverse=True + ): + (corr, ref_sub, hyp_sub, ins, dels) = counts + tot_errs = ref_sub + hyp_sub + ins + dels + ref_count = corr + ref_sub + dels + hyp_count = corr + hyp_sub + ins + + print(f"{word} {corr} {tot_errs} {ref_count} {hyp_count}", file=f) + return float(tot_err_rate) From 4c0396f8f2004d89b5168811e836d870521f08d7 Mon Sep 17 00:00:00 2001 From: root Date: Mon, 2 Jun 2025 23:16:03 -0700 Subject: [PATCH 54/57] support text2speech ultrachat --- egs/speech_llm/SPEECH2SPEECH/exp.sh | 129 +++++++++--------- .../SPEECH2SPEECH/qwen_omni/model.py | 10 +- .../SPEECH2SPEECH/qwen_omni/train_tts.py | 121 ++++++++++++++-- .../SPEECH2SPEECH/qwen_omni/utils.py | 46 ++++++- 4 files changed, 224 insertions(+), 82 deletions(-) diff --git a/egs/speech_llm/SPEECH2SPEECH/exp.sh b/egs/speech_llm/SPEECH2SPEECH/exp.sh index 03461b97b..26b2c8745 100644 --- a/egs/speech_llm/SPEECH2SPEECH/exp.sh +++ b/egs/speech_llm/SPEECH2SPEECH/exp.sh @@ -3,7 +3,7 @@ # fix segmentation fault reported in https://github.com/k2-fsa/icefall/issues/674 export PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python export PYTHONPATH=$PYTHONPATH:/workspace/CosyVoice -export HF_HOME="/lustre/fsw/general_sa/yuekaiz/.cache/huggingface" +# export HF_HOME="/lustre/fsw/general_sa/yuekaiz/.cache/huggingface" set -eou pipefail stage=$1 @@ -123,7 +123,9 @@ fi if [ $stage -le 19 ] && [ $stop_stage -ge 19 ]; then log "stage 19: Training TTS Model" - exp_dir=./qwen_omni/exp_tts + exp_dir=./qwen_omni/exp_tts_ultra_chat_voice_assistant + exp_dir=./qwen_omni/exp_tts_emilia_en_tts_only_template + exp_dir=./qwen_omni/exp_tts_emilia_en_tts_three_concat pretrained_dir=./qwen_omni/exp_speech2text ngpu=4 @@ -141,17 +143,16 @@ if [ $stage -le 19 ] && [ $stop_stage -ge 19 ]; then fi done fi - - train_cmd_args="--batch-size 64 \ + # --dataset ultra_chat_voice_assistant + train_cmd_args="--batch-size 30 \ --exp-dir $exp_dir \ - --last-stage-model-path $pretrained_dir/checkpoint-58548/pytorch_model.bin \ --llm-path-or-name models/Qwen2.5-0.5B-Instruct \ --enable-speech-input False \ --deepspeed \ - --dataset /lustre/fsw/general_sa/yuekaiz/s2s/emilia_en \ + --dataset /lustre/fsw/general_sa/yuekaiz/s2s/VoxBox/manifests_emilia_en \ --deepspeed_config ./qwen_omni/ds_config_zero1.json \ --use-flash-attn True \ - --num-epochs 2 \ + --num-epochs 3 \ --use-lora False --unfreeze-llm False --enable-speech-output True" if [ "$latest_checkpoint_step" -ge 0 ]; then @@ -168,66 +169,66 @@ if [ $stage -le 19 ] && [ $stop_stage -ge 19 ]; then fi -if [ $stage -le 20 ] && [ $stop_stage -ge 20 ]; then - log "stage 20: Training TTS Model" - echo "cd /workspace && ln -s /lustre/fsw/general_sa/yuekaiz/s2s slam && cd -" - if [ ! -L "/workspace/slam" ]; then - cd /workspace && ln -s /lustre/fsw/general_sa/yuekaiz/s2s slam && cd - - fi - exp_dir=./qwen_omni/exp_test - ngpu=4 +# if [ $stage -le 20 ] && [ $stop_stage -ge 20 ]; then +# log "stage 20: Training TTS Model" +# echo "cd /workspace && ln -s /lustre/fsw/general_sa/yuekaiz/s2s slam && cd -" +# if [ ! 
-L "/workspace/slam" ]; then +# cd /workspace && ln -s /lustre/fsw/general_sa/yuekaiz/s2s slam && cd - +# fi +# exp_dir=./qwen_omni/exp_test +# ngpu=4 - latest_checkpoint_step=-1 - # Check if exp_dir exists and is a directory - if [ -d "$exp_dir" ]; then - # List directories matching checkpoint-* and find the one with the largest step number - for checkpoint_dir in $(ls -d $exp_dir/checkpoint-*/ 2>/dev/null | sort -V); do - checkpoint_name=$(basename "$checkpoint_dir") # e.g., checkpoint-1000 - # Extract step number using parameter expansion - current_step=${checkpoint_name#checkpoint-} - # Ensure current_step is a number - if [[ "$current_step" =~ ^[0-9]+$ ]] && [ "$current_step" -gt "$latest_checkpoint_step" ]; then - latest_checkpoint_step=$current_step - fi - done - fi +# latest_checkpoint_step=-1 +# # Check if exp_dir exists and is a directory +# if [ -d "$exp_dir" ]; then +# # List directories matching checkpoint-* and find the one with the largest step number +# for checkpoint_dir in $(ls -d $exp_dir/checkpoint-*/ 2>/dev/null | sort -V); do +# checkpoint_name=$(basename "$checkpoint_dir") # e.g., checkpoint-1000 +# # Extract step number using parameter expansion +# current_step=${checkpoint_name#checkpoint-} +# # Ensure current_step is a number +# if [[ "$current_step" =~ ^[0-9]+$ ]] && [ "$current_step" -gt "$latest_checkpoint_step" ]; then +# latest_checkpoint_step=$current_step +# fi +# done +# fi - train_cmd_args="--max-duration 150 \ - --enable-musan False \ - --exp-dir $exp_dir \ - --speech-encoder-path-or-name models/large-v2.pt \ - --llm-path-or-name Qwen/Qwen2.5-0.5B-Instruct \ - --dataset vocalnet_ultrachat_voiceassistant \ - --manifest-dir data/fbank \ - --deepspeed \ - --deepspeed_config ./qwen_omni/ds_config_zero1.json \ - --use-flash-attn True --on-the-fly-feats True \ - --use-lora True --unfreeze-llm True --unfreeze-speech-projector True --enable-speech-output True" +# train_cmd_args="--max-duration 150 \ +# --enable-musan False \ +# --exp-dir $exp_dir \ +# --speech-encoder-path-or-name models/large-v2.pt \ +# --llm-path-or-name Qwen/Qwen2.5-0.5B-Instruct \ +# --dataset vocalnet_ultrachat_voiceassistant \ +# --manifest-dir data/fbank \ +# --deepspeed \ +# --deepspeed_config ./qwen_omni/ds_config_zero1.json \ +# --use-flash-attn True --on-the-fly-feats True \ +# --use-lora True --unfreeze-llm True --unfreeze-speech-projector True --enable-speech-output True" - if [ "$latest_checkpoint_step" -ge 0 ]; then - log "Continuing training from checkpoint-$latest_checkpoint_step" - step=$latest_checkpoint_step - train_cmd_args="$train_cmd_args --pretrained-model-path $exp_dir/checkpoint-${step}/pytorch_model.bin --sampler-state-dict-path $exp_dir/checkpoint-${step}/sampler.pt" - else - log "Starting training from scratch as no checkpoint was found in $exp_dir" - # No pretrained model or sampler state dict needed for the first run - fi +# if [ "$latest_checkpoint_step" -ge 0 ]; then +# log "Continuing training from checkpoint-$latest_checkpoint_step" +# step=$latest_checkpoint_step +# train_cmd_args="$train_cmd_args --pretrained-model-path $exp_dir/checkpoint-${step}/pytorch_model.bin --sampler-state-dict-path $exp_dir/checkpoint-${step}/sampler.pt" +# else +# log "Starting training from scratch as no checkpoint was found in $exp_dir" +# # No pretrained model or sampler state dict needed for the first run +# fi - torchrun --nproc_per_node $ngpu --nnodes $SLURM_JOB_NUM_NODES --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d --rdzv_id $SLURM_JOBID ./qwen_omni/train.py 
\ - $train_cmd_args -fi +# torchrun --nproc_per_node $ngpu --nnodes $SLURM_JOB_NUM_NODES --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d --rdzv_id $SLURM_JOBID ./qwen_omni/train.py \ +# $train_cmd_args +# fi -if [ $stage -le 21 ] && [ $stop_stage -ge 21 ]; then - log "stage 21: TTS Decoding Test Set" - exp_dir=./qwen_omni/exp_tts - torchrun --nproc_per_node=2 ./qwen_omni/decode_tts.py \ - --exp-dir $exp_dir \ - --speech-encoder-path-or-name models/large-v2.pt \ - --llm-path-or-name models/Qwen2.5-0.5B-Instruct \ - --pretrained-model-path $exp_dir/checkpoint-32001/pytorch_model.bin \ - --use-flash-attn True \ - --enable-speech-output True \ - --token2wav-path /workspace/CosyVoice2-0.5B \ - --use-lora True -fi +# if [ $stage -le 21 ] && [ $stop_stage -ge 21 ]; then +# log "stage 21: TTS Decoding Test Set" +# exp_dir=./qwen_omni/exp_tts +# torchrun --nproc_per_node=2 ./qwen_omni/decode_tts.py \ +# --exp-dir $exp_dir \ +# --speech-encoder-path-or-name models/large-v2.pt \ +# --llm-path-or-name models/Qwen2.5-0.5B-Instruct \ +# --pretrained-model-path $exp_dir/checkpoint-32001/pytorch_model.bin \ +# --use-flash-attn True \ +# --enable-speech-output True \ +# --token2wav-path /workspace/CosyVoice2-0.5B \ +# --use-lora True +# fi diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/model.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/model.py index 3def803b5..baec602bb 100644 --- a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/model.py +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/model.py @@ -437,7 +437,8 @@ class SPEECH_LLM(nn.Module): audio_attention_mask = audio_codes.ne(self.codec_lm.config.pad_token_id) audio_embeddings = self.codec_lm.get_input_embeddings()(audio_codes) - text_last_hidden_lists, text_embeds_list, text_input_embeds_list = [], [], [] + # text_last_hidden_lists, text_embeds_list, text_input_embeds_list = [], [], [] + text_input_embeds_list = [] for i in range(len(text_label_start_index_list)): text_last_hidden = model_outputs.hidden_states[-1][ i, @@ -445,14 +446,14 @@ class SPEECH_LLM(nn.Module): + input_seq_len[i] - 1, ] - text_last_hidden_lists.append(text_last_hidden) + # text_last_hidden_lists.append(text_last_hidden) text_embed = inputs_embeds[ i, text_input_start_index_list[i] + 1 : text_input_start_index_list[i] + input_seq_len[i], ] # exclude bos - text_embeds_list.append(text_embed) + # text_embeds_list.append(text_embed) text_input_embeds = torch.cat( [ @@ -480,8 +481,9 @@ class SPEECH_LLM(nn.Module): ), f"start_idx: {start_idx}, start_idx_re_compute: {start_idx_re_compute}" if text_input_embeds.shape[0] > audio_embeddings.shape[1] - start_idx: logging.warning( - f"Truncate text_input_embeds: {text_input_embeds.shape} to {audio_embeddings.shape[1] - start_idx}" + f"Truncate text_input_embeds: {text_input_embeds.shape} to {audio_embeddings.shape[1] - start_idx}\naudio_codes_lens: {audio_codes_lens[i]}\ninput_question_len_list: {input_question_len_list[i]}\ninput_seq_len: {input_seq_len[i]}\n" ) + # breakpoint() text_input_embeds = text_input_embeds[ : audio_embeddings.shape[1] - start_idx ] diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train_tts.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train_tts.py index 38132e71e..6d35ed6a9 100755 --- a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train_tts.py +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train_tts.py @@ -68,6 +68,7 @@ from transformers import ( ) from torchdata.stateful_dataloader import StatefulDataLoader from torch.utils.data import DistributedSampler, DataLoader +from pathlib import Path from train import 
add_model_arguments, add_training_arguments, get_params, get_model from utils import ( # filter_uneven_sized_batch, @@ -171,11 +172,14 @@ def data_collator(batch): {"role": "user", "content": f"Generate a speech from the following text:\n\n{item['text']}{DEFAULT_SPEECH_TOKEN}"}, {"role": "assistant", "content": item["text"]}, ] + # message_list_item += [ + # {"role": "user", "content": f"TTS{DEFAULT_SPEECH_TOKEN}"}, + # {"role": "assistant", "content": item["text"]}, + # ] messages.append(message_list_item) durations.append(item["duration"]) - ids.append(item["id"]) + ids.append(item["index"] if "index" in item else item["id"]) lang.append(item["language"]) - dnsmos.append(item["dnsmos"]) return { "speech_tokens": speech_tokens, @@ -183,7 +187,92 @@ def data_collator(batch): "durations": durations, "ids": ids, "lang": lang, - "dnsmos": dnsmos, + } + +def data_collator_concate_items(batch, concat_items_num: int = 3): + """Concatenate *concat_items_num* consecutive dataset items into one. + + The function groups the incoming ``batch`` (a list of dataset items) + into non-overlapping chunks of *concat_items_num*. For each group it + concatenates the textual fields and speech codec tokens so that the + model generates one longer utterance instead of several short ones. + + Any remainder (when ``len(batch)`` is not divisible by + *concat_items_num*) is also kept as a smaller group. + """ + + grouped_speech_tokens, grouped_messages, grouped_durations = [], [], [] + grouped_ids, grouped_lang = [], [] + + # Iterate over the batch in strides of *concat_items_num* + for start_idx in range(0, len(batch), concat_items_num): + group = batch[start_idx : start_idx + concat_items_num] + if not group: + continue + + # 1) Speech tokens -------------------------------------------------- + # ``item['code']`` can be a list[int] or a 1-D tensor. Use the first + # element to decide how to concatenate. 
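(A minimal standalone sketch of the grouping described in the docstring above, using hypothetical field names and omitting the chat-template message building that the real collator adds:)

import torch

def concat_consecutive(items, n=3):
    """Yield non-overlapping groups of n items with codes/text/durations concatenated."""
    for start in range(0, len(items), n):
        group = items[start:start + n]
        codes = []
        for it in group:
            code = it["code"]
            codes.extend(code.tolist() if isinstance(code, torch.Tensor) else code)
        yield {
            "code": codes,
            "text": "".join(it["text"] for it in group),
            "duration": sum(it["duration"] for it in group),
        }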
+ first_code = group[0]["code"] + if isinstance(first_code, torch.Tensor): + concat_code = torch.cat([item["code"] for item in group], dim=0) + else: + # assume list / iterable of ints + concat_code = [] + for item in group: + concat_code.extend(item["code"]) + + # 2) Text ----------------------------------------------------------- + concat_text = "".join([item["text"] for item in group]) + + # 3) Build chat template messages ----------------------------------- + message_list_item = [ + { + "role": "user", + "content": f"Generate a speech from the following text:\n\n{concat_text}{DEFAULT_SPEECH_TOKEN}", + }, + {"role": "assistant", "content": concat_text}, + ] + + # 4) Misc meta fields ---------------------------------------------- + total_duration = sum(item["duration"] for item in group) + group_ids = [item.get("index", item.get("id")) for item in group] + language = group[0].get("language", "") + + # 5) Append to output lists ---------------------------------------- + grouped_speech_tokens.append(concat_code) + grouped_messages.append(message_list_item) + grouped_durations.append(total_duration) + grouped_ids.append(group_ids) + grouped_lang.append(language) + + return { + "speech_tokens": grouped_speech_tokens, + "messages": grouped_messages, + "durations": grouped_durations, + "ids": grouped_ids, + "lang": grouped_lang, + } + +def data_collator_ultra_chat(batch): + speech_tokens, messages, durations, ids, lang, dnsmos = [], [], [], [], [], [] + for i, item in enumerate(batch): + speech_tokens.append(item["custom"]["speech_token"]) + text = item["supervisions"][0]["text"] + message_list_item = [] + message_list_item += [ + {"role": "user", "content": f"Generate a speech from the following text:\n\n{text}{DEFAULT_SPEECH_TOKEN}"}, + {"role": "assistant", "content": text}, + ] + messages.append(message_list_item) + durations.append(item["duration"]) + ids.append(item["id"]) + + return { + "speech_tokens": speech_tokens, + "messages": messages, + "durations": durations, + "ids": ids, } def compute_loss( @@ -470,13 +559,21 @@ def run(rank, world_size, args): sampler_state_dict = None if params.sampler_state_dict_path: sampler_state_dict = torch.load(params.sampler_state_dict_path) - # print(params.dataset) - ds = load_dataset(params.dataset, split="train") - # shuffle the dataset - ds = ds.shuffle(seed=42) - train_test_split = ds.train_test_split(test_size=1000, seed=42) - train_dataset, eval_dataset = train_test_split["train"], train_test_split["test"] - # train_dataset, eval_dataset = train_test_split["test"], train_test_split["test"] + if params.dataset == "ultra_chat_voice_assistant": + data_dir = "data/fbank" + json_file_lists = ["data/fbank/cuts_voice_assistant_00001-00049.jsonl", "data/fbank/cuts_ultrachat_train.jsonl.gz"] + ds = load_dataset("json", data_files=json_file_lists, split="train") + # shuffle the dataset + train_dataset = ds.shuffle(seed=42) + eval_dataset = load_dataset("json", data_files=["data/fbank/cuts_voice_assistant.00000.jsonl"], split="train") + else: + data_dir = Path(params.dataset) + json_file_lists = [str(file) for file in data_dir.glob("*.jsonl")] + ds = load_dataset("json", data_files=json_file_lists, split="train") + # shuffle the dataset + ds = ds.shuffle(seed=42) + train_test_split = ds.train_test_split(test_size=1000, seed=42) + train_dataset, eval_dataset = train_test_split["train"], train_test_split["test"] sampler = DistributedSampler(train_dataset, num_replicas=world_size, rank=rank) train_dl = StatefulDataLoader( @@ -486,7 +583,7 @@ def run(rank, 
world_size, args): shuffle=False, num_workers=4, prefetch_factor=2, - collate_fn=data_collator + collate_fn=data_collator_ultra_chat if params.dataset == "ultra_chat_voice_assistant" else data_collator ) train_dl.load_state_dict(sampler_state_dict) valid_sampler = DistributedSampler(eval_dataset, num_replicas=world_size, rank=rank) @@ -497,7 +594,7 @@ def run(rank, world_size, args): shuffle=False, num_workers=1, prefetch_factor=1, - collate_fn=data_collator + collate_fn=data_collator_ultra_chat if params.dataset == "ultra_chat_voice_assistant" else data_collator ) if args.tensorboard and rank == 0: diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/utils.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/utils.py index 0ebaa6eb4..81f7c0d5c 100644 --- a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/utils.py +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/utils.py @@ -12,12 +12,12 @@ from dataclasses import dataclass from datetime import datetime from pathlib import Path from typing import Dict, Iterable, List, Optional, TextIO, Tuple, Union - +from tqdm import tqdm import kaldialign import torch import torch.distributed as dist from torch.utils.tensorboard import SummaryWriter - +import numpy as np Pathlike = Union[str, Path] @@ -431,3 +431,45 @@ def write_error_stats( print(f"{word} {corr} {tot_errs} {ref_count} {hyp_count}", file=f) return float(tot_err_rate) + + +def get_dataset_statistics(dataset, save_filename): + speech_token_lengths = [] + text_lengths = [] + for item in tqdm(dataset): + if 'custom' not in item: + speech_token = item["code"] + text = item["text"] + else: + speech_token = item["custom"]["speech_token"] + text = item["supervisions"][0]["text"] + speech_token_lengths.append(len(speech_token)) + text_lengths.append(len(text)) + speech_token_length_array = np.array(speech_token_lengths) + text_length_array = np.array(text_lengths) + # 计算并存储统计指标 + def get_length_stats(lengths_array): + length_stats = [] + length_stats.append(["count", f"{len(lengths_array)}"]) # 总数 + length_stats.append(["mean", f"{np.mean(lengths_array):.1f}"]) + length_stats.append(["std", f"{np.std(lengths_array):.1f}"]) + length_stats.append(["min", f"{np.min(lengths_array):.1f}"]) + length_stats.append(["25%", f"{np.percentile(lengths_array, 25):.1f}"]) + length_stats.append(["50% (median)", f"{np.median(lengths_array):.1f}"]) # median 和 50% percentile 是一样的 + length_stats.append(["75%", f"{np.percentile(lengths_array, 75):.1f}"]) + length_stats.append(["99%", f"{np.percentile(lengths_array, 99):.1f}"]) + length_stats.append(["99.5%", f"{np.percentile(lengths_array, 99.5):.1f}"]) + length_stats.append(["99.9%", f"{np.percentile(lengths_array, 99.9):.1f}"]) + length_stats.append(["max", f"{np.max(lengths_array):.1f}"]) + return length_stats + speech_length_stats = get_length_stats(speech_token_length_array) + text_length_stats = get_length_stats(text_length_array) + with open(save_filename, "w") as f: + print("speech_tokens 长度统计指标:", file=f) + for stat_name, stat_value in speech_length_stats: + print(f"{stat_name:<15}: {stat_value}", file=f) + print("\ntext 长度统计指标:", file=f) + for stat_name, stat_value in text_length_stats: + print(f"{stat_name:<15}: {stat_value}", file=f) + + return speech_token_lengths, text_lengths From 5becf6927dbea6db288a6091ae47093455708bf6 Mon Sep 17 00:00:00 2001 From: root Date: Tue, 3 Jun 2025 00:18:21 -0700 Subject: [PATCH 55/57] remove concat three items --- .../SPEECH2SPEECH/qwen_omni/train_tts.py | 67 +------------------ 1 file changed, 1 insertion(+), 66 deletions(-) diff --git 
a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train_tts.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train_tts.py index 6d35ed6a9..e505c0700 100755 --- a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train_tts.py +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train_tts.py @@ -189,71 +189,6 @@ def data_collator(batch): "lang": lang, } -def data_collator_concate_items(batch, concat_items_num: int = 3): - """Concatenate *concat_items_num* consecutive dataset items into one. - - The function groups the incoming ``batch`` (a list of dataset items) - into non-overlapping chunks of *concat_items_num*. For each group it - concatenates the textual fields and speech codec tokens so that the - model generates one longer utterance instead of several short ones. - - Any remainder (when ``len(batch)`` is not divisible by - *concat_items_num*) is also kept as a smaller group. - """ - - grouped_speech_tokens, grouped_messages, grouped_durations = [], [], [] - grouped_ids, grouped_lang = [], [] - - # Iterate over the batch in strides of *concat_items_num* - for start_idx in range(0, len(batch), concat_items_num): - group = batch[start_idx : start_idx + concat_items_num] - if not group: - continue - - # 1) Speech tokens -------------------------------------------------- - # ``item['code']`` can be a list[int] or a 1-D tensor. Use the first - # element to decide how to concatenate. - first_code = group[0]["code"] - if isinstance(first_code, torch.Tensor): - concat_code = torch.cat([item["code"] for item in group], dim=0) - else: - # assume list / iterable of ints - concat_code = [] - for item in group: - concat_code.extend(item["code"]) - - # 2) Text ----------------------------------------------------------- - concat_text = "".join([item["text"] for item in group]) - - # 3) Build chat template messages ----------------------------------- - message_list_item = [ - { - "role": "user", - "content": f"Generate a speech from the following text:\n\n{concat_text}{DEFAULT_SPEECH_TOKEN}", - }, - {"role": "assistant", "content": concat_text}, - ] - - # 4) Misc meta fields ---------------------------------------------- - total_duration = sum(item["duration"] for item in group) - group_ids = [item.get("index", item.get("id")) for item in group] - language = group[0].get("language", "") - - # 5) Append to output lists ---------------------------------------- - grouped_speech_tokens.append(concat_code) - grouped_messages.append(message_list_item) - grouped_durations.append(total_duration) - grouped_ids.append(group_ids) - grouped_lang.append(language) - - return { - "speech_tokens": grouped_speech_tokens, - "messages": grouped_messages, - "durations": grouped_durations, - "ids": grouped_ids, - "lang": grouped_lang, - } - def data_collator_ultra_chat(batch): speech_tokens, messages, durations, ids, lang, dnsmos = [], [], [], [], [], [] for i, item in enumerate(batch): @@ -550,7 +485,7 @@ def run(rank, world_size, args): logging.info(f"Device: {device}") model.to(device) - assert params.deepspeed and world_size > 1 + # assert params.deepspeed and world_size > 1 logging.info("Using DeepSpeed") model, optimizer, _, scheduler = deepspeed.initialize( args=params, model=model, model_parameters=model.parameters() From 80677a55f86a4320c34d3f3958abbd61a5144efd Mon Sep 17 00:00:00 2001 From: root Date: Tue, 3 Jun 2025 00:48:39 -0700 Subject: [PATCH 56/57] remove stats --- .../SPEECH2SPEECH/qwen_omni/utils.py | 44 +------------------ 1 file changed, 1 insertion(+), 43 deletions(-) diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/utils.py 
b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/utils.py index 81f7c0d5c..fad7f272c 100644 --- a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/utils.py +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/utils.py @@ -430,46 +430,4 @@ def write_error_stats( hyp_count = corr + hyp_sub + ins print(f"{word} {corr} {tot_errs} {ref_count} {hyp_count}", file=f) - return float(tot_err_rate) - - -def get_dataset_statistics(dataset, save_filename): - speech_token_lengths = [] - text_lengths = [] - for item in tqdm(dataset): - if 'custom' not in item: - speech_token = item["code"] - text = item["text"] - else: - speech_token = item["custom"]["speech_token"] - text = item["supervisions"][0]["text"] - speech_token_lengths.append(len(speech_token)) - text_lengths.append(len(text)) - speech_token_length_array = np.array(speech_token_lengths) - text_length_array = np.array(text_lengths) - # 计算并存储统计指标 - def get_length_stats(lengths_array): - length_stats = [] - length_stats.append(["count", f"{len(lengths_array)}"]) # 总数 - length_stats.append(["mean", f"{np.mean(lengths_array):.1f}"]) - length_stats.append(["std", f"{np.std(lengths_array):.1f}"]) - length_stats.append(["min", f"{np.min(lengths_array):.1f}"]) - length_stats.append(["25%", f"{np.percentile(lengths_array, 25):.1f}"]) - length_stats.append(["50% (median)", f"{np.median(lengths_array):.1f}"]) # median 和 50% percentile 是一样的 - length_stats.append(["75%", f"{np.percentile(lengths_array, 75):.1f}"]) - length_stats.append(["99%", f"{np.percentile(lengths_array, 99):.1f}"]) - length_stats.append(["99.5%", f"{np.percentile(lengths_array, 99.5):.1f}"]) - length_stats.append(["99.9%", f"{np.percentile(lengths_array, 99.9):.1f}"]) - length_stats.append(["max", f"{np.max(lengths_array):.1f}"]) - return length_stats - speech_length_stats = get_length_stats(speech_token_length_array) - text_length_stats = get_length_stats(text_length_array) - with open(save_filename, "w") as f: - print("speech_tokens 长度统计指标:", file=f) - for stat_name, stat_value in speech_length_stats: - print(f"{stat_name:<15}: {stat_value}", file=f) - print("\ntext 长度统计指标:", file=f) - for stat_name, stat_value in text_length_stats: - print(f"{stat_name:<15}: {stat_value}", file=f) - - return speech_token_lengths, text_lengths + return float(tot_err_rate) \ No newline at end of file From 559f9e2deff33077461428d422d9f03c95988b01 Mon Sep 17 00:00:00 2001 From: root Date: Wed, 4 Jun 2025 10:02:42 +0000 Subject: [PATCH 57/57] fix repeat bos and pad id --- .../SPEECH2SPEECH/qwen_omni/train.py | 84 +++++++++++-------- 1 file changed, 49 insertions(+), 35 deletions(-) diff --git a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py index f1b25d3e6..5b5628f74 100755 --- a/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py +++ b/egs/speech_llm/SPEECH2SPEECH/qwen_omni/train.py @@ -68,7 +68,6 @@ from transformers import ( Qwen2Config, Qwen2ForCausalLM, ) - from utils import ( # filter_uneven_sized_batch, AttributeDict, MetricsTracker, @@ -306,8 +305,7 @@ def get_params() -> AttributeDict: def extract_text_and_speech_token( - batch: dict, - enable_speech_output: bool + batch: dict, enable_speech_output: bool ) -> Tuple[List[Dict[str, str]], Optional[List[Any]]]: """ Extracts messages and speech tokens from a batch based on the dataset format. @@ -342,28 +340,34 @@ def extract_text_and_speech_token( # The 'prompt_template' argument to the function seems unused if we determine it here. # For now, I will proceed assuming the internal logic dictates the template. 
# If the function argument `prompt_template` was meant to be the default, this logic would need adjustment. - current_prompt_template = "speech_qa" # Default value for prompt_template for the current item + current_prompt_template = ( + "speech_qa" # Default value for prompt_template for the current item + ) target = answers[i] - message_list_item = [] - + message_list_item = [] + custom_data = batch["supervisions"]["cut"][i].custom - if 'round' in custom_data: + if "round" in custom_data: # slam_omni format dataset # For 'round' type, the current interaction's user prompt will use current_prompt_template ("speech_qa") current_question_with_history = custom_data["question"] total_round = custom_data["round"] - history_context = current_question_with_history.rsplit(":", 1)[0].strip() + history_context = current_question_with_history.rsplit(":", 1)[ + 0 + ].strip() if total_round > 1: history_question_answer = history_context.split("USER:") - history_question_answer = [item for item in history_question_answer if item] + history_question_answer = [ + item for item in history_question_answer if item + ] for j in range(total_round - 1): question_answer = history_question_answer[j].split("ASSISTANT:") message_list_item += [ {"role": "user", "content": question_answer[0].strip()}, {"role": "assistant", "content": question_answer[1].strip()}, ] - elif 'continuation' in custom_data: + elif "continuation" in custom_data: # see https://huggingface.co/datasets/fixie-ai/librispeech_asr ASR_PROBABILITY = 0.3 if random.random() < ASR_PROBABILITY: @@ -382,6 +386,7 @@ def extract_text_and_speech_token( return messages, speech_tokens + def preprocess( messages, tokenizer: transformers.PreTrainedTokenizer, @@ -432,12 +437,11 @@ def preprocess( attention_mask = input_ids.ne(tokenizer.pad_token_id) return input_ids, attention_mask, target_ids + def process_batch_text_continuation(batch: dict): messages = [] transcripts = batch["supervisions"]["text"] - continuations = [ - cut.custom["continuation"] for cut in batch["supervisions"]["cut"] - ] + continuations = [cut.custom["continuation"] for cut in batch["supervisions"]["cut"]] for i in range(len(transcripts)): message = [ { @@ -449,6 +453,7 @@ def process_batch_text_continuation(batch: dict): messages.append(message) return messages + def preprocess_teacher( messages, tokenizer: transformers.PreTrainedTokenizer, @@ -827,27 +832,29 @@ def get_model(params): if not params.unfreeze_llm: for name, param in llm.named_parameters(): param.requires_grad = False - else: - if params.use_lora: - lora_config = LoraConfig( - r=64, - lora_alpha=16, - target_modules=[ - "q_proj", - "k_proj", - "v_proj", - "o_proj", - "up_proj", - "gate_proj", - "down_proj", - ], - lora_dropout=0.05, - task_type="CAUSAL_LM", - ) - llm = get_peft_model(llm, lora_config) - llm.print_trainable_parameters() + if params.use_lora: + lora_config = LoraConfig( + r=64, + lora_alpha=16, + target_modules=[ + "q_proj", + "k_proj", + "v_proj", + "o_proj", + "up_proj", + "gate_proj", + "down_proj", + ], + lora_dropout=0.05, + task_type="CAUSAL_LM", + ) + llm = get_peft_model(llm, lora_config) + llm.print_trainable_parameters() llm.config.pad_token_id = tokenizer.pad_token_id + llm.config.pad_token_id = tokenizer.convert_tokens_to_ids("<|endoftext|>") + llm.config.bos_token_id = tokenizer.convert_tokens_to_ids("<|im_start|>") + llm.config.eos_token_id = tokenizer.convert_tokens_to_ids("<|im_end|>") llm.config.default_speech_token_id = tokenizer.convert_tokens_to_ids( DEFAULT_SPEECH_TOKEN ) @@ -884,7 +891,9 
@@ def get_model(params): elif params.speech_tokenizer_type == "cosyvoice1": codec_vocab_size = 4096 + 4 else: - raise ValueError(f"Unknown speech tokenizer type: {params.speech_tokenizer_type}") + raise ValueError( + f"Unknown speech tokenizer type: {params.speech_tokenizer_type}" + ) config = Qwen2Config( vocab_size=codec_vocab_size, @@ -921,10 +930,14 @@ def get_model(params): if params.pretrained_model_path or params.last_stage_model_path: if params.pretrained_model_path is None: checkpoint = torch.load(params.last_stage_model_path, map_location="cpu") - missing_keys, unexpected_keys = model.load_state_dict(checkpoint, strict=False) + missing_keys, unexpected_keys = model.load_state_dict( + checkpoint, strict=False + ) else: checkpoint = torch.load(params.pretrained_model_path, map_location="cpu") - missing_keys, unexpected_keys = model.load_state_dict(checkpoint, strict=False) + missing_keys, unexpected_keys = model.load_state_dict( + checkpoint, strict=False + ) # set params.batch_idx_train according to the checkpoint name if "checkpoint-" in params.pretrained_model_path: params.batch_idx_train = int( @@ -940,6 +953,7 @@ def get_model(params): return model, tokenizer + def run(rank, world_size, args): """ Args: