diff --git a/README.md b/README.md
index 0e550ffb1..498f7e3b4 100644
--- a/README.md
+++ b/README.md
@@ -383,3 +383,7 @@ Please see: [![Open In Colab](https://colab.research.google.com/assets/colab-bad
 [vctk]: egs/vctk/TTS
 [ljspeech]: egs/ljspeech/TTS
 [libritts_tts]: egs/libritts/TTS
+
+## Acknowledgements
+
+Some contributors to this project were supported by Xiaomi Corporation. Others were supported by National Science Foundation CCRI award 2120435. This is not an exhaustive list of sources of support.
diff --git a/egs/librispeech/ASR/zipformer/finetune.py b/egs/librispeech/ASR/zipformer/finetune.py
index 2ff631914..2c869a57a 100755
--- a/egs/librispeech/ASR/zipformer/finetune.py
+++ b/egs/librispeech/ASR/zipformer/finetune.py
@@ -140,8 +140,8 @@ def add_finetune_arguments(parser: argparse.ArgumentParser):
         type=str2bool,
         default=False,
         help="""
-        Whether to adapt. If true, we will mix 5% of the new data
-        with 95% of the original data to fine-tune. This is useful
+        Whether to adapt. If true, we will mix 5%% of the new data
+        with 95%% of the original data to fine-tune. This is useful
         if you want to maintain the performance on the original domain
         """,
     )
@@ -1134,7 +1134,7 @@ def train_one_epoch(
             logging.info(
                 f"Epoch {params.cur_epoch}, "
                 f"batch {batch_idx}, loss[{loss_info}], "
                 f"tot_loss[{tot_loss}], batch size: {batch_size}, "
-                f"lr: {cur_lr:.2e}, "
+                f"lr: {cur_lr: .2e}, "
                 + (f"grad_scale: {scaler._scale.item()}" if params.use_fp16 else "")
             )
diff --git a/icefall/utils.py b/icefall/utils.py
index aab479e56..ffb926566 100644
--- a/icefall/utils.py
+++ b/icefall/utils.py
@@ -186,7 +186,7 @@ class AttributeDict(dict):
         tmp = {}
         for k, v in self.items():
             # PosixPath is ont JSON serializable
-            if isinstance(v, pathlib.Path) or isinstance(v, torch.device):
+            if isinstance(v, (pathlib.Path, torch.device, torch.dtype)):
                 v = str(v)
             tmp[k] = v
         return json.dumps(tmp, indent=indent, sort_keys=True)
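
Note on the finetune.py help-string change: argparse applies %-style formatting to help text when rendering --help, so a literal percent sign must be escaped as %%; with a single %, rendering the help raises a ValueError. A minimal, self-contained sketch of the behavior (the --use-mixing flag name is made up for illustration and is not the flag the patch edits):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument(
    "--use-mixing",  # hypothetical flag; only the help text matters here
    action="store_true",
    help="mix 5%% of the new data with 95%% of the original data",
)
# format_help() %-interpolates the help string, so "5%%" renders as "5%".
# With a bare "%" in the help text, this call instead raises
# "ValueError: unsupported format character".
print(parser.format_help())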
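
Note on the icefall/utils.py change: json.dumps cannot serialize pathlib.Path, torch.device, or torch.dtype values, so AttributeDict stringifies them before dumping, and passing a tuple to isinstance checks all three types in one call. A small sketch of the failure mode and the guard (the params dict below is invented for illustration):

import json
import pathlib

import torch

params = {
    "exp_dir": pathlib.Path("zipformer/exp"),  # made-up values
    "device": torch.device("cpu"),
    "dtype": torch.float16,
}

# json.dumps(params) would raise:
#   TypeError: Object of type PosixPath is not JSON serializable
# Stringify the offending types first, as the patched code does:
safe = {
    k: str(v) if isinstance(v, (pathlib.Path, torch.device, torch.dtype)) else v
    for k, v in params.items()
}
print(json.dumps(safe, indent=2, sort_keys=True))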