diff --git a/.github/workflows/run-gigaspeech-2022-05-13.yml b/.github/workflows/run-gigaspeech-2022-05-13.yml
index d250b72b0..dc33751d3 100644
--- a/.github/workflows/run-gigaspeech-2022-05-13.yml
+++ b/.github/workflows/run-gigaspeech-2022-05-13.yml
@@ -59,6 +59,8 @@ jobs:
- name: Install Python dependencies
run: |
grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install
+ pip uninstall -y protobuf
+ pip install --no-binary protobuf protobuf
- name: Cache kaldifeat
id: my-cache
diff --git a/.github/workflows/run-librispeech-2022-03-12.yml b/.github/workflows/run-librispeech-2022-03-12.yml
index b18b84378..291f2bc71 100644
--- a/.github/workflows/run-librispeech-2022-03-12.yml
+++ b/.github/workflows/run-librispeech-2022-03-12.yml
@@ -59,6 +59,8 @@ jobs:
- name: Install Python dependencies
run: |
grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install
+ pip uninstall -y protobuf
+ pip install --no-binary protobuf protobuf
- name: Cache kaldifeat
id: my-cache
@@ -99,7 +101,7 @@ jobs:
with:
path: |
~/tmp/fbank-libri
- key: cache-libri-fbank-test-clean-and-test-other
+ key: cache-libri-fbank-test-clean-and-test-other-v2
- name: Compute fbank for LibriSpeech test-clean and test-other
if: steps.libri-test-clean-and-test-other-fbank.outputs.cache-hit != 'true'
diff --git a/.github/workflows/run-librispeech-2022-04-29.yml b/.github/workflows/run-librispeech-2022-04-29.yml
index 6c8188b48..b04718f86 100644
--- a/.github/workflows/run-librispeech-2022-04-29.yml
+++ b/.github/workflows/run-librispeech-2022-04-29.yml
@@ -59,6 +59,8 @@ jobs:
- name: Install Python dependencies
run: |
grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install
+ pip uninstall -y protobuf
+ pip install --no-binary protobuf protobuf
- name: Cache kaldifeat
id: my-cache
@@ -99,7 +101,7 @@ jobs:
with:
path: |
~/tmp/fbank-libri
- key: cache-libri-fbank-test-clean-and-test-other
+ key: cache-libri-fbank-test-clean-and-test-other-v2
- name: Compute fbank for LibriSpeech test-clean and test-other
if: steps.libri-test-clean-and-test-other-fbank.outputs.cache-hit != 'true'
diff --git a/.github/workflows/run-librispeech-2022-05-13.yml b/.github/workflows/run-librispeech-2022-05-13.yml
index 2290e18d4..bb3d74e55 100644
--- a/.github/workflows/run-librispeech-2022-05-13.yml
+++ b/.github/workflows/run-librispeech-2022-05-13.yml
@@ -59,6 +59,8 @@ jobs:
- name: Install Python dependencies
run: |
grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install
+ pip uninstall -y protobuf
+ pip install --no-binary protobuf protobuf
- name: Cache kaldifeat
id: my-cache
@@ -99,7 +101,7 @@ jobs:
with:
path: |
~/tmp/fbank-libri
- key: cache-libri-fbank-test-clean-and-test-other
+ key: cache-libri-fbank-test-clean-and-test-other-v2
- name: Compute fbank for LibriSpeech test-clean and test-other
if: steps.libri-test-clean-and-test-other-fbank.outputs.cache-hit != 'true'
diff --git a/.github/workflows/run-librispeech-pruned-transducer-stateless3-2022-05-13.yml b/.github/workflows/run-librispeech-pruned-transducer-stateless3-2022-05-13.yml
index 512f1b334..47976fc2c 100644
--- a/.github/workflows/run-librispeech-pruned-transducer-stateless3-2022-05-13.yml
+++ b/.github/workflows/run-librispeech-pruned-transducer-stateless3-2022-05-13.yml
@@ -59,6 +59,8 @@ jobs:
- name: Install Python dependencies
run: |
grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install
+ pip uninstall -y protobuf
+ pip install --no-binary protobuf protobuf
- name: Cache kaldifeat
id: my-cache
@@ -99,7 +101,7 @@ jobs:
with:
path: |
~/tmp/fbank-libri
- key: cache-libri-fbank-test-clean-and-test-other
+ key: cache-libri-fbank-test-clean-and-test-other-v2
- name: Compute fbank for LibriSpeech test-clean and test-other
if: steps.libri-test-clean-and-test-other-fbank.outputs.cache-hit != 'true'
diff --git a/.github/workflows/run-librispeech-transducer-stateless2-2022-04-19.yml b/.github/workflows/run-librispeech-transducer-stateless2-2022-04-19.yml
index 3864f4aa3..e05b04bee 100644
--- a/.github/workflows/run-librispeech-transducer-stateless2-2022-04-19.yml
+++ b/.github/workflows/run-librispeech-transducer-stateless2-2022-04-19.yml
@@ -59,6 +59,8 @@ jobs:
- name: Install Python dependencies
run: |
grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install
+ pip uninstall -y protobuf
+ pip install --no-binary protobuf protobuf
- name: Cache kaldifeat
id: my-cache
@@ -99,7 +101,7 @@ jobs:
with:
path: |
~/tmp/fbank-libri
- key: cache-libri-fbank-test-clean-and-test-other
+ key: cache-libri-fbank-test-clean-and-test-other-v2
- name: Compute fbank for LibriSpeech test-clean and test-other
if: steps.libri-test-clean-and-test-other-fbank.outputs.cache-hit != 'true'
diff --git a/.github/workflows/run-pretrained-conformer-ctc.yml b/.github/workflows/run-pretrained-conformer-ctc.yml
index 69f15060b..f4c6bf507 100644
--- a/.github/workflows/run-pretrained-conformer-ctc.yml
+++ b/.github/workflows/run-pretrained-conformer-ctc.yml
@@ -49,6 +49,8 @@ jobs:
- name: Install Python dependencies
run: |
grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install
+ pip uninstall -y protobuf
+ pip install --no-binary protobuf protobuf
- name: Cache kaldifeat
id: my-cache
diff --git a/.github/workflows/run-pretrained-transducer-stateless-librispeech-100h.yml b/.github/workflows/run-pretrained-transducer-stateless-librispeech-100h.yml
index f77d9e658..348a68095 100644
--- a/.github/workflows/run-pretrained-transducer-stateless-librispeech-100h.yml
+++ b/.github/workflows/run-pretrained-transducer-stateless-librispeech-100h.yml
@@ -58,6 +58,8 @@ jobs:
- name: Install Python dependencies
run: |
grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install
+ pip uninstall -y protobuf
+ pip install --no-binary protobuf protobuf
- name: Cache kaldifeat
id: my-cache
@@ -98,7 +100,7 @@ jobs:
with:
path: |
~/tmp/fbank-libri
- key: cache-libri-fbank-test-clean-and-test-other
+ key: cache-libri-fbank-test-clean-and-test-other-v2
- name: Compute fbank for LibriSpeech test-clean and test-other
if: steps.libri-test-clean-and-test-other-fbank.outputs.cache-hit != 'true'
diff --git a/.github/workflows/run-pretrained-transducer-stateless-librispeech-multi-datasets.yml b/.github/workflows/run-pretrained-transducer-stateless-librispeech-multi-datasets.yml
index ddfa62073..d1369c2b1 100644
--- a/.github/workflows/run-pretrained-transducer-stateless-librispeech-multi-datasets.yml
+++ b/.github/workflows/run-pretrained-transducer-stateless-librispeech-multi-datasets.yml
@@ -58,6 +58,8 @@ jobs:
- name: Install Python dependencies
run: |
grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install
+ pip uninstall -y protobuf
+ pip install --no-binary protobuf protobuf
- name: Cache kaldifeat
id: my-cache
@@ -98,7 +100,7 @@ jobs:
with:
path: |
~/tmp/fbank-libri
- key: cache-libri-fbank-test-clean-and-test-other
+ key: cache-libri-fbank-test-clean-and-test-other-v2
- name: Compute fbank for LibriSpeech test-clean and test-other
if: steps.libri-test-clean-and-test-other-fbank.outputs.cache-hit != 'true'
diff --git a/.github/workflows/run-pretrained-transducer-stateless-modified-2-aishell.yml b/.github/workflows/run-pretrained-transducer-stateless-modified-2-aishell.yml
index 659dbc9da..9d095a0aa 100644
--- a/.github/workflows/run-pretrained-transducer-stateless-modified-2-aishell.yml
+++ b/.github/workflows/run-pretrained-transducer-stateless-modified-2-aishell.yml
@@ -49,6 +49,8 @@ jobs:
- name: Install Python dependencies
run: |
grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install
+ pip uninstall -y protobuf
+ pip install --no-binary protobuf protobuf
- name: Cache kaldifeat
id: my-cache
diff --git a/.github/workflows/run-pretrained-transducer-stateless-modified-aishell.yml b/.github/workflows/run-pretrained-transducer-stateless-modified-aishell.yml
index f4e56bd6c..868fe6fbe 100644
--- a/.github/workflows/run-pretrained-transducer-stateless-modified-aishell.yml
+++ b/.github/workflows/run-pretrained-transducer-stateless-modified-aishell.yml
@@ -49,6 +49,8 @@ jobs:
- name: Install Python dependencies
run: |
grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install
+ pip uninstall -y protobuf
+ pip install --no-binary protobuf protobuf
- name: Cache kaldifeat
id: my-cache
diff --git a/.github/workflows/run-pretrained-transducer-stateless.yml b/.github/workflows/run-pretrained-transducer-stateless.yml
index cdea78a88..78c1ca059 100644
--- a/.github/workflows/run-pretrained-transducer-stateless.yml
+++ b/.github/workflows/run-pretrained-transducer-stateless.yml
@@ -58,6 +58,8 @@ jobs:
- name: Install Python dependencies
run: |
grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install
+ pip uninstall -y protobuf
+ pip install --no-binary protobuf protobuf
- name: Cache kaldifeat
id: my-cache
@@ -98,7 +100,7 @@ jobs:
with:
path: |
~/tmp/fbank-libri
- key: cache-libri-fbank-test-clean-and-test-other
+ key: cache-libri-fbank-test-clean-and-test-other-v2
- name: Compute fbank for LibriSpeech test-clean and test-other
if: steps.libri-test-clean-and-test-other-fbank.outputs.cache-hit != 'true'
diff --git a/.github/workflows/run-pretrained-transducer.yml b/.github/workflows/run-pretrained-transducer.yml
index f1b051047..959e57278 100644
--- a/.github/workflows/run-pretrained-transducer.yml
+++ b/.github/workflows/run-pretrained-transducer.yml
@@ -49,6 +49,8 @@ jobs:
- name: Install Python dependencies
run: |
grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install
+ pip uninstall -y protobuf
+ pip install --no-binary protobuf protobuf
- name: Cache kaldifeat
id: my-cache
diff --git a/.github/workflows/run-yesno-recipe.yml b/.github/workflows/run-yesno-recipe.yml
index 38c36a7c6..ce77c47df 100644
--- a/.github/workflows/run-yesno-recipe.yml
+++ b/.github/workflows/run-yesno-recipe.yml
@@ -62,6 +62,8 @@ jobs:
- name: Install Python dependencies
run: |
grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install
+ pip uninstall -y protobuf
+ pip install --no-binary protobuf protobuf
- name: Run yesno recipe
shell: bash
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index fce14c460..f9dab7afe 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -76,6 +76,9 @@ jobs:
pip install k2==${{ matrix.k2-version }}+cpu.torch${{ matrix.torch }} -f https://k2-fsa.org/nightly/
pip install git+https://github.com/lhotse-speech/lhotse
# icefall requirements
+ pip uninstall -y protobuf
+ pip install --no-binary protobuf protobuf
+
pip install -r requirements.txt
- name: Install graphviz
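The workflow edits above force a from-source protobuf build (`--no-binary protobuf`), which installs the pure-Python implementation instead of a prebuilt wheel's compiled extension; rebuilding this way is a common workaround when a wheel's binary runtime rejects another package's generated code. A minimal sketch for checking which implementation is active, relying on protobuf's internal `api_implementation` module (an internal API, so its location may change across protobuf releases):

```
# a minimal check, using protobuf internals (not a public API)
from google.protobuf.internal import api_implementation

# expected 'python' after a --no-binary build; 'cpp' or 'upb' for wheels
print(api_implementation.Type())
```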
diff --git a/README.md b/README.md
index d88ed7aac..2096681ea 100644
--- a/README.md
+++ b/README.md
@@ -2,6 +2,14 @@
+## Introduction
+
+icefall contains ASR recipes for various datasets
+using <https://github.com/lhotse-speech/lhotse>.
+
+You can use <https://github.com/k2-fsa/sherpa> to deploy models
+trained with icefall.
+
## Installation
Please refer to
@@ -12,7 +20,7 @@ for installation.
Please refer to
for more information.
-We provide 6 recipes at present:
+We provide the following recipes:
- [yesno][yesno]
- [LibriSpeech][librispeech]
@@ -22,6 +30,7 @@ We provide 6 recipes at present:
- [GigaSpeech][gigaspeech]
- [Aidatatang_200zh][aidatatang_200zh]
- [WenetSpeech][wenetspeech]
+ - [Alimeeting][alimeeting]
### yesno
@@ -126,7 +135,7 @@ The best CER we currently have is:
| CER | 4.26 |
-We provide a Colab notebook to run a pre-trained conformer CTC model: [](https://colab.research.google.com/drive/1WnG17io5HEZ0Gn_cnh_VzK5QYOoiiklC?usp=sharing)
+We provide a Colab notebook to run a pre-trained conformer CTC model: [
#### Transducer Stateless Model
@@ -247,6 +256,20 @@ We provide one model for this recipe: [Pruned stateless RNN-T: Conformer encoder
We provide a Colab notebook to run a pre-trained Pruned Transducer Stateless model: [](https://colab.research.google.com/drive/1EV4e1CHa1GZgEF-bZgizqI9RyFFehIiN?usp=sharing)
+### Alimeeting
+
+We provide one model for this recipe: [Pruned stateless RNN-T: Conformer encoder + Embedding decoder + k2 pruned RNN-T loss][Alimeeting_pruned_transducer_stateless2].
+
+#### Pruned stateless RNN-T: Conformer encoder + Embedding decoder + k2 pruned RNN-T loss (trained with far subset)
+
+| | Eval | Test |
+|----------------------|--------|----------|
+| greedy search | 31.77 | 34.66 |
+| fast beam search | 31.39 | 34.25 |
+| modified beam search | 30.38 | 33.02 |
+
+We provide a Colab notebook to run a pre-trained Pruned Transducer Stateless model: [](https://colab.research.google.com/drive/1tKr3f0mL17uO_ljdHGKtR7HOmthYHwJG?usp=sharing)
+
## Deployment with C++
Once you have trained a model in icefall, you may want to deploy it with C++,
@@ -274,6 +297,7 @@ Please see: [
diff --git a/egs/aidatatang_200zh/ASR/local/compute_fbank_aidatatang_200zh.py b/egs/aidatatang_200zh/ASR/local/compute_fbank_aidatatang_200zh.py
index 3c4cfc7f8..9850cf251 100755
--- a/egs/aidatatang_200zh/ASR/local/compute_fbank_aidatatang_200zh.py
+++ b/egs/aidatatang_200zh/ASR/local/compute_fbank_aidatatang_200zh.py
@@ -29,7 +29,7 @@ import os
from pathlib import Path
import torch
-from lhotse import CutSet, Fbank, FbankConfig, LilcomHdf5Writer
+from lhotse import CutSet, Fbank, FbankConfig, LilcomChunkyWriter
from lhotse.recipes.utils import read_manifests_if_cached
from icefall.utils import get_executor
@@ -43,7 +43,7 @@ torch.set_num_interop_threads(1)
def compute_fbank_aidatatang_200zh(num_mel_bins: int = 80):
- src_dir = Path("data/manifests/aidatatang_200zh")
+ src_dir = Path("data/manifests")
output_dir = Path("data/fbank")
num_jobs = min(15, os.cpu_count())
@@ -52,8 +52,13 @@ def compute_fbank_aidatatang_200zh(num_mel_bins: int = 80):
"dev",
"test",
)
+ prefix = "aidatatang"
+ suffix = "jsonl.gz"
manifests = read_manifests_if_cached(
- dataset_parts=dataset_parts, output_dir=src_dir
+ dataset_parts=dataset_parts,
+ output_dir=src_dir,
+ prefix=prefix,
+ suffix=suffix,
)
assert manifests is not None
@@ -61,10 +66,14 @@ def compute_fbank_aidatatang_200zh(num_mel_bins: int = 80):
with get_executor() as ex: # Initialize the executor only once.
for partition, m in manifests.items():
- if (output_dir / f"cuts_{partition}.json.gz").is_file():
+ if (output_dir / f"{prefix}_cuts_{partition}.{suffix}").is_file():
logging.info(f"{partition} already exists - skipping.")
continue
logging.info(f"Processing {partition}")
+
+ for sup in m["supervisions"]:
+ sup.custom = {"origin": "aidatatang_200zh"}
+
cut_set = CutSet.from_manifests(
recordings=m["recordings"],
supervisions=m["supervisions"],
@@ -77,13 +86,14 @@ def compute_fbank_aidatatang_200zh(num_mel_bins: int = 80):
)
cut_set = cut_set.compute_and_store_features(
extractor=extractor,
- storage_path=f"{output_dir}/feats_{partition}",
+ storage_path=f"{output_dir}/{prefix}_feats_{partition}",
# when an executor is specified, make more partitions
num_jobs=num_jobs if ex is None else 80,
executor=ex,
- storage_type=LilcomHdf5Writer,
+ storage_type=LilcomChunkyWriter,
)
- cut_set.to_json(output_dir / f"cuts_{partition}.json.gz")
+
+ cut_set.to_file(output_dir / f"{prefix}_cuts_{partition}.{suffix}")
def get_args():
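For context, the renames above follow lhotse's cached-manifest naming scheme: `read_manifests_if_cached` looks for files named with the given prefix and suffix, such as `{prefix}_recordings_{part}.{suffix}`, under `output_dir`. A minimal sketch of the lookup, assuming manifests prepared by `lhotse prepare aidatatang-200zh` as in the shell script below:

```
from lhotse.recipes.utils import read_manifests_if_cached

# expects e.g. data/manifests/aidatatang_recordings_train.jsonl.gz
manifests = read_manifests_if_cached(
    dataset_parts=("train", "dev", "test"),
    output_dir="data/manifests",
    prefix="aidatatang",
    suffix="jsonl.gz",
)
recordings = manifests["train"]["recordings"]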
diff --git a/egs/aidatatang_200zh/ASR/local/display_manifest_statistics.py b/egs/aidatatang_200zh/ASR/local/display_manifest_statistics.py
index 2352785ac..d66e5cfca 100644
--- a/egs/aidatatang_200zh/ASR/local/display_manifest_statistics.py
+++ b/egs/aidatatang_200zh/ASR/local/display_manifest_statistics.py
@@ -25,19 +25,19 @@ for usage.
"""
-from lhotse import load_manifest
+from lhotse import load_manifest_lazy
def main():
paths = [
- "./data/fbank/cuts_train.json.gz",
- "./data/fbank/cuts_dev.json.gz",
- "./data/fbank/cuts_test.json.gz",
+ "./data/fbank/aidatatang_cuts_train.jsonl.gz",
+ "./data/fbank/aidatatang_cuts_dev.jsonl.gz",
+ "./data/fbank/aidatatang_cuts_test.jsonl.gz",
]
for path in paths:
print(f"Starting display the statistics for {path}")
- cuts = load_manifest(path)
+ cuts = load_manifest_lazy(path)
cuts.describe()
@@ -45,7 +45,7 @@ if __name__ == "__main__":
main()
"""
-Starting display the statistics for ./data/fbank/cuts_train.json.gz
+Starting display the statistics for ./data/fbank/aidatatang_cuts_train.jsonl.gz
Cuts count: 494715
Total duration (hours): 422.6
Speech duration (hours): 422.6 (100.0%)
@@ -61,7 +61,7 @@ min 1.0
99.5% 8.0
99.9% 9.5
max 18.1
-Starting display the statistics for ./data/fbank/cuts_dev.json.gz
+Starting display the statistics for ./data/fbank/aidatatang_cuts_dev.jsonl.gz
Cuts count: 24216
Total duration (hours): 20.2
Speech duration (hours): 20.2 (100.0%)
@@ -77,7 +77,7 @@ min 1.2
99.5% 7.3
99.9% 8.8
max 11.3
-Starting display the statistics for ./data/fbank/cuts_test.json.gz
+Starting display the statistics for ./data/fbank/aidatatang_cuts_test.jsonl.gz
Cuts count: 48144
Total duration (hours): 40.2
Speech duration (hours): 40.2 (100.0%)
diff --git a/egs/aidatatang_200zh/ASR/pruned_transducer_stateless2/asr_datamodule.py b/egs/aidatatang_200zh/ASR/pruned_transducer_stateless2/asr_datamodule.py
index 447a011cb..6a5b57e24 100644
--- a/egs/aidatatang_200zh/ASR/pruned_transducer_stateless2/asr_datamodule.py
+++ b/egs/aidatatang_200zh/ASR/pruned_transducer_stateless2/asr_datamodule.py
@@ -28,10 +28,10 @@ from lhotse import (
Fbank,
FbankConfig,
load_manifest,
+ load_manifest_lazy,
set_caching_enabled,
)
from lhotse.dataset import (
- BucketingSampler,
CutConcatenate,
CutMix,
DynamicBucketingSampler,
@@ -206,7 +206,7 @@ class Aidatatang_200zhAsrDataModule:
"""
logging.info("About to get Musan cuts")
cuts_musan = load_manifest(
- self.args.manifest_dir / "cuts_musan.json.gz"
+ self.args.manifest_dir / "musan_cuts.jsonl.gz"
)
transforms = []
@@ -290,13 +290,12 @@ class Aidatatang_200zhAsrDataModule:
)
if self.args.bucketing_sampler:
- logging.info("Using BucketingSampler.")
- train_sampler = BucketingSampler(
+ logging.info("Using DynamicBucketingSampler.")
+ train_sampler = DynamicBucketingSampler(
cuts_train,
max_duration=self.args.max_duration,
shuffle=self.args.shuffle,
num_buckets=self.args.num_buckets,
- bucket_method="equal_duration",
drop_last=True,
)
else:
@@ -402,14 +401,20 @@ class Aidatatang_200zhAsrDataModule:
@lru_cache()
def train_cuts(self) -> CutSet:
logging.info("About to get train cuts")
- return load_manifest(self.args.manifest_dir / "cuts_train.json.gz")
+ return load_manifest_lazy(
+ self.args.manifest_dir / "aidatatang_cuts_train.jsonl.gz"
+ )
@lru_cache()
def valid_cuts(self) -> CutSet:
logging.info("About to get dev cuts")
- return load_manifest(self.args.manifest_dir / "cuts_dev.json.gz")
+ return load_manifest_lazy(
+ self.args.manifest_dir / "aidatatang_cuts_dev.jsonl.gz"
+ )
@lru_cache()
def test_cuts(self) -> List[CutSet]:
logging.info("About to get test cuts")
- return load_manifest(self.args.manifest_dir / "cuts_test.json.gz")
+ return load_manifest_lazy(
+ self.args.manifest_dir / "aidatatang_cuts_test.jsonl.gz"
+ )
diff --git a/egs/aishell/ASR/conformer_ctc/train.py b/egs/aishell/ASR/conformer_ctc/train.py
index 369ad310f..a228cc1fe 100755
--- a/egs/aishell/ASR/conformer_ctc/train.py
+++ b/egs/aishell/ASR/conformer_ctc/train.py
@@ -195,9 +195,9 @@ def get_params() -> AttributeDict:
"best_train_epoch": -1,
"best_valid_epoch": -1,
"batch_idx_train": 0,
- "log_interval": 10,
+ "log_interval": 50,
"reset_interval": 200,
- "valid_interval": 3000,
+ "valid_interval": 2000,
# parameters for k2.ctc_loss
"beam_size": 10,
"reduction": "sum",
diff --git a/egs/aishell/ASR/local/compute_fbank_aidatatang_200zh.py b/egs/aishell/ASR/local/compute_fbank_aidatatang_200zh.py
new file mode 100755
index 000000000..8cdfad71f
--- /dev/null
+++ b/egs/aishell/ASR/local/compute_fbank_aidatatang_200zh.py
@@ -0,0 +1,119 @@
+#!/usr/bin/env python3
+# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang)
+#
+# See ../../../../LICENSE for clarification regarding multiple authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""
+This file computes fbank features of the aidatatang_200zh dataset.
+It looks for manifests in the directory data/manifests.
+
+The generated fbank features are saved in data/fbank.
+"""
+
+import argparse
+import logging
+import os
+from pathlib import Path
+
+import torch
+from lhotse import CutSet, Fbank, FbankConfig, LilcomChunkyWriter
+from lhotse.recipes.utils import read_manifests_if_cached
+
+from icefall.utils import get_executor
+
+# Torch's multithreaded behavior needs to be disabled or
+# it wastes a lot of CPU and slows things down.
+# Do this outside of main() in case it needs to take effect
+# even when we are not invoking the main (e.g. when spawning subprocesses).
+torch.set_num_threads(1)
+torch.set_num_interop_threads(1)
+
+
+def compute_fbank_aidatatang_200zh(num_mel_bins: int = 80):
+ src_dir = Path("data/manifests")
+ output_dir = Path("data/fbank")
+ num_jobs = min(15, os.cpu_count())
+
+ dataset_parts = (
+ "train",
+ "test",
+ "dev",
+ )
+ prefix = "aidatatang"
+ suffix = "jsonl.gz"
+ manifests = read_manifests_if_cached(
+ dataset_parts=dataset_parts,
+ output_dir=src_dir,
+ prefix=prefix,
+ suffix=suffix,
+ )
+ assert manifests is not None
+
+ extractor = Fbank(FbankConfig(num_mel_bins=num_mel_bins))
+
+ with get_executor() as ex: # Initialize the executor only once.
+ for partition, m in manifests.items():
+ if (output_dir / f"{prefix}_cuts_{partition}.{suffix}").is_file():
+ logging.info(f"{partition} already exists - skipping.")
+ continue
+ logging.info(f"Processing {partition}")
+
+ for sup in m["supervisions"]:
+ sup.custom = {"origin": "aidatatang_200zh"}
+
+ cut_set = CutSet.from_manifests(
+ recordings=m["recordings"],
+ supervisions=m["supervisions"],
+ )
+ if "train" in partition:
+ cut_set = (
+ cut_set
+ + cut_set.perturb_speed(0.9)
+ + cut_set.perturb_speed(1.1)
+ )
+ cut_set = cut_set.compute_and_store_features(
+ extractor=extractor,
+ storage_path=f"{output_dir}/{prefix}_feats_{partition}",
+ # when an executor is specified, make more partitions
+ num_jobs=num_jobs if ex is None else 80,
+ executor=ex,
+ storage_type=LilcomChunkyWriter,
+ )
+
+ cut_set.to_file(output_dir / f"{prefix}_cuts_{partition}.{suffix}")
+
+
+def get_args():
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--num-mel-bins",
+ type=int,
+ default=80,
+ help="""The number of mel bins for Fbank""",
+ )
+
+ return parser.parse_args()
+
+
+if __name__ == "__main__":
+ formatter = (
+ "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s"
+ )
+
+ logging.basicConfig(format=formatter, level=logging.INFO)
+
+ args = get_args()
+ compute_fbank_aidatatang_200zh(num_mel_bins=args.num_mel_bins)
diff --git a/egs/aishell/ASR/local/compute_fbank_aishell.py b/egs/aishell/ASR/local/compute_fbank_aishell.py
index 70dee81d8..e27e35ec5 100755
--- a/egs/aishell/ASR/local/compute_fbank_aishell.py
+++ b/egs/aishell/ASR/local/compute_fbank_aishell.py
@@ -29,7 +29,7 @@ import os
from pathlib import Path
import torch
-from lhotse import CutSet, Fbank, FbankConfig, LilcomHdf5Writer
+from lhotse import CutSet, Fbank, FbankConfig, LilcomChunkyWriter
from lhotse.recipes.utils import read_manifests_if_cached
from icefall.utils import get_executor
@@ -52,8 +52,13 @@ def compute_fbank_aishell(num_mel_bins: int = 80):
"dev",
"test",
)
+ prefix = "aishell"
+ suffix = "jsonl.gz"
manifests = read_manifests_if_cached(
- prefix="aishell", dataset_parts=dataset_parts, output_dir=src_dir
+ dataset_parts=dataset_parts,
+ output_dir=src_dir,
+ prefix=prefix,
+ suffix=suffix,
)
assert manifests is not None
@@ -61,7 +66,7 @@ def compute_fbank_aishell(num_mel_bins: int = 80):
with get_executor() as ex: # Initialize the executor only once.
for partition, m in manifests.items():
- if (output_dir / f"cuts_{partition}.json.gz").is_file():
+ if (output_dir / f"{prefix}_cuts_{partition}.{suffix}").is_file():
logging.info(f"{partition} already exists - skipping.")
continue
logging.info(f"Processing {partition}")
@@ -77,13 +82,13 @@ def compute_fbank_aishell(num_mel_bins: int = 80):
)
cut_set = cut_set.compute_and_store_features(
extractor=extractor,
- storage_path=f"{output_dir}/feats_{partition}",
+ storage_path=f"{output_dir}/{prefix}_feats_{partition}",
# when an executor is specified, make more partitions
num_jobs=num_jobs if ex is None else 80,
executor=ex,
- storage_type=LilcomHdf5Writer,
+ storage_type=LilcomChunkyWriter,
)
- cut_set.to_json(output_dir / f"cuts_{partition}.json.gz")
+ cut_set.to_file(output_dir / f"{prefix}_cuts_{partition}.{suffix}")
def get_args():
diff --git a/egs/aishell/ASR/local/display_manifest_statistics.py b/egs/aishell/ASR/local/display_manifest_statistics.py
index 0ae731a1d..c478f7331 100755
--- a/egs/aishell/ASR/local/display_manifest_statistics.py
+++ b/egs/aishell/ASR/local/display_manifest_statistics.py
@@ -25,18 +25,18 @@ for usage.
"""
-from lhotse import load_manifest
+from lhotse import load_manifest_lazy
def main():
- # path = "./data/fbank/cuts_train.json.gz"
- # path = "./data/fbank/cuts_test.json.gz"
- # path = "./data/fbank/cuts_dev.json.gz"
- # path = "./data/fbank/aidatatang_200zh/cuts_train_raw.jsonl.gz"
- # path = "./data/fbank/aidatatang_200zh/cuts_test_raw.jsonl.gz"
- path = "./data/fbank/aidatatang_200zh/cuts_dev_raw.jsonl.gz"
+ # path = "./data/fbank/aishell_cuts_train.jsonl.gz"
+ # path = "./data/fbank/aishell_cuts_test.jsonl.gz"
+ path = "./data/fbank/aishell_cuts_dev.jsonl.gz"
+ # path = "./data/fbank/aidatatang_cuts_train.jsonl.gz"
+ # path = "./data/fbank/aidatatang_cuts_test.jsonl.gz"
+ # path = "./data/fbank/aidatatang_cuts_dev.jsonl.gz"
- cuts = load_manifest(path)
+ cuts = load_manifest_lazy(path)
cuts.describe()
diff --git a/egs/aishell/ASR/local/process_aidatatang_200zh.py b/egs/aishell/ASR/local/process_aidatatang_200zh.py
deleted file mode 100755
index ac2b86927..000000000
--- a/egs/aishell/ASR/local/process_aidatatang_200zh.py
+++ /dev/null
@@ -1,71 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2022 Xiaomi Corp. (Fangjun Kuang)
-#
-# See ../../../../LICENSE for clarification regarding multiple authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-from pathlib import Path
-
-from lhotse import CutSet
-from lhotse.recipes.utils import read_manifests_if_cached
-
-
-def preprocess_aidatatang_200zh():
- src_dir = Path("data/manifests/aidatatang_200zh")
- output_dir = Path("data/fbank/aidatatang_200zh")
- output_dir.mkdir(exist_ok=True, parents=True)
-
- dataset_parts = (
- "train",
- "test",
- "dev",
- )
-
- logging.info("Loading manifest")
- manifests = read_manifests_if_cached(
- dataset_parts=dataset_parts, output_dir=src_dir, prefix="aidatatang"
- )
- assert len(manifests) > 0
-
- for partition, m in manifests.items():
- logging.info(f"Processing {partition}")
- raw_cuts_path = output_dir / f"cuts_{partition}_raw.jsonl.gz"
- if raw_cuts_path.is_file():
- logging.info(f"{partition} already exists - skipping")
- continue
-
- for sup in m["supervisions"]:
- sup.custom = {"origin": "aidatatang_200zh"}
-
- cut_set = CutSet.from_manifests(
- recordings=m["recordings"],
- supervisions=m["supervisions"],
- )
-
- logging.info(f"Saving to {raw_cuts_path}")
- cut_set.to_file(raw_cuts_path)
-
-
-def main():
- formatter = (
- "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s"
- )
- logging.basicConfig(format=formatter, level=logging.INFO)
-
- preprocess_aidatatang_200zh()
-
-
-if __name__ == "__main__":
- main()
diff --git a/egs/aishell/ASR/prepare_aidatatang_200zh.sh b/egs/aishell/ASR/prepare_aidatatang_200zh.sh
index 60b2060ec..f1d4d18a7 100755
--- a/egs/aishell/ASR/prepare_aidatatang_200zh.sh
+++ b/egs/aishell/ASR/prepare_aidatatang_200zh.sh
@@ -42,18 +42,18 @@ if [ $stage -le 1 ] && [ $stop_stage -ge 1 ]; then
log "Stage 1: Prepare manifest"
# We assume that you have downloaded the aidatatang_200zh corpus
# to $dl_dir/aidatatang_200zh
- if [ ! -f data/manifests/aidatatang_200zh/.manifests.done ]; then
- mkdir -p data/manifests/aidatatang_200zh
- lhotse prepare aidatatang-200zh $dl_dir data/manifests/aidatatang_200zh
- touch data/manifests/aidatatang_200zh/.manifests.done
+ if [ ! -f data/manifests/.aidatatang_200zh_manifests.done ]; then
+ mkdir -p data/manifests
+ lhotse prepare aidatatang-200zh $dl_dir data/manifests
+ touch data/manifests/.aidatatang_200zh_manifests.done
fi
fi
if [ $stage -le 2 ] && [ $stop_stage -ge 2 ]; then
log "Stage 2: Process aidatatang_200zh"
- if [ ! -f data/fbank/aidatatang_200zh/.fbank.done ]; then
- mkdir -p data/fbank/aidatatang_200zh
- lhotse prepare aidatatang-200zh $dl_dir data/manifests/aidatatang_200zh
- touch data/fbank/aidatatang_200zh/.fbank.done
+ if [ ! -f data/fbank/.aidatatang_200zh_fbank.done ]; then
+ mkdir -p data/fbank
+ ./local/compute_fbank_aidatatang_200zh.py
+ touch data/fbank/.aidatatang_200zh_fbank.done
fi
fi
diff --git a/egs/aishell/ASR/tdnn_lstm_ctc/asr_datamodule.py b/egs/aishell/ASR/tdnn_lstm_ctc/asr_datamodule.py
index 507db2933..d24ba6bb7 100644
--- a/egs/aishell/ASR/tdnn_lstm_ctc/asr_datamodule.py
+++ b/egs/aishell/ASR/tdnn_lstm_ctc/asr_datamodule.py
@@ -23,11 +23,11 @@ from functools import lru_cache
from pathlib import Path
from typing import List
-from lhotse import CutSet, Fbank, FbankConfig, load_manifest
+from lhotse import CutSet, Fbank, FbankConfig, load_manifest, load_manifest_lazy
from lhotse.dataset import (
- BucketingSampler,
CutConcatenate,
CutMix,
+ DynamicBucketingSampler,
K2SpeechRecognitionDataset,
PrecomputedFeatures,
SingleCutSampler,
@@ -93,7 +93,7 @@ class AishellAsrDataModule:
"--num-buckets",
type=int,
default=30,
- help="The number of buckets for the BucketingSampler"
+ help="The number of buckets for the DynamicBucketingSampler"
"(you might want to increase it for larger datasets).",
)
group.add_argument(
@@ -133,6 +133,12 @@ class AishellAsrDataModule:
help="When enabled (=default), the examples will be "
"shuffled for each epoch.",
)
+ group.add_argument(
+ "--drop-last",
+ type=str2bool,
+ default=True,
+ help="Whether to drop last batch. Used by sampler.",
+ )
group.add_argument(
"--return-cuts",
type=str2bool,
@@ -178,7 +184,7 @@ class AishellAsrDataModule:
def train_dataloaders(self, cuts_train: CutSet) -> DataLoader:
logging.info("About to get Musan cuts")
cuts_musan = load_manifest(
- self.args.manifest_dir / "cuts_musan.json.gz"
+ self.args.manifest_dir / "musan_cuts.jsonl.gz"
)
transforms = []
@@ -262,14 +268,13 @@ class AishellAsrDataModule:
)
if self.args.bucketing_sampler:
- logging.info("Using BucketingSampler.")
- train_sampler = BucketingSampler(
+ logging.info("Using DynamicBucketingSampler.")
+ train_sampler = DynamicBucketingSampler(
cuts_train,
max_duration=self.args.max_duration,
shuffle=self.args.shuffle,
num_buckets=self.args.num_buckets,
- bucket_method="equal_duration",
- drop_last=True,
+ drop_last=self.args.drop_last,
)
else:
logging.info("Using SingleCutSampler.")
@@ -313,7 +318,7 @@ class AishellAsrDataModule:
cut_transforms=transforms,
return_cuts=self.args.return_cuts,
)
- valid_sampler = BucketingSampler(
+ valid_sampler = DynamicBucketingSampler(
cuts_valid,
max_duration=self.args.max_duration,
shuffle=False,
@@ -337,8 +342,10 @@ class AishellAsrDataModule:
else PrecomputedFeatures(),
return_cuts=self.args.return_cuts,
)
- sampler = BucketingSampler(
- cuts, max_duration=self.args.max_duration, shuffle=False
+ sampler = DynamicBucketingSampler(
+ cuts,
+ max_duration=self.args.max_duration,
+ shuffle=False,
)
test_dl = DataLoader(
test,
@@ -351,17 +358,21 @@ class AishellAsrDataModule:
@lru_cache()
def train_cuts(self) -> CutSet:
logging.info("About to get train cuts")
- cuts_train = load_manifest(
- self.args.manifest_dir / "cuts_train.json.gz"
+ cuts_train = load_manifest_lazy(
+ self.args.manifest_dir / "aishell_cuts_train.jsonl.gz"
)
return cuts_train
@lru_cache()
def valid_cuts(self) -> CutSet:
logging.info("About to get dev cuts")
- return load_manifest(self.args.manifest_dir / "cuts_dev.json.gz")
+ return load_manifest_lazy(
+ self.args.manifest_dir / "aishell_cuts_dev.jsonl.gz"
+ )
@lru_cache()
def test_cuts(self) -> List[CutSet]:
logging.info("About to get test cuts")
- return load_manifest(self.args.manifest_dir / "cuts_test.json.gz")
+ return load_manifest_lazy(
+ self.args.manifest_dir / "aishell_cuts_test.jsonl.gz"
+ )
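The sampler migration above swaps lhotse's eager `BucketingSampler` for `DynamicBucketingSampler`, which buckets by duration on the fly and therefore also works with lazily-opened manifests. A minimal sketch of how the new sampler is constructed, assuming the manifests produced by the scripts above:

```
from lhotse import load_manifest_lazy
from lhotse.dataset import DynamicBucketingSampler

cuts = load_manifest_lazy("data/fbank/aishell_cuts_train.jsonl.gz")
train_sampler = DynamicBucketingSampler(
    cuts,
    max_duration=200.0,  # seconds of audio per batch, not a batch size
    shuffle=True,
    num_buckets=30,      # more buckets -> tighter duration grouping
    drop_last=True,
)
# the sampler is then passed to DataLoader(..., sampler=train_sampler)
```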
diff --git a/egs/aishell/ASR/tdnn_lstm_ctc/train.py b/egs/aishell/ASR/tdnn_lstm_ctc/train.py
index 3327cdb79..7619b0551 100755
--- a/egs/aishell/ASR/tdnn_lstm_ctc/train.py
+++ b/egs/aishell/ASR/tdnn_lstm_ctc/train.py
@@ -15,6 +15,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+"""
+Usage:
+ export CUDA_VISIBLE_DEVICES="0,1,2,3"
+ ./tdnn_lstm_ctc/train.py \
+ --world-size 4 \
+ --num-epochs 20 \
+ --max-duration 300
+"""
import argparse
import logging
diff --git a/egs/aishell/ASR/transducer_stateless/conformer.py b/egs/aishell/ASR/transducer_stateless/conformer.py
index 149df92ab..7e8dc4ace 100644
--- a/egs/aishell/ASR/transducer_stateless/conformer.py
+++ b/egs/aishell/ASR/transducer_stateless/conformer.py
@@ -110,9 +110,7 @@ class Conformer(Transformer):
x = x.permute(1, 0, 2) # (N, T, C) -> (T, N, C)
# Caution: We assume the subsampling factor is 4!
- with warnings.catch_warnings():
- warnings.simplefilter("ignore")
- lengths = ((x_lens - 1) // 2 - 1) // 2
+ lengths = (((x_lens - 1) >> 1) - 1) >> 1
assert x.size(0) == lengths.max().item()
mask = make_pad_mask(lengths)
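The rewrite above replaces tensor floor division, which triggers PyTorch's `__floordiv__` deprecation warning on newer releases, with arithmetic right shifts. A small sanity check that the two forms agree for non-negative lengths:

```
import torch

x_lens = torch.tensor([7, 100, 16000])
a = ((x_lens - 1) // 2 - 1) // 2    # old form (warns on newer torch)
b = (((x_lens - 1) >> 1) - 1) >> 1  # shift form used above
assert torch.equal(a, b)
```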
diff --git a/egs/aishell/ASR/transducer_stateless/train.py b/egs/aishell/ASR/transducer_stateless/train.py
index f615c78f4..d54157709 100755
--- a/egs/aishell/ASR/transducer_stateless/train.py
+++ b/egs/aishell/ASR/transducer_stateless/train.py
@@ -21,6 +21,7 @@
import argparse
import logging
+import warnings
from pathlib import Path
from shutil import copyfile
from typing import Optional, Tuple
@@ -386,7 +387,11 @@ def compute_loss(
assert loss.requires_grad == is_training
info = MetricsTracker()
- info["frames"] = (feature_lens // params.subsampling_factor).sum().item()
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore")
+ info["frames"] = (
+ (feature_lens // params.subsampling_factor).sum().item()
+ )
# Note: We use reduction=sum while computing the loss.
info["loss"] = loss.detach().cpu().item()
@@ -599,21 +604,18 @@ def run(rank, world_size, args):
train_cuts = aishell.train_cuts()
def remove_short_and_long_utt(c: Cut):
- # Keep only utterances with duration between 1 second and 20 seconds
- return 1.0 <= c.duration <= 20.0
-
- num_in_total = len(train_cuts)
+ # Keep only utterances with duration between 1 second and 12 seconds
+ #
+ # Caution: There is a reason to select 12.0 here. Please see
+ # ../local/display_manifest_statistics.py
+ #
+ # You should use ../local/display_manifest_statistics.py to get
+ # an utterance duration distribution for your dataset to select
+ # the threshold
+ return 1.0 <= c.duration <= 12.0
train_cuts = train_cuts.filter(remove_short_and_long_utt)
- num_left = len(train_cuts)
- num_removed = num_in_total - num_left
- removed_percent = num_removed / num_in_total * 100
-
- logging.info(f"Before removing short and long utterances: {num_in_total}")
- logging.info(f"After removing short and long utterances: {num_left}")
- logging.info(f"Removed {num_removed} utterances ({removed_percent:.5f}%)")
-
train_dl = aishell.train_dataloaders(train_cuts)
valid_dl = aishell.valid_dataloaders(aishell.valid_cuts())
diff --git a/egs/aishell/ASR/transducer_stateless_modified-2/aidatatang_200zh.py b/egs/aishell/ASR/transducer_stateless_modified-2/aidatatang_200zh.py
index 84ca64c89..26d4ee111 100644
--- a/egs/aishell/ASR/transducer_stateless_modified-2/aidatatang_200zh.py
+++ b/egs/aishell/ASR/transducer_stateless_modified-2/aidatatang_200zh.py
@@ -18,7 +18,7 @@
import logging
from pathlib import Path
-from lhotse import CutSet, load_manifest
+from lhotse import CutSet, load_manifest_lazy
class AIDatatang200zh:
@@ -28,26 +28,26 @@ class AIDatatang200zh:
manifest_dir:
It is expected to contain the following files::
- - cuts_dev_raw.jsonl.gz
- - cuts_train_raw.jsonl.gz
- - cuts_test_raw.jsonl.gz
+ - aidatatang_cuts_dev.jsonl.gz
+ - aidatatang_cuts_train.jsonl.gz
+ - aidatatang_cuts_test.jsonl.gz
"""
self.manifest_dir = Path(manifest_dir)
def train_cuts(self) -> CutSet:
- f = self.manifest_dir / "cuts_train_raw.jsonl.gz"
+ f = self.manifest_dir / "aidatatang_cuts_train.jsonl.gz"
logging.info(f"About to get train cuts from {f}")
- cuts_train = load_manifest(f)
+ cuts_train = load_manifest_lazy(f)
return cuts_train
def valid_cuts(self) -> CutSet:
- f = self.manifest_dir / "cuts_valid_raw.jsonl.gz"
+ f = self.manifest_dir / "aidatatang_cuts_valid.jsonl.gz"
logging.info(f"About to get valid cuts from {f}")
- cuts_valid = load_manifest(f)
+ cuts_valid = load_manifest_lazy(f)
return cuts_valid
def test_cuts(self) -> CutSet:
- f = self.manifest_dir / "cuts_test_raw.jsonl.gz"
+ f = self.manifest_dir / "aidatatang_cuts_test.jsonl.gz"
logging.info(f"About to get test cuts from {f}")
- cuts_test = load_manifest(f)
+ cuts_test = load_manifest_lazy(f)
return cuts_test
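A note on the `load_manifest_lazy` switch used throughout: it streams cuts from the JSONL file instead of materializing the whole `CutSet` in memory, which is also why the `len()`-based counting logs are dropped from the train.py scripts elsewhere in this diff (lazily-opened cut sets generally do not support `len()`). A minimal sketch:

```
from lhotse import load_manifest_lazy

# only works with the .jsonl.gz manifests written by the scripts above
cuts = load_manifest_lazy("data/fbank/aidatatang_cuts_train.jsonl.gz")
for cut in cuts:  # cuts are yielded one at a time
    print(cut.id, cut.duration)
    break
```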
diff --git a/egs/aishell/ASR/transducer_stateless_modified-2/aishell.py b/egs/aishell/ASR/transducer_stateless_modified-2/aishell.py
index 94d1da066..ddeca4d88 100644
--- a/egs/aishell/ASR/transducer_stateless_modified-2/aishell.py
+++ b/egs/aishell/ASR/transducer_stateless_modified-2/aishell.py
@@ -18,7 +18,7 @@
import logging
from pathlib import Path
-from lhotse import CutSet, load_manifest
+from lhotse import CutSet, load_manifest_lazy
class AIShell:
@@ -28,26 +28,26 @@ class AIShell:
manifest_dir:
It is expected to contain the following files::
- - cuts_dev.json.gz
- - cuts_train.json.gz
- - cuts_test.json.gz
+ - aishell_cuts_dev.jsonl.gz
+ - aishell_cuts_train.jsonl.gz
+ - aishell_cuts_test.jsonl.gz
"""
self.manifest_dir = Path(manifest_dir)
def train_cuts(self) -> CutSet:
- f = self.manifest_dir / "cuts_train.json.gz"
+ f = self.manifest_dir / "aishell_cuts_train.jsonl.gz"
logging.info(f"About to get train cuts from {f}")
- cuts_train = load_manifest(f)
+ cuts_train = load_manifest_lazy(f)
return cuts_train
def valid_cuts(self) -> CutSet:
- f = self.manifest_dir / "cuts_dev.json.gz"
+ f = self.manifest_dir / "aishell_cuts_dev.jsonl.gz"
logging.info(f"About to get valid cuts from {f}")
- cuts_valid = load_manifest(f)
+ cuts_valid = load_manifest_lazy(f)
return cuts_valid
def test_cuts(self) -> CutSet:
- f = self.manifest_dir / "cuts_test.json.gz"
+ f = self.manifest_dir / "aishell_cuts_test.jsonl.gz"
logging.info(f"About to get test cuts from {f}")
- cuts_test = load_manifest(f)
+ cuts_test = load_manifest_lazy(f)
return cuts_test
diff --git a/egs/aishell/ASR/transducer_stateless_modified-2/asr_datamodule.py b/egs/aishell/ASR/transducer_stateless_modified-2/asr_datamodule.py
index 20eb8155c..838e53658 100644
--- a/egs/aishell/ASR/transducer_stateless_modified-2/asr_datamodule.py
+++ b/egs/aishell/ASR/transducer_stateless_modified-2/asr_datamodule.py
@@ -24,7 +24,6 @@ from typing import Optional
from lhotse import CutSet, Fbank, FbankConfig
from lhotse.dataset import (
- BucketingSampler,
CutMix,
DynamicBucketingSampler,
K2SpeechRecognitionDataset,
@@ -73,8 +72,7 @@ class AsrDataModule:
"--num-buckets",
type=int,
default=30,
- help="The number of buckets for the BucketingSampler "
- "and DynamicBucketingSampler."
+ help="The number of buckets for the DynamicBucketingSampler "
"(you might want to increase it for larger datasets).",
)
@@ -147,7 +145,6 @@ class AsrDataModule:
def train_dataloaders(
self,
cuts_train: CutSet,
- dynamic_bucketing: bool,
on_the_fly_feats: bool,
cuts_musan: Optional[CutSet] = None,
) -> DataLoader:
@@ -157,9 +154,6 @@ class AsrDataModule:
Cuts for training.
cuts_musan:
If not None, it is the cuts for mixing.
- dynamic_bucketing:
- True to use DynamicBucketingSampler;
- False to use BucketingSampler.
on_the_fly_feats:
True to use OnTheFlyFeatures;
False to use PrecomputedFeatures.
@@ -232,25 +226,14 @@ class AsrDataModule:
return_cuts=self.args.return_cuts,
)
- if dynamic_bucketing:
- logging.info("Using DynamicBucketingSampler.")
- train_sampler = DynamicBucketingSampler(
- cuts_train,
- max_duration=self.args.max_duration,
- shuffle=self.args.shuffle,
- num_buckets=self.args.num_buckets,
- drop_last=True,
- )
- else:
- logging.info("Using BucketingSampler.")
- train_sampler = BucketingSampler(
- cuts_train,
- max_duration=self.args.max_duration,
- shuffle=self.args.shuffle,
- num_buckets=self.args.num_buckets,
- bucket_method="equal_duration",
- drop_last=True,
- )
+ logging.info("Using DynamicBucketingSampler.")
+ train_sampler = DynamicBucketingSampler(
+ cuts_train,
+ max_duration=self.args.max_duration,
+ shuffle=self.args.shuffle,
+ num_buckets=self.args.num_buckets,
+ drop_last=True,
+ )
logging.info("About to create train dataloader")
train_dl = DataLoader(
@@ -279,7 +262,7 @@ class AsrDataModule:
cut_transforms=transforms,
return_cuts=self.args.return_cuts,
)
- valid_sampler = BucketingSampler(
+ valid_sampler = DynamicBucketingSampler(
cuts_valid,
max_duration=self.args.max_duration,
shuffle=False,
@@ -303,8 +286,10 @@ class AsrDataModule:
else PrecomputedFeatures(),
return_cuts=self.args.return_cuts,
)
- sampler = BucketingSampler(
- cuts, max_duration=self.args.max_duration, shuffle=False
+ sampler = DynamicBucketingSampler(
+ cuts,
+ max_duration=self.args.max_duration,
+ shuffle=False,
)
logging.debug("About to create test dataloader")
test_dl = DataLoader(
diff --git a/egs/aishell/ASR/transducer_stateless_modified-2/train.py b/egs/aishell/ASR/transducer_stateless_modified-2/train.py
index 53d4e455f..962fffdf5 100755
--- a/egs/aishell/ASR/transducer_stateless_modified-2/train.py
+++ b/egs/aishell/ASR/transducer_stateless_modified-2/train.py
@@ -41,6 +41,7 @@ export CUDA_VISIBLE_DEVICES="0,1,2"
import argparse
import logging
import random
+import warnings
from pathlib import Path
from shutil import copyfile
from typing import Optional, Tuple
@@ -446,7 +447,11 @@ def compute_loss(
assert loss.requires_grad == is_training
info = MetricsTracker()
- info["frames"] = (feature_lens // params.subsampling_factor).sum().item()
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore")
+ info["frames"] = (
+ (feature_lens // params.subsampling_factor).sum().item()
+ )
# Note: We use reduction=sum while computing the loss.
info["loss"] = loss.detach().cpu().item()
@@ -636,19 +641,15 @@ def train_one_epoch(
def filter_short_and_long_utterances(cuts: CutSet) -> CutSet:
def remove_short_and_long_utt(c: Cut):
# Keep only utterances with duration between 1 second and 12 seconds
+ #
+ # Caution: There is a reason to select 12.0 here. Please see
+ # ../local/display_manifest_statistics.py
+ #
+ # You should use ../local/display_manifest_statistics.py to get
+ # an utterance duration distribution for your dataset to select
+ # the threshold
return 1.0 <= c.duration <= 12.0
- num_in_total = len(cuts)
- cuts = cuts.filter(remove_short_and_long_utt)
-
- num_left = len(cuts)
- num_removed = num_in_total - num_left
- removed_percent = num_removed / num_in_total * 100
-
- logging.info(f"Before removing short and long utterances: {num_in_total}")
- logging.info(f"After removing short and long utterances: {num_left}")
- logging.info(f"Removed {num_removed} utterances ({removed_percent:.5f}%)")
-
return cuts
@@ -728,15 +729,14 @@ def run(rank, world_size, args):
train_cuts = aishell.train_cuts()
train_cuts = filter_short_and_long_utterances(train_cuts)
- datatang = AIDatatang200zh(
- manifest_dir=f"{args.manifest_dir}/aidatatang_200zh"
- )
+ datatang = AIDatatang200zh(manifest_dir=args.manifest_dir)
train_datatang_cuts = datatang.train_cuts()
train_datatang_cuts = filter_short_and_long_utterances(train_datatang_cuts)
+ train_datatang_cuts = train_datatang_cuts.repeat(times=None)
if args.enable_musan:
cuts_musan = load_manifest(
- Path(args.manifest_dir) / "cuts_musan.json.gz"
+ Path(args.manifest_dir) / "musan_cuts.jsonl.gz"
)
else:
cuts_musan = None
@@ -745,22 +745,23 @@ def run(rank, world_size, args):
train_dl = asr_datamodule.train_dataloaders(
train_cuts,
- dynamic_bucketing=False,
on_the_fly_feats=False,
cuts_musan=cuts_musan,
)
datatang_train_dl = asr_datamodule.train_dataloaders(
train_datatang_cuts,
- dynamic_bucketing=True,
- on_the_fly_feats=True,
+ on_the_fly_feats=False,
cuts_musan=cuts_musan,
)
valid_cuts = aishell.valid_cuts()
valid_dl = asr_datamodule.valid_dataloaders(valid_cuts)
- for dl in [train_dl, datatang_train_dl]:
+ for dl in [
+ train_dl,
+ # datatang_train_dl
+ ]:
scan_pessimistic_batches_for_oom(
model=model,
train_dl=dl,
diff --git a/egs/aishell/ASR/transducer_stateless_modified/train.py b/egs/aishell/ASR/transducer_stateless_modified/train.py
index 524854b73..d3ffccafa 100755
--- a/egs/aishell/ASR/transducer_stateless_modified/train.py
+++ b/egs/aishell/ASR/transducer_stateless_modified/train.py
@@ -37,6 +37,7 @@ export CUDA_VISIBLE_DEVICES="0,1,2"
import argparse
import logging
+import warnings
from pathlib import Path
from shutil import copyfile
from typing import Optional, Tuple
@@ -411,7 +412,11 @@ def compute_loss(
assert loss.requires_grad == is_training
info = MetricsTracker()
- info["frames"] = (feature_lens // params.subsampling_factor).sum().item()
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore")
+ info["frames"] = (
+ (feature_lens // params.subsampling_factor).sum().item()
+ )
# Note: We use reduction=sum while computing the loss.
info["loss"] = loss.detach().cpu().item()
@@ -625,20 +630,17 @@ def run(rank, world_size, args):
def remove_short_and_long_utt(c: Cut):
# Keep only utterances with duration between 1 second and 12 seconds
+ #
+ # Caution: There is a reason to select 12.0 here. Please see
+ # ../local/display_manifest_statistics.py
+ #
+ # You should use ../local/display_manifest_statistics.py to get
+ # an utterance duration distribution for your dataset to select
+ # the threshold
return 1.0 <= c.duration <= 12.0
- num_in_total = len(train_cuts)
-
train_cuts = train_cuts.filter(remove_short_and_long_utt)
- num_left = len(train_cuts)
- num_removed = num_in_total - num_left
- removed_percent = num_removed / num_in_total * 100
-
- logging.info(f"Before removing short and long utterances: {num_in_total}")
- logging.info(f"After removing short and long utterances: {num_left}")
- logging.info(f"Removed {num_removed} utterances ({removed_percent:.5f}%)")
-
train_dl = aishell.train_dataloaders(train_cuts)
valid_dl = aishell.valid_dataloaders(aishell.valid_cuts())
diff --git a/egs/alimeeting/ASR/README.md b/egs/alimeeting/ASR/README.md
new file mode 100644
index 000000000..257fe38d5
--- /dev/null
+++ b/egs/alimeeting/ASR/README.md
@@ -0,0 +1,19 @@
+
+# Introduction
+
+This recipe includes some different ASR models trained with Alimeeting (far).
+
+[./RESULTS.md](./RESULTS.md) contains the latest results.
+
+# Transducers
+
+There are various folders containing the name `transducer` in this folder.
+The following table lists the differences among them.
+
+| | Encoder | Decoder | Comment |
+|---------------------------------------|---------------------|--------------------|-----------------------------|
+| `pruned_transducer_stateless2` | Conformer(modified) | Embedding + Conv1d | Using k2 pruned RNN-T loss |
+
+The decoder in `pruned_transducer_stateless2` is modified from the paper
+[RNN-Transducer with Stateless Prediction Network](https://ieeexplore.ieee.org/document/9054419/).
+We place an additional Conv1d layer right after the input embedding layer.
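For illustration, a hypothetical sketch of such a stateless prediction network (an embedding over a short label history followed by a Conv1d, in place of the LSTM of a standard RNN-T); the names and sizes here are made up, not the recipe's actual module:

```
import torch
import torch.nn as nn
import torch.nn.functional as F


class StatelessDecoder(nn.Module):
    def __init__(self, vocab_size: int, embed_dim: int, context_size: int = 2):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embed_dim)
        self.context_size = context_size
        # Conv1d over the label history replaces a recurrent predictor
        self.conv = nn.Conv1d(embed_dim, embed_dim, kernel_size=context_size)

    def forward(self, y: torch.Tensor) -> torch.Tensor:
        # y: (N, U) previously emitted labels
        emb = self.embedding(y).permute(0, 2, 1)      # (N, C, U)
        emb = F.pad(emb, (self.context_size - 1, 0))  # left-pad the history
        return self.conv(emb).permute(0, 2, 1)        # (N, U, C)


# e.g.: StatelessDecoder(500, 256)(torch.zeros(2, 5, dtype=torch.long))
```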
diff --git a/egs/alimeeting/ASR/RESULTS.md b/egs/alimeeting/ASR/RESULTS.md
new file mode 100644
index 000000000..745795a20
--- /dev/null
+++ b/egs/alimeeting/ASR/RESULTS.md
@@ -0,0 +1,71 @@
+## Results
+
+### Alimeeting Char training results (Pruned Transducer Stateless2)
+
+#### 2022-06-01
+
+Using the code from this PR: https://github.com/k2-fsa/icefall/pull/378.
+
+The WERs are
+| | eval | test | comment |
+|------------------------------------|------------|------------|------------------------------------------|
+| greedy search | 31.77 | 34.66 | --epoch 29, --avg 18, --max-duration 100 |
+| modified beam search (beam size 4) | 30.38 | 33.02 | --epoch 29, --avg 18, --max-duration 100 |
+| fast beam search (set as default) | 31.39 | 34.25 | --epoch 29, --avg 18, --max-duration 1500 |
+
+The training command for reproducing is given below:
+
+```
+export CUDA_VISIBLE_DEVICES="0,1,2,3"
+
+./pruned_transducer_stateless2/train.py \
+ --world-size 4 \
+ --num-epochs 30 \
+ --start-epoch 0 \
+ --exp-dir pruned_transducer_stateless2/exp \
+ --lang-dir data/lang_char \
+ --max-duration 220 \
+ --save-every-n 1000
+
+```
+
+The tensorboard training log can be found at
+https://tensorboard.dev/experiment/AoqgSvZKTZCJhJbOuG3W6g/#scalars
+
+The decoding command is:
+```
+epoch=29
+avg=18
+
+## greedy search
+./pruned_transducer_stateless2/decode.py \
+ --epoch $epoch \
+ --avg $avg \
+ --exp-dir pruned_transducer_stateless2/exp \
+ --lang-dir ./data/lang_char \
+ --max-duration 100
+
+## modified beam search
+./pruned_transducer_stateless2/decode.py \
+ --epoch $epoch \
+ --avg $avg \
+ --exp-dir pruned_transducer_stateless2/exp \
+ --lang-dir ./data/lang_char \
+ --max-duration 100 \
+ --decoding-method modified_beam_search \
+ --beam-size 4
+
+## fast beam search
+./pruned_transducer_stateless2/decode.py \
+ --epoch $epoch \
+ --avg $avg \
+ --exp-dir ./pruned_transducer_stateless2/exp \
+ --lang-dir ./data/lang_char \
+ --max-duration 1500 \
+ --decoding-method fast_beam_search \
+ --beam 4 \
+ --max-contexts 4 \
+ --max-states 8
+```
+
+A pre-trained model and decoding logs can be found at
diff --git a/egs/alimeeting/ASR/local/__init__.py b/egs/alimeeting/ASR/local/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/egs/alimeeting/ASR/local/compute_fbank_alimeeting.py b/egs/alimeeting/ASR/local/compute_fbank_alimeeting.py
new file mode 100755
index 000000000..2ff473c60
--- /dev/null
+++ b/egs/alimeeting/ASR/local/compute_fbank_alimeeting.py
@@ -0,0 +1,124 @@
+#!/usr/bin/env python3
+# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang)
+#
+# See ../../../../LICENSE for clarification regarding multiple authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""
+This file computes fbank features of the alimeeting dataset.
+It looks for manifests in the directory data/manifests.
+
+The generated fbank features are saved in data/fbank.
+"""
+
+import argparse
+import logging
+import os
+from pathlib import Path
+
+import torch
+from lhotse import CutSet, Fbank, FbankConfig, LilcomChunkyWriter
+from lhotse.recipes.utils import read_manifests_if_cached
+
+from icefall.utils import get_executor
+
+# Torch's multithreaded behavior needs to be disabled or
+# it wastes a lot of CPU and slows things down.
+# Do this outside of main() in case it needs to take effect
+# even when we are not invoking the main (e.g. when spawning subprocesses).
+torch.set_num_threads(1)
+torch.set_num_interop_threads(1)
+
+
+def compute_fbank_alimeeting(num_mel_bins: int = 80):
+ src_dir = Path("data/manifests")
+ output_dir = Path("data/fbank")
+ num_jobs = min(15, os.cpu_count())
+
+ dataset_parts = (
+ "train",
+ "eval",
+ "test",
+ )
+
+ prefix = "alimeeting"
+ suffix = "jsonl.gz"
+ manifests = read_manifests_if_cached(
+ dataset_parts=dataset_parts,
+ output_dir=src_dir,
+ prefix=prefix,
+ suffix=suffix,
+ )
+ assert manifests is not None
+
+ extractor = Fbank(FbankConfig(num_mel_bins=num_mel_bins))
+
+ with get_executor() as ex: # Initialize the executor only once.
+ for partition, m in manifests.items():
+ if (output_dir / f"{prefix}_cuts_{partition}.{suffix}").is_file():
+ logging.info(f"{partition} already exists - skipping.")
+ continue
+ logging.info(f"Processing {partition}")
+ cut_set = CutSet.from_manifests(
+ recordings=m["recordings"],
+ supervisions=m["supervisions"],
+ )
+ if "train" in partition:
+ cut_set = (
+ cut_set
+ + cut_set.perturb_speed(0.9)
+ + cut_set.perturb_speed(1.1)
+ )
+ cur_num_jobs = num_jobs if ex is None else 80
+ cur_num_jobs = min(cur_num_jobs, len(cut_set))
+
+ cut_set = cut_set.compute_and_store_features(
+ extractor=extractor,
+ storage_path=f"{output_dir}/{prefix}_feats_{partition}",
+ # when an executor is specified, make more partitions
+ num_jobs=cur_num_jobs,
+ executor=ex,
+ storage_type=LilcomChunkyWriter,
+ )
+
+ logging.info("About splitting cuts into smaller chunks")
+ cut_set = cut_set.trim_to_supervisions(
+ keep_overlapping=False,
+ min_duration=None,
+ )
+ cut_set.to_file(output_dir / f"{prefix}_cuts_{partition}.{suffix}")
+
+
+def get_args():
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--num-mel-bins",
+ type=int,
+ default=80,
+ help="""The number of mel bins for Fbank""",
+ )
+
+ return parser.parse_args()
+
+
+if __name__ == "__main__":
+ formatter = (
+ "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s"
+ )
+
+ logging.basicConfig(format=formatter, level=logging.INFO)
+
+ args = get_args()
+ compute_fbank_alimeeting(num_mel_bins=args.num_mel_bins)
diff --git a/egs/alimeeting/ASR/local/compute_fbank_musan.py b/egs/alimeeting/ASR/local/compute_fbank_musan.py
new file mode 120000
index 000000000..5833f2484
--- /dev/null
+++ b/egs/alimeeting/ASR/local/compute_fbank_musan.py
@@ -0,0 +1 @@
+../../../librispeech/ASR/local/compute_fbank_musan.py
\ No newline at end of file
diff --git a/egs/alimeeting/ASR/local/display_manifest_statistics.py b/egs/alimeeting/ASR/local/display_manifest_statistics.py
new file mode 100644
index 000000000..16cdecc91
--- /dev/null
+++ b/egs/alimeeting/ASR/local/display_manifest_statistics.py
@@ -0,0 +1,96 @@
+# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang
+# Mingshuang Luo)
+#
+# See ../../../../LICENSE for clarification regarding multiple authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+This file displays duration statistics of utterances in a manifest.
+You can use the displayed value to choose minimum/maximum duration
+to remove short and long utterances during the training.
+See the function `remove_short_and_long_utt()`
+in ../../../librispeech/ASR/transducer/train.py
+for usage.
+"""
+
+
+from lhotse import load_manifest_lazy
+
+
+def main():
+ paths = [
+ "./data/fbank/alimeeting_cuts_train.jsonl.gz",
+ "./data/fbank/alimeeting_cuts_eval.jsonl.gz",
+ "./data/fbank/alimeeting_cuts_test.jsonl.gz",
+ ]
+
+ for path in paths:
+ print(f"Starting display the statistics for {path}")
+ cuts = load_manifest_lazy(path)
+ cuts.describe()
+
+
+if __name__ == "__main__":
+ main()
+
+"""
+Starting to display the statistics for ./data/fbank/alimeeting_cuts_train.jsonl.gz
+Cuts count: 559092
+Total duration (hours): 424.6
+Speech duration (hours): 424.6 (100.0%)
+***
+Duration statistics (seconds):
+mean 2.7
+std 3.0
+min 0.0
+25% 0.7
+50% 1.7
+75% 3.6
+99% 13.6
+99.5% 14.7
+99.9% 16.2
+max 284.3
+Starting to display the statistics for ./data/fbank/alimeeting_cuts_eval.jsonl.gz
+Cuts count: 6457
+Total duration (hours): 4.9
+Speech duration (hours): 4.9 (100.0%)
+***
+Duration statistics (seconds):
+mean 2.7
+std 3.1
+min 0.1
+25% 0.6
+50% 1.6
+75% 3.5
+99% 13.6
+99.5% 14.1
+99.9% 14.7
+max 15.8
+Starting to display the statistics for ./data/fbank/alimeeting_cuts_test.jsonl.gz
+Cuts count: 16358
+Total duration (hours): 12.5
+Speech duration (hours): 12.5 (100.0%)
+***
+Duration statistics (seconds):
+mean 2.7
+std 2.9
+min 0.1
+25% 0.7
+50% 1.7
+75% 3.5
+99% 13.7
+99.5% 14.2
+99.9% 14.8
+max 15.7
+"""
diff --git a/egs/alimeeting/ASR/local/prepare_char.py b/egs/alimeeting/ASR/local/prepare_char.py
new file mode 100755
index 000000000..d9e47d17a
--- /dev/null
+++ b/egs/alimeeting/ASR/local/prepare_char.py
@@ -0,0 +1,248 @@
+#!/usr/bin/env python3
+# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang,
+# Wei Kang)
+#
+# See ../../../../LICENSE for clarification regarding multiple authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""
+
+This script takes as input `lang_dir`, which should contain::
+
+ - lang_dir/text,
+ - lang_dir/words.txt
+
+and generates the following files in the directory `lang_dir`:
+
+ - lexicon.txt
+ - lexicon_disambig.txt
+ - L.pt
+ - L_disambig.pt
+ - tokens.txt
+"""
+
+import re
+from pathlib import Path
+from typing import Dict, List
+
+import k2
+import torch
+from prepare_lang import (
+ Lexicon,
+ add_disambig_symbols,
+ add_self_loops,
+ write_lexicon,
+ write_mapping,
+)
+
+
+def lexicon_to_fst_no_sil(
+ lexicon: Lexicon,
+ token2id: Dict[str, int],
+ word2id: Dict[str, int],
+ need_self_loops: bool = False,
+) -> k2.Fsa:
+ """Convert a lexicon to an FST (in k2 format).
+
+ Args:
+ lexicon:
+ The input lexicon. See also :func:`read_lexicon`
+ token2id:
+ A dict mapping tokens to IDs.
+ word2id:
+ A dict mapping words to IDs.
+ need_self_loops:
+ If True, add self-loop to states with non-epsilon output symbols
+ on at least one arc out of the state. The input label for this
+ self loop is `token2id["#0"]` and the output label is `word2id["#0"]`.
+ Returns:
+ Return an instance of `k2.Fsa` representing the given lexicon.
+ """
+ loop_state = 0 # words enter and leave from here
+ next_state = 1 # the next un-allocated state, will be incremented as we go
+
+ arcs = []
+
+ # The blank symbol <blk> is defined in local/train_bpe_model.py
+ assert token2id["<blk>"] == 0
+ assert word2id["<eps>"] == 0
+
+ eps = 0
+
+ for word, pieces in lexicon:
+ assert len(pieces) > 0, f"{word} has no pronunciations"
+ cur_state = loop_state
+
+ word = word2id[word]
+ pieces = [
+ token2id[i] if i in token2id else token2id["<unk>"] for i in pieces
+ ]
+
+ for i in range(len(pieces) - 1):
+ w = word if i == 0 else eps
+ arcs.append([cur_state, next_state, pieces[i], w, 0])
+
+ cur_state = next_state
+ next_state += 1
+
+ # now for the last piece of this word
+ i = len(pieces) - 1
+ w = word if i == 0 else eps
+ arcs.append([cur_state, loop_state, pieces[i], w, 0])
+
+ if need_self_loops:
+ disambig_token = token2id["#0"]
+ disambig_word = word2id["#0"]
+ arcs = add_self_loops(
+ arcs,
+ disambig_token=disambig_token,
+ disambig_word=disambig_word,
+ )
+
+ final_state = next_state
+ arcs.append([loop_state, final_state, -1, -1, 0])
+ arcs.append([final_state])
+
+ arcs = sorted(arcs, key=lambda arc: arc[0])
+ arcs = [[str(i) for i in arc] for arc in arcs]
+ arcs = [" ".join(arc) for arc in arcs]
+ arcs = "\n".join(arcs)
+
+ fsa = k2.Fsa.from_str(arcs, acceptor=False)
+ return fsa
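+
+# A note on the text format assembled above (a reading aid, not new logic):
+# each arc line is "src_state dest_state label aux_label score", the lines
+# are sorted by src_state, and the final state appears on a line of its own,
+# which is the form expected by k2.Fsa.from_str with acceptor=False.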
+
+
+def contain_oov(token_sym_table: Dict[str, int], tokens: List[str]) -> bool:
+ """Check if all the given tokens are in token symbol table.
+
+ Args:
+ token_sym_table:
+ Token symbol table that contains all the valid tokens.
+ tokens:
+ A list of tokens.
+ Returns:
+ Return True if there is any token not in the token_sym_table,
+ otherwise False.
+ """
+ for tok in tokens:
+ if tok not in token_sym_table:
+ return True
+ return False
+
+
+def generate_lexicon(
+ token_sym_table: Dict[str, int], words: List[str]
+) -> Lexicon:
+ """Generate a lexicon from a word list and token_sym_table.
+
+ Args:
+ token_sym_table:
+ Token symbol table that mapping token to token ids.
+ words:
+ A list of strings representing words.
+ Returns:
+ Return a dict whose keys are words and values are the corresponding
+ tokens.
+ """
+ lexicon = []
+ for word in words:
+ chars = list(word.strip(" \t"))
+ if contain_oov(token_sym_table, chars):
+ continue
+ lexicon.append((word, chars))
+
+ # The OOV word is <UNK>
+ lexicon.append(("<UNK>", ["<unk>"]))
+ return lexicon
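+
+# An illustrative example (hypothetical inputs): with words=["你好"] and a
+# token table containing both "你" and "好", the returned lexicon would be
+# [("你好", ["你", "好"]), ("<UNK>", ["<unk>"])].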
+
+
+def generate_tokens(text_file: str) -> Dict[str, int]:
+ """Generate tokens from the given text file.
+
+ Args:
+ text_file:
+ A file that contains text lines to generate tokens.
+ Returns:
+ Return a dict whose keys are tokens and values are token ids ranged
+ from 0 to len(keys) - 1.
+ """
+ tokens: Dict[str, int] = dict()
+ tokens[""] = 0
+ tokens[""] = 1
+ tokens[""] = 2
+ whitespace = re.compile(r"([ \t\r\n]+)")
+ with open(text_file, "r", encoding="utf-8") as f:
+ for line in f:
+ line = re.sub(whitespace, "", line)
+ chars = list(line)
+ for char in chars:
+ if char not in tokens:
+ tokens[char] = len(tokens)
+ return tokens
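+
+# An illustrative example (hypothetical input): for a text file whose only
+# line is "你好", the returned mapping would be
+# {"<blk>": 0, "<sos/eos>": 1, "<unk>": 2, "你": 3, "好": 4}.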
+
+
+def main():
+ lang_dir = Path("data/lang_char")
+ text_file = lang_dir / "text"
+
+ word_sym_table = k2.SymbolTable.from_file(lang_dir / "words.txt")
+
+ words = word_sym_table.symbols
+
+ excluded = ["", "!SIL", "", "", "#0", "", ""]
+ for w in excluded:
+ if w in words:
+ words.remove(w)
+
+ token_sym_table = generate_tokens(text_file)
+
+ lexicon = generate_lexicon(token_sym_table, words)
+
+ lexicon_disambig, max_disambig = add_disambig_symbols(lexicon)
+
+ next_token_id = max(token_sym_table.values()) + 1
+ for i in range(max_disambig + 1):
+ disambig = f"#{i}"
+ assert disambig not in token_sym_table
+ token_sym_table[disambig] = next_token_id
+ next_token_id += 1
+
+ word_sym_table.add("#0")
+ word_sym_table.add("")
+ word_sym_table.add("")
+
+ write_mapping(lang_dir / "tokens.txt", token_sym_table)
+
+ write_lexicon(lang_dir / "lexicon.txt", lexicon)
+ write_lexicon(lang_dir / "lexicon_disambig.txt", lexicon_disambig)
+
+ L = lexicon_to_fst_no_sil(
+ lexicon,
+ token2id=token_sym_table,
+ word2id=word_sym_table,
+ )
+
+ L_disambig = lexicon_to_fst_no_sil(
+ lexicon_disambig,
+ token2id=token_sym_table,
+ word2id=word_sym_table,
+ need_self_loops=True,
+ )
+ torch.save(L.as_dict(), lang_dir / "L.pt")
+ torch.save(L_disambig.as_dict(), lang_dir / "L_disambig.pt")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/egs/alimeeting/ASR/local/prepare_lang.py b/egs/alimeeting/ASR/local/prepare_lang.py
new file mode 100755
index 000000000..e5ae89ec4
--- /dev/null
+++ b/egs/alimeeting/ASR/local/prepare_lang.py
@@ -0,0 +1,390 @@
+#!/usr/bin/env python3
+# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang)
+#
+# See ../../../../LICENSE for clarification regarding multiple authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""
+This script takes as input a lexicon file "data/lang_phone/lexicon.txt"
+consisting of words and tokens (i.e., phones) and does the following:
+
+1. Add disambiguation symbols to the lexicon and generate lexicon_disambig.txt
+
+2. Generate tokens.txt, the token table mapping a token to a unique integer.
+
+3. Generate words.txt, the word table mapping a word to a unique integer.
+
+4. Generate L.pt, in k2 format. It can be loaded by
+
+ d = torch.load("L.pt")
+ lexicon = k2.Fsa.from_dict(d)
+
+5. Generate L_disambig.pt, in k2 format.
+"""
+import argparse
+import math
+from collections import defaultdict
+from pathlib import Path
+from typing import Any, Dict, List, Tuple
+
+import k2
+import torch
+
+from icefall.lexicon import read_lexicon, write_lexicon
+
+Lexicon = List[Tuple[str, List[str]]]
+
+
+def write_mapping(filename: str, sym2id: Dict[str, int]) -> None:
+ """Write a symbol to ID mapping to a file.
+
+ Note:
+ No need to implement `read_mapping` as it can be done
+ through :func:`k2.SymbolTable.from_file`.
+
+ Args:
+ filename:
+ Filename to save the mapping.
+ sym2id:
+ A dict mapping symbols to IDs.
+ Returns:
+ Return None.
+ """
+ with open(filename, "w", encoding="utf-8") as f:
+ for sym, i in sym2id.items():
+ f.write(f"{sym} {i}\n")
+
+
+def get_tokens(lexicon: Lexicon) -> List[str]:
+ """Get tokens from a lexicon.
+
+ Args:
+ lexicon:
+ It is the return value of :func:`read_lexicon`.
+ Returns:
+ Return a list of unique tokens.
+ """
+ ans = set()
+ for _, tokens in lexicon:
+ ans.update(tokens)
+ sorted_ans = sorted(list(ans))
+ return sorted_ans
+
+
+def get_words(lexicon: Lexicon) -> List[str]:
+ """Get words from a lexicon.
+
+ Args:
+ lexicon:
+ It is the return value of :func:`read_lexicon`.
+ Returns:
+ Return a list of unique words.
+ """
+ ans = set()
+ for word, _ in lexicon:
+ ans.add(word)
+ sorted_ans = sorted(list(ans))
+ return sorted_ans
+
+
+def add_disambig_symbols(lexicon: Lexicon) -> Tuple[Lexicon, int]:
+ """It adds pseudo-token disambiguation symbols #1, #2 and so on
+ at the ends of tokens to ensure that all pronunciations are different,
+ and that none is a prefix of another.
+
+ See also add_lex_disambig.pl from kaldi.
+
+ Args:
+ lexicon:
+ It is returned by :func:`read_lexicon`.
+ Returns:
+ Return a tuple with two elements:
+
+ - The output lexicon with disambiguation symbols
+ - The ID of the max disambiguation symbol that appears
+ in the lexicon
+ """
+
+ # (1) Work out the count of each token-sequence in the
+ # lexicon.
+ count = defaultdict(int)
+ for _, tokens in lexicon:
+ count[" ".join(tokens)] += 1
+
+ # (2) For each left sub-sequence of each token-sequence, note down
+ # that it exists (for identifying prefixes of longer strings).
+ issubseq = defaultdict(int)
+ for _, tokens in lexicon:
+ tokens = tokens.copy()
+ tokens.pop()
+ while tokens:
+ issubseq[" ".join(tokens)] = 1
+ tokens.pop()
+
+ # (3) For each entry in the lexicon:
+ # if the token sequence is unique and is not a
+ # prefix of another word, no disambig symbol.
+ # Else output #1, or #2, #3, ... if the same token-seq
+ # has already been assigned a disambig symbol.
+ ans = []
+
+ # We start with #1 since #0 has its own purpose
+ first_allowed_disambig = 1
+ max_disambig = first_allowed_disambig - 1
+ last_used_disambig_symbol_of = defaultdict(int)
+
+ for word, tokens in lexicon:
+ tokenseq = " ".join(tokens)
+ assert tokenseq != ""
+ if issubseq[tokenseq] == 0 and count[tokenseq] == 1:
+ ans.append((word, tokens))
+ continue
+
+ cur_disambig = last_used_disambig_symbol_of[tokenseq]
+ if cur_disambig == 0:
+ cur_disambig = first_allowed_disambig
+ else:
+ cur_disambig += 1
+
+ if cur_disambig > max_disambig:
+ max_disambig = cur_disambig
+ last_used_disambig_symbol_of[tokenseq] = cur_disambig
+ tokenseq += f" #{cur_disambig}"
+ ans.append((word, tokenseq.split()))
+ return ans, max_disambig
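+
+# An illustrative example, using entries from the toy lexicon in
+# local/test_prepare_lang.py: "food" and "food2" share the token sequence
+# "f o o d", so they become "f o o d #1" and "f o o d #2"; "fo" ("f o") is
+# a prefix of another pronunciation, so it becomes "f o #1".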
+
+
+def generate_id_map(symbols: List[str]) -> Dict[str, int]:
+ """Generate ID maps, i.e., map a symbol to a unique ID.
+
+ Args:
+ symbols:
+ A list of unique symbols.
+ Returns:
+ A dict containing the mapping between symbols and IDs.
+ """
+ return {sym: i for i, sym in enumerate(symbols)}
+
+
+def add_self_loops(
+ arcs: List[List[Any]], disambig_token: int, disambig_word: int
+) -> List[List[Any]]:
+ """Adds self-loops to states of an FST to propagate disambiguation symbols
+ through it. They are added on each state with non-epsilon output symbols
+ on at least one arc out of the state.
+
+ See also fstaddselfloops.pl from Kaldi. One difference is that
+ Kaldi uses OpenFst style FSTs and it has multiple final states.
+ This function uses k2 style FSTs and it does not need to add self-loops
+ to the final state.
+
+ The input label of a self-loop is `disambig_token`, while the output
+ label is `disambig_word`.
+
+ Args:
+ arcs:
+ A list-of-list. The sublist contains
+ `[src_state, dest_state, label, aux_label, score]`
+ disambig_token:
+ It is the token ID of the symbol `#0`.
+ disambig_word:
+ It is the word ID of the symbol `#0`.
+
+ Return:
+ Return new `arcs` containing self-loops.
+ """
+ states_needs_self_loops = set()
+ for arc in arcs:
+ src, dst, ilabel, olabel, score = arc
+ if olabel != 0:
+ states_needs_self_loops.add(src)
+
+ ans = []
+ for s in states_needs_self_loops:
+ ans.append([s, s, disambig_token, disambig_word, 0])
+
+ return arcs + ans
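+
+# An illustrative example (made-up IDs): given arcs=[[0, 1, 5, 7, 0]] with
+# disambig_token=90 and disambig_word=3, state 0 has an outgoing arc whose
+# output label is non-epsilon (7), so the self-loop [0, 0, 90, 3, 0] is
+# appended.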
+
+
+def lexicon_to_fst(
+ lexicon: Lexicon,
+ token2id: Dict[str, int],
+ word2id: Dict[str, int],
+ sil_token: str = "SIL",
+ sil_prob: float = 0.5,
+ need_self_loops: bool = False,
+) -> k2.Fsa:
+ """Convert a lexicon to an FST (in k2 format) with optional silence at
+ the beginning and end of each word.
+
+ Args:
+ lexicon:
+ The input lexicon. See also :func:`read_lexicon`
+ token2id:
+ A dict mapping tokens to IDs.
+ word2id:
+ A dict mapping words to IDs.
+ sil_token:
+ The silence token.
+ sil_prob:
+ The probability for adding a silence at the beginning and end
+ of the word.
+ need_self_loops:
+ If True, add self-loop to states with non-epsilon output symbols
+ on at least one arc out of the state. The input label for this
+ self loop is `token2id["#0"]` and the output label is `word2id["#0"]`.
+ Returns:
+ Return an instance of `k2.Fsa` representing the given lexicon.
+ """
+ assert sil_prob > 0.0 and sil_prob < 1.0
+ # CAUTION: we use score, i.e, negative cost.
+ sil_score = math.log(sil_prob)
+ no_sil_score = math.log(1.0 - sil_prob)
+
+ start_state = 0
+ loop_state = 1 # words enter and leave from here
+ sil_state = 2 # words terminate here when followed by silence; this state
+ # has a silence transition to loop_state.
+ next_state = 3 # the next un-allocated state, will be incremented as we go.
+ arcs = []
+
+ assert token2id[""] == 0
+ assert word2id[""] == 0
+
+ eps = 0
+
+ sil_token = token2id[sil_token]
+
+ arcs.append([start_state, loop_state, eps, eps, no_sil_score])
+ arcs.append([start_state, sil_state, eps, eps, sil_score])
+ arcs.append([sil_state, loop_state, sil_token, eps, 0])
+
+ for word, tokens in lexicon:
+ assert len(tokens) > 0, f"{word} has no pronunciations"
+ cur_state = loop_state
+
+ word = word2id[word]
+ tokens = [token2id[i] for i in tokens]
+
+ for i in range(len(tokens) - 1):
+ w = word if i == 0 else eps
+ arcs.append([cur_state, next_state, tokens[i], w, 0])
+
+ cur_state = next_state
+ next_state += 1
+
+ # now for the last token of this word
+ # It has two out-going arcs, one to the loop state,
+ # the other one to the sil_state.
+ i = len(tokens) - 1
+ w = word if i == 0 else eps
+ arcs.append([cur_state, loop_state, tokens[i], w, no_sil_score])
+ arcs.append([cur_state, sil_state, tokens[i], w, sil_score])
+
+ if need_self_loops:
+ disambig_token = token2id["#0"]
+ disambig_word = word2id["#0"]
+ arcs = add_self_loops(
+ arcs,
+ disambig_token=disambig_token,
+ disambig_word=disambig_word,
+ )
+
+ final_state = next_state
+ arcs.append([loop_state, final_state, -1, -1, 0])
+ arcs.append([final_state])
+
+ arcs = sorted(arcs, key=lambda arc: arc[0])
+ arcs = [[str(i) for i in arc] for arc in arcs]
+ arcs = [" ".join(arc) for arc in arcs]
+ arcs = "\n".join(arcs)
+
+ fsa = k2.Fsa.from_str(arcs, acceptor=False)
+ return fsa
+
+
+def get_args():
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--lang-dir", type=str, help="The lang dir, data/lang_phone"
+ )
+ return parser.parse_args()
+
+
+def main():
+ out_dir = Path(get_args().lang_dir)
+ lexicon_filename = out_dir / "lexicon.txt"
+ sil_token = "SIL"
+ sil_prob = 0.5
+
+ lexicon = read_lexicon(lexicon_filename)
+ tokens = get_tokens(lexicon)
+ words = get_words(lexicon)
+
+ lexicon_disambig, max_disambig = add_disambig_symbols(lexicon)
+
+ for i in range(max_disambig + 1):
+ disambig = f"#{i}"
+ assert disambig not in tokens
+ tokens.append(f"#{i}")
+
+ assert "" not in tokens
+ tokens = [""] + tokens
+
+ assert "" not in words
+ assert "#0" not in words
+ assert "" not in words
+ assert "" not in words
+
+ words = [""] + words + ["#0", "", ""]
+
+ token2id = generate_id_map(tokens)
+ word2id = generate_id_map(words)
+
+ write_mapping(out_dir / "tokens.txt", token2id)
+ write_mapping(out_dir / "words.txt", word2id)
+ write_lexicon(out_dir / "lexicon_disambig.txt", lexicon_disambig)
+
+ L = lexicon_to_fst(
+ lexicon,
+ token2id=token2id,
+ word2id=word2id,
+ sil_token=sil_token,
+ sil_prob=sil_prob,
+ )
+
+ L_disambig = lexicon_to_fst(
+ lexicon_disambig,
+ token2id=token2id,
+ word2id=word2id,
+ sil_token=sil_token,
+ sil_prob=sil_prob,
+ need_self_loops=True,
+ )
+ torch.save(L.as_dict(), out_dir / "L.pt")
+ torch.save(L_disambig.as_dict(), out_dir / "L_disambig.pt")
+
+ if False:
+ # Just for debugging, will remove it
+ L.labels_sym = k2.SymbolTable.from_file(out_dir / "tokens.txt")
+ L.aux_labels_sym = k2.SymbolTable.from_file(out_dir / "words.txt")
+ L_disambig.labels_sym = L.labels_sym
+ L_disambig.aux_labels_sym = L.aux_labels_sym
+ L.draw(out_dir / "L.png", title="L")
+ L_disambig.draw(out_dir / "L_disambig.png", title="L_disambig")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/egs/alimeeting/ASR/local/prepare_words.py b/egs/alimeeting/ASR/local/prepare_words.py
new file mode 100755
index 000000000..65aca2983
--- /dev/null
+++ b/egs/alimeeting/ASR/local/prepare_words.py
@@ -0,0 +1,84 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright 2021 Xiaomi Corp. (authors: Mingshuang Luo)
+#
+# See ../../../../LICENSE for clarification regarding multiple authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""
+This script takes as input a words.txt file without ids:
+ - words_no_ids.txt
+and generates a new words.txt file with ids:
+ - words.txt
+"""
+
+
+import argparse
+import logging
+
+from tqdm import tqdm
+
+
+def get_parser():
+ parser = argparse.ArgumentParser(
+ description="Prepare words.txt",
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+ )
+ parser.add_argument(
+ "--input-file",
+ default="data/lang_char/words_no_ids.txt",
+ type=str,
+ help="the words file without ids for WenetSpeech",
+ )
+ parser.add_argument(
+ "--output-file",
+ default="data/lang_char/words.txt",
+ type=str,
+ help="the words file with ids for WenetSpeech",
+ )
+
+ return parser
+
+
+def main():
+ parser = get_parser()
+ args = parser.parse_args()
+
+ input_file = args.input_file
+ output_file = args.output_file
+
+ f = open(input_file, "r", encoding="utf-8")
+ lines = f.readlines()
+ new_lines = []
+ add_words = [" 0", "!SIL 1", " 2", " 3"]
+ new_lines.extend(add_words)
+
+ logging.info("Starting reading the input file")
+ for i in tqdm(range(len(lines))):
+ x = lines[i]
+ idx = 4 + i
+ new_line = str(x.strip("\n")) + " " + str(idx)
+ new_lines.append(new_line)
+
+ logging.info("Starting writing the words.txt")
+ f_out = open(output_file, "w", encoding="utf-8")
+ for line in new_lines:
+ f_out.write(line)
+ f_out.write("\n")
+
+
+if __name__ == "__main__":
+ main()
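+
+# A usage sketch (this mirrors how ../prepare.sh stage 6 invokes the script):
+#
+# ./local/prepare_words.py \
+#   --input-file data/lang_char/words_no_ids.txt \
+#   --output-file data/lang_char/words.txt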
diff --git a/egs/alimeeting/ASR/local/test_prepare_lang.py b/egs/alimeeting/ASR/local/test_prepare_lang.py
new file mode 100755
index 000000000..d4cf62bba
--- /dev/null
+++ b/egs/alimeeting/ASR/local/test_prepare_lang.py
@@ -0,0 +1,106 @@
+#!/usr/bin/env python3
+# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang)
+#
+# See ../../../../LICENSE for clarification regarding multiple authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Copyright (c) 2021 Xiaomi Corporation (authors: Fangjun Kuang)
+
+import os
+import tempfile
+
+import k2
+from prepare_lang import (
+ add_disambig_symbols,
+ generate_id_map,
+ get_tokens,
+ get_words,
+ lexicon_to_fst,
+ read_lexicon,
+ write_lexicon,
+ write_mapping,
+)
+
+
+def generate_lexicon_file() -> str:
+ fd, filename = tempfile.mkstemp()
+ os.close(fd)
+ s = """
+ !SIL SIL
+ <SPOKEN_NOISE> SPN
+ <UNK> SPN
+ f f
+ a a
+ foo f o o
+ bar b a r
+ bark b a r k
+ food f o o d
+ food2 f o o d
+ fo f o
+ """.strip()
+ with open(filename, "w") as f:
+ f.write(s)
+ return filename
+
+
+def test_read_lexicon(filename: str):
+ lexicon = read_lexicon(filename)
+ phones = get_tokens(lexicon)
+ words = get_words(lexicon)
+ print(lexicon)
+ print(phones)
+ print(words)
+ lexicon_disambig, max_disambig = add_disambig_symbols(lexicon)
+ print(lexicon_disambig)
+ print("max disambig:", f"#{max_disambig}")
+
+ phones = ["", "SIL", "SPN"] + phones
+ for i in range(max_disambig + 1):
+ phones.append(f"#{i}")
+ words = [""] + words
+
+ phone2id = generate_id_map(phones)
+ word2id = generate_id_map(words)
+
+ print(phone2id)
+ print(word2id)
+
+ write_mapping("phones.txt", phone2id)
+ write_mapping("words.txt", word2id)
+
+ write_lexicon("a.txt", lexicon)
+ write_lexicon("a_disambig.txt", lexicon_disambig)
+
+ fsa = lexicon_to_fst(lexicon, token2id=phone2id, word2id=word2id)
+ fsa.labels_sym = k2.SymbolTable.from_file("phones.txt")
+ fsa.aux_labels_sym = k2.SymbolTable.from_file("words.txt")
+ fsa.draw("L.pdf", title="L")
+
+ fsa_disambig = lexicon_to_fst(
+ lexicon_disambig, token2id=phone2id, word2id=word2id
+ )
+ fsa_disambig.labels_sym = k2.SymbolTable.from_file("phones.txt")
+ fsa_disambig.aux_labels_sym = k2.SymbolTable.from_file("words.txt")
+ fsa_disambig.draw("L_disambig.pdf", title="L_disambig")
+
+
+def main():
+ filename = generate_lexicon_file()
+ test_read_lexicon(filename)
+ os.remove(filename)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/egs/alimeeting/ASR/local/text2segments.py b/egs/alimeeting/ASR/local/text2segments.py
new file mode 100644
index 000000000..3df727c67
--- /dev/null
+++ b/egs/alimeeting/ASR/local/text2segments.py
@@ -0,0 +1,83 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright 2021 Xiaomi Corp. (authors: Mingshuang Luo)
+#
+# See ../../../../LICENSE for clarification regarding multiple authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""
+This script takes as input "text", which refers to the transcript file for
+WenetSpeech:
+ - text
+and generates the output file text_word_segmentation which is implemented
+with word segmenting:
+ - text_words_segmentation
+"""
+
+
+import argparse
+
+import jieba
+from tqdm import tqdm
+
+jieba.enable_paddle()
+
+
+def get_parser():
+ parser = argparse.ArgumentParser(
+ description="Chinese Word Segmentation for text",
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+ )
+ parser.add_argument(
+ "--input-file",
+ default="data/lang_char/text",
+ type=str,
+ help="the input text file for WenetSpeech",
+ )
+ parser.add_argument(
+ "--output-file",
+ default="data/lang_char/text_words_segmentation",
+ type=str,
+ help="the text implemented with words segmenting for WenetSpeech",
+ )
+
+ return parser
+
+
+def main():
+ parser = get_parser()
+ args = parser.parse_args()
+
+ input_file = args.input_file
+ output_file = args.output_file
+
+ f = open(input_file, "r", encoding="utf-8")
+ lines = f.readlines()
+ new_lines = []
+ for i in tqdm(range(len(lines))):
+ x = lines[i].rstrip()
+ seg_list = jieba.cut(x, use_paddle=True)
+ new_line = " ".join(seg_list)
+ new_lines.append(new_line)
+
+ f_new = open(output_file, "w", encoding="utf-8")
+ for line in new_lines:
+ f_new.write(line)
+ f_new.write("\n")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/egs/alimeeting/ASR/local/text2token.py b/egs/alimeeting/ASR/local/text2token.py
new file mode 100755
index 000000000..71be2a613
--- /dev/null
+++ b/egs/alimeeting/ASR/local/text2token.py
@@ -0,0 +1,195 @@
+#!/usr/bin/env python3
+# Copyright 2017 Johns Hopkins University (authors: Shinji Watanabe)
+# 2022 Xiaomi Corp. (authors: Mingshuang Luo)
+#
+# See ../../../../LICENSE for clarification regarding multiple authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import argparse
+import codecs
+import re
+import sys
+from typing import List
+
+from pypinyin import lazy_pinyin, pinyin
+
+is_python2 = sys.version_info[0] == 2
+
+
+def exist_or_not(i, match_pos):
+ start_pos = None
+ end_pos = None
+ for pos in match_pos:
+ if pos[0] <= i < pos[1]:
+ start_pos = pos[0]
+ end_pos = pos[1]
+ break
+
+ return start_pos, end_pos
+
+
+def get_parser():
+ parser = argparse.ArgumentParser(
+ description="convert raw text to tokenized text",
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+ )
+ parser.add_argument(
+ "--nchar",
+ "-n",
+ default=1,
+ type=int,
+ help="number of characters to split, i.e., \
+ aabb -> a a b b with -n 1 and aa bb with -n 2",
+ )
+ parser.add_argument(
+ "--skip-ncols", "-s", default=0, type=int, help="skip first n columns"
+ )
+ parser.add_argument(
+ "--space", default="", type=str, help="space symbol"
+ )
+ parser.add_argument(
+ "--non-lang-syms",
+ "-l",
+ default=None,
+ type=str,
+ help="list of non-linguistic symobles, e.g., etc.",
+ )
+ parser.add_argument(
+ "text", type=str, default=False, nargs="?", help="input text"
+ )
+ parser.add_argument(
+ "--trans_type",
+ "-t",
+ type=str,
+ default="char",
+ choices=["char", "pinyin", "lazy_pinyin"],
+ help="""Transcript type. char/pinyin/lazy_pinyin""",
+ )
+ return parser
+
+
+def token2id(
+ texts, token_table, token_type: str = "lazy_pinyin", oov: str = "<unk>"
+) -> List[List[int]]:
+ """Convert token to id.
+ Args:
+ texts:
+ The input texts; here they refer to Chinese text.
+ token_table:
+ The token table built from "data/lang_xxx/tokens.txt".
+ token_type:
+ The type of token, such as "pinyin" and "lazy_pinyin".
+ oov:
+ Out of vocabulary token. When a word (token) in the transcript
+ does not exist in the token list, it is replaced with `oov`.
+
+ Returns:
+ The list of ids for the input texts.
+ """
+ if texts is None:
+ raise ValueError("texts can't be None!")
+ else:
+ oov_id = token_table[oov]
+ ids: List[List[int]] = []
+ for text in texts:
+ chars_list = list(str(text))
+ if token_type == "lazy_pinyin":
+ text = lazy_pinyin(chars_list)
+ sub_ids = [
+ token_table[txt] if txt in token_table else oov_id
+ for txt in text
+ ]
+ ids.append(sub_ids)
+ else: # token_type = "pinyin"
+ text = pinyin(chars_list)
+ sub_ids = [
+ token_table[txt[0]] if txt[0] in token_table else oov_id
+ for txt in text
+ ]
+ ids.append(sub_ids)
+ return ids
+
+
+def main():
+ parser = get_parser()
+ args = parser.parse_args()
+
+ rs = []
+ if args.non_lang_syms is not None:
+ with codecs.open(args.non_lang_syms, "r", encoding="utf-8") as f:
+ nls = [x.rstrip() for x in f.readlines()]
+ rs = [re.compile(re.escape(x)) for x in nls]
+
+ if args.text:
+ f = codecs.open(args.text, encoding="utf-8")
+ else:
+ f = codecs.getreader("utf-8")(
+ sys.stdin if is_python2 else sys.stdin.buffer
+ )
+
+ sys.stdout = codecs.getwriter("utf-8")(
+ sys.stdout if is_python2 else sys.stdout.buffer
+ )
+ line = f.readline()
+ n = args.nchar
+ while line:
+ x = line.split()
+ print(" ".join(x[: args.skip_ncols]), end=" ")
+ a = " ".join(x[args.skip_ncols :]) # noqa E203
+
+ # get all matched positions
+ match_pos = []
+ for r in rs:
+ i = 0
+ while i >= 0:
+ m = r.search(a, i)
+ if m:
+ match_pos.append([m.start(), m.end()])
+ i = m.end()
+ else:
+ break
+ if len(match_pos) > 0:
+ chars = []
+ i = 0
+ while i < len(a):
+ start_pos, end_pos = exist_or_not(i, match_pos)
+ if start_pos is not None:
+ chars.append(a[start_pos:end_pos])
+ i = end_pos
+ else:
+ chars.append(a[i])
+ i += 1
+ a = chars
+
+ if args.trans_type == "pinyin":
+ a = pinyin(list(str(a)))
+ a = [one[0] for one in a]
+
+ if args.trans_type == "lazy_pinyin":
+ a = lazy_pinyin(list(str(a)))
+
+ a = [a[j : j + n] for j in range(0, len(a), n)] # noqa E203
+
+ a_flat = []
+ for z in a:
+ a_flat.append("".join(z))
+
+ a_chars = "".join(a_flat)
+ print(a_chars)
+ line = f.readline()
+
+
+if __name__ == "__main__":
+ main()
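+
+# A usage sketch (this mirrors the pipeline in ../prepare.sh stage 6, which
+# feeds the supervision texts in via stdin):
+#
+# gunzip -c data/manifests/alimeeting/supervisions_train.jsonl.gz \
+#   | jq '.text' | sed 's/"//g' \
+#   | ./local/text2token.py -t "char" > data/lang_char/text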
diff --git a/egs/alimeeting/ASR/prepare.sh b/egs/alimeeting/ASR/prepare.sh
new file mode 100755
index 000000000..eb2ac697d
--- /dev/null
+++ b/egs/alimeeting/ASR/prepare.sh
@@ -0,0 +1,133 @@
+#!/usr/bin/env bash
+
+set -eou pipefail
+
+stage=-1
+stop_stage=100
+
+# We assume dl_dir (download dir) contains the following
+# directories and files. If not, they will be downloaded
+# by this script automatically.
+#
+# - $dl_dir/alimeeting
+# This directory contains the following files downloaded from
+# https://openslr.org/119/
+#
+# - Train_Ali_far.tar.gz
+# - Train_Ali_near.tar.gz
+# - Test_Ali.tar.gz
+# - Eval_Ali.tar.gz
+#
+# - $dl_dir/musan
+# This directory contains the following directories downloaded from
+# http://www.openslr.org/17/
+#
+# - music
+# - noise
+# - speech
+
+dl_dir=$PWD/download
+
+. shared/parse_options.sh || exit 1
+
+# All files generated by this script are saved in "data".
+# You can safely remove "data" and rerun this script to regenerate it.
+mkdir -p data
+
+log() {
+ # This function is from espnet
+ local fname=${BASH_SOURCE[1]##*/}
+ echo -e "$(date '+%Y-%m-%d %H:%M:%S') (${fname}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*"
+}
+
+log "dl_dir: $dl_dir"
+
+if [ $stage -le 0 ] && [ $stop_stage -ge 0 ]; then
+ log "Stage 0: Download data"
+
+ if [ ! -f $dl_dir/alimeeting/Train_Ali_far.tar.gz ]; then
+ lhotse download ali-meeting $dl_dir/alimeeting
+ fi
+fi
+
+if [ $stage -le 1 ] && [ $stop_stage -ge 1 ]; then
+ log "Stage 1: Prepare alimeeting manifest"
+ # We assume that you have downloaded the alimeeting corpus
+ # to $dl_dir/alimeeting
+ if [ ! -f data/manifests/alimeeting/.manifests.done ]; then
+ mkdir -p data/manifests/alimeeting
+ lhotse prepare ali-meeting $dl_dir/alimeeting data/manifests/alimeeting
+ touch data/manifests/alimeeting/.manifests.done
+ fi
+fi
+
+if [ $stage -le 2 ] && [ $stop_stage -ge 2 ]; then
+ log "Stage 2: Process alimeeting"
+ if [ ! -f data/fbank/alimeeting/.fbank.done ]; then
+ mkdir -p data/fbank/alimeeting
+ lhotse prepare ali-meeting $dl_dir/alimeeting data/manifests/alimeeting
+ touch data/fbank/alimeeting/.fbank.done
+ fi
+fi
+
+if [ $stage -le 3 ] && [ $stop_stage -ge 3 ]; then
+ log "Stage 3: Prepare musan manifest"
+ # We assume that you have downloaded the musan corpus
+ # to data/musan
+ if [ ! -f data/manifests/.musan_manifests.done ]; then
+ log "It may take 6 minutes"
+ mkdir -p data/manifests
+ lhotse prepare musan $dl_dir/musan data/manifests
+ touch data/manifests/.musan_manifests.done
+ fi
+fi
+
+if [ $stage -le 4 ] && [ $stop_stage -ge 4 ]; then
+ log "Stage 4: Compute fbank for musan"
+ if [ ! -f data/fbank/.musan.done ]; then
+ mkdir -p data/fbank
+ ./local/compute_fbank_musan.py
+ touch data/fbank/.musan.done
+ fi
+fi
+
+if [ $stage -le 5 ] && [ $stop_stage -ge 5 ]; then
+ log "Stage 5: Compute fbank for alimeeting"
+ if [ ! -f data/fbank/.alimeeting.done ]; then
+ mkdir -p data/fbank
+ ./local/compute_fbank_alimeeting.py
+ touch data/fbank/.alimeeting.done
+ fi
+fi
+
+if [ $stage -le 6 ] && [ $stop_stage -ge 6 ]; then
+ log "Stage 6: Prepare char based lang"
+ lang_char_dir=data/lang_char
+ mkdir -p $lang_char_dir
+
+ # Prepare text.
+ # Note: in Linux, you can install jq with the following command:
+ # wget -O jq https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64
+ gunzip -c data/manifests/alimeeting/supervisions_train.jsonl.gz \
+ | jq ".text" | sed 's/"//g' \
+ | ./local/text2token.py -t "char" > $lang_char_dir/text
+
+ # Prepare words segments
+ python ./local/text2segments.py \
+ --input $lang_char_dir/text \
+ --output $lang_char_dir/text_words_segmentation
+
+ cat $lang_char_dir/text_words_segmentation | sed "s/ /\n/g" \
+ | sort -u | sed "/^$/d" \
+ | uniq > $lang_char_dir/words_no_ids.txt
+
+ # Prepare words.txt
+ if [ ! -f $lang_char_dir/words.txt ]; then
+ ./local/prepare_words.py \
+ --input-file $lang_char_dir/words_no_ids.txt \
+ --output-file $lang_char_dir/words.txt
+ fi
+
+ if [ ! -f $lang_char_dir/L_disambig.pt ]; then
+ ./local/prepare_char.py
+ fi
+fi
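+
+# Usage sketch: run all stages with "./prepare.sh", or a single stage with,
+# e.g., "./prepare.sh --stage 6 --stop-stage 6"; the stage/stop_stage
+# variables above are overridable via shared/parse_options.sh.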
diff --git a/egs/alimeeting/ASR/pruned_transducer_stateless2/__init__.py b/egs/alimeeting/ASR/pruned_transducer_stateless2/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/egs/alimeeting/ASR/pruned_transducer_stateless2/asr_datamodule.py b/egs/alimeeting/ASR/pruned_transducer_stateless2/asr_datamodule.py
new file mode 100644
index 000000000..bf6faad7a
--- /dev/null
+++ b/egs/alimeeting/ASR/pruned_transducer_stateless2/asr_datamodule.py
@@ -0,0 +1,421 @@
+# Copyright 2021 Piotr Żelasko
+#
+# See ../../../../LICENSE for clarification regarding multiple authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import argparse
+import inspect
+import logging
+from functools import lru_cache
+from pathlib import Path
+from typing import Any, Dict, List, Optional
+
+import torch
+from lhotse import (
+ CutSet,
+ Fbank,
+ FbankConfig,
+ load_manifest,
+ load_manifest_lazy,
+ set_caching_enabled,
+)
+from lhotse.dataset import (
+ CutConcatenate,
+ CutMix,
+ DynamicBucketingSampler,
+ K2SpeechRecognitionDataset,
+ PrecomputedFeatures,
+ SingleCutSampler,
+ SpecAugment,
+)
+from lhotse.dataset.input_strategies import OnTheFlyFeatures
+from lhotse.utils import fix_random_seed
+from torch.utils.data import DataLoader
+
+from icefall.utils import str2bool
+
+set_caching_enabled(False)
+torch.set_num_threads(1)
+
+
+class _SeedWorkers:
+ def __init__(self, seed: int):
+ self.seed = seed
+
+ def __call__(self, worker_id: int):
+ fix_random_seed(self.seed + worker_id)
+
+
+class AlimeetingAsrDataModule:
+ """
+ DataModule for k2 ASR experiments.
+ It assumes there is always one train and valid dataloader,
+ but there can be multiple test dataloaders (e.g. LibriSpeech test-clean
+ and test-other).
+ It contains all the common data pipeline modules used in ASR
+ experiments, e.g.:
+ - dynamic batch size,
+ - bucketing samplers,
+ - cut concatenation,
+ - augmentation,
+ - on-the-fly feature extraction
+ This class should be derived for specific corpora used in ASR tasks.
+ """
+
+ def __init__(self, args: argparse.Namespace):
+ self.args = args
+
+ @classmethod
+ def add_arguments(cls, parser: argparse.ArgumentParser):
+ group = parser.add_argument_group(
+ title="ASR data related options",
+ description="These options are used for the preparation of "
+ "PyTorch DataLoaders from Lhotse CutSet's -- they control the "
+ "effective batch sizes, sampling strategies, applied data "
+ "augmentations, etc.",
+ )
+ group.add_argument(
+ "--manifest-dir",
+ type=Path,
+ default=Path("data/fbank"),
+ help="Path to directory with train/dev/test cuts.",
+ )
+ group.add_argument(
+ "--max-duration",
+ type=int,
+ default=200.0,
+ help="Maximum pooled recordings duration (seconds) in a "
+ "single batch. You can reduce it if it causes CUDA OOM.",
+ )
+ group.add_argument(
+ "--bucketing-sampler",
+ type=str2bool,
+ default=True,
+ help="When enabled, the batches will come from buckets of "
+ "similar duration (saves padding frames).",
+ )
+ group.add_argument(
+ "--num-buckets",
+ type=int,
+ default=300,
+ help="The number of buckets for the DynamicBucketingSampler"
+ "(you might want to increase it for larger datasets).",
+ )
+ group.add_argument(
+ "--concatenate-cuts",
+ type=str2bool,
+ default=False,
+ help="When enabled, utterances (cuts) will be concatenated "
+ "to minimize the amount of padding.",
+ )
+ group.add_argument(
+ "--duration-factor",
+ type=float,
+ default=1.0,
+ help="Determines the maximum duration of a concatenated cut "
+ "relative to the duration of the longest cut in a batch.",
+ )
+ group.add_argument(
+ "--gap",
+ type=float,
+ default=1.0,
+ help="The amount of padding (in seconds) inserted between "
+ "concatenated cuts. This padding is filled with noise when "
+ "noise augmentation is used.",
+ )
+ group.add_argument(
+ "--on-the-fly-feats",
+ type=str2bool,
+ default=False,
+ help="When enabled, use on-the-fly cut mixing and feature "
+ "extraction. Will drop existing precomputed feature manifests "
+ "if available.",
+ )
+ group.add_argument(
+ "--shuffle",
+ type=str2bool,
+ default=True,
+ help="When enabled (=default), the examples will be "
+ "shuffled for each epoch.",
+ )
+ group.add_argument(
+ "--return-cuts",
+ type=str2bool,
+ default=True,
+ help="When enabled, each batch will have the "
+ "field: batch['supervisions']['cut'] with the cuts that "
+ "were used to construct it.",
+ )
+
+ group.add_argument(
+ "--num-workers",
+ type=int,
+ default=2,
+ help="The number of training dataloader workers that "
+ "collect the batches.",
+ )
+
+ group.add_argument(
+ "--enable-spec-aug",
+ type=str2bool,
+ default=True,
+ help="When enabled, use SpecAugment for training dataset.",
+ )
+
+ group.add_argument(
+ "--spec-aug-time-warp-factor",
+ type=int,
+ default=80,
+ help="Used only when --enable-spec-aug is True. "
+ "It specifies the factor for time warping in SpecAugment. "
+ "Larger values mean more warping. "
+ "A value less than 1 means to disable time warp.",
+ )
+
+ group.add_argument(
+ "--enable-musan",
+ type=str2bool,
+ default=True,
+ help="When enabled, select noise from MUSAN and mix it"
+ "with training dataset. ",
+ )
+
+ def train_dataloaders(
+ self,
+ cuts_train: CutSet,
+ sampler_state_dict: Optional[Dict[str, Any]] = None,
+ ) -> DataLoader:
+ """
+ Args:
+ cuts_train:
+ CutSet for training.
+ sampler_state_dict:
+ The state dict for the training sampler.
+ """
+ logging.info("About to get Musan cuts")
+ cuts_musan = load_manifest(
+ self.args.manifest_dir / "musan_cuts.jsonl.gz"
+ )
+
+ transforms = []
+ if self.args.enable_musan:
+ logging.info("Enable MUSAN")
+ transforms.append(
+ CutMix(
+ cuts=cuts_musan, prob=0.5, snr=(10, 20), preserve_id=True
+ )
+ )
+ else:
+ logging.info("Disable MUSAN")
+
+ if self.args.concatenate_cuts:
+ logging.info(
+ f"Using cut concatenation with duration factor "
+ f"{self.args.duration_factor} and gap {self.args.gap}."
+ )
+ # Cut concatenation should be the first transform in the list,
+ # so that if we e.g. mix noise in, it will fill the gaps between
+ # different utterances.
+ transforms = [
+ CutConcatenate(
+ duration_factor=self.args.duration_factor, gap=self.args.gap
+ )
+ ] + transforms
+
+ input_transforms = []
+ if self.args.enable_spec_aug:
+ logging.info("Enable SpecAugment")
+ logging.info(
+ f"Time warp factor: {self.args.spec_aug_time_warp_factor}"
+ )
+ # Set the value of num_frame_masks according to Lhotse's version.
+ # In different Lhotse's versions, the default of num_frame_masks is
+ # different.
+ num_frame_masks = 10
+ num_frame_masks_parameter = inspect.signature(
+ SpecAugment.__init__
+ ).parameters["num_frame_masks"]
+ if num_frame_masks_parameter.default == 1:
+ num_frame_masks = 2
+ logging.info(f"Num frame mask: {num_frame_masks}")
+ input_transforms.append(
+ SpecAugment(
+ time_warp_factor=self.args.spec_aug_time_warp_factor,
+ num_frame_masks=num_frame_masks,
+ features_mask_size=27,
+ num_feature_masks=2,
+ frames_mask_size=100,
+ )
+ )
+ else:
+ logging.info("Disable SpecAugment")
+
+ logging.info("About to create train dataset")
+ train = K2SpeechRecognitionDataset(
+ cut_transforms=transforms,
+ input_transforms=input_transforms,
+ return_cuts=self.args.return_cuts,
+ )
+
+ if self.args.on_the_fly_feats:
+ # NOTE: the PerturbSpeed transform should be added only if we
+ # remove it from data prep stage.
+ # Add on-the-fly speed perturbation; since originally it would
+ # have increased epoch size by 3, we will apply prob 2/3 and use
+ # 3x more epochs.
+ # Speed perturbation probably should come first before
+ # concatenation, but in principle the transforms order doesn't have
+ # to be strict (e.g. could be randomized)
+ # transforms = [PerturbSpeed(factors=[0.9, 1.1], p=2/3)] + transforms # noqa
+ # Drop feats to be on the safe side.
+ train = K2SpeechRecognitionDataset(
+ cut_transforms=transforms,
+ input_strategy=OnTheFlyFeatures(
+ Fbank(FbankConfig(num_mel_bins=80))
+ ),
+ input_transforms=input_transforms,
+ return_cuts=self.args.return_cuts,
+ )
+
+ if self.args.bucketing_sampler:
+ logging.info("Using DynamicBucketingSampler.")
+ train_sampler = DynamicBucketingSampler(
+ cuts_train,
+ max_duration=self.args.max_duration,
+ shuffle=self.args.shuffle,
+ num_buckets=self.args.num_buckets,
+ buffer_size=30000,
+ drop_last=True,
+ )
+ else:
+ logging.info("Using SingleCutSampler.")
+ train_sampler = SingleCutSampler(
+ cuts_train,
+ max_duration=self.args.max_duration,
+ shuffle=self.args.shuffle,
+ )
+ logging.info("About to create train dataloader")
+
+ # 'seed' is derived from the current random state, which will have
+ # previously been set in the main process.
+ seed = torch.randint(0, 100000, ()).item()
+ worker_init_fn = _SeedWorkers(seed)
+
+ train_dl = DataLoader(
+ train,
+ sampler=train_sampler,
+ batch_size=None,
+ num_workers=self.args.num_workers,
+ persistent_workers=False,
+ worker_init_fn=worker_init_fn,
+ )
+
+ if sampler_state_dict is not None:
+ logging.info("Loading sampler state dict")
+ train_dl.sampler.load_state_dict(sampler_state_dict)
+
+ return train_dl
+
+ def valid_dataloaders(self, cuts_valid: CutSet) -> DataLoader:
+ transforms = []
+ if self.args.concatenate_cuts:
+ transforms = [
+ CutConcatenate(
+ duration_factor=self.args.duration_factor, gap=self.args.gap
+ )
+ ] + transforms
+
+ logging.info("About to create dev dataset")
+ if self.args.on_the_fly_feats:
+ validate = K2SpeechRecognitionDataset(
+ cut_transforms=transforms,
+ input_strategy=OnTheFlyFeatures(
+ Fbank(FbankConfig(num_mel_bins=80))
+ ),
+ return_cuts=self.args.return_cuts,
+ )
+ else:
+ validate = K2SpeechRecognitionDataset(
+ cut_transforms=transforms,
+ return_cuts=self.args.return_cuts,
+ )
+ valid_sampler = DynamicBucketingSampler(
+ cuts_valid,
+ max_duration=self.args.max_duration,
+ shuffle=False,
+ )
+ logging.info("About to create dev dataloader")
+
+ from lhotse.dataset.iterable_dataset import IterableDatasetWrapper
+
+ dev_iter_dataset = IterableDatasetWrapper(
+ dataset=validate,
+ sampler=valid_sampler,
+ )
+ valid_dl = DataLoader(
+ dev_iter_dataset,
+ batch_size=None,
+ num_workers=self.args.num_workers,
+ persistent_workers=False,
+ )
+
+ return valid_dl
+
+ def test_dataloaders(self, cuts: CutSet) -> DataLoader:
+ logging.debug("About to create test dataset")
+ test = K2SpeechRecognitionDataset(
+ input_strategy=OnTheFlyFeatures(Fbank(FbankConfig(num_mel_bins=80)))
+ if self.args.on_the_fly_feats
+ else PrecomputedFeatures(),
+ return_cuts=self.args.return_cuts,
+ )
+ sampler = DynamicBucketingSampler(
+ cuts,
+ max_duration=self.args.max_duration,
+ shuffle=False,
+ )
+ from lhotse.dataset.iterable_dataset import IterableDatasetWrapper
+
+ test_iter_dataset = IterableDatasetWrapper(
+ dataset=test,
+ sampler=sampler,
+ )
+ test_dl = DataLoader(
+ test_iter_dataset,
+ batch_size=None,
+ num_workers=self.args.num_workers,
+ )
+ return test_dl
+
+ @lru_cache()
+ def train_cuts(self) -> CutSet:
+ logging.info("About to get train cuts")
+ return load_manifest_lazy(
+ self.args.manifest_dir / "alimeeting_cuts_train.jsonl.gz"
+ )
+
+ @lru_cache()
+ def valid_cuts(self) -> CutSet:
+ logging.info("About to get dev cuts")
+ return load_manifest_lazy(
+ self.args.manifest_dir / "alimeeting_cuts_eval.jsonl.gz"
+ )
+
+ @lru_cache()
+ def test_cuts(self) -> List[CutSet]:
+ logging.info("About to get test cuts")
+ return load_manifest_lazy(
+ self.args.manifest_dir / "alimeeting_cuts_test.jsonl.gz"
+ )
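+
+
+# A minimal usage sketch (assuming train.py-style argument handling; the
+# variable names below are illustrative):
+#
+# parser = argparse.ArgumentParser()
+# AlimeetingAsrDataModule.add_arguments(parser)
+# args = parser.parse_args()
+# datamodule = AlimeetingAsrDataModule(args)
+# train_dl = datamodule.train_dataloaders(datamodule.train_cuts())
+# valid_dl = datamodule.valid_dataloaders(datamodule.valid_cuts())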
diff --git a/egs/alimeeting/ASR/pruned_transducer_stateless2/beam_search.py b/egs/alimeeting/ASR/pruned_transducer_stateless2/beam_search.py
new file mode 120000
index 000000000..e24eca39f
--- /dev/null
+++ b/egs/alimeeting/ASR/pruned_transducer_stateless2/beam_search.py
@@ -0,0 +1 @@
+../../../librispeech/ASR/pruned_transducer_stateless2/beam_search.py
\ No newline at end of file
diff --git a/egs/alimeeting/ASR/pruned_transducer_stateless2/conformer.py b/egs/alimeeting/ASR/pruned_transducer_stateless2/conformer.py
new file mode 120000
index 000000000..a65957180
--- /dev/null
+++ b/egs/alimeeting/ASR/pruned_transducer_stateless2/conformer.py
@@ -0,0 +1 @@
+../../../librispeech/ASR/pruned_transducer_stateless2/conformer.py
\ No newline at end of file
diff --git a/egs/alimeeting/ASR/pruned_transducer_stateless2/decode.py b/egs/alimeeting/ASR/pruned_transducer_stateless2/decode.py
new file mode 100755
index 000000000..cb455838e
--- /dev/null
+++ b/egs/alimeeting/ASR/pruned_transducer_stateless2/decode.py
@@ -0,0 +1,615 @@
+#!/usr/bin/env python3
+#
+# Copyright 2021 Xiaomi Corporation (Author: Fangjun Kuang)
+#
+# See ../../../../LICENSE for clarification regarding multiple authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Usage (for a model trained on the far-field data):
+(1) greedy search
+./pruned_transducer_stateless2/decode.py \
+ --epoch 29 \
+ --avg 18 \
+ --exp-dir ./pruned_transducer_stateless2/exp \
+ --lang-dir data/lang_char \
+ --max-duration 100 \
+ --decoding-method greedy_search
+
+(2) modified beam search
+./pruned_transducer_stateless2/decode.py \
+ --epoch 29 \
+ --avg 18 \
+ --exp-dir ./pruned_transducer_stateless2/exp \
+ --lang-dir data/lang_char \
+ --max-duration 100 \
+ --decoding-method modified_beam_search \
+ --beam-size 4
+
+(3) fast beam search
+./pruned_transducer_stateless2/decode.py \
+ --epoch 29 \
+ --avg 18 \
+ --exp-dir ./pruned_transducer_stateless2/exp \
+ --lang-dir data/lang_char \
+ --max-duration 1500 \
+ --decoding-method fast_beam_search \
+ --beam 4 \
+ --max-contexts 4 \
+ --max-states 8
+"""
+
+
+import argparse
+import logging
+from collections import defaultdict
+from pathlib import Path
+from typing import Dict, List, Optional, Tuple
+
+import k2
+import torch
+import torch.nn as nn
+from asr_datamodule import AlimeetingAsrDataModule
+from beam_search import (
+ beam_search,
+ fast_beam_search_one_best,
+ greedy_search,
+ greedy_search_batch,
+ modified_beam_search,
+)
+from lhotse.cut import Cut
+from train import get_params, get_transducer_model
+
+from icefall.checkpoint import (
+ average_checkpoints,
+ find_checkpoints,
+ load_checkpoint,
+)
+from icefall.lexicon import Lexicon
+from icefall.utils import (
+ AttributeDict,
+ setup_logger,
+ store_transcripts,
+ write_error_stats,
+)
+
+
+def get_parser():
+ parser = argparse.ArgumentParser(
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter
+ )
+
+ parser.add_argument(
+ "--epoch",
+ type=int,
+ default=28,
+ help="It specifies the checkpoint to use for decoding."
+ "Note: Epoch counts from 0.",
+ )
+
+ parser.add_argument(
+ "--batch",
+ type=int,
+ default=None,
+ help="It specifies the batch checkpoint to use for decoding."
+ "Note: Epoch counts from 0.",
+ )
+
+ parser.add_argument(
+ "--avg",
+ type=int,
+ default=15,
+ help="Number of checkpoints to average. Automatically select "
+ "consecutive checkpoints before the checkpoint specified by "
+ "'--epoch'. ",
+ )
+
+ parser.add_argument(
+ "--avg-last-n",
+ type=int,
+ default=0,
+ help="""If positive, --epoch and --avg are ignored and it
+ will use the last n checkpoints exp_dir/checkpoint-xxx.pt
+ where xxx is the number of processed batches while
+ saving that checkpoint.
+ """,
+ )
+
+ parser.add_argument(
+ "--exp-dir",
+ type=str,
+ default="pruned_transducer_stateless2/exp",
+ help="The experiment dir",
+ )
+
+ parser.add_argument(
+ "--lang-dir",
+ type=str,
+ default="data/lang_char",
+ help="""The lang dir
+ It contains language related input files such as
+ "lexicon.txt"
+ """,
+ )
+
+ parser.add_argument(
+ "--decoding-method",
+ type=str,
+ default="greedy_search",
+ help="""Possible values are:
+ - greedy_search
+ - beam_search
+ - modified_beam_search
+ - fast_beam_search
+ """,
+ )
+
+ parser.add_argument(
+ "--beam-size",
+ type=int,
+ default=4,
+ help="""An interger indicating how many candidates we will keep for each
+ frame. Used only when --decoding-method is beam_search or
+ modified_beam_search.""",
+ )
+
+ parser.add_argument(
+ "--beam",
+ type=float,
+ default=4,
+ help="""A floating point value to calculate the cutoff score during beam
+ search (i.e., `cutoff = max-score - beam`), which is the same as the
+ `beam` in Kaldi.
+ Used only when --decoding-method is fast_beam_search""",
+ )
+
+ parser.add_argument(
+ "--max-contexts",
+ type=int,
+ default=4,
+ help="""Used only when --decoding-method is
+ fast_beam_search""",
+ )
+
+ parser.add_argument(
+ "--max-states",
+ type=int,
+ default=8,
+ help="""Used only when --decoding-method is
+ fast_beam_search""",
+ )
+
+ parser.add_argument(
+ "--context-size",
+ type=int,
+ default=2,
+ help="The context size in the decoder. 1 means bigram; "
+ "2 means tri-gram",
+ )
+ parser.add_argument(
+ "--max-sym-per-frame",
+ type=int,
+ default=1,
+ help="""Maximum number of symbols per frame.
+ Used only when --decoding_method is greedy_search""",
+ )
+
+ return parser
+
+
+def decode_one_batch(
+ params: AttributeDict,
+ model: nn.Module,
+ lexicon: Lexicon,
+ batch: dict,
+ decoding_graph: Optional[k2.Fsa] = None,
+) -> Dict[str, List[List[str]]]:
+ """Decode one batch and return the result in a dict. The dict has the
+ following format:
+
+ - key: It indicates the setting used for decoding. For example,
+ if greedy_search is used, it would be "greedy_search"
+ If beam search with a beam size of 7 is used, it would be
+ "beam_7"
+ - value: It contains the decoding result. `len(value)` equals the
+ batch size. `value[i]` is the decoding result for the i-th
+ utterance in the given batch.
+ Args:
+ params:
+ It's the return value of :func:`get_params`.
+ model:
+ The neural model.
+ batch:
+ It is the return value from iterating
+ `lhotse.dataset.K2SpeechRecognitionDataset`. See its documentation
+ for the format of the `batch`.
+ decoding_graph:
+ The decoding graph. Can be either a `k2.trivial_graph` or HLG. Used
+ only when --decoding_method is fast_beam_search.
+ Returns:
+ Return the decoding result. See above description for the format of
+ the returned dict.
+ """
+ device = model.device
+ feature = batch["inputs"]
+ assert feature.ndim == 3
+
+ feature = feature.to(device)
+ # at entry, feature is (N, T, C)
+
+ supervisions = batch["supervisions"]
+ feature_lens = supervisions["num_frames"].to(device)
+ encoder_out, encoder_out_lens = model.encoder(
+ x=feature, x_lens=feature_lens
+ )
+ hyps = []
+
+ if params.decoding_method == "fast_beam_search":
+ hyp_tokens = fast_beam_search_one_best(
+ model=model,
+ decoding_graph=decoding_graph,
+ encoder_out=encoder_out,
+ encoder_out_lens=encoder_out_lens,
+ beam=params.beam,
+ max_contexts=params.max_contexts,
+ max_states=params.max_states,
+ )
+ for i in range(encoder_out.size(0)):
+ hyps.append([lexicon.token_table[idx] for idx in hyp_tokens[i]])
+ elif (
+ params.decoding_method == "greedy_search"
+ and params.max_sym_per_frame == 1
+ ):
+ hyp_tokens = greedy_search_batch(
+ model=model,
+ encoder_out=encoder_out,
+ encoder_out_lens=encoder_out_lens,
+ )
+ for i in range(encoder_out.size(0)):
+ hyps.append([lexicon.token_table[idx] for idx in hyp_tokens[i]])
+ elif params.decoding_method == "modified_beam_search":
+ hyp_tokens = modified_beam_search(
+ model=model,
+ encoder_out=encoder_out,
+ encoder_out_lens=encoder_out_lens,
+ beam=params.beam_size,
+ )
+ for i in range(encoder_out.size(0)):
+ hyps.append([lexicon.token_table[idx] for idx in hyp_tokens[i]])
+ else:
+ batch_size = encoder_out.size(0)
+
+ for i in range(batch_size):
+ # fmt: off
+ encoder_out_i = encoder_out[i:i+1, :encoder_out_lens[i]]
+ # fmt: on
+ if params.decoding_method == "greedy_search":
+ hyp = greedy_search(
+ model=model,
+ encoder_out=encoder_out_i,
+ max_sym_per_frame=params.max_sym_per_frame,
+ )
+ elif params.decoding_method == "beam_search":
+ hyp = beam_search(
+ model=model,
+ encoder_out=encoder_out_i,
+ beam=params.beam_size,
+ )
+ else:
+ raise ValueError(
+ f"Unsupported decoding method: {params.decoding_method}"
+ )
+ hyps.append([lexicon.token_table[idx] for idx in hyp])
+
+ if params.decoding_method == "greedy_search":
+ return {"greedy_search": hyps}
+ elif params.decoding_method == "fast_beam_search":
+ return {
+ (
+ f"beam_{params.beam}_"
+ f"max_contexts_{params.max_contexts}_"
+ f"max_states_{params.max_states}"
+ ): hyps
+ }
+ else:
+ return {f"beam_size_{params.beam_size}": hyps}
+
+
+def decode_dataset(
+ dl: torch.utils.data.DataLoader,
+ params: AttributeDict,
+ model: nn.Module,
+ lexicon: Lexicon,
+ decoding_graph: Optional[k2.Fsa] = None,
+) -> Dict[str, List[Tuple[List[str], List[str]]]]:
+ """Decode dataset.
+
+ Args:
+ dl:
+ PyTorch's dataloader containing the dataset to decode.
+ params:
+ It is returned by :func:`get_params`.
+ model:
+ The neural model.
+ decoding_graph:
+ The decoding graph. Can be either a `k2.trivial_graph` or HLG. Used
+ only when --decoding_method is fast_beam_search.
+ Returns:
+ Return a dict, whose key may be "greedy_search" if greedy search
+ is used, or it may be "beam_7" if beam size of 7 is used.
+ Its value is a list of tuples. Each tuple contains two elements:
+ The first is the reference transcript, and the second is the
+ predicted result.
+ """
+ num_cuts = 0
+
+ try:
+ num_batches = len(dl)
+ except TypeError:
+ num_batches = "?"
+
+ if params.decoding_method == "greedy_search":
+ log_interval = 100
+ else:
+ log_interval = 50
+
+ results = defaultdict(list)
+ for batch_idx, batch in enumerate(dl):
+ texts = batch["supervisions"]["text"]
+ texts = [list(str(text).replace(" ", "")) for text in texts]
+
+ hyps_dict = decode_one_batch(
+ params=params,
+ model=model,
+ lexicon=lexicon,
+ decoding_graph=decoding_graph,
+ batch=batch,
+ )
+
+ for name, hyps in hyps_dict.items():
+ this_batch = []
+ assert len(hyps) == len(texts)
+ for hyp_words, ref_text in zip(hyps, texts):
+ this_batch.append((ref_text, hyp_words))
+
+ results[name].extend(this_batch)
+
+ num_cuts += len(texts)
+
+ if batch_idx % log_interval == 0:
+ batch_str = f"{batch_idx}/{num_batches}"
+
+ logging.info(
+ f"batch {batch_str}, cuts processed until now is {num_cuts}"
+ )
+ return results
+
+
+def save_results(
+ params: AttributeDict,
+ test_set_name: str,
+ results_dict: Dict[str, List[Tuple[List[int], List[int]]]],
+):
+ test_set_wers = dict()
+ for key, results in results_dict.items():
+ recog_path = (
+ params.res_dir / f"recogs-{test_set_name}-{key}-{params.suffix}.txt"
+ )
+ store_transcripts(filename=recog_path, texts=results)
+ logging.info(f"The transcripts are stored in {recog_path}")
+
+ # The following prints out WERs, per-word error statistics and aligned
+ # ref/hyp pairs.
+ errs_filename = (
+ params.res_dir / f"errs-{test_set_name}-{key}-{params.suffix}.txt"
+ )
+ with open(errs_filename, "w") as f:
+ wer = write_error_stats(
+ f, f"{test_set_name}-{key}", results, enable_log=True
+ )
+ test_set_wers[key] = wer
+
+ logging.info("Wrote detailed error stats to {}".format(errs_filename))
+
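+    # Sort the settings by WER so that the best-performing one comes first.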
+ test_set_wers = sorted(test_set_wers.items(), key=lambda x: x[1])
+ errs_info = (
+ params.res_dir
+ / f"wer-summary-{test_set_name}-{key}-{params.suffix}.txt"
+ )
+ with open(errs_info, "w") as f:
+ print("settings\tWER", file=f)
+ for key, val in test_set_wers:
+ print("{}\t{}".format(key, val), file=f)
+
+ s = "\nFor {}, WER of different settings are:\n".format(test_set_name)
+ note = "\tbest for {}".format(test_set_name)
+ for key, val in test_set_wers:
+ s += "{}\t{}{}\n".format(key, val, note)
+ note = ""
+ logging.info(s)
+
+
+@torch.no_grad()
+def main():
+ parser = get_parser()
+ AlimeetingAsrDataModule.add_arguments(parser)
+ args = parser.parse_args()
+ args.exp_dir = Path(args.exp_dir)
+
+ params = get_params()
+ params.update(vars(args))
+
+ assert params.decoding_method in (
+ "greedy_search",
+ "beam_search",
+ "fast_beam_search",
+ "modified_beam_search",
+ )
+ params.res_dir = params.exp_dir / params.decoding_method
+
+ params.suffix = f"epoch-{params.epoch}-avg-{params.avg}"
+ if "fast_beam_search" in params.decoding_method:
+ params.suffix += f"-beam-{params.beam}"
+ params.suffix += f"-max-contexts-{params.max_contexts}"
+ params.suffix += f"-max-states-{params.max_states}"
+ elif "beam_search" in params.decoding_method:
+ params.suffix += f"-beam-{params.beam_size}"
+ else:
+ params.suffix += f"-context-{params.context_size}"
+ params.suffix += f"-max-sym-per-frame-{params.max_sym_per_frame}"
+
+ setup_logger(f"{params.res_dir}/log-decode-{params.suffix}")
+ logging.info("Decoding started")
+
+ device = torch.device("cpu")
+ if torch.cuda.is_available():
+ device = torch.device("cuda", 0)
+
+ logging.info(f"Device: {device}")
+
+ lexicon = Lexicon(params.lang_dir)
+    params.blank_id = lexicon.token_table["<blk>"]
+ params.vocab_size = max(lexicon.tokens) + 1
+
+ logging.info(params)
+
+ logging.info("About to create model")
+ model = get_transducer_model(params)
+
+ if params.avg_last_n > 0:
+ filenames = find_checkpoints(params.exp_dir)[: params.avg_last_n]
+ logging.info(f"averaging {filenames}")
+ model.to(device)
+ model.load_state_dict(average_checkpoints(filenames, device=device))
+ elif params.avg == 1:
+ load_checkpoint(f"{params.exp_dir}/epoch-{params.epoch}.pt", model)
+ elif params.batch is not None:
+ filenames = f"{params.exp_dir}/checkpoint-{params.batch}.pt"
+ logging.info(f"averaging {filenames}")
+ model.to(device)
+ model.load_state_dict(average_checkpoints([filenames], device=device))
+ else:
+ start = params.epoch - params.avg + 1
+ filenames = []
+ for i in range(start, params.epoch + 1):
+            if i >= 0:
+ filenames.append(f"{params.exp_dir}/epoch-{i}.pt")
+ logging.info(f"averaging {filenames}")
+ model.to(device)
+ model.load_state_dict(average_checkpoints(filenames, device=device))
+
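+    # Also save the averaged model so it can be reused later; note that the
+    # output path below is hard-coded for the epoch-29 / avg-18 setup.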
+ average = average_checkpoints(filenames, device=device)
+ checkpoint = {"model": average}
+ torch.save(
+ checkpoint,
+ "pruned_transducer_stateless2/exp/pretrained_epoch_29_avg_18.pt",
+ )
+
+ model.to(device)
+ model.eval()
+ model.device = device
+
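+    # For fast_beam_search, decoding uses a trivial graph that accepts any
+    # token sequence; the other decoding methods need no decoding graph.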
+ if params.decoding_method == "fast_beam_search":
+ decoding_graph = k2.trivial_graph(params.vocab_size - 1, device=device)
+ else:
+ decoding_graph = None
+
+ num_param = sum([p.numel() for p in model.parameters()])
+ logging.info(f"Number of model parameters: {num_param}")
+
+ # Note: Please use "pip install webdataset==0.1.103"
+    # to install webdataset.
+ import glob
+ import os
+
+ from lhotse import CutSet
+ from lhotse.dataset.webdataset import export_to_webdataset
+
+ alimeeting = AlimeetingAsrDataModule(args)
+
+ dev = "eval"
+ test = "test"
+
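+    # Export the eval/test cuts to WebDataset tar shards only once; later
+    # runs detect the existing shards and reuse them.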
+ if not os.path.exists(f"{dev}/shared-0.tar"):
+ os.makedirs(dev)
+ dev_cuts = alimeeting.valid_cuts()
+ export_to_webdataset(
+ dev_cuts,
+ output_path=f"{dev}/shared-%d.tar",
+ shard_size=300,
+ )
+
+ if not os.path.exists(f"{test}/shared-0.tar"):
+ os.makedirs(test)
+ test_cuts = alimeeting.test_cuts()
+ export_to_webdataset(
+ test_cuts,
+ output_path=f"{test}/shared-%d.tar",
+ shard_size=300,
+ )
+
+ dev_shards = [
+ str(path)
+ for path in sorted(glob.glob(os.path.join(dev, "shared-*.tar")))
+ ]
+ cuts_dev_webdataset = CutSet.from_webdataset(
+ dev_shards,
+ split_by_worker=True,
+ split_by_node=True,
+ shuffle_shards=True,
+ )
+
+ test_shards = [
+ str(path)
+ for path in sorted(glob.glob(os.path.join(test, "shared-*.tar")))
+ ]
+ cuts_test_webdataset = CutSet.from_webdataset(
+ test_shards,
+ split_by_worker=True,
+ split_by_node=True,
+ shuffle_shards=True,
+ )
+
+ def remove_short_and_long_utt(c: Cut):
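+        # At decoding time we only drop utterances shorter than 1 second;
+        # unlike training, long utterances are kept.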
+ return 1.0 <= c.duration
+
+ cuts_dev_webdataset = cuts_dev_webdataset.filter(remove_short_and_long_utt)
+ cuts_test_webdataset = cuts_test_webdataset.filter(
+ remove_short_and_long_utt
+ )
+
+ dev_dl = alimeeting.valid_dataloaders(cuts_dev_webdataset)
+ test_dl = alimeeting.test_dataloaders(cuts_test_webdataset)
+
+    test_sets = ["dev", "test"]
+    test_dls = [dev_dl, test_dl]
+
+    for test_set, test_dl in zip(test_sets, test_dls):
+ results_dict = decode_dataset(
+ dl=test_dl,
+ params=params,
+ model=model,
+ lexicon=lexicon,
+ decoding_graph=decoding_graph,
+ )
+ save_results(
+ params=params,
+ test_set_name=test_set,
+ results_dict=results_dict,
+ )
+
+ logging.info("Done!")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/egs/alimeeting/ASR/pruned_transducer_stateless2/decoder.py b/egs/alimeeting/ASR/pruned_transducer_stateless2/decoder.py
new file mode 120000
index 000000000..722e1c894
--- /dev/null
+++ b/egs/alimeeting/ASR/pruned_transducer_stateless2/decoder.py
@@ -0,0 +1 @@
+../../../librispeech/ASR/pruned_transducer_stateless2/decoder.py
\ No newline at end of file
diff --git a/egs/alimeeting/ASR/pruned_transducer_stateless2/encoder_interface.py b/egs/alimeeting/ASR/pruned_transducer_stateless2/encoder_interface.py
new file mode 120000
index 000000000..653c5b09a
--- /dev/null
+++ b/egs/alimeeting/ASR/pruned_transducer_stateless2/encoder_interface.py
@@ -0,0 +1 @@
+../../../librispeech/ASR/transducer_stateless/encoder_interface.py
\ No newline at end of file
diff --git a/egs/alimeeting/ASR/pruned_transducer_stateless2/export.py b/egs/alimeeting/ASR/pruned_transducer_stateless2/export.py
new file mode 100644
index 000000000..0a69e0a57
--- /dev/null
+++ b/egs/alimeeting/ASR/pruned_transducer_stateless2/export.py
@@ -0,0 +1,178 @@
+# Copyright 2021 Xiaomi Corporation (Author: Fangjun Kuang)
+#
+# See ../../../../LICENSE for clarification regarding multiple authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script converts several saved checkpoints
+# to a single one using model averaging.
+"""
+Usage:
+./pruned_transducer_stateless2/export.py \
+ --exp-dir ./pruned_transducer_stateless2/exp \
+ --lang-dir data/lang_char \
+ --epoch 29 \
+ --avg 18
+
+It will generate a file exp_dir/pretrained.pt
+
+To use the generated file with `pruned_transducer_stateless2/decode.py`,
+you can do:
+
+ cd /path/to/exp_dir
+ ln -s pretrained.pt epoch-9999.pt
+
+ cd /path/to/egs/alimeeting/ASR
+ ./pruned_transducer_stateless2/decode.py \
+ --exp-dir ./pruned_transducer_stateless2/exp \
+ --epoch 9999 \
+ --avg 1 \
+ --max-duration 100 \
+ --lang-dir data/lang_char
+"""
+
+import argparse
+import logging
+from pathlib import Path
+
+import torch
+from train import get_params, get_transducer_model
+
+from icefall.checkpoint import average_checkpoints, load_checkpoint
+from icefall.lexicon import Lexicon
+from icefall.utils import str2bool
+
+
+def get_parser():
+ parser = argparse.ArgumentParser(
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter
+ )
+
+ parser.add_argument(
+ "--epoch",
+ type=int,
+ default=28,
+ help="It specifies the checkpoint to use for decoding."
+ "Note: Epoch counts from 0.",
+ )
+
+ parser.add_argument(
+ "--avg",
+ type=int,
+ default=15,
+ help="Number of checkpoints to average. Automatically select "
+ "consecutive checkpoints before the checkpoint specified by "
+ "'--epoch'. ",
+ )
+
+ parser.add_argument(
+ "--exp-dir",
+ type=str,
+ default="pruned_transducer_stateless2/exp",
+ help="""It specifies the directory where all training related
+ files, e.g., checkpoints, log, etc, are saved
+ """,
+ )
+
+ parser.add_argument(
+ "--lang-dir",
+ type=str,
+ default="data/lang_char",
+ help="The lang dir",
+ )
+
+ parser.add_argument(
+ "--jit",
+ type=str2bool,
+ default=False,
+ help="""True to save a model after applying torch.jit.script.
+ """,
+ )
+
+ parser.add_argument(
+ "--context-size",
+ type=int,
+ default=2,
+ help="The context size in the decoder. 1 means bigram; "
+ "2 means tri-gram",
+ )
+
+ return parser
+
+
+def main():
+ args = get_parser().parse_args()
+ args.exp_dir = Path(args.exp_dir)
+
+    assert args.jit is False, "Support for torchscript will be added later"
+
+ params = get_params()
+ params.update(vars(args))
+
+ device = torch.device("cpu")
+ if torch.cuda.is_available():
+ device = torch.device("cuda", 0)
+
+ logging.info(f"device: {device}")
+
+ lexicon = Lexicon(params.lang_dir)
+
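+    # Token ID 0 is assumed to be the blank symbol in the char-based lang dir.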
+ params.blank_id = 0
+ params.vocab_size = max(lexicon.tokens) + 1
+
+ logging.info(params)
+
+ logging.info("About to create model")
+ model = get_transducer_model(params)
+
+ model.to(device)
+
+ if params.avg == 1:
+ load_checkpoint(f"{params.exp_dir}/epoch-{params.epoch}.pt", model)
+ else:
+ start = params.epoch - params.avg + 1
+ filenames = []
+ for i in range(start, params.epoch + 1):
+            if i >= 0:
+ filenames.append(f"{params.exp_dir}/epoch-{i}.pt")
+ logging.info(f"averaging {filenames}")
+ model.to(device)
+ model.load_state_dict(average_checkpoints(filenames, device=device))
+
+    model.to("cpu")
+    model.eval()
+
+ if params.jit:
+ logging.info("Using torch.jit.script")
+ model = torch.jit.script(model)
+ filename = params.exp_dir / "cpu_jit.pt"
+ model.save(str(filename))
+ logging.info(f"Saved to {filename}")
+ else:
+ logging.info("Not using torch.jit.script")
+ # Save it using a format so that it can be loaded
+ # by :func:`load_checkpoint`
+ filename = params.exp_dir / "pretrained.pt"
+ torch.save({"model": model.state_dict()}, str(filename))
+ logging.info(f"Saved to {filename}")
+
+
+if __name__ == "__main__":
+ formatter = (
+ "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s"
+ )
+
+ logging.basicConfig(format=formatter, level=logging.INFO)
+ main()
diff --git a/egs/alimeeting/ASR/pruned_transducer_stateless2/joiner.py b/egs/alimeeting/ASR/pruned_transducer_stateless2/joiner.py
new file mode 120000
index 000000000..9052f3cbb
--- /dev/null
+++ b/egs/alimeeting/ASR/pruned_transducer_stateless2/joiner.py
@@ -0,0 +1 @@
+../../../librispeech/ASR/pruned_transducer_stateless2/joiner.py
\ No newline at end of file
diff --git a/egs/alimeeting/ASR/pruned_transducer_stateless2/model.py b/egs/alimeeting/ASR/pruned_transducer_stateless2/model.py
new file mode 120000
index 000000000..a99e74334
--- /dev/null
+++ b/egs/alimeeting/ASR/pruned_transducer_stateless2/model.py
@@ -0,0 +1 @@
+../../../librispeech/ASR/pruned_transducer_stateless2/model.py
\ No newline at end of file
diff --git a/egs/alimeeting/ASR/pruned_transducer_stateless2/optim.py b/egs/alimeeting/ASR/pruned_transducer_stateless2/optim.py
new file mode 120000
index 000000000..0a2f285aa
--- /dev/null
+++ b/egs/alimeeting/ASR/pruned_transducer_stateless2/optim.py
@@ -0,0 +1 @@
+../../../librispeech/ASR/pruned_transducer_stateless2/optim.py
\ No newline at end of file
diff --git a/egs/alimeeting/ASR/pruned_transducer_stateless2/pretrained.py b/egs/alimeeting/ASR/pruned_transducer_stateless2/pretrained.py
new file mode 100644
index 000000000..93b1e1f57
--- /dev/null
+++ b/egs/alimeeting/ASR/pruned_transducer_stateless2/pretrained.py
@@ -0,0 +1,347 @@
+#!/usr/bin/env python3
+# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang)
+# 2022 Xiaomi Corp. (authors: Mingshuang Luo)
+#
+# See ../../../../LICENSE for clarification regarding multiple authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Here, the far-field (far) data is used for training. Usage:
+
+(1) greedy search
+./pruned_transducer_stateless2/pretrained.py \
+ --checkpoint ./pruned_transducer_stateless2/exp/pretrained.pt \
+ --lang-dir ./data/lang_char \
+ --decoding-method greedy_search \
+ --max-sym-per-frame 1 \
+ /path/to/foo.wav \
+ /path/to/bar.wav
+
+(2) modified beam search
+./pruned_transducer_stateless2/pretrained.py \
+ --checkpoint ./pruned_transducer_stateless2/exp/pretrained.pt \
+ --lang-dir ./data/lang_char \
+ --decoding-method modified_beam_search \
+ --beam-size 4 \
+ /path/to/foo.wav \
+ /path/to/bar.wav
+
+(3) fast beam search
+./pruned_transducer_stateless2/pretrained.py \
+  --checkpoint ./pruned_transducer_stateless2/exp/pretrained.pt \
+ --lang-dir ./data/lang_char \
+ --decoding-method fast_beam_search \
+ --beam 4 \
+ --max-contexts 4 \
+ --max-states 8 \
+ /path/to/foo.wav \
+ /path/to/bar.wav
+
+You can also use `./pruned_transducer_stateless2/exp/epoch-xx.pt`.
+
+Note: ./pruned_transducer_stateless2/exp/pretrained.pt is generated by
+./pruned_transducer_stateless2/export.py
+"""
+
+
+import argparse
+import logging
+import math
+from typing import List
+
+import k2
+import kaldifeat
+import torch
+import torchaudio
+from beam_search import (
+ beam_search,
+ fast_beam_search_one_best,
+ greedy_search,
+ greedy_search_batch,
+ modified_beam_search,
+)
+from torch.nn.utils.rnn import pad_sequence
+from train import get_params, get_transducer_model
+
+from icefall.lexicon import Lexicon
+
+
+def get_parser():
+ parser = argparse.ArgumentParser(
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter
+ )
+
+ parser.add_argument(
+ "--checkpoint",
+ type=str,
+ required=True,
+ help="Path to the checkpoint. "
+ "The checkpoint is assumed to be saved by "
+ "icefall.checkpoint.save_checkpoint().",
+ )
+
+ parser.add_argument(
+ "--lang-dir",
+ type=str,
+ help="""Path to lang.
+ """,
+ )
+
+ parser.add_argument(
+ "--decoding-method",
+ type=str,
+ default="greedy_search",
+ help="""Possible values are:
+ - greedy_search
+ - modified_beam_search
+ - fast_beam_search
+ """,
+ )
+
+ parser.add_argument(
+ "sound_files",
+ type=str,
+ nargs="+",
+ help="The input sound file(s) to transcribe. "
+ "Supported formats are those supported by torchaudio.load(). "
+ "For example, wav and flac are supported. "
+ "The sample rate has to be 16kHz.",
+ )
+
+ parser.add_argument(
+ "--sample-rate",
+ type=int,
+ default=16000,
+ help="The sample rate of the input sound file",
+ )
+
+ parser.add_argument(
+ "--beam-size",
+ type=int,
+ default=4,
+ help="Used only when --method is beam_search and modified_beam_search ",
+ )
+
+ parser.add_argument(
+ "--beam",
+ type=float,
+ default=4,
+ help="""A floating point value to calculate the cutoff score during beam
+ search (i.e., `cutoff = max-score - beam`), which is the same as the
+ `beam` in Kaldi.
+ Used only when --decoding-method is fast_beam_search""",
+ )
+
+ parser.add_argument(
+ "--max-contexts",
+ type=int,
+ default=4,
+ help="""Used only when --decoding-method is
+ fast_beam_search""",
+ )
+
+ parser.add_argument(
+ "--max-states",
+ type=int,
+ default=8,
+ help="""Used only when --decoding-method is
+ fast_beam_search""",
+ )
+
+ parser.add_argument(
+ "--context-size",
+ type=int,
+ default=2,
+ help="The context size in the decoder. 1 means bigram; "
+ "2 means tri-gram",
+ )
+
+ parser.add_argument(
+ "--max-sym-per-frame",
+ type=int,
+ default=1,
+ help="""Maximum number of symbols per frame. Used only when
+        --decoding-method is greedy_search.
+ """,
+ )
+
+ return parser
+
+
+def read_sound_files(
+ filenames: List[str], expected_sample_rate: float
+) -> List[torch.Tensor]:
+ """Read a list of sound files into a list 1-D float32 torch tensors.
+ Args:
+ filenames:
+ A list of sound filenames.
+ expected_sample_rate:
+ The expected sample rate of the sound files.
+ Returns:
+ Return a list of 1-D float32 torch tensors.
+ """
+ ans = []
+ for f in filenames:
+ wave, sample_rate = torchaudio.load(f)
+ assert sample_rate == expected_sample_rate, (
+ f"expected sample rate: {expected_sample_rate}. "
+ f"Given: {sample_rate}"
+ )
+ # We use only the first channel
+ ans.append(wave[0])
+ return ans
+
+
+@torch.no_grad()
+def main():
+ parser = get_parser()
+ args = parser.parse_args()
+
+ params = get_params()
+
+ params.update(vars(args))
+
+ lexicon = Lexicon(params.lang_dir)
+    params.blank_id = lexicon.token_table["<blk>"]
+ params.vocab_size = max(lexicon.tokens) + 1
+
+ logging.info(f"{params}")
+
+ device = torch.device("cpu")
+ if torch.cuda.is_available():
+ device = torch.device("cuda", 0)
+
+ logging.info(f"device: {device}")
+
+ logging.info("Creating model")
+ model = get_transducer_model(params)
+
+ checkpoint = torch.load(args.checkpoint, map_location="cpu")
+ model.load_state_dict(checkpoint["model"], strict=False)
+ model.to(device)
+ model.eval()
+ model.device = device
+
+ if params.decoding_method == "fast_beam_search":
+ decoding_graph = k2.trivial_graph(params.vocab_size - 1, device=device)
+ else:
+ decoding_graph = None
+
+ logging.info("Constructing Fbank computer")
+ opts = kaldifeat.FbankOptions()
+ opts.device = device
+ opts.frame_opts.dither = 0
+ opts.frame_opts.snip_edges = False
+ opts.frame_opts.samp_freq = params.sample_rate
+ opts.mel_opts.num_bins = params.feature_dim
+
+ fbank = kaldifeat.Fbank(opts)
+
+ logging.info(f"Reading sound files: {params.sound_files}")
+ waves = read_sound_files(
+ filenames=params.sound_files, expected_sample_rate=params.sample_rate
+ )
+ waves = [w.to(device) for w in waves]
+
+ logging.info("Decoding started")
+ features = fbank(waves)
+ feature_lengths = [f.size(0) for f in features]
+
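+    # Pad in the time dimension with log(1e-10), i.e. a very small log-mel
+    # value, so that padding frames carry almost no energy.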
+ features = pad_sequence(
+ features, batch_first=True, padding_value=math.log(1e-10)
+ )
+
+ feature_lengths = torch.tensor(feature_lengths, device=device)
+
+ with torch.no_grad():
+ encoder_out, encoder_out_lens = model.encoder(
+ x=features, x_lens=feature_lengths
+ )
+
+ hyps = []
+ msg = f"Using {params.decoding_method}"
+ logging.info(msg)
+
+ if params.decoding_method == "fast_beam_search":
+ hyp_tokens = fast_beam_search_one_best(
+ model=model,
+ decoding_graph=decoding_graph,
+ encoder_out=encoder_out,
+ encoder_out_lens=encoder_out_lens,
+ beam=params.beam,
+ max_contexts=params.max_contexts,
+ max_states=params.max_states,
+ )
+ for i in range(encoder_out.size(0)):
+ hyps.append([lexicon.token_table[idx] for idx in hyp_tokens[i]])
+ elif (
+ params.decoding_method == "greedy_search"
+ and params.max_sym_per_frame == 1
+ ):
+ hyp_tokens = greedy_search_batch(
+ model=model,
+ encoder_out=encoder_out,
+ encoder_out_lens=encoder_out_lens,
+ )
+ for i in range(encoder_out.size(0)):
+ hyps.append([lexicon.token_table[idx] for idx in hyp_tokens[i]])
+ elif params.decoding_method == "modified_beam_search":
+ hyp_tokens = modified_beam_search(
+ model=model,
+ encoder_out=encoder_out,
+ encoder_out_lens=encoder_out_lens,
+ beam=params.beam_size,
+ )
+ for i in range(encoder_out.size(0)):
+ hyps.append([lexicon.token_table[idx] for idx in hyp_tokens[i]])
+ else:
+ batch_size = encoder_out.size(0)
+
+ for i in range(batch_size):
+ # fmt: off
+ encoder_out_i = encoder_out[i:i+1, :encoder_out_lens[i]]
+ # fmt: on
+ if params.decoding_method == "greedy_search":
+ hyp = greedy_search(
+ model=model,
+ encoder_out=encoder_out_i,
+ max_sym_per_frame=params.max_sym_per_frame,
+ )
+ elif params.decoding_method == "beam_search":
+ hyp = beam_search(
+ model=model,
+ encoder_out=encoder_out_i,
+ beam=params.beam_size,
+ )
+ else:
+ raise ValueError(
+ f"Unsupported decoding method: {params.decoding_method}"
+ )
+ hyps.append([lexicon.token_table[idx] for idx in hyp])
+
+ s = "\n"
+ for filename, hyp in zip(params.sound_files, hyps):
+ words = " ".join(hyp)
+ s += f"{filename}:\n{words}\n\n"
+ logging.info(s)
+
+ logging.info("Decoding Done")
+
+
+if __name__ == "__main__":
+ formatter = (
+ "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s"
+ )
+
+ logging.basicConfig(format=formatter, level=logging.INFO)
+ main()
diff --git a/egs/alimeeting/ASR/pruned_transducer_stateless2/scaling.py b/egs/alimeeting/ASR/pruned_transducer_stateless2/scaling.py
new file mode 120000
index 000000000..c10cdfe12
--- /dev/null
+++ b/egs/alimeeting/ASR/pruned_transducer_stateless2/scaling.py
@@ -0,0 +1 @@
+../../../librispeech/ASR/pruned_transducer_stateless2/scaling.py
\ No newline at end of file
diff --git a/egs/alimeeting/ASR/pruned_transducer_stateless2/train.py b/egs/alimeeting/ASR/pruned_transducer_stateless2/train.py
new file mode 100644
index 000000000..81a0ede7f
--- /dev/null
+++ b/egs/alimeeting/ASR/pruned_transducer_stateless2/train.py
@@ -0,0 +1,972 @@
+#!/usr/bin/env python3
+# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang,
+# Wei Kang
+# Mingshuang Luo)
+#
+# See ../../../../LICENSE for clarification regarding multiple authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Usage:
+
+export CUDA_VISIBLE_DEVICES="0,1,2,3"
+
+./pruned_transducer_stateless2/train.py \
+ --world-size 4 \
+ --num-epochs 30 \
+ --start-epoch 0 \
+ --exp-dir pruned_transducer_stateless2/exp \
+ --lang-dir data/lang_char \
+ --max-duration 220 \
+ --save-every-n 1000
+
+# For mixed precision training:
+
+./pruned_transducer_stateless2/train.py \
+ --world-size 4 \
+ --num-epochs 30 \
+ --start-epoch 0 \
+ --exp-dir pruned_transducer_stateless2/exp \
+ --lang-dir data/lang_char \
+ --max-duration 220 \
+  --save-every-n 1000 \
+ --use-fp16 True
+
+"""
+
+import argparse
+import logging
+import os
+import warnings
+from pathlib import Path
+from shutil import copyfile
+from typing import Any, Dict, Optional, Tuple, Union
+
+import k2
+import optim
+import torch
+import torch.multiprocessing as mp
+import torch.nn as nn
+from asr_datamodule import AlimeetingAsrDataModule
+from conformer import Conformer
+from decoder import Decoder
+from joiner import Joiner
+from lhotse.cut import Cut
+from lhotse.dataset.sampling.base import CutSampler
+from lhotse.utils import fix_random_seed
+from model import Transducer
+from optim import Eden, Eve
+from torch import Tensor
+from torch.cuda.amp import GradScaler
+from torch.nn.parallel import DistributedDataParallel as DDP
+from torch.utils.tensorboard import SummaryWriter
+
+from icefall import diagnostics
+from icefall.char_graph_compiler import CharCtcTrainingGraphCompiler
+from icefall.checkpoint import load_checkpoint, remove_checkpoints
+from icefall.checkpoint import save_checkpoint as save_checkpoint_impl
+from icefall.checkpoint import save_checkpoint_with_global_batch_idx
+from icefall.dist import cleanup_dist, setup_dist
+from icefall.env import get_env_info
+from icefall.lexicon import Lexicon
+from icefall.utils import AttributeDict, MetricsTracker, setup_logger, str2bool
+
+LRSchedulerType = Union[
+ torch.optim.lr_scheduler._LRScheduler, optim.LRScheduler
+]
+
+os.environ["CUDA_LAUNCH_BLOCKING"] = "1"
+
+
+def get_parser():
+ parser = argparse.ArgumentParser(
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter
+ )
+
+ parser.add_argument(
+ "--world-size",
+ type=int,
+ default=1,
+ help="Number of GPUs for DDP training.",
+ )
+
+ parser.add_argument(
+ "--master-port",
+ type=int,
+ default=12359,
+ help="Master port to use for DDP training.",
+ )
+
+ parser.add_argument(
+ "--tensorboard",
+ type=str2bool,
+ default=True,
+ help="Should various information be logged in tensorboard.",
+ )
+
+ parser.add_argument(
+ "--num-epochs",
+ type=int,
+ default=30,
+ help="Number of epochs to train.",
+ )
+
+ parser.add_argument(
+ "--start-epoch",
+ type=int,
+ default=0,
+ help="""Resume training from from this epoch.
+ If it is positive, it will load checkpoint from
+ transducer_stateless2/exp/epoch-{start_epoch-1}.pt
+ """,
+ )
+
+ parser.add_argument(
+ "--start-batch",
+ type=int,
+ default=0,
+ help="""If positive, --start-epoch is ignored and
+ it loads the checkpoint from exp-dir/checkpoint-{start_batch}.pt
+ """,
+ )
+
+ parser.add_argument(
+ "--exp-dir",
+ type=str,
+ default="pruned_transducer_stateless2/exp",
+ help="""The experiment dir.
+ It specifies the directory where all training related
+ files, e.g., checkpoints, log, etc, are saved
+ """,
+ )
+
+ parser.add_argument(
+ "--lang-dir",
+ type=str,
+ default="data/lang_char",
+ help="""The lang dir
+ It contains language related input files such as
+ "lexicon.txt"
+ """,
+ )
+
+ parser.add_argument(
+ "--initial-lr",
+ type=float,
+ default=0.003,
+ help="The initial learning rate. This value should not need to be changed.",
+ )
+
+ parser.add_argument(
+ "--lr-batches",
+ type=float,
+ default=5000,
+ help="""Number of steps that affects how rapidly the learning rate decreases.
+        We suggest not changing this.""",
+ )
+
+ parser.add_argument(
+ "--lr-epochs",
+ type=float,
+ default=6,
+ help="""Number of epochs that affects how rapidly the learning rate decreases.
+ """,
+ )
+
+ parser.add_argument(
+ "--context-size",
+ type=int,
+ default=2,
+ help="The context size in the decoder. 1 means bigram; "
+ "2 means tri-gram",
+ )
+
+ parser.add_argument(
+ "--prune-range",
+ type=int,
+ default=5,
+ help="The prune range for rnnt loss, it means how many symbols(context)"
+ "we are using to compute the loss",
+ )
+
+ parser.add_argument(
+ "--lm-scale",
+ type=float,
+ default=0.25,
+ help="The scale to smooth the loss with lm "
+ "(output of prediction network) part.",
+ )
+
+ parser.add_argument(
+ "--am-scale",
+ type=float,
+ default=0.0,
+ help="The scale to smooth the loss with am (output of encoder network)"
+ "part.",
+ )
+
+ parser.add_argument(
+ "--simple-loss-scale",
+ type=float,
+ default=0.5,
+ help="To get pruning ranges, we will calculate a simple version"
+ "loss(joiner is just addition), this simple loss also uses for"
+ "training (as a regularization item). We will scale the simple loss"
+ "with this parameter before adding to the final loss.",
+ )
+
+ parser.add_argument(
+ "--seed",
+ type=int,
+ default=42,
+ help="The seed for random generators intended for reproducibility",
+ )
+
+ parser.add_argument(
+ "--print-diagnostics",
+ type=str2bool,
+ default=False,
+ help="Accumulate stats on activations, print them and exit.",
+ )
+
+ parser.add_argument(
+ "--save-every-n",
+ type=int,
+ default=8000,
+ help="""Save checkpoint after processing this number of batches"
+ periodically. We save checkpoint to exp-dir/ whenever
+ params.batch_idx_train % save_every_n == 0. The checkpoint filename
+ has the form: f'exp-dir/checkpoint-{params.batch_idx_train}.pt'
+ Note: It also saves checkpoint to `exp-dir/epoch-xxx.pt` at the
+ end of each epoch where `xxx` is the epoch number counting from 0.
+ """,
+ )
+
+ parser.add_argument(
+ "--keep-last-k",
+ type=int,
+ default=20,
+ help="""Only keep this number of checkpoints on disk.
+ For instance, if it is 3, there are only 3 checkpoints
+ in the exp-dir with filenames `checkpoint-xxx.pt`.
+ It does not affect checkpoints with name `epoch-xxx.pt`.
+ """,
+ )
+
+ parser.add_argument(
+ "--use-fp16",
+ type=str2bool,
+ default=False,
+ help="Whether to use half precision training.",
+ )
+
+ return parser
+
+
+def get_params() -> AttributeDict:
+ """Return a dict containing training parameters.
+ All training related parameters that are not passed from the commandline
+ are saved in the variable `params`.
+ Commandline options are merged into `params` after they are parsed, so
+ you can also access them via `params`.
+ Explanation of options saved in `params`:
+ - best_train_loss: Best training loss so far. It is used to select
+ the model that has the lowest training loss. It is
+ updated during the training.
+ - best_valid_loss: Best validation loss so far. It is used to select
+ the model that has the lowest validation loss. It is
+ updated during the training.
+ - best_train_epoch: It is the epoch that has the best training loss.
+ - best_valid_epoch: It is the epoch that has the best validation loss.
+    - batch_idx_train: Used for writing statistics to tensorboard. It
+                       contains the number of batches trained so far across
+                       epochs.
+    - log_interval:  Print training loss if batch_idx % log_interval is 0
+ - reset_interval: Reset statistics if batch_idx % reset_interval is 0
+ - valid_interval: Run validation if batch_idx % valid_interval is 0
+ - feature_dim: The model input dim. It has to match the one used
+ in computing features.
+ - subsampling_factor: The subsampling factor for the model.
+ - encoder_dim: Hidden dim for multi-head attention model.
+    - num_decoder_layers: Number of decoder layers of the transformer decoder.
+ - warm_step: The warm_step for Noam optimizer.
+ """
+ params = AttributeDict(
+ {
+ "best_train_loss": float("inf"),
+ "best_valid_loss": float("inf"),
+ "best_train_epoch": -1,
+ "best_valid_epoch": -1,
+ "batch_idx_train": 10,
+ "log_interval": 1,
+ "reset_interval": 200,
+ "valid_interval": 400,
+ # parameters for conformer
+ "feature_dim": 80,
+ "subsampling_factor": 4,
+ "encoder_dim": 512,
+ "nhead": 8,
+ "dim_feedforward": 2048,
+ "num_encoder_layers": 12,
+ # parameters for decoder
+ "decoder_dim": 512,
+ # parameters for joiner
+ "joiner_dim": 512,
+ # parameters for Noam
+ "model_warm_step": 200,
+ "env_info": get_env_info(),
+ }
+ )
+
+ return params
+
+
+def get_encoder_model(params: AttributeDict) -> nn.Module:
+ # TODO: We can add an option to switch between Conformer and Transformer
+ encoder = Conformer(
+ num_features=params.feature_dim,
+ subsampling_factor=params.subsampling_factor,
+ d_model=params.encoder_dim,
+ nhead=params.nhead,
+ dim_feedforward=params.dim_feedforward,
+ num_encoder_layers=params.num_encoder_layers,
+ )
+ return encoder
+
+
+def get_decoder_model(params: AttributeDict) -> nn.Module:
+ decoder = Decoder(
+ vocab_size=params.vocab_size,
+ decoder_dim=params.decoder_dim,
+ blank_id=params.blank_id,
+ context_size=params.context_size,
+ )
+ return decoder
+
+
+def get_joiner_model(params: AttributeDict) -> nn.Module:
+ joiner = Joiner(
+ encoder_dim=params.encoder_dim,
+ decoder_dim=params.decoder_dim,
+ joiner_dim=params.joiner_dim,
+ vocab_size=params.vocab_size,
+ )
+ return joiner
+
+
+def get_transducer_model(params: AttributeDict) -> nn.Module:
+ encoder = get_encoder_model(params)
+ decoder = get_decoder_model(params)
+ joiner = get_joiner_model(params)
+
+ model = Transducer(
+ encoder=encoder,
+ decoder=decoder,
+ joiner=joiner,
+ encoder_dim=params.encoder_dim,
+ decoder_dim=params.decoder_dim,
+ joiner_dim=params.joiner_dim,
+ vocab_size=params.vocab_size,
+ )
+ return model
+
+
+def load_checkpoint_if_available(
+ params: AttributeDict,
+ model: nn.Module,
+ optimizer: Optional[torch.optim.Optimizer] = None,
+ scheduler: Optional[LRSchedulerType] = None,
+) -> Optional[Dict[str, Any]]:
+ """Load checkpoint from file.
+ If params.start_batch is positive, it will load the checkpoint from
+ `params.exp_dir/checkpoint-{params.start_batch}.pt`. Otherwise, if
+ params.start_epoch is positive, it will load the checkpoint from
+ `params.start_epoch - 1`.
+ Apart from loading state dict for `model` and `optimizer` it also updates
+ `best_train_epoch`, `best_train_loss`, `best_valid_epoch`,
+ and `best_valid_loss` in `params`.
+ Args:
+ params:
+ The return value of :func:`get_params`.
+ model:
+ The training model.
+ optimizer:
+ The optimizer that we are using.
+ scheduler:
+ The scheduler that we are using.
+ Returns:
+ Return a dict containing previously saved training info.
+ """
+ if params.start_batch > 0:
+ filename = params.exp_dir / f"checkpoint-{params.start_batch}.pt"
+ elif params.start_epoch > 0:
+ filename = params.exp_dir / f"epoch-{params.start_epoch-1}.pt"
+ else:
+ return None
+
+ assert filename.is_file(), f"{filename} does not exist!"
+
+ saved_params = load_checkpoint(
+ filename,
+ model=model,
+ optimizer=optimizer,
+ scheduler=scheduler,
+ )
+
+ keys = [
+ "best_train_epoch",
+ "best_valid_epoch",
+ "batch_idx_train",
+ "best_train_loss",
+ "best_valid_loss",
+ ]
+ for k in keys:
+ params[k] = saved_params[k]
+
+ if params.start_batch > 0:
+ if "cur_epoch" in saved_params:
+ params["start_epoch"] = saved_params["cur_epoch"]
+
+ return saved_params
+
+
+def save_checkpoint(
+ params: AttributeDict,
+ model: nn.Module,
+ optimizer: Optional[torch.optim.Optimizer] = None,
+ scheduler: Optional[LRSchedulerType] = None,
+ sampler: Optional[CutSampler] = None,
+ scaler: Optional[GradScaler] = None,
+ rank: int = 0,
+) -> None:
+ """Save model, optimizer, scheduler and training stats to file.
+ Args:
+ params:
+ It is returned by :func:`get_params`.
+ model:
+ The training model.
+ optimizer:
+ The optimizer used in the training.
+ sampler:
+ The sampler for the training dataset.
+ scaler:
+ The scaler used for mix precision training.
+ """
+ if rank != 0:
+ return
+ filename = params.exp_dir / f"epoch-{params.cur_epoch}.pt"
+ save_checkpoint_impl(
+ filename=filename,
+ model=model,
+ params=params,
+ optimizer=optimizer,
+ scheduler=scheduler,
+ sampler=sampler,
+ scaler=scaler,
+ rank=rank,
+ )
+
+ if params.best_train_epoch == params.cur_epoch:
+ best_train_filename = params.exp_dir / "best-train-loss.pt"
+ copyfile(src=filename, dst=best_train_filename)
+
+ if params.best_valid_epoch == params.cur_epoch:
+ best_valid_filename = params.exp_dir / "best-valid-loss.pt"
+ copyfile(src=filename, dst=best_valid_filename)
+
+
+def compute_loss(
+ params: AttributeDict,
+ model: nn.Module,
+ graph_compiler: CharCtcTrainingGraphCompiler,
+ batch: dict,
+ is_training: bool,
+ warmup: float = 1.0,
+) -> Tuple[Tensor, MetricsTracker]:
+ """
+    Compute the transducer (RNN-T) loss given the model and its inputs.
+ Args:
+ params:
+ Parameters for training. See :func:`get_params`.
+ model:
+ The model for training. It is an instance of Conformer in our case.
+ batch:
+ A batch of data. See `lhotse.dataset.K2SpeechRecognitionDataset()`
+ for the content in it.
+ is_training:
+ True for training. False for validation. When it is True, this
+ function enables autograd during computation; when it is False, it
+ disables autograd.
+ warmup: a floating point value which increases throughout training;
+ values >= 1.0 are fully warmed up and have all modules present.
+ """
+ device = model.device
+ feature = batch["inputs"]
+ # at entry, feature is (N, T, C)
+ assert feature.ndim == 3
+ feature = feature.to(device)
+
+ supervisions = batch["supervisions"]
+ feature_lens = supervisions["num_frames"].to(device)
+
+ texts = batch["supervisions"]["text"]
+
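+    # Convert the transcripts to token IDs; a k2.RaggedTensor is used because
+    # the utterances in a batch have different lengths.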
+ y = graph_compiler.texts_to_ids(texts)
+ if type(y) == list:
+ y = k2.RaggedTensor(y).to(device)
+ else:
+ y = y.to(device)
+
+ with torch.set_grad_enabled(is_training):
+ simple_loss, pruned_loss = model(
+ x=feature,
+ x_lens=feature_lens,
+ y=y,
+ prune_range=params.prune_range,
+ am_scale=params.am_scale,
+ lm_scale=params.lm_scale,
+ warmup=warmup,
+ )
+ # after the main warmup step, we keep pruned_loss_scale small
+ # for the same amount of time (model_warm_step), to avoid
+ # overwhelming the simple_loss and causing it to diverge,
+ # in case it had not fully learned the alignment yet.
+ pruned_loss_scale = (
+ 0.0
+ if warmup < 1.0
+ else (0.1 if warmup > 1.0 and warmup < 2.0 else 1.0)
+ )
+ loss = (
+ params.simple_loss_scale * simple_loss
+ + pruned_loss_scale * pruned_loss
+ )
+ assert loss.requires_grad == is_training
+
+ info = MetricsTracker()
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore")
+ info["frames"] = (
+ (feature_lens // params.subsampling_factor).sum().item()
+ )
+
+ # Note: We use reduction=sum while computing the loss.
+ info["loss"] = loss.detach().cpu().item()
+ info["simple_loss"] = simple_loss.detach().cpu().item()
+ info["pruned_loss"] = pruned_loss.detach().cpu().item()
+
+ return loss, info
+
+
+def compute_validation_loss(
+ params: AttributeDict,
+ model: nn.Module,
+ graph_compiler: CharCtcTrainingGraphCompiler,
+ valid_dl: torch.utils.data.DataLoader,
+ world_size: int = 1,
+) -> MetricsTracker:
+ """Run the validation process."""
+ model.eval()
+
+ tot_loss = MetricsTracker()
+
+ for batch_idx, batch in enumerate(valid_dl):
+ loss, loss_info = compute_loss(
+ params=params,
+ model=model,
+ graph_compiler=graph_compiler,
+ batch=batch,
+ is_training=False,
+ )
+ assert loss.requires_grad is False
+ tot_loss = tot_loss + loss_info
+
+ if world_size > 1:
+ tot_loss.reduce(loss.device)
+
+ loss_value = tot_loss["loss"] / tot_loss["frames"]
+ if loss_value < params.best_valid_loss:
+ params.best_valid_epoch = params.cur_epoch
+ params.best_valid_loss = loss_value
+
+ return tot_loss
+
+
+def train_one_epoch(
+ params: AttributeDict,
+ model: nn.Module,
+ optimizer: torch.optim.Optimizer,
+ scheduler: LRSchedulerType,
+ graph_compiler: CharCtcTrainingGraphCompiler,
+ train_dl: torch.utils.data.DataLoader,
+ valid_dl: torch.utils.data.DataLoader,
+ scaler: GradScaler,
+ tb_writer: Optional[SummaryWriter] = None,
+ world_size: int = 1,
+ rank: int = 0,
+) -> None:
+ """Train the model for one epoch.
+ The training loss from the mean of all frames is saved in
+ `params.train_loss`. It runs the validation process every
+ `params.valid_interval` batches.
+ Args:
+ params:
+ It is returned by :func:`get_params`.
+ model:
+ The model for training.
+ optimizer:
+ The optimizer we are using.
+ scheduler:
+        The learning rate scheduler; step_batch() is called after every batch.
+ train_dl:
+ Dataloader for the training dataset.
+ valid_dl:
+ Dataloader for the validation dataset.
+ scaler:
+ The scaler used for mix precision training.
+ tb_writer:
+ Writer to write log messages to tensorboard.
+ world_size:
+        Number of GPUs used in DDP training. If it is 1, DDP is disabled.
+      rank:
+        The rank of the process in DDP training. If no DDP is used, it should
+ be set to 0.
+ """
+ model.train()
+
+ tot_loss = MetricsTracker()
+
+ for batch_idx, batch in enumerate(train_dl):
+
+ params.batch_idx_train += 1
+ batch_size = len(batch["supervisions"]["text"])
+
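+        # warmup grows linearly with the number of processed batches and
+        # reaches 1.0 (fully warmed up) after model_warm_step batches.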
+ with torch.cuda.amp.autocast(enabled=params.use_fp16):
+ loss, loss_info = compute_loss(
+ params=params,
+ model=model,
+ graph_compiler=graph_compiler,
+ batch=batch,
+ is_training=True,
+ warmup=(params.batch_idx_train / params.model_warm_step),
+ )
+ # summary stats
+ tot_loss = (tot_loss * (1 - 1 / params.reset_interval)) + loss_info
+
+ # NOTE: We use reduction==sum and loss is computed over utterances
+ # in the batch and there is no normalization to it so far.
+ scaler.scale(loss).backward()
+ scheduler.step_batch(params.batch_idx_train)
+ scaler.step(optimizer)
+ scaler.update()
+ optimizer.zero_grad()
+
+ if params.print_diagnostics and batch_idx == 5:
+ return
+
+ if (
+ params.batch_idx_train > 0
+ and params.batch_idx_train % params.save_every_n == 0
+ ):
+ save_checkpoint_with_global_batch_idx(
+ out_dir=params.exp_dir,
+ global_batch_idx=params.batch_idx_train,
+ model=model,
+ params=params,
+ optimizer=optimizer,
+ scheduler=scheduler,
+ sampler=train_dl.sampler,
+ scaler=scaler,
+ rank=rank,
+ )
+ remove_checkpoints(
+ out_dir=params.exp_dir,
+ topk=params.keep_last_k,
+ rank=rank,
+ )
+
+ if batch_idx % params.log_interval == 0:
+ cur_lr = scheduler.get_last_lr()[0]
+ logging.info(
+ f"Epoch {params.cur_epoch}, "
+ f"batch {batch_idx}, loss[{loss_info}], "
+ f"tot_loss[{tot_loss}], batch size: {batch_size}, "
+ f"lr: {cur_lr:.2e}"
+ )
+
+ if tb_writer is not None:
+ tb_writer.add_scalar(
+ "train/learning_rate", cur_lr, params.batch_idx_train
+ )
+
+ loss_info.write_summary(
+ tb_writer, "train/current_", params.batch_idx_train
+ )
+ tot_loss.write_summary(
+ tb_writer, "train/tot_", params.batch_idx_train
+ )
+
+ if batch_idx > 0 and batch_idx % params.valid_interval == 0:
+ logging.info("Computing validation loss")
+ valid_info = compute_validation_loss(
+ params=params,
+ model=model,
+ graph_compiler=graph_compiler,
+ valid_dl=valid_dl,
+ world_size=world_size,
+ )
+ model.train()
+ logging.info(f"Epoch {params.cur_epoch}, validation: {valid_info}")
+ if tb_writer is not None:
+ valid_info.write_summary(
+ tb_writer, "train/valid_", params.batch_idx_train
+ )
+
+ loss_value = tot_loss["loss"] / tot_loss["frames"]
+ params.train_loss = loss_value
+ if params.train_loss < params.best_train_loss:
+ params.best_train_epoch = params.cur_epoch
+ params.best_train_loss = params.train_loss
+
+
+def run(rank, world_size, args):
+ """
+ Args:
+ rank:
+ It is a value between 0 and `world_size-1`, which is
+ passed automatically by `mp.spawn()` in :func:`main`.
+ The node with rank 0 is responsible for saving checkpoint.
+ world_size:
+ Number of GPUs for DDP training.
+ args:
+ The return value of get_parser().parse_args()
+ """
+ params = get_params()
+ params.update(vars(args))
+
+ fix_random_seed(params.seed)
+ if world_size > 1:
+ setup_dist(rank, world_size, params.master_port)
+
+ setup_logger(f"{params.exp_dir}/log/log-train")
+ logging.info("Training started")
+
+ if args.tensorboard and rank == 0:
+ tb_writer = SummaryWriter(log_dir=f"{params.exp_dir}/tensorboard")
+ else:
+ tb_writer = None
+
+ device = torch.device("cpu")
+ if torch.cuda.is_available():
+ device = torch.device("cuda", rank)
+ logging.info(f"Device: {device}")
+
+ lexicon = Lexicon(params.lang_dir)
+ graph_compiler = CharCtcTrainingGraphCompiler(
+ lexicon=lexicon,
+ device=device,
+ )
+
+    params.blank_id = lexicon.token_table["<blk>"]
+ params.vocab_size = max(lexicon.tokens) + 1
+
+ logging.info(params)
+
+ logging.info("About to create model")
+ model = get_transducer_model(params)
+
+ num_param = sum([p.numel() for p in model.parameters()])
+ logging.info(f"Number of model parameters: {num_param}")
+
+ checkpoints = load_checkpoint_if_available(params=params, model=model)
+
+ model.to(device)
+ if world_size > 1:
+ logging.info("Using DDP")
+ model = DDP(model, device_ids=[rank])
+ model.device = device
+
+ optimizer = Eve(model.parameters(), lr=params.initial_lr)
+
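+    # Eden decays the learning rate based on both the number of processed
+    # batches (lr_batches) and the number of processed epochs (lr_epochs).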
+ scheduler = Eden(optimizer, params.lr_batches, params.lr_epochs)
+
+ if checkpoints and "optimizer" in checkpoints:
+ logging.info("Loading optimizer state dict")
+ optimizer.load_state_dict(checkpoints["optimizer"])
+
+ if (
+ checkpoints
+ and "scheduler" in checkpoints
+ and checkpoints["scheduler"] is not None
+ ):
+ logging.info("Loading scheduler state dict")
+ scheduler.load_state_dict(checkpoints["scheduler"])
+
+ if params.print_diagnostics:
+ opts = diagnostics.TensorDiagnosticOptions(
+ 2 ** 22
+ ) # allow 4 megabytes per sub-module
+ diagnostic = diagnostics.attach_diagnostics(model, opts)
+
+ alimeeting = AlimeetingAsrDataModule(args)
+
+ train_cuts = alimeeting.train_cuts()
+ valid_cuts = alimeeting.valid_cuts()
+
+ def remove_short_and_long_utt(c: Cut):
+ # Keep only utterances with duration between 1 second and 15.0 seconds
+ #
+        # Caution: There is a reason to select 15.0 here. Please see
+ # ../local/display_manifest_statistics.py
+ #
+ # You should use ../local/display_manifest_statistics.py to get
+ # an utterance duration distribution for your dataset to select
+ # the threshold
+ return 1.0 <= c.duration <= 15.0
+
+ train_cuts = train_cuts.filter(remove_short_and_long_utt)
+
+ valid_dl = alimeeting.valid_dataloaders(valid_cuts)
+
+ if params.start_batch > 0 and checkpoints and "sampler" in checkpoints:
+ # We only load the sampler's state dict when it loads a checkpoint
+ # saved in the middle of an epoch
+ sampler_state_dict = checkpoints["sampler"]
+ else:
+ sampler_state_dict = None
+
+ train_dl = alimeeting.train_dataloaders(
+ train_cuts, sampler_state_dict=sampler_state_dict
+ )
+
+ if not params.print_diagnostics and params.start_batch == 0:
+ scan_pessimistic_batches_for_oom(
+ model=model,
+ train_dl=train_dl,
+ optimizer=optimizer,
+ graph_compiler=graph_compiler,
+ params=params,
+ )
+
+ scaler = GradScaler(enabled=params.use_fp16)
+ if checkpoints and "grad_scaler" in checkpoints:
+ logging.info("Loading grad scaler state dict")
+ scaler.load_state_dict(checkpoints["grad_scaler"])
+
+ for epoch in range(params.start_epoch, params.num_epochs):
+ scheduler.step_epoch(epoch)
+ fix_random_seed(params.seed + epoch)
+ train_dl.sampler.set_epoch(epoch)
+
+ if tb_writer is not None:
+ tb_writer.add_scalar("train/epoch", epoch, params.batch_idx_train)
+
+ params.cur_epoch = epoch
+
+ train_one_epoch(
+ params=params,
+ model=model,
+ optimizer=optimizer,
+ scheduler=scheduler,
+ graph_compiler=graph_compiler,
+ train_dl=train_dl,
+ valid_dl=valid_dl,
+ scaler=scaler,
+ tb_writer=tb_writer,
+ world_size=world_size,
+ rank=rank,
+ )
+
+ if params.print_diagnostics:
+ diagnostic.print_diagnostics()
+ break
+
+ save_checkpoint(
+ params=params,
+ model=model,
+ optimizer=optimizer,
+ scheduler=scheduler,
+ sampler=train_dl.sampler,
+ scaler=scaler,
+ rank=rank,
+ )
+
+ logging.info("Done!")
+
+ if world_size > 1:
+ torch.distributed.barrier()
+ cleanup_dist()
+
+
+def scan_pessimistic_batches_for_oom(
+ model: nn.Module,
+ train_dl: torch.utils.data.DataLoader,
+ optimizer: torch.optim.Optimizer,
+ graph_compiler: CharCtcTrainingGraphCompiler,
+ params: AttributeDict,
+):
+ from lhotse.dataset import find_pessimistic_batches
+
+ logging.info(
+ "Sanity check -- see if any of the batches in epoch 0 would cause OOM."
+ )
+ batches, crit_values = find_pessimistic_batches(train_dl.sampler)
+ for criterion, cuts in batches.items():
+ batch = train_dl.dataset[cuts]
+ try:
+ # warmup = 0.0 is so that the derivs for the pruned loss stay zero
+ # (i.e. are not remembered by the decaying-average in adam), because
+ # we want to avoid these params being subject to shrinkage in adam.
+ with torch.cuda.amp.autocast(enabled=params.use_fp16):
+ loss, _ = compute_loss(
+ params=params,
+ model=model,
+ graph_compiler=graph_compiler,
+ batch=batch,
+ is_training=True,
+ warmup=0.0,
+ )
+ loss.backward()
+ optimizer.step()
+ optimizer.zero_grad()
+ except RuntimeError as e:
+ if "CUDA out of memory" in str(e):
+ logging.error(
+ "Your GPU ran out of memory with the current "
+ "max_duration setting. We recommend decreasing "
+ "max_duration and trying again.\n"
+ f"Failing criterion: {criterion} "
+ f"(={crit_values[criterion]}) ..."
+ )
+ raise
+
+
+def main():
+ parser = get_parser()
+ AlimeetingAsrDataModule.add_arguments(parser)
+ args = parser.parse_args()
+ args.lang_dir = Path(args.lang_dir)
+ args.exp_dir = Path(args.exp_dir)
+
+ world_size = args.world_size
+ assert world_size >= 1
+ if world_size > 1:
+ mp.spawn(run, args=(world_size, args), nprocs=world_size, join=True)
+ else:
+ run(rank=0, world_size=1, args=args)
+
+
+torch.set_num_threads(1)
+torch.set_num_interop_threads(1)
+
+if __name__ == "__main__":
+ main()
diff --git a/egs/alimeeting/ASR/shared b/egs/alimeeting/ASR/shared
new file mode 120000
index 000000000..3a3b28f96
--- /dev/null
+++ b/egs/alimeeting/ASR/shared
@@ -0,0 +1 @@
+../../../egs/aishell/ASR/shared
\ No newline at end of file
diff --git a/egs/gigaspeech/ASR/conformer_ctc/asr_datamodule.py b/egs/gigaspeech/ASR/conformer_ctc/asr_datamodule.py
index ab958fa68..d78e26240 100644
--- a/egs/gigaspeech/ASR/conformer_ctc/asr_datamodule.py
+++ b/egs/gigaspeech/ASR/conformer_ctc/asr_datamodule.py
@@ -20,9 +20,8 @@ import logging
from functools import lru_cache
from pathlib import Path
-from lhotse import CutSet, Fbank, FbankConfig, load_manifest
+from lhotse import CutSet, Fbank, FbankConfig, load_manifest, load_manifest_lazy
from lhotse.dataset import (
- BucketingSampler,
CutConcatenate,
CutMix,
DynamicBucketingSampler,
@@ -191,7 +190,7 @@ class GigaSpeechAsrDataModule:
def train_dataloaders(self, cuts_train: CutSet) -> DataLoader:
logging.info("About to get Musan cuts")
cuts_musan = load_manifest(
- self.args.manifest_dir / "cuts_musan.json.gz"
+ self.args.manifest_dir / "musan_cuts.jsonl.gz"
)
transforms = []
@@ -315,7 +314,7 @@ class GigaSpeechAsrDataModule:
cut_transforms=transforms,
return_cuts=self.args.return_cuts,
)
- valid_sampler = BucketingSampler(
+ valid_sampler = DynamicBucketingSampler(
cuts_valid,
max_duration=self.args.max_duration,
shuffle=False,
@@ -339,8 +338,10 @@ class GigaSpeechAsrDataModule:
else PrecomputedFeatures(),
return_cuts=self.args.return_cuts,
)
- sampler = BucketingSampler(
- cuts, max_duration=self.args.max_duration, shuffle=False
+ sampler = DynamicBucketingSampler(
+ cuts,
+ max_duration=self.args.max_duration,
+ shuffle=False,
)
logging.debug("About to create test dataloader")
test_dl = DataLoader(
@@ -361,7 +362,9 @@ class GigaSpeechAsrDataModule:
@lru_cache()
def dev_cuts(self) -> CutSet:
logging.info("About to get dev cuts")
- cuts_valid = load_manifest(self.args.manifest_dir / "cuts_DEV.jsonl.gz")
+ cuts_valid = load_manifest_lazy(
+ self.args.manifest_dir / "cuts_DEV.jsonl.gz"
+ )
if self.args.small_dev:
return cuts_valid.subset(first=1000)
else:
@@ -370,4 +373,4 @@ class GigaSpeechAsrDataModule:
@lru_cache()
def test_cuts(self) -> CutSet:
logging.info("About to get test cuts")
- return load_manifest(self.args.manifest_dir / "cuts_TEST.jsonl.gz")
+ return load_manifest_lazy(self.args.manifest_dir / "cuts_TEST.jsonl.gz")
diff --git a/egs/gigaspeech/ASR/local/compute_fbank_musan.py b/egs/gigaspeech/ASR/local/compute_fbank_musan.py
deleted file mode 100755
index 562872993..000000000
--- a/egs/gigaspeech/ASR/local/compute_fbank_musan.py
+++ /dev/null
@@ -1,103 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2021 Johns Hopkins University (Piotr Żelasko)
-# Copyright 2021 Xiaomi Corp. (Fangjun Kuang)
-#
-# See ../../../../LICENSE for clarification regarding multiple authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-from pathlib import Path
-
-import torch
-from lhotse import (
- CutSet,
- KaldifeatFbank,
- KaldifeatFbankConfig,
- combine,
-)
-from lhotse.recipes.utils import read_manifests_if_cached
-
-# Torch's multithreaded behavior needs to be disabled or
-# it wastes a lot of CPU and slow things down.
-# Do this outside of main() in case it needs to take effect
-# even when we are not invoking the main (e.g. when spawning subprocesses).
-torch.set_num_threads(1)
-torch.set_num_interop_threads(1)
-
-
-def compute_fbank_musan():
- src_dir = Path("data/manifests")
- output_dir = Path("data/fbank")
-
- # number of workers in dataloader
- num_workers = 10
-
- # number of seconds in a batch
- batch_duration = 600
-
- dataset_parts = (
- "music",
- "speech",
- "noise",
- )
-
- manifests = read_manifests_if_cached(
- prefix="musan", dataset_parts=dataset_parts, output_dir=src_dir
- )
- assert manifests is not None
-
- musan_cuts_path = output_dir / "cuts_musan.json.gz"
-
- if musan_cuts_path.is_file():
- logging.info(f"{musan_cuts_path} already exists - skipping")
- return
-
- logging.info("Extracting features for Musan")
-
- device = torch.device("cpu")
- if torch.cuda.is_available():
- device = torch.device("cuda", 0)
- extractor = KaldifeatFbank(KaldifeatFbankConfig(device=device))
-
- logging.info(f"device: {device}")
-
- musan_cuts = (
- CutSet.from_manifests(
- recordings=combine(
- part["recordings"] for part in manifests.values()
- )
- )
- .cut_into_windows(10.0)
- .filter(lambda c: c.duration > 5)
- .compute_and_store_features_batch(
- extractor=extractor,
- storage_path=f"{output_dir}/feats_musan",
- num_workers=num_workers,
- batch_duration=batch_duration,
- )
- )
- musan_cuts.to_json(musan_cuts_path)
-
-
-def main():
- formatter = (
- "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s"
- )
- logging.basicConfig(format=formatter, level=logging.INFO)
-
- compute_fbank_musan()
-
-
-if __name__ == "__main__":
- main()
diff --git a/egs/gigaspeech/ASR/local/compute_fbank_musan.py b/egs/gigaspeech/ASR/local/compute_fbank_musan.py
new file mode 120000
index 000000000..5833f2484
--- /dev/null
+++ b/egs/gigaspeech/ASR/local/compute_fbank_musan.py
@@ -0,0 +1 @@
+../../../librispeech/ASR/local/compute_fbank_musan.py
\ No newline at end of file
diff --git a/egs/gigaspeech/ASR/pruned_transducer_stateless2/asr_datamodule.py b/egs/gigaspeech/ASR/pruned_transducer_stateless2/asr_datamodule.py
index ff3d3b07a..c87686e1e 100644
--- a/egs/gigaspeech/ASR/pruned_transducer_stateless2/asr_datamodule.py
+++ b/egs/gigaspeech/ASR/pruned_transducer_stateless2/asr_datamodule.py
@@ -23,9 +23,8 @@ from pathlib import Path
from typing import Any, Dict, Optional
import torch
-from lhotse import CutSet, Fbank, FbankConfig, load_manifest
+from lhotse import CutSet, Fbank, FbankConfig, load_manifest, load_manifest_lazy
from lhotse.dataset import (
- BucketingSampler,
CutConcatenate,
CutMix,
DynamicBucketingSampler,
@@ -218,7 +217,7 @@ class GigaSpeechAsrDataModule:
logging.info("Enable MUSAN")
logging.info("About to get Musan cuts")
cuts_musan = load_manifest(
- self.args.manifest_dir / "cuts_musan.json.gz"
+ self.args.manifest_dir / "musan_cuts.jsonl.gz"
)
transforms.append(
CutMix(
@@ -358,7 +357,7 @@ class GigaSpeechAsrDataModule:
cut_transforms=transforms,
return_cuts=self.args.return_cuts,
)
- valid_sampler = BucketingSampler(
+ valid_sampler = DynamicBucketingSampler(
cuts_valid,
max_duration=self.args.max_duration,
shuffle=False,
@@ -382,8 +381,10 @@ class GigaSpeechAsrDataModule:
else PrecomputedFeatures(),
return_cuts=self.args.return_cuts,
)
- sampler = BucketingSampler(
- cuts, max_duration=self.args.max_duration, shuffle=False
+ sampler = DynamicBucketingSampler(
+ cuts,
+ max_duration=self.args.max_duration,
+ shuffle=False,
)
logging.debug("About to create test dataloader")
test_dl = DataLoader(
@@ -404,7 +405,9 @@ class GigaSpeechAsrDataModule:
@lru_cache()
def dev_cuts(self) -> CutSet:
logging.info("About to get dev cuts")
- cuts_valid = load_manifest(self.args.manifest_dir / "cuts_DEV.jsonl.gz")
+ cuts_valid = load_manifest_lazy(
+ self.args.manifest_dir / "cuts_DEV.jsonl.gz"
+ )
if self.args.small_dev:
return cuts_valid.subset(first=1000)
else:
@@ -413,4 +416,4 @@ class GigaSpeechAsrDataModule:
@lru_cache()
def test_cuts(self) -> CutSet:
logging.info("About to get test cuts")
- return load_manifest(self.args.manifest_dir / "cuts_TEST.jsonl.gz")
+ return load_manifest_lazy(self.args.manifest_dir / "cuts_TEST.jsonl.gz")
diff --git a/egs/librispeech/ASR/README.md b/egs/librispeech/ASR/README.md
index 6ccf2fcc6..e2aaa9d7e 100644
--- a/egs/librispeech/ASR/README.md
+++ b/egs/librispeech/ASR/README.md
@@ -21,6 +21,8 @@ The following table lists the differences among them.
| `pruned_transducer_stateless3` | Conformer(modified) | Embedding + Conv1d | Using k2 pruned RNN-T loss + using GigaSpeech as extra training data |
| `pruned_transducer_stateless4` | Conformer(modified) | Embedding + Conv1d | same as pruned_transducer_stateless2 + save averaged models periodically during training |
| `pruned_transducer_stateless5` | Conformer(modified) | Embedding + Conv1d | same as pruned_transducer_stateless4 + more layers + random combiner|
+| `pruned_transducer_stateless6` | Conformer(modified) | Embedding + Conv1d | same as pruned_transducer_stateless4 + distillation with hubert|
+| `pruned_stateless_emformer_rnnt2` | Emformer(from torchaudio) | Embedding + Conv1d | Using Emformer from torchaudio for streaming ASR|
The decoder in `transducer_stateless` is modified from the paper
diff --git a/egs/librispeech/ASR/RESULTS-100hours.md b/egs/librispeech/ASR/RESULTS-100hours.md
index 2e1bbd687..3a064e69d 100644
--- a/egs/librispeech/ASR/RESULTS-100hours.md
+++ b/egs/librispeech/ASR/RESULTS-100hours.md
@@ -3,6 +3,31 @@
This page shows the WERs for test-clean/test-other using only
train-clean-100 subset as training data.
+## Distillation with hubert
+### 2022-05-27
+Related models/log/tensorboard:
+https://huggingface.co/GuoLiyong/stateless6_baseline_vs_disstillation
+
+The following results are obtained by ./distillation_with_hubert.sh
+
+The only difference is in pruned_transducer_stateless6/train.py:
+
+For the baseline, set enable_distillation=False.
+
+For distillation, set enable_distillation=True (the default).
+
+The decoding method is modified beam search.
+| | test-clean | test-other | comment |
+|-------------------------------------|------------|------------|------------------------------------------|
+| baseline no vq distillation | 7.09 | 18.88 | --epoch 20, --avg 10, --max-duration 200 |
+| baseline no vq distillation | 6.83 | 18.19 | --epoch 30, --avg 10, --max-duration 200 |
+| baseline no vq distillation | 6.73 | 17.79 | --epoch 40, --avg 10, --max-duration 200 |
+| baseline no vq distillation | 6.75 | 17.68 | --epoch 50, --avg 10, --max-duration 200 |
+| distillation with hubert | 5.82 | 15.98 | --epoch 20, --avg 10, --max-duration 200 |
+| distillation with hubert | 5.52 | 15.15 | --epoch 30, --avg 10, --max-duration 200 |
+| distillation with hubert | 5.45 | 14.94 | --epoch 40, --avg 10, --max-duration 200 |
+| distillation with hubert | 5.50 | 14.77 | --epoch 50, --avg 10, --max-duration 200 |
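+
+As a rough sketch, a table row such as `--epoch 30, --avg 10, --max-duration 200`
+can presumably be reproduced along the following lines (the exp-dir is an
+assumption; see ./distillation_with_hubert.sh for the exact stages):
+
+```bash
+./pruned_transducer_stateless6/decode.py \
+  --decoding-method modified_beam_search \
+  --epoch 30 \
+  --avg 10 \
+  --max-duration 200 \
+  --exp-dir ./pruned_transducer_stateless6/exp
+```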
+
## Conformer encoder + embedding decoder
### 2022-02-21
diff --git a/egs/librispeech/ASR/RESULTS.md b/egs/librispeech/ASR/RESULTS.md
index 453751ba5..66410ef40 100644
--- a/egs/librispeech/ASR/RESULTS.md
+++ b/egs/librispeech/ASR/RESULTS.md
@@ -1,5 +1,73 @@
## Results
+### LibriSpeech BPE training results (Pruned Stateless Emformer RNN-T)
+
+[pruned_stateless_emformer_rnnt2](./pruned_stateless_emformer_rnnt2)
+
+Use .
+
+Use [Emformer](https://arxiv.org/abs/2010.10759) from [torchaudio](https://github.com/pytorch/audio)
+for streaming ASR. The Emformer model is imported from torchaudio without modifications.
+
+You can use to deploy it.
+
+| | test-clean | test-other | comment |
+|-------------------------------------|------------|------------|----------------------------------------|
+| greedy search (max sym per frame 1) | 4.28 | 11.42 | --epoch 39 --avg 6 --max-duration 600 |
+| modified beam search | 4.22 | 11.16 | --epoch 39 --avg 6 --max-duration 600 |
+| fast beam search | 4.29 | 11.26 | --epoch 39 --avg 6 --max-duration 600 |
+
+
+The training commands are:
+```bash
+export CUDA_VISIBLE_DEVICES="0,1,2,3,4,5,6,7"
+
+./pruned_stateless_emformer_rnnt2/train.py \
+ --world-size 8 \
+ --num-epochs 40 \
+ --start-epoch 1 \
+ --exp-dir pruned_stateless_emformer_rnnt2/exp-full \
+ --full-libri 1 \
+ --use-fp16 0 \
+ --max-duration 200 \
+ --prune-range 5 \
+ --lm-scale 0.25 \
+ --master-port 12358 \
+ --num-encoder-layers 18 \
+ --left-context-length 128 \
+ --segment-length 8 \
+ --right-context-length 4
+```
+
+The tensorboard log can be found at
+
+
+The decoding commands are:
+```bash
+for m in greedy_search fast_beam_search modified_beam_search; do
+ for epoch in 39; do
+ for avg in 6; do
+ ./pruned_stateless_emformer_rnnt2/decode.py \
+ --epoch $epoch \
+ --avg $avg \
+ --use-averaged-model 1 \
+ --exp-dir pruned_stateless_emformer_rnnt2/exp-full \
+ --max-duration 50 \
+ --decoding-method $m \
+ --num-encoder-layers 18 \
+ --left-context-length 128 \
+ --segment-length 8 \
+ --right-context-length 4
+ done
+ done
+done
+```
+
+You can find a pretrained model, training logs, decoding logs, and decoding
+results at:
+
+
+
### LibriSpeech BPE training results (Pruned Stateless Transducer 5)
[pruned_transducer_stateless5](./pruned_transducer_stateless5)
@@ -193,6 +261,126 @@ You can find a pretrained model, training logs, decoding logs, and decoding
results at:
+
+### LibriSpeech BPE training results (Pruned Stateless Transducer 4)
+
+[pruned_transducer_stateless4](./pruned_transducer_stateless4)
+
+This version saves the averaged model during training and decodes with the averaged model.
+
+See for details about the idea of model averaging.
+
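+A minimal sketch of the idea (hypothetical helper; the actual implementation is
+`average_checkpoints_with_averaged_model` in icefall): given running averages of
+the first `start` and the first `end` checkpoints, the average over epochs
+`(start, end]` is a weighted difference of the two:
+
+```python
+def average_over_range(avg_start, avg_end, start, end):
+    # avg_k is the running average of checkpoints 1..k (a dict of tensors),
+    # so sum_k = k * avg_k, and the average over (start, end] follows.
+    scale = 1.0 / (end - start)
+    return {
+        name: (end * avg_end[name] - start * avg_start[name]) * scale
+        for name in avg_end
+    }
+```
+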
+#### Training on full librispeech
+
+See
+
+Using commit `ec0b0e92297cc03fdb09f48cd235e84d2c04156b`.
+
+The WERs are:
+
+| | test-clean | test-other | comment |
+|-------------------------------------|------------|------------|-------------------------------------------------------------------------------|
+| greedy search (max sym per frame 1) | 2.75 | 6.74 | --epoch 30 --avg 6 --use_averaged_model False |
+| greedy search (max sym per frame 1) | 2.69 | 6.64 | --epoch 30 --avg 6 --use_averaged_model True |
+| fast beam search | 2.72 | 6.67 | --epoch 30 --avg 6 --use_averaged_model False |
+| fast beam search | 2.66 | 6.6 | --epoch 30 --avg 6 --use_averaged_model True |
+| modified beam search | 2.67 | 6.68 | --epoch 30 --avg 6 --use_averaged_model False |
+| modified beam search | 2.62 | 6.57 | --epoch 30 --avg 6 --use_averaged_model True |
+
+The training command is:
+
+```bash
+./pruned_transducer_stateless4/train.py \
+ --world-size 6 \
+ --num-epochs 30 \
+ --start-epoch 1 \
+ --exp-dir pruned_transducer_stateless4/exp \
+ --full-libri 1 \
+ --max-duration 300 \
+ --save-every-n 8000 \
+ --keep-last-k 20 \
+ --average-period 100
+```
+
+The tensorboard log can be found at
+
+
+The decoding command using greedy search is:
+```bash
+./pruned_transducer_stateless4/decode.py \
+ --epoch 30 \
+ --avg 6 \
+ --exp-dir pruned_transducer_stateless4/exp \
+ --max-duration 300 \
+ --decoding-method greedy_search \
+ --use-averaged-model True
+```
+
+The decoding command using fast beam search is:
+```bash
+./pruned_transducer_stateless4/decode.py \
+ --epoch 30 \
+ --avg 6 \
+ --exp-dir pruned_transducer_stateless4/exp \
+ --max-duration 300 \
+ --decoding-method fast_beam_search \
+ --use-averaged-model True \
+ --beam 4 \
+ --max-contexts 4 \
+ --max-states 8
+```
+
+The decoding command using modified beam search is:
+```bash
+./pruned_transducer_stateless4/decode.py \
+ --epoch 30 \
+ --avg 6 \
+ --exp-dir pruned_transducer_stateless4/exp \
+ --max-duration 300 \
+ --decoding-method modified_beam_search \
+ --use-averaged-model True \
+ --beam-size 4
+```
+
+Pretrained models, training logs, decoding logs, and decoding results
+are available at
+
+
+#### Training on train-clean-100
+
+See
+
+Using commit `ec0b0e92297cc03fdb09f48cd235e84d2c04156b`.
+
+The WERs are:
+
+| | test-clean | test-other | comment |
+|-------------------------------------|------------|------------|-------------------------------------------------------------------------------|
+| greedy search (max sym per frame 1) | 7.0 | 18.95 | --epoch 30 --avg 10 --use_averaged_model False |
+| greedy search (max sym per frame 1) | 6.92 | 18.65 | --epoch 30 --avg 10 --use_averaged_model True |
+| fast beam search | 6.82 | 18.47 | --epoch 30 --avg 10 --use_averaged_model False |
+| fast beam search | 6.74 | 18.2 | --epoch 30 --avg 10 --use_averaged_model True |
+| modified beam search | 6.74 | 18.39 | --epoch 30 --avg 10 --use_averaged_model False |
+| modified beam search | 6.74 | 18.12 | --epoch 30 --avg 10 --use_averaged_model True |
+
+The training command is:
+
+```bash
+./pruned_transducer_stateless4/train.py \
+ --world-size 3 \
+ --num-epochs 30 \
+ --start-epoch 1 \
+ --exp-dir pruned_transducer_stateless4/exp \
+ --full-libri 0 \
+ --max-duration 300 \
+ --save-every-n 8000 \
+ --keep-last-k 20 \
+ --average-period 100
+```
+
+The tensorboard log can be found at
+
+
### LibriSpeech BPE training results (Pruned Stateless Transducer 3, 2022-04-29)
[pruned_transducer_stateless3](./pruned_transducer_stateless3)
diff --git a/egs/librispeech/ASR/conformer_ctc/ali.py b/egs/librispeech/ASR/conformer_ctc/ali.py
index 42fa2308e..2828e309e 100755
--- a/egs/librispeech/ASR/conformer_ctc/ali.py
+++ b/egs/librispeech/ASR/conformer_ctc/ali.py
@@ -96,14 +96,14 @@ def get_parser():
- labels_xxx.h5
- aux_labels_xxx.h5
- - cuts_xxx.json.gz
+ - librispeech_cuts_xxx.jsonl.gz
where xxx is the value of `--dataset`. For instance, if
`--dataset` is `train-clean-100`, it will contain 3 files:
- `labels_train-clean-100.h5`
- `aux_labels_train-clean-100.h5`
- - `cuts_train-clean-100.json.gz`
+ - `librispeech_cuts_train-clean-100.jsonl.gz`
Note: Both labels_xxx.h5 and aux_labels_xxx.h5 contain framewise
alignment. The difference is that labels_xxx.h5 contains repeats.
@@ -289,7 +289,9 @@ def main():
out_labels_ali_filename = out_dir / f"labels_{params.dataset}.h5"
out_aux_labels_ali_filename = out_dir / f"aux_labels_{params.dataset}.h5"
- out_manifest_filename = out_dir / f"cuts_{params.dataset}.json.gz"
+ out_manifest_filename = (
+ out_dir / f"librispeech_cuts_{params.dataset}.jsonl.gz"
+ )
for f in (
out_labels_ali_filename,
diff --git a/egs/librispeech/ASR/conformer_ctc/train.py b/egs/librispeech/ASR/conformer_ctc/train.py
index b81bd6330..fc8fc8863 100755
--- a/egs/librispeech/ASR/conformer_ctc/train.py
+++ b/egs/librispeech/ASR/conformer_ctc/train.py
@@ -17,6 +17,17 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+"""
+Usage:
+ export CUDA_VISIBLE_DEVICES="0,1,2,3"
+ ./conformer_ctc/train.py \
+ --exp-dir ./conformer_ctc/exp \
+ --world-size 4 \
+ --full-libri 1 \
+ --max-duration 200 \
+ --num-epochs 20
+"""
+
import argparse
import logging
from pathlib import Path
@@ -29,6 +40,7 @@ import torch.multiprocessing as mp
import torch.nn as nn
from asr_datamodule import LibriSpeechAsrDataModule
from conformer import Conformer
+from lhotse.cut import Cut
from lhotse.utils import fix_random_seed
from torch import Tensor
from torch.nn.parallel import DistributedDataParallel as DDP
@@ -676,6 +688,20 @@ def run(rank, world_size, args):
if params.full_libri:
train_cuts += librispeech.train_clean_360_cuts()
train_cuts += librispeech.train_other_500_cuts()
+
+ def remove_short_and_long_utt(c: Cut):
+ # Keep only utterances with duration between 1 second and 20 seconds
+ #
+ # Caution: There is a reason to select 20.0 here. Please see
+ # ../local/display_manifest_statistics.py
+ #
+ # You should use ../local/display_manifest_statistics.py to get
+ # an utterance duration distribution for your dataset to select
+ # the threshold
+ return 1.0 <= c.duration <= 20.0
+
+ train_cuts = train_cuts.filter(remove_short_and_long_utt)
+
train_dl = librispeech.train_dataloaders(train_cuts)
valid_cuts = librispeech.dev_clean_cuts()
diff --git a/egs/librispeech/ASR/distillation_with_hubert.sh b/egs/librispeech/ASR/distillation_with_hubert.sh
new file mode 100644
index 000000000..e18ba8f55
--- /dev/null
+++ b/egs/librispeech/ASR/distillation_with_hubert.sh
@@ -0,0 +1,144 @@
+# A short introduction to the distillation framework.
+#
+# A typical traditional distillation method is
+# Loss(teacher embedding, student embedding).
+#
+# In contrast, the proposed distillation framework consists of two main steps:
+# codebook indexes = quantizer.encode(teacher embedding)
+# Loss(codebook indexes, student embedding)
+#
+# A Python sketch of these two steps is given right after this script.
+#
+# Things worth mentioning:
+# 1. The float-type teacher embeddings are quantized into a sequence of
+# 8-bit integer codebook indexes.
+# 2. Layer 36 (1-based) out of the 48 layers in total is used to extract
+# teacher embeddings.
+# 3. Layer 6 (1-based) out of the 6 layers in total is used to extract
+# student embeddings.
+
+# This is an example of doing distillation with the LibriSpeech clean-100 subset.
+# Run it with:
+# bash distillation_with_hubert.sh [0|1|2|3|4]
+#
+# For example, the command
+# bash distillation_with_hubert.sh 0
+# will download the hubert model.
+stage=$1
+
+# Set the GPUs available.
+# This script requires at least one GPU.
+# You MUST set the environment variable "CUDA_VISIBLE_DEVICES",
+# even if you only have ONE GPU. It is needed by CodebookIndexExtractor
+# to determine the number of jobs for extracting codebook indexes in parallel.
+
+# Suppose only one GPU exists:
+# export CUDA_VISIBLE_DEVICES="0"
+#
+# Suppose GPUs 2,3,4,5 are available:
+export CUDA_VISIBLE_DEVICES="2,3,4,5"
+
+
+if [ $stage -eq 0 ]; then
+ # Preparation stage.
+
+ # Install fairseq according to:
+ # https://github.com/pytorch/fairseq
+ # when testing this code:
+ # commit 806855bf660ea748ed7ffb42fe8dcc881ca3aca0 is used.
+ has_fairseq=$(python3 -c "import importlib; print(importlib.util.find_spec('fairseq') is not None)")
+ if [ $has_fairseq == 'False' ]; then
+ echo "Please install fairseq before running following stages"
+ exit 1
+ fi
+
+ # Install quantization toolkit:
+ # pip install git+https://github.com/danpovey/quantization.git@master
+ # when testing this code:
+ # commit c17ffe67aa2e6ca6b6855c50fde812f2eed7870b is used.
+
+ has_quantization=$(python3 -c "import importlib; print(importlib.util.find_spec('quantization') is not None)")
+ if [ $has_quantization == 'False' ]; then
+ echo "Please install quantization before running following stages"
+ exit 1
+ fi
+
+ echo "Download hubert model."
+ # Parameters about model.
+ exp_dir=./pruned_transducer_stateless6/exp/
+ model_id=hubert_xtralarge_ll60k_finetune_ls960
+ hubert_model_dir=${exp_dir}/hubert_models
+ hubert_model=${hubert_model_dir}/${model_id}.pt
+ mkdir -p ${hubert_model_dir}
+ # For more models refer to: https://github.com/pytorch/fairseq/tree/main/examples/hubert
+ if [ -f ${hubert_model} ]; then
+ echo "hubert model alread exists."
+ else
+    wget -c https://dl.fbaipublicfiles.com/hubert/${model_id}.pt -P ${hubert_model_dir}
+    wget -c https://dl.fbaipublicfiles.com/fairseq/wav2vec/dict.ltr.txt -P ${hubert_model_dir}
+ fi
+fi
+
+if [ ! -d ./data/fbank ]; then
+ echo "This script assumes ./data/fbank is already generated by prepare.sh"
+ exit 1
+fi
+
+if [ $stage -eq 1 ]; then
+  # This stage is not directly used by codebook index extraction.
+  # It is a way to "prove" that the downloaded hubert model
+  # runs inference correctly: the WERs below should look normal.
+ # Expect WERs:
+ # [test-clean-ctc_greedy_search] %WER 2.04% [1075 / 52576, 92 ins, 104 del, 879 sub ]
+ # [test-other-ctc_greedy_search] %WER 3.71% [1942 / 52343, 152 ins, 126 del, 1664 sub ]
+ ./pruned_transducer_stateless6/hubert_decode.py
+fi
+
+if [ $stage -eq 2 ]; then
+ # Analysis of disk usage:
+ # With num_codebooks==8, each teacher embedding is quantized into
+ # a sequence of eight 8-bit integers, i.e. only eight bytes are needed.
+  # The training dataset, clean-100h with speed perturbs 0.9 and 1.1, has 300 hours.
+  # The output frame rate of hubert is 50 frames per second.
+  # Theoretically, 412M = 300 * 3600 * 50 * 8 / 1024 / 1024 is needed.
+  # The actual size of all "*.h5" files storing codebook indexes is 450M.
+  # I think the extra ~38M is some meta information.
+
+ # Time consumption analysis:
+  # For extracting the quantizer's training data (teacher embeddings), only 1000 utts from clean-100 are used.
+  # Together with training the quantizer, this takes no more than 20 minutes.
+  #
+  # For codebook index extraction,
+  # with two NVIDIA A100 GPUs, around three hours are needed to process 300 hours of training data,
+  # i.e. clean-100 with speed perturbs 0.9 and 1.1.
+
+ # GPU usage:
+  # During extraction of the quantizer's training data (teacher embeddings) and its training,
+  # only the FIRST GPU is used.
+  # During codebook index extraction, ALL GPUs set by CUDA_VISIBLE_DEVICES are used.
+ ./pruned_transducer_stateless6/extract_codebook_index.py \
+ --full-libri False
+fi
+
+if [ $stage -eq 3 ]; then
+ # Example training script.
+  # Note: it's better to set spec-aug-time-warp-factor=-1
+ WORLD_SIZE=$(echo ${CUDA_VISIBLE_DEVICES} | awk '{n=split($1, _, ","); print n}')
+ ./pruned_transducer_stateless6/train.py \
+ --manifest-dir ./data/vq_fbank \
+ --master-port 12359 \
+ --full-libri False \
+ --spec-aug-time-warp-factor -1 \
+ --max-duration 300 \
+ --world-size ${WORLD_SIZE} \
+ --num-epochs 20
+fi
+
+if [ $stage -eq 4 ]; then
+ # Results should be similar to:
+ # errs-test-clean-beam_size_4-epoch-20-avg-10-beam-4.txt:%WER = 5.67
+ # errs-test-other-beam_size_4-epoch-20-avg-10-beam-4.txt:%WER = 15.60
+ ./pruned_transducer_stateless6/decode.py \
+ --decoding-method "modified_beam_search" \
+ --epoch 20 \
+ --avg 10 \
+ --max-duration 200 \
+ --exp-dir ./pruned_transducer_stateless6/exp
+fi
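
A minimal Python sketch of the two distillation steps described at the top of
this script (all names below are hypothetical; the actual implementation lives
in pruned_transducer_stateless6):

```python
import torch.nn.functional as F


def codebook_distillation_loss(teacher_emb, student_emb, quantizer, predictor):
    # Step 1: quantize the float teacher embeddings (e.g., from hubert
    # layer 36) into integer codebook indexes, shape (N, T, num_codebooks).
    indexes = quantizer.encode(teacher_emb)  # hypothetical quantizer API
    # Step 2: let the student embeddings (e.g., from conformer layer 6)
    # predict those indexes; a per-codebook cross-entropy is the loss.
    logits = predictor(student_emb)  # (N, T, num_codebooks, codebook_size)
    return F.cross_entropy(
        logits.reshape(-1, logits.size(-1)),
        indexes.reshape(-1).long(),
    )
```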
diff --git a/egs/librispeech/ASR/local/compute_fbank_gigaspeech_dev_test.py b/egs/librispeech/ASR/local/compute_fbank_gigaspeech_dev_test.py
index 9f1039893..68d93d2c5 100644
--- a/egs/librispeech/ASR/local/compute_fbank_gigaspeech_dev_test.py
+++ b/egs/librispeech/ASR/local/compute_fbank_gigaspeech_dev_test.py
@@ -20,11 +20,7 @@ import logging
from pathlib import Path
import torch
-from lhotse import (
- CutSet,
- KaldifeatFbank,
- KaldifeatFbankConfig,
-)
+from lhotse import CutSet, KaldifeatFbank, KaldifeatFbankConfig
# Torch's multithreaded behavior needs to be disabled or
# it wastes a lot of CPU and slow things down.
@@ -51,13 +47,16 @@ def compute_fbank_gigaspeech_dev_test():
logging.info(f"device: {device}")
+ prefix = "gigaspeech"
+ suffix = "jsonl.gz"
+
for partition in subsets:
- cuts_path = in_out_dir / f"cuts_{partition}.jsonl.gz"
+ cuts_path = in_out_dir / f"{prefix}_cuts_{partition}.{suffix}"
if cuts_path.is_file():
logging.info(f"{cuts_path} exists - skipping")
continue
- raw_cuts_path = in_out_dir / f"cuts_{partition}_raw.jsonl.gz"
+ raw_cuts_path = in_out_dir / f"{prefix}_cuts_{partition}_raw.{suffix}"
logging.info(f"Loading {raw_cuts_path}")
cut_set = CutSet.from_file(raw_cuts_path)
@@ -66,7 +65,7 @@ def compute_fbank_gigaspeech_dev_test():
cut_set = cut_set.compute_and_store_features_batch(
extractor=extractor,
- storage_path=f"{in_out_dir}/feats_{partition}",
+ storage_path=f"{in_out_dir}/{prefix}_feats_{partition}",
num_workers=num_workers,
batch_duration=batch_duration,
)
diff --git a/egs/librispeech/ASR/local/compute_fbank_gigaspeech_splits.py b/egs/librispeech/ASR/local/compute_fbank_gigaspeech_splits.py
index a7ed2467d..f826f064e 100644
--- a/egs/librispeech/ASR/local/compute_fbank_gigaspeech_splits.py
+++ b/egs/librispeech/ASR/local/compute_fbank_gigaspeech_splits.py
@@ -77,7 +77,7 @@ def get_parser():
def compute_fbank_gigaspeech_splits(args):
num_splits = args.num_splits
- output_dir = f"data/fbank/XL_split_{num_splits}"
+ output_dir = f"data/fbank/gigaspeech_XL_split_{num_splits}"
output_dir = Path(output_dir)
assert output_dir.exists(), f"{output_dir} does not exist!"
@@ -96,17 +96,19 @@ def compute_fbank_gigaspeech_splits(args):
extractor = KaldifeatFbank(KaldifeatFbankConfig(device=device))
logging.info(f"device: {device}")
+ prefix = "gigaspeech"
+
num_digits = 8 # num_digits is fixed by lhotse split-lazy
for i in range(start, stop):
idx = f"{i + 1}".zfill(num_digits)
logging.info(f"Processing {idx}/{num_splits}")
- cuts_path = output_dir / f"cuts_XL.{idx}.jsonl.gz"
+ cuts_path = output_dir / f"{prefix}_cuts_XL.{idx}.jsonl.gz"
if cuts_path.is_file():
logging.info(f"{cuts_path} exists - skipping")
continue
- raw_cuts_path = output_dir / f"cuts_XL_raw.{idx}.jsonl.gz"
+ raw_cuts_path = output_dir / f"{prefix}_cuts_XL_raw.{idx}.jsonl.gz"
if not raw_cuts_path.is_file():
logging.info(f"{raw_cuts_path} does not exist - skipping it")
continue
@@ -115,13 +117,13 @@ def compute_fbank_gigaspeech_splits(args):
cut_set = CutSet.from_file(raw_cuts_path)
logging.info("Computing features")
- if (output_dir / f"feats_XL_{idx}.lca").exists():
- logging.info(f"Removing {output_dir}/feats_XL_{idx}.lca")
- os.remove(output_dir / f"feats_XL_{idx}.lca")
+ if (output_dir / f"{prefix}_feats_XL_{idx}.lca").exists():
+ logging.info(f"Removing {output_dir}/{prefix}_feats_XL_{idx}.lca")
+ os.remove(output_dir / f"{prefix}_feats_XL_{idx}.lca")
cut_set = cut_set.compute_and_store_features_batch(
extractor=extractor,
- storage_path=f"{output_dir}/feats_XL_{idx}",
+ storage_path=f"{output_dir}/{prefix}_feats_XL_{idx}",
num_workers=args.num_workers,
batch_duration=args.batch_duration,
)
diff --git a/egs/librispeech/ASR/local/compute_fbank_librispeech.py b/egs/librispeech/ASR/local/compute_fbank_librispeech.py
index 92f4f6ab7..642d9fd32 100755
--- a/egs/librispeech/ASR/local/compute_fbank_librispeech.py
+++ b/egs/librispeech/ASR/local/compute_fbank_librispeech.py
@@ -28,7 +28,7 @@ import os
from pathlib import Path
import torch
-from lhotse import ChunkedLilcomHdf5Writer, CutSet, Fbank, FbankConfig
+from lhotse import CutSet, Fbank, FbankConfig, LilcomChunkyWriter
from lhotse.recipes.utils import read_manifests_if_cached
from icefall.utils import get_executor
@@ -56,8 +56,13 @@ def compute_fbank_librispeech():
"train-clean-360",
"train-other-500",
)
+ prefix = "librispeech"
+ suffix = "jsonl.gz"
manifests = read_manifests_if_cached(
- prefix="librispeech", dataset_parts=dataset_parts, output_dir=src_dir
+ dataset_parts=dataset_parts,
+ output_dir=src_dir,
+ prefix=prefix,
+ suffix=suffix,
)
assert manifests is not None
@@ -65,7 +70,8 @@ def compute_fbank_librispeech():
with get_executor() as ex: # Initialize the executor only once.
for partition, m in manifests.items():
- if (output_dir / f"cuts_{partition}.json.gz").is_file():
+ cuts_filename = f"{prefix}_cuts_{partition}.{suffix}"
+ if (output_dir / cuts_filename).is_file():
logging.info(f"{partition} already exists - skipping.")
continue
logging.info(f"Processing {partition}")
@@ -81,13 +87,13 @@ def compute_fbank_librispeech():
)
cut_set = cut_set.compute_and_store_features(
extractor=extractor,
- storage_path=f"{output_dir}/feats_{partition}",
+ storage_path=f"{output_dir}/{prefix}_feats_{partition}",
# when an executor is specified, make more partitions
num_jobs=num_jobs if ex is None else 80,
executor=ex,
- storage_type=ChunkedLilcomHdf5Writer,
+ storage_type=LilcomChunkyWriter,
)
- cut_set.to_json(output_dir / f"cuts_{partition}.json.gz")
+ cut_set.to_file(output_dir / cuts_filename)
if __name__ == "__main__":
diff --git a/egs/librispeech/ASR/local/compute_fbank_musan.py b/egs/librispeech/ASR/local/compute_fbank_musan.py
index 368bea4e8..fef372129 100755
--- a/egs/librispeech/ASR/local/compute_fbank_musan.py
+++ b/egs/librispeech/ASR/local/compute_fbank_musan.py
@@ -28,7 +28,7 @@ import os
from pathlib import Path
import torch
-from lhotse import ChunkedLilcomHdf5Writer, CutSet, Fbank, FbankConfig, combine
+from lhotse import CutSet, Fbank, FbankConfig, LilcomChunkyWriter, combine
from lhotse.recipes.utils import read_manifests_if_cached
from icefall.utils import get_executor
@@ -52,12 +52,22 @@ def compute_fbank_musan():
"speech",
"noise",
)
+ prefix = "musan"
+ suffix = "jsonl.gz"
manifests = read_manifests_if_cached(
- prefix="musan", dataset_parts=dataset_parts, output_dir=src_dir
+ dataset_parts=dataset_parts,
+ output_dir=src_dir,
+ prefix=prefix,
+ suffix=suffix,
)
assert manifests is not None
- musan_cuts_path = output_dir / "cuts_musan.json.gz"
+ assert len(manifests) == len(dataset_parts), (
+ len(manifests),
+ len(dataset_parts),
+ )
+
+ musan_cuts_path = output_dir / "musan_cuts.jsonl.gz"
if musan_cuts_path.is_file():
logging.info(f"{musan_cuts_path} already exists - skipping")
@@ -79,13 +89,13 @@ def compute_fbank_musan():
.filter(lambda c: c.duration > 5)
.compute_and_store_features(
extractor=extractor,
- storage_path=f"{output_dir}/feats_musan",
+ storage_path=f"{output_dir}/musan_feats",
num_jobs=num_jobs if ex is None else 80,
executor=ex,
- storage_type=ChunkedLilcomHdf5Writer,
+ storage_type=LilcomChunkyWriter,
)
)
- musan_cuts.to_json(musan_cuts_path)
+ musan_cuts.to_file(musan_cuts_path)
if __name__ == "__main__":
diff --git a/egs/librispeech/ASR/local/display_manifest_statistics.py b/egs/librispeech/ASR/local/display_manifest_statistics.py
index 15bd206fa..c3c684235 100755
--- a/egs/librispeech/ASR/local/display_manifest_statistics.py
+++ b/egs/librispeech/ASR/local/display_manifest_statistics.py
@@ -25,19 +25,19 @@ for usage.
"""
-from lhotse import load_manifest
+from lhotse import load_manifest_lazy
def main():
- path = "./data/fbank/cuts_train-clean-100.json.gz"
- path = "./data/fbank/cuts_train-clean-360.json.gz"
- path = "./data/fbank/cuts_train-other-500.json.gz"
- path = "./data/fbank/cuts_dev-clean.json.gz"
- path = "./data/fbank/cuts_dev-other.json.gz"
- path = "./data/fbank/cuts_test-clean.json.gz"
- path = "./data/fbank/cuts_test-other.json.gz"
+ # path = "./data/fbank/librispeech_cuts_train-clean-100.jsonl.gz"
+ # path = "./data/fbank/librispeech_cuts_train-clean-360.jsonl.gz"
+ # path = "./data/fbank/librispeech_cuts_train-other-500.jsonl.gz"
+ # path = "./data/fbank/librispeech_cuts_dev-clean.jsonl.gz"
+ # path = "./data/fbank/librispeech_cuts_dev-other.jsonl.gz"
+ # path = "./data/fbank/librispeech_cuts_test-clean.jsonl.gz"
+ path = "./data/fbank/librispeech_cuts_test-other.jsonl.gz"
- cuts = load_manifest(path)
+ cuts = load_manifest_lazy(path)
cuts.describe()
diff --git a/egs/librispeech/ASR/local/preprocess_gigaspeech.py b/egs/librispeech/ASR/local/preprocess_gigaspeech.py
index cd1345904..0f4ae820b 100644
--- a/egs/librispeech/ASR/local/preprocess_gigaspeech.py
+++ b/egs/librispeech/ASR/local/preprocess_gigaspeech.py
@@ -58,17 +58,19 @@ def preprocess_giga_speech():
)
logging.info("Loading manifest (may take 4 minutes)")
+ prefix = "gigaspeech"
+ suffix = "jsonl.gz"
manifests = read_manifests_if_cached(
dataset_parts=dataset_parts,
output_dir=src_dir,
- prefix="gigaspeech",
- suffix="jsonl.gz",
+ prefix=prefix,
+ suffix=suffix,
)
assert manifests is not None
for partition, m in manifests.items():
logging.info(f"Processing {partition}")
- raw_cuts_path = output_dir / f"cuts_{partition}_raw.jsonl.gz"
+ raw_cuts_path = output_dir / f"{prefix}_cuts_{partition}_raw.{suffix}"
if raw_cuts_path.is_file():
logging.info(f"{partition} already exists - skipping")
continue
diff --git a/egs/librispeech/ASR/local/validate_manifest.py b/egs/librispeech/ASR/local/validate_manifest.py
index 8d3d4c7ce..7c57d629a 100755
--- a/egs/librispeech/ASR/local/validate_manifest.py
+++ b/egs/librispeech/ASR/local/validate_manifest.py
@@ -25,7 +25,7 @@ We will add more checks later if needed.
Usage example:
python3 ./local/validate_manifest.py \
- ./data/fbank/cuts_train-clean-100.json.gz
+ ./data/fbank/librispeech_cuts_train-clean-100.jsonl.gz
"""
@@ -33,7 +33,7 @@ import argparse
import logging
from pathlib import Path
-from lhotse import load_manifest, CutSet
+from lhotse import CutSet, load_manifest_lazy
from lhotse.cut import Cut
@@ -76,7 +76,7 @@ def main():
logging.info(f"Validating {manifest}")
assert manifest.is_file(), f"{manifest} does not exist"
- cut_set = load_manifest(manifest)
+ cut_set = load_manifest_lazy(manifest)
assert isinstance(cut_set, CutSet)
for c in cut_set:
diff --git a/egs/librispeech/ASR/prepare.sh b/egs/librispeech/ASR/prepare.sh
index 8cfb046c8..17a638502 100755
--- a/egs/librispeech/ASR/prepare.sh
+++ b/egs/librispeech/ASR/prepare.sh
@@ -40,9 +40,9 @@ dl_dir=$PWD/download
# It will generate data/lang_bpe_xxx,
# data/lang_bpe_yyy if the array contains xxx, yyy
vocab_sizes=(
- 5000
- 2000
- 1000
+ # 5000
+ # 2000
+ # 1000
500
)
@@ -132,7 +132,7 @@ if [ $stage -le 3 ] && [ $stop_stage -ge 3 ]; then
)
for part in ${parts[@]}; do
python3 ./local/validate_manifest.py \
- data/fbank/cuts_${part}.json.gz
+ data/fbank/librispeech_cuts_${part}.jsonl.gz
done
touch data/fbank/.librispeech-validated.done
fi
diff --git a/egs/librispeech/ASR/prepare_giga_speech.sh b/egs/librispeech/ASR/prepare_giga_speech.sh
index 26b921eab..6f85ddc29 100755
--- a/egs/librispeech/ASR/prepare_giga_speech.sh
+++ b/egs/librispeech/ASR/prepare_giga_speech.sh
@@ -124,9 +124,9 @@ fi
if [ $stage -le 4 ] && [ $stop_stage -ge 4 ]; then
log "Stage 4: Split XL subset into ${num_splits} pieces"
- split_dir=data/fbank/XL_split_${num_splits}
+ split_dir=data/fbank/gigaspeech_XL_split_${num_splits}
if [ ! -f $split_dir/.split_completed ]; then
- lhotse split-lazy ./data/fbank/cuts_XL_raw.jsonl.gz $split_dir $chunk_size
+ lhotse split-lazy ./data/fbank/gigaspeech_cuts_XL_raw.jsonl.gz $split_dir $chunk_size
touch $split_dir/.split_completed
fi
fi
diff --git a/egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/asr_datamodule.py b/egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/asr_datamodule.py
new file mode 120000
index 000000000..b4e5427e0
--- /dev/null
+++ b/egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/asr_datamodule.py
@@ -0,0 +1 @@
+../pruned_transducer_stateless/asr_datamodule.py
\ No newline at end of file
diff --git a/egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/beam_search.py b/egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/beam_search.py
new file mode 120000
index 000000000..227d2247c
--- /dev/null
+++ b/egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/beam_search.py
@@ -0,0 +1 @@
+../pruned_transducer_stateless/beam_search.py
\ No newline at end of file
diff --git a/egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/decode.py b/egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/decode.py
new file mode 100755
index 000000000..e9989579b
--- /dev/null
+++ b/egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/decode.py
@@ -0,0 +1,645 @@
+#!/usr/bin/env python3
+#
+# Copyright 2021-2022 Xiaomi Corporation (Author: Fangjun Kuang,
+# Zengwei Yao)
+#
+# See ../../../../LICENSE for clarification regarding multiple authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Usage:
+(1) greedy search
+./pruned_stateless_emformer_rnnt2/decode.py \
+  --epoch 30 \
+  --avg 15 \
+  --exp-dir ./pruned_stateless_emformer_rnnt2/exp \
+ --max-duration 600 \
+ --decoding-method greedy_search
+
+(2) beam search (not recommended)
+./pruned_stateless_emformer_rnnt2/decode.py \
+  --epoch 30 \
+  --avg 15 \
+  --exp-dir ./pruned_stateless_emformer_rnnt2/exp \
+ --max-duration 600 \
+ --decoding-method beam_search \
+ --beam-size 4
+
+(3) modified beam search
+./pruned_stateless_emformer_rnnt2/decode.py \
+  --epoch 30 \
+  --avg 15 \
+  --exp-dir ./pruned_stateless_emformer_rnnt2/exp \
+ --max-duration 600 \
+ --decoding-method modified_beam_search \
+ --beam-size 4
+
+(4) fast beam search
+./pruned_stateless_emformer_rnnt2/decode.py \
+  --epoch 30 \
+  --avg 15 \
+  --exp-dir ./pruned_stateless_emformer_rnnt2/exp \
+ --max-duration 600 \
+ --decoding-method fast_beam_search \
+ --beam 4 \
+ --max-contexts 4 \
+ --max-states 8
+"""
+
+
+import argparse
+import logging
+import math
+from collections import defaultdict
+from pathlib import Path
+from typing import Dict, List, Optional, Tuple
+
+import k2
+import sentencepiece as spm
+import torch
+import torch.nn as nn
+from asr_datamodule import LibriSpeechAsrDataModule
+from beam_search import (
+ beam_search,
+ fast_beam_search_one_best,
+ greedy_search,
+ greedy_search_batch,
+ modified_beam_search,
+)
+from train import add_model_arguments, get_params, get_transducer_model
+
+from icefall.checkpoint import (
+ average_checkpoints,
+ average_checkpoints_with_averaged_model,
+ find_checkpoints,
+ load_checkpoint,
+)
+from icefall.utils import (
+ AttributeDict,
+ setup_logger,
+ store_transcripts,
+ str2bool,
+ write_error_stats,
+)
+
+LOG_EPS = math.log(1e-10)
+
+
+def get_parser():
+ parser = argparse.ArgumentParser(
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter
+ )
+
+ parser.add_argument(
+ "--epoch",
+ type=int,
+ default=30,
+ help="""It specifies the checkpoint to use for decoding.
+ Note: Epoch counts from 1.
+ You can specify --avg to use more checkpoints for model averaging.""",
+ )
+
+ parser.add_argument(
+ "--iter",
+ type=int,
+ default=0,
+ help="""If positive, --epoch is ignored and it
+ will use the checkpoint exp_dir/checkpoint-iter.pt.
+ You can specify --avg to use more checkpoints for model averaging.
+ """,
+ )
+
+ parser.add_argument(
+ "--avg",
+ type=int,
+ default=15,
+ help="Number of checkpoints to average. Automatically select "
+ "consecutive checkpoints before the checkpoint specified by "
+ "'--epoch' and '--iter'",
+ )
+
+ parser.add_argument(
+ "--use-averaged-model",
+ type=str2bool,
+ default=False,
+ help="Whether to load averaged model. Currently it only supports "
+ "using --epoch. If True, it would decode with the averaged model "
+ "over the epoch range from `epoch-avg` (excluded) to `epoch`."
+ "Actually only the models with epoch number of `epoch-avg` and "
+ "`epoch` are loaded for averaging. ",
+ )
+
+ parser.add_argument(
+ "--exp-dir",
+ type=str,
+ default="pruned_stateless_emformer_rnnt/exp",
+ help="The experiment dir",
+ )
+
+ parser.add_argument(
+ "--bpe-model",
+ type=str,
+ default="data/lang_bpe_500/bpe.model",
+ help="Path to the BPE model",
+ )
+
+ parser.add_argument(
+ "--decoding-method",
+ type=str,
+ default="greedy_search",
+ help="""Possible values are:
+ - greedy_search
+ - beam_search
+ - modified_beam_search
+ - fast_beam_search
+ """,
+ )
+
+ parser.add_argument(
+ "--beam-size",
+ type=int,
+ default=4,
+ help="""An integer indicating how many candidates we will keep for each
+ frame. Used only when --decoding-method is beam_search or
+ modified_beam_search.""",
+ )
+
+ parser.add_argument(
+ "--beam",
+ type=float,
+ default=4,
+ help="""A floating point value to calculate the cutoff score during beam
+ search (i.e., `cutoff = max-score - beam`), which is the same as the
+ `beam` in Kaldi.
+ Used only when --decoding-method is fast_beam_search""",
+ )
+
+ parser.add_argument(
+ "--max-contexts",
+ type=int,
+ default=4,
+ help="""Used only when --decoding-method is
+ fast_beam_search""",
+ )
+
+ parser.add_argument(
+ "--max-states",
+ type=int,
+ default=8,
+ help="""Used only when --decoding-method is
+ fast_beam_search""",
+ )
+
+ parser.add_argument(
+ "--context-size",
+ type=int,
+ default=2,
+ help="The context size in the decoder. 1 means bigram; "
+ "2 means tri-gram",
+ )
+ parser.add_argument(
+ "--max-sym-per-frame",
+ type=int,
+ default=1,
+ help="""Maximum number of symbols per frame.
+ Used only when --decoding_method is greedy_search""",
+ )
+
+ add_model_arguments(parser)
+
+ return parser
+
+
+def decode_one_batch(
+ params: AttributeDict,
+ model: nn.Module,
+ sp: spm.SentencePieceProcessor,
+ batch: dict,
+ decoding_graph: Optional[k2.Fsa] = None,
+) -> Dict[str, List[List[str]]]:
+ """Decode one batch and return the result in a dict. The dict has the
+ following format:
+
+ - key: It indicates the setting used for decoding. For example,
+ if greedy_search is used, it would be "greedy_search"
+ If beam search with a beam size of 7 is used, it would be
+ "beam_7"
+    - value: It contains the decoding result. `len(value)` equals the
+ batch size. `value[i]` is the decoding result for the i-th
+ utterance in the given batch.
+ Args:
+ params:
+ It's the return value of :func:`get_params`.
+ model:
+ The neural model.
+ sp:
+ The BPE model.
+ batch:
+ It is the return value from iterating
+ `lhotse.dataset.K2SpeechRecognitionDataset`. See its documentation
+ for the format of the `batch`.
+ decoding_graph:
+        The decoding graph. Can be either a `k2.trivial_graph` or HLG. Used
+ only when --decoding_method is fast_beam_search.
+ Returns:
+ Return the decoding result. See above description for the format of
+ the returned dict.
+ """
+ device = next(model.parameters()).device
+ feature = batch["inputs"]
+ assert feature.ndim == 3
+
+ feature = feature.to(device)
+ # at entry, feature is (N, T, C)
+
+ supervisions = batch["supervisions"]
+ feature_lens = supervisions["num_frames"].to(device)
+
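+    # Append params.left_context_length padding frames (filled with LOG_EPS)
+    # at the end of the time axis; feature_lens is extended to match.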
+ feature_lens += params.left_context_length
+ feature = torch.nn.functional.pad(
+ feature,
+ pad=(0, 0, 0, params.left_context_length),
+ value=LOG_EPS,
+ )
+
+ encoder_out, encoder_out_lens = model.encoder(
+ x=feature, x_lens=feature_lens
+ )
+ hyps = []
+
+ if params.decoding_method == "fast_beam_search":
+ hyp_tokens = fast_beam_search_one_best(
+ model=model,
+ decoding_graph=decoding_graph,
+ encoder_out=encoder_out,
+ encoder_out_lens=encoder_out_lens,
+ beam=params.beam,
+ max_contexts=params.max_contexts,
+ max_states=params.max_states,
+ )
+ for hyp in sp.decode(hyp_tokens):
+ hyps.append(hyp.split())
+ elif (
+ params.decoding_method == "greedy_search"
+ and params.max_sym_per_frame == 1
+ ):
+ hyp_tokens = greedy_search_batch(
+ model=model,
+ encoder_out=encoder_out,
+ encoder_out_lens=encoder_out_lens,
+ )
+ for hyp in sp.decode(hyp_tokens):
+ hyps.append(hyp.split())
+ elif params.decoding_method == "modified_beam_search":
+ hyp_tokens = modified_beam_search(
+ model=model,
+ encoder_out=encoder_out,
+ encoder_out_lens=encoder_out_lens,
+ beam=params.beam_size,
+ )
+ for hyp in sp.decode(hyp_tokens):
+ hyps.append(hyp.split())
+ else:
+ batch_size = encoder_out.size(0)
+
+ for i in range(batch_size):
+ # fmt: off
+ encoder_out_i = encoder_out[i:i + 1, :encoder_out_lens[i]]
+ # fmt: on
+ if params.decoding_method == "greedy_search":
+ hyp = greedy_search(
+ model=model,
+ encoder_out=encoder_out_i,
+ max_sym_per_frame=params.max_sym_per_frame,
+ )
+ elif params.decoding_method == "beam_search":
+ hyp = beam_search(
+ model=model,
+ encoder_out=encoder_out_i,
+ beam=params.beam_size,
+ )
+ else:
+ raise ValueError(
+ f"Unsupported decoding method: {params.decoding_method}"
+ )
+ hyps.append(sp.decode(hyp).split())
+
+ if params.decoding_method == "greedy_search":
+ return {"greedy_search": hyps}
+ elif params.decoding_method == "fast_beam_search":
+ return {
+ (
+ f"beam_{params.beam}_"
+ f"max_contexts_{params.max_contexts}_"
+ f"max_states_{params.max_states}"
+ ): hyps
+ }
+ else:
+ return {f"beam_size_{params.beam_size}": hyps}
+
+
+def decode_dataset(
+ dl: torch.utils.data.DataLoader,
+ params: AttributeDict,
+ model: nn.Module,
+ sp: spm.SentencePieceProcessor,
+ decoding_graph: Optional[k2.Fsa] = None,
+) -> Dict[str, List[Tuple[List[str], List[str]]]]:
+ """Decode dataset.
+
+ Args:
+ dl:
+ PyTorch's dataloader containing the dataset to decode.
+ params:
+ It is returned by :func:`get_params`.
+ model:
+ The neural model.
+ sp:
+ The BPE model.
+ decoding_graph:
+        The decoding graph. Can be either a `k2.trivial_graph` or HLG. Used
+ only when --decoding_method is fast_beam_search.
+ Returns:
+ Return a dict, whose key may be "greedy_search" if greedy search
+ is used, or it may be "beam_7" if beam size of 7 is used.
+ Its value is a list of tuples. Each tuple contains two elements:
+ The first is the reference transcript, and the second is the
+ predicted result.
+ """
+ num_cuts = 0
+
+ try:
+ num_batches = len(dl)
+ except TypeError:
+ num_batches = "?"
+
+ if params.decoding_method == "greedy_search":
+ log_interval = 50
+ else:
+ log_interval = 10
+
+ results = defaultdict(list)
+ for batch_idx, batch in enumerate(dl):
+ texts = batch["supervisions"]["text"]
+
+ hyps_dict = decode_one_batch(
+ params=params,
+ model=model,
+ sp=sp,
+ decoding_graph=decoding_graph,
+ batch=batch,
+ )
+
+ for name, hyps in hyps_dict.items():
+ this_batch = []
+ assert len(hyps) == len(texts)
+ for hyp_words, ref_text in zip(hyps, texts):
+ ref_words = ref_text.split()
+ this_batch.append((ref_words, hyp_words))
+
+ results[name].extend(this_batch)
+
+ num_cuts += len(texts)
+
+ if batch_idx % log_interval == 0:
+ batch_str = f"{batch_idx}/{num_batches}"
+
+ logging.info(
+ f"batch {batch_str}, cuts processed until now is {num_cuts}"
+ )
+ return results
+
+
+def save_results(
+ params: AttributeDict,
+ test_set_name: str,
+ results_dict: Dict[str, List[Tuple[List[int], List[int]]]],
+):
+ test_set_wers = dict()
+ for key, results in results_dict.items():
+ recog_path = (
+ params.res_dir / f"recogs-{test_set_name}-{key}-{params.suffix}.txt"
+ )
+ store_transcripts(filename=recog_path, texts=results)
+ logging.info(f"The transcripts are stored in {recog_path}")
+
+ # The following prints out WERs, per-word error statistics and aligned
+ # ref/hyp pairs.
+ errs_filename = (
+ params.res_dir / f"errs-{test_set_name}-{key}-{params.suffix}.txt"
+ )
+ with open(errs_filename, "w") as f:
+ wer = write_error_stats(
+ f, f"{test_set_name}-{key}", results, enable_log=True
+ )
+ test_set_wers[key] = wer
+
+ logging.info("Wrote detailed error stats to {}".format(errs_filename))
+
+ test_set_wers = sorted(test_set_wers.items(), key=lambda x: x[1])
+ errs_info = (
+ params.res_dir
+ / f"wer-summary-{test_set_name}-{key}-{params.suffix}.txt"
+ )
+ with open(errs_info, "w") as f:
+ print("settings\tWER", file=f)
+ for key, val in test_set_wers:
+ print("{}\t{}".format(key, val), file=f)
+
+ s = "\nFor {}, WER of different settings are:\n".format(test_set_name)
+ note = "\tbest for {}".format(test_set_name)
+ for key, val in test_set_wers:
+ s += "{}\t{}{}\n".format(key, val, note)
+ note = ""
+ logging.info(s)
+
+
+@torch.no_grad()
+def main():
+ parser = get_parser()
+ LibriSpeechAsrDataModule.add_arguments(parser)
+ args = parser.parse_args()
+ args.exp_dir = Path(args.exp_dir)
+
+ params = get_params()
+ params.update(vars(args))
+
+ assert params.decoding_method in (
+ "greedy_search",
+ "beam_search",
+ "fast_beam_search",
+ "modified_beam_search",
+ )
+ params.res_dir = params.exp_dir / params.decoding_method
+
+ if params.iter > 0:
+ params.suffix = f"iter-{params.iter}-avg-{params.avg}"
+ else:
+ params.suffix = f"epoch-{params.epoch}-avg-{params.avg}"
+
+ if "fast_beam_search" in params.decoding_method:
+ params.suffix += f"-beam-{params.beam}"
+ params.suffix += f"-max-contexts-{params.max_contexts}"
+ params.suffix += f"-max-states-{params.max_states}"
+ elif "beam_search" in params.decoding_method:
+ params.suffix += (
+ f"-{params.decoding_method}-beam-size-{params.beam_size}"
+ )
+ else:
+ params.suffix += f"-context-{params.context_size}"
+ params.suffix += f"-max-sym-per-frame-{params.max_sym_per_frame}"
+
+ if params.use_averaged_model:
+ params.suffix += "-use-averaged-model"
+
+ setup_logger(f"{params.res_dir}/log-decode-{params.suffix}")
+ logging.info("Decoding started")
+
+ device = torch.device("cpu")
+ if torch.cuda.is_available():
+ device = torch.device("cuda", 0)
+
+ logging.info(f"Device: {device}")
+
+ sp = spm.SentencePieceProcessor()
+ sp.load(params.bpe_model)
+
+    # <blk> and <unk> are defined in local/train_bpe_model.py
+    params.blank_id = sp.piece_to_id("<blk>")
+    params.unk_id = sp.piece_to_id("<unk>")
+ params.vocab_size = sp.get_piece_size()
+
+ logging.info(params)
+
+ logging.info("About to create model")
+ model = get_transducer_model(params)
+
+ if not params.use_averaged_model:
+ if params.iter > 0:
+ filenames = find_checkpoints(
+ params.exp_dir, iteration=-params.iter
+ )[: params.avg]
+ if len(filenames) == 0:
+ raise ValueError(
+ f"No checkpoints found for"
+ f" --iter {params.iter}, --avg {params.avg}"
+ )
+ elif len(filenames) < params.avg:
+ raise ValueError(
+ f"Not enough checkpoints ({len(filenames)}) found for"
+ f" --iter {params.iter}, --avg {params.avg}"
+ )
+ logging.info(f"averaging {filenames}")
+ model.to(device)
+ model.load_state_dict(average_checkpoints(filenames, device=device))
+ elif params.avg == 1:
+ load_checkpoint(f"{params.exp_dir}/epoch-{params.epoch}.pt", model)
+ else:
+ start = params.epoch - params.avg + 1
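+            # e.g., --epoch 30 --avg 6 averages epoch-25.pt through epoch-30.pt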
+ filenames = []
+ for i in range(start, params.epoch + 1):
+ if i >= 1:
+ filenames.append(f"{params.exp_dir}/epoch-{i}.pt")
+ logging.info(f"averaging {filenames}")
+ model.to(device)
+ model.load_state_dict(average_checkpoints(filenames, device=device))
+ else:
+ if params.iter > 0:
+ filenames = find_checkpoints(
+ params.exp_dir, iteration=-params.iter
+ )[: params.avg + 1]
+ if len(filenames) == 0:
+ raise ValueError(
+ f"No checkpoints found for"
+ f" --iter {params.iter}, --avg {params.avg}"
+ )
+ elif len(filenames) < params.avg + 1:
+ raise ValueError(
+ f"Not enough checkpoints ({len(filenames)}) found for"
+ f" --iter {params.iter}, --avg {params.avg}"
+ )
+ filename_start = filenames[-1]
+ filename_end = filenames[0]
+ logging.info(
+ "Calculating the averaged model over iteration checkpoints"
+ f" from {filename_start} (excluded) to {filename_end}"
+ )
+ model.to(device)
+ model.load_state_dict(
+ average_checkpoints_with_averaged_model(
+ filename_start=filename_start,
+ filename_end=filename_end,
+ device=device,
+ )
+ )
+ else:
+ assert params.avg > 0, params.avg
+ start = params.epoch - params.avg
+ assert start >= 1, start
+ filename_start = f"{params.exp_dir}/epoch-{start}.pt"
+ filename_end = f"{params.exp_dir}/epoch-{params.epoch}.pt"
+ logging.info(
+ f"Calculating the averaged model over epoch range from "
+ f"{start} (excluded) to {params.epoch}"
+ )
+ model.to(device)
+ model.load_state_dict(
+ average_checkpoints_with_averaged_model(
+ filename_start=filename_start,
+ filename_end=filename_end,
+ device=device,
+ )
+ )
+
+ model.to(device)
+ model.eval()
+
+ if params.decoding_method == "fast_beam_search":
+ decoding_graph = k2.trivial_graph(params.vocab_size - 1, device=device)
+ else:
+ decoding_graph = None
+
+ num_param = sum([p.numel() for p in model.parameters()])
+ logging.info(f"Number of model parameters: {num_param}")
+
+ librispeech = LibriSpeechAsrDataModule(args)
+
+ test_clean_cuts = librispeech.test_clean_cuts()
+ test_other_cuts = librispeech.test_other_cuts()
+
+ test_clean_dl = librispeech.test_dataloaders(test_clean_cuts)
+ test_other_dl = librispeech.test_dataloaders(test_other_cuts)
+
+ test_sets = ["test-clean", "test-other"]
+    test_dls = [test_clean_dl, test_other_dl]
+
+    for test_set, test_dl in zip(test_sets, test_dls):
+ results_dict = decode_dataset(
+ dl=test_dl,
+ params=params,
+ model=model,
+ sp=sp,
+ decoding_graph=decoding_graph,
+ )
+
+ save_results(
+ params=params,
+ test_set_name=test_set,
+ results_dict=results_dict,
+ )
+
+ logging.info("Done!")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/decoder.py b/egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/decoder.py
new file mode 120000
index 000000000..0d5f10dc0
--- /dev/null
+++ b/egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/decoder.py
@@ -0,0 +1 @@
+../pruned_transducer_stateless/decoder.py
\ No newline at end of file
diff --git a/egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/emformer.py b/egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/emformer.py
new file mode 100644
index 000000000..318cd5094
--- /dev/null
+++ b/egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/emformer.py
@@ -0,0 +1,315 @@
+# Copyright 2022 Xiaomi Corporation (Author: Mingshuang Luo)
+#
+# See ../../../../LICENSE for clarification regarding multiple authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import math
+from typing import List, Optional, Tuple
+
+import torch
+import torch.nn as nn
+from encoder_interface import EncoderInterface
+from subsampling import Conv2dSubsampling, VggSubsampling
+
+try:
+ from torchaudio.models import Emformer as _Emformer
+except ImportError:
+ import torchaudio
+
+ print(
+ "Please install torchaudio >= 0.11.0. "
+ f"Current version: {torchaudio.__version__}"
+ )
+ raise
+
+
+def unstack_states(
+ states: List[List[torch.Tensor]],
+) -> List[List[List[torch.Tensor]]]:
+ """Unstack the emformer state corresponding to a batch of utterances
+    into a list of states, where the i-th entry is the state from the i-th
+ utterance in the batch.
+
+ Args:
+ states:
+        A list-of-list of tensors. ``len(states)`` equals the number of
+        layers in the emformer. ``states[i]`` contains the states for
+ the i-th layer. ``states[i][k]`` is either a 3-D tensor of shape
+ ``(T, N, C)`` or a 2-D tensor of shape ``(C, N)``
+ """
+ batch_size = states[0][0].size(1)
+ num_layers = len(states)
+
+ ans = [None] * batch_size
+ for i in range(batch_size):
+ ans[i] = [[] for _ in range(num_layers)]
+
+ for li, layer in enumerate(states):
+ for s in layer:
+ s_list = s.unbind(dim=1)
+ # We will use stack(dim=1) later in stack_states()
+ for bi, b in enumerate(ans):
+ b[li].append(s_list[bi])
+ return ans
+
+
+def stack_states(
+ state_list: List[List[List[torch.Tensor]]],
+) -> List[List[torch.Tensor]]:
+ """Stack list of emformer states that correspond to separate utterances
+ into a single emformer state so that it can be used as an input for
+ emformer when those utterances are formed into a batch.
+
+ Note:
+ It is the inverse of :func:`unstack_states`.
+
+ Args:
+ state_list:
+            Each element in state_list corresponds to the internal state
+ of the emformer model for a single utterance.
+ Returns:
+ Return a new state corresponding to a batch of utterances.
+ See the input argument of :func:`unstack_states` for the meaning
+ of the returned tensor.
+ """
+ batch_size = len(state_list)
+ ans = []
+ for layer in state_list[0]:
+ # layer is a list of tensors
+ if batch_size > 1:
+ ans.append([[s] for s in layer])
+ # Note: We will stack ans[layer][s][] later to get ans[layer][s]
+ else:
+ ans.append([s.unsqueeze(1) for s in layer])
+
+ for b, states in enumerate(state_list[1:], 1):
+ for li, layer in enumerate(states):
+ for si, s in enumerate(layer):
+ ans[li][si].append(s)
+ if b == batch_size - 1:
+ ans[li][si] = torch.stack(ans[li][si], dim=1)
+ # We will use unbind(dim=1) later in unstack_states()
+ return ans
+
+
+class Emformer(EncoderInterface):
+ """This is just a simple wrapper around torchaudio.models.Emformer.
+ We may replace it with our own implementation some time later.
+ """
+
+ def __init__(
+ self,
+ num_features: int,
+ output_dim: int,
+ d_model: int,
+ nhead: int,
+ dim_feedforward: int,
+ num_encoder_layers: int,
+ segment_length: int,
+ left_context_length: int,
+ right_context_length: int,
+ max_memory_size: int = 0,
+ dropout: float = 0.1,
+ subsampling_factor: int = 4,
+ vgg_frontend: bool = False,
+ ) -> None:
+ """
+ Args:
+ num_features:
+ The input dimension of the model.
+ output_dim:
+ The output dimension of the model.
+ d_model:
+ Attention dimension.
+ nhead:
+ Number of heads in multi-head attention.
+ dim_feedforward:
+ The output dimension of the feedforward layers in encoder.
+ num_encoder_layers:
+ Number of encoder layers.
+ segment_length:
+ Number of frames per segment before subsampling.
+ left_context_length:
+ Number of frames in the left context before subsampling.
+ right_context_length:
+ Number of frames in the right context before subsampling.
+ max_memory_size:
+ TODO.
+ dropout:
+ Dropout in encoder.
+ subsampling_factor:
+ Number of output frames is num_in_frames // subsampling_factor.
+ Currently, subsampling_factor MUST be 4.
+ vgg_frontend:
+ True to use vgg style frontend for subsampling.
+ """
+ super().__init__()
+
+ self.subsampling_factor = subsampling_factor
+ if subsampling_factor != 4:
+ raise NotImplementedError("Support only 'subsampling_factor=4'.")
+
+ # self.encoder_embed converts the input of shape (N, T, num_features)
+ # to the shape (N, T//subsampling_factor, d_model).
+ # That is, it does two things simultaneously:
+ # (1) subsampling: T -> T//subsampling_factor
+ # (2) embedding: num_features -> d_model
+ if vgg_frontend:
+ self.encoder_embed = VggSubsampling(num_features, d_model)
+ else:
+ self.encoder_embed = Conv2dSubsampling(num_features, d_model)
+
+ self.segment_length = segment_length # before subsampling
+ self.right_context_length = right_context_length
+
+ assert right_context_length % subsampling_factor == 0
+ assert segment_length % subsampling_factor == 0
+ assert left_context_length % subsampling_factor == 0
+
+ left_context_length = left_context_length // subsampling_factor
+ right_context_length = right_context_length // subsampling_factor
+ segment_length = segment_length // subsampling_factor
+
+ self.model = _Emformer(
+ input_dim=d_model,
+ num_heads=nhead,
+ ffn_dim=dim_feedforward,
+ num_layers=num_encoder_layers,
+ segment_length=segment_length,
+ dropout=dropout,
+ activation="relu",
+ left_context_length=left_context_length,
+ right_context_length=right_context_length,
+ max_memory_size=max_memory_size,
+ weight_init_scale_strategy="depthwise",
+ tanh_on_mem=False,
+ negative_inf=-1e8,
+ )
+
+ self.encoder_output_layer = nn.Sequential(
+ nn.Dropout(p=dropout), nn.Linear(d_model, output_dim)
+ )
+ self.log_eps = math.log(1e-10)
+
+ self._has_init_state = False
+ self._init_state = torch.jit.Attribute([], List[List[torch.Tensor]])
+
+ def forward(
+ self,
+ x: torch.Tensor,
+ x_lens: torch.Tensor,
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
+ """
+ Args:
+ x:
+ Input features of shape (N, T, C).
+ x_lens:
+            An int32 tensor of shape (N,) containing valid frames in `x` before
+ padding. We have `x.size(1) == x_lens.max()`
+ Returns:
+ Return a tuple containing two tensors:
+
+ - encoder_out, a tensor of shape (N, T', C)
+          - encoder_out_lens, an int32 tensor of shape (N,) containing the
+ valid frames in `encoder_out` before padding
+ """
+ x = nn.functional.pad(
+ x,
+ # (left, right, top, bottom)
+ # left/right are for the channel dimension, i.e., axis 2
+ # top/bottom are for the time dimension, i.e., axis 1
+ (0, 0, 0, self.right_context_length),
+ value=self.log_eps,
+ ) # (N, T, C) -> (N, T+right_context_length, C)
+
+ x = self.encoder_embed(x)
+
+ # Caution: We assume the subsampling factor is 4!
+ x_lens = (((x_lens - 1) >> 1) - 1) >> 1
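+        # e.g., x_lens = 100 -> ((100 - 1) >> 1 - 1) >> 1 = 24, roughly x_lens // 4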
+
+ emformer_out, emformer_out_lens = self.model(x, x_lens)
+ logits = self.encoder_output_layer(emformer_out)
+
+ return logits, emformer_out_lens
+
+ @torch.jit.export
+ def streaming_forward(
+ self,
+ x: torch.Tensor,
+ x_lens: torch.Tensor,
+ states: Optional[List[List[torch.Tensor]]] = None,
+ ):
+ """
+ Args:
+ x:
+ A 3-D tensor of shape (N, T, C). Note: x also contains right
+ context frames.
+ x_lens:
+            A 1-D tensor of shape (N,) containing the number of valid frames for each
+ element in `x` before padding. Note: It also counts right context
+ frames.
+ states:
+ Internal states of the model.
+ Returns:
+ Return a tuple containing 3 tensors:
+ - encoder_out, a 3-D tensor of shape (N, T, C)
+ - encoder_out_lens: a 1-D tensor of shape (N,)
+ - next_state, internal model states for the next invocation
+ """
+ x = self.encoder_embed(x)
+
+ # Caution: We assume the subsampling factor is 4!
+ x_lens = (((x_lens - 1) >> 1) - 1) >> 1
+
+ emformer_out, emformer_out_lens, states = self.model.infer(
+ x, x_lens, states
+ )
+
+ if x.size(1) != (
+ self.model.segment_length + self.model.right_context_length
+ ):
+ raise ValueError(
+ "Incorrect input shape."
+ f"{x.size(1)} vs {self.model.segment_length} + "
+ f"{self.model.right_context_length}"
+ )
+
+ logits = self.encoder_output_layer(emformer_out)
+
+ return logits, emformer_out_lens, states
+
+ @torch.jit.export
+ def get_init_state(self, device: torch.device) -> List[List[torch.Tensor]]:
+ """Return the initial state of each layer.
+
+ Args:
+ device:
+ The device on which the returned tensors should reside.
+
+ Returns:
+ Return the initial state of each layer. NOTE: the returned
+ tensors are on the given device. `len(ans) == num_emformer_layers`.
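+
+ A hedged usage sketch for streaming decoding (the chunk iterator
+ below is hypothetical):
+
+ states = model.get_init_state(device)
+ for chunk, chunk_lens in chunks:
+ out, out_lens, states = model.streaming_forward(
+ chunk, chunk_lens, states
+ )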
+ """
+ if self._has_init_state:
+ # Note(fangjun): It is OK to share the init state as it is
+ # not going to be modified by the model
+ return self._init_state
+
+ batch_size = 1
+
+ ans: List[List[torch.Tensor]] = []
+ for layer in self.model.emformer_layers:
+ s = layer._init_state(batch_size=batch_size, device=device)
+ ans.append(s)
+
+ self._has_init_state = True
+ self._init_state = ans
+
+ return ans
diff --git a/egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/encoder_interface.py b/egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/encoder_interface.py
new file mode 120000
index 000000000..a478f2351
--- /dev/null
+++ b/egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/encoder_interface.py
@@ -0,0 +1 @@
+../pruned_transducer_stateless/encoder_interface.py
\ No newline at end of file
diff --git a/egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/export.py b/egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/export.py
new file mode 100755
index 000000000..2375f5001
--- /dev/null
+++ b/egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/export.py
@@ -0,0 +1,281 @@
+#!/usr/bin/env python3
+#
+# Copyright 2021 Xiaomi Corporation (Author: Fangjun Kuang)
+#
+# See ../../../../LICENSE for clarification regarding multiple authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script converts several saved checkpoints
+# to a single one using model averaging.
+"""
+Usage:
+./pruned_stateless_emformer_rnnt2/export.py \
+ --exp-dir ./pruned_stateless_emformer_rnnt2/exp \
+ --bpe-model data/lang_bpe_500/bpe.model \
+ --epoch 20 \
+ --avg 10
+
+It will generate a file exp_dir/pretrained.pt
+
+To use the generated file with `pruned_stateless_emformer_rnnt2/decode.py`,
+you can do:
+
+ cd /path/to/exp_dir
+ ln -s pretrained.pt epoch-9999.pt
+
+ cd /path/to/egs/librispeech/ASR
+ ./pruned_stateless_emformer_rnnt2/decode.py \
+ --exp-dir ./pruned_stateless_emformer_rnnt2/exp \
+ --epoch 9999 \
+ --avg 1 \
+ --max-duration 600 \
+ --decoding-method greedy_search \
+ --bpe-model data/lang_bpe_500/bpe.model
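+
+To export a torchscript model instead, pass --jit (a sketch using the
+flags defined below):
+
+./pruned_stateless_emformer_rnnt2/export.py \
+ --exp-dir ./pruned_stateless_emformer_rnnt2/exp \
+ --bpe-model data/lang_bpe_500/bpe.model \
+ --epoch 20 \
+ --avg 10 \
+ --jit 1
+
+It will generate a file exp_dir/cpu_jit.pt, which can be loaded with
+torch.jit.load() in Python or C++.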
+"""
+
+import argparse
+import logging
+from pathlib import Path
+
+import sentencepiece as spm
+import torch
+from train import add_model_arguments, get_params, get_transducer_model
+
+from icefall.checkpoint import (
+ average_checkpoints,
+ average_checkpoints_with_averaged_model,
+ find_checkpoints,
+ load_checkpoint,
+)
+from icefall.utils import str2bool
+
+
+def get_parser():
+ parser = argparse.ArgumentParser(
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter
+ )
+
+ parser.add_argument(
+ "--epoch",
+ type=int,
+ default=28,
+ help="""It specifies the checkpoint to use for averaging.
+ Note: Epoch counts from 1.
+ You can specify --avg to use more checkpoints for model averaging.""",
+ )
+
+ parser.add_argument(
+ "--iter",
+ type=int,
+ default=0,
+ help="""If positive, --epoch is ignored and it
+ will use the checkpoint exp_dir/checkpoint-iter.pt.
+ You can specify --avg to use more checkpoints for model averaging.
+ """,
+ )
+
+ parser.add_argument(
+ "--avg",
+ type=int,
+ default=15,
+ help="Number of checkpoints to average. Automatically select "
+ "consecutive checkpoints before the checkpoint specified by "
+ "'--epoch' and '--iter'",
+ )
+
+ parser.add_argument(
+ "--use-averaged-model",
+ type=str2bool,
+ default=False,
+ help="Whether to load averaged model. Currently it only supports "
+ "using --epoch. If True, it would decode with the averaged model "
+ "over the epoch range from `epoch-avg` (excluded) to `epoch`."
+ "Actually only the models with epoch number of `epoch-avg` and "
+ "`epoch` are loaded for averaging. ",
+ )
+
+ parser.add_argument(
+ "--exp-dir",
+ type=str,
+ default="prunted_stateless_emformer_rnnt/exp",
+ help="""It specifies the directory where all training related
+ files, e.g., checkpoints, log, etc, are saved
+ """,
+ )
+
+ parser.add_argument(
+ "--bpe-model",
+ type=str,
+ default="data/lang_bpe_500/bpe.model",
+ help="Path to the BPE model",
+ )
+
+ parser.add_argument(
+ "--jit",
+ type=str2bool,
+ default=False,
+ help="""True to save a model after applying torch.jit.script.
+ """,
+ )
+
+ parser.add_argument(
+ "--context-size",
+ type=int,
+ default=2,
+ help="The context size in the decoder. 1 means bigram; "
+ "2 means tri-gram",
+ )
+
+ add_model_arguments(parser)
+
+ return parser
+
+
+def main():
+ args = get_parser().parse_args()
+ args.exp_dir = Path(args.exp_dir)
+
+ params = get_params()
+ params.update(vars(args))
+
+ device = torch.device("cpu")
+ if torch.cuda.is_available():
+ device = torch.device("cuda", 0)
+
+ logging.info(f"device: {device}")
+
+ sp = spm.SentencePieceProcessor()
+ sp.load(params.bpe_model)
+
+ # <blk> and <unk> are defined in local/train_bpe_model.py
+ params.blank_id = sp.piece_to_id("<blk>")
+ params.unk_id = sp.piece_to_id("<unk>")
+ params.vocab_size = sp.get_piece_size()
+
+ logging.info(params)
+
+ logging.info("About to create model")
+ model = get_transducer_model(params)
+
+ if not params.use_averaged_model:
+ if params.iter > 0:
+ filenames = find_checkpoints(
+ params.exp_dir, iteration=-params.iter
+ )[: params.avg]
+ if len(filenames) == 0:
+ raise ValueError(
+ f"No checkpoints found for"
+ f" --iter {params.iter}, --avg {params.avg}"
+ )
+ elif len(filenames) < params.avg:
+ raise ValueError(
+ f"Not enough checkpoints ({len(filenames)}) found for"
+ f" --iter {params.iter}, --avg {params.avg}"
+ )
+ logging.info(f"averaging {filenames}")
+ model.to(device)
+ model.load_state_dict(average_checkpoints(filenames, device=device))
+ elif params.avg == 1:
+ load_checkpoint(f"{params.exp_dir}/epoch-{params.epoch}.pt", model)
+ else:
+ start = params.epoch - params.avg + 1
+ filenames = []
+ for i in range(start, params.epoch + 1):
+ if i >= 1:
+ filenames.append(f"{params.exp_dir}/epoch-{i}.pt")
+ logging.info(f"averaging {filenames}")
+ model.to(device)
+ model.load_state_dict(average_checkpoints(filenames, device=device))
+ else:
+ if params.iter > 0:
+ filenames = find_checkpoints(
+ params.exp_dir, iteration=-params.iter
+ )[: params.avg + 1]
+ if len(filenames) == 0:
+ raise ValueError(
+ f"No checkpoints found for"
+ f" --iter {params.iter}, --avg {params.avg}"
+ )
+ elif len(filenames) < params.avg + 1:
+ raise ValueError(
+ f"Not enough checkpoints ({len(filenames)}) found for"
+ f" --iter {params.iter}, --avg {params.avg}"
+ )
+ filename_start = filenames[-1]
+ filename_end = filenames[0]
+ logging.info(
+ "Calculating the averaged model over iteration checkpoints"
+ f" from {filename_start} (excluded) to {filename_end}"
+ )
+ model.to(device)
+ model.load_state_dict(
+ average_checkpoints_with_averaged_model(
+ filename_start=filename_start,
+ filename_end=filename_end,
+ device=device,
+ )
+ )
+ else:
+ assert params.avg > 0, params.avg
+ start = params.epoch - params.avg
+ assert start >= 1, start
+ filename_start = f"{params.exp_dir}/epoch-{start}.pt"
+ filename_end = f"{params.exp_dir}/epoch-{params.epoch}.pt"
+ logging.info(
+ f"Calculating the averaged model over epoch range from "
+ f"{start} (excluded) to {params.epoch}"
+ )
+ model.to(device)
+ model.load_state_dict(
+ average_checkpoints_with_averaged_model(
+ filename_start=filename_start,
+ filename_end=filename_end,
+ device=device,
+ )
+ )
+
+ model.to("cpu")
+ model.eval()
+ for p in model.parameters():
+ p.requires_grad_(False)
+
+ if params.jit:
+ # We won't use the forward() method of the model in C++, so just ignore
+ # it here.
+ # Otherwise, one of its arguments is a ragged tensor and is not
+ # torch scriptable.
+ model.__class__.forward = torch.jit.ignore(model.__class__.forward)
+ logging.info("Using torch.jit.script")
+ model = torch.jit.script(model)
+ filename = params.exp_dir / "cpu_jit.pt"
+ model.save(str(filename))
+ logging.info(f"Saved to {filename}")
+ else:
+ logging.info("Not using torch.jit.script")
+ # Save it using a format so that it can be loaded
+ # by :func:`load_checkpoint`
+ filename = params.exp_dir / "pretrained.pt"
+ torch.save({"model": model.state_dict()}, str(filename))
+ logging.info(f"Saved to {filename}")
+
+
+if __name__ == "__main__":
+ formatter = (
+ "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s"
+ )
+
+ logging.basicConfig(format=formatter, level=logging.INFO)
+ main()
diff --git a/egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/joiner.py b/egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/joiner.py
new file mode 120000
index 000000000..81ad47c55
--- /dev/null
+++ b/egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/joiner.py
@@ -0,0 +1 @@
+../pruned_transducer_stateless/joiner.py
\ No newline at end of file
diff --git a/egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/model.py b/egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/model.py
new file mode 100644
index 000000000..2f019bcdb
--- /dev/null
+++ b/egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/model.py
@@ -0,0 +1,169 @@
+# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang, Wei Kang)
+#
+# See ../../../../LICENSE for clarification regarding multiple authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import k2
+import torch
+import torch.nn as nn
+from encoder_interface import EncoderInterface
+
+from icefall.utils import add_sos
+
+
+class Transducer(nn.Module):
+ """It implements https://arxiv.org/pdf/1211.3711.pdf
+ "Sequence Transduction with Recurrent Neural Networks"
+ """
+
+ def __init__(
+ self,
+ encoder: EncoderInterface,
+ decoder: nn.Module,
+ joiner: nn.Module,
+ ):
+ """
+ Args:
+ encoder:
+ It is the transcription network in the paper. It accepts
+ two inputs: `x` of (N, T, C) and `x_lens` of shape (N,).
+ It returns two tensors: `logits` of shape (N, T, C) and
+ `logit_lens` of shape (N,).
+ decoder:
+ It is the prediction network in the paper. Its input shape
+ is (N, U) and its output shape is (N, U, C). It should contain
+ one attribute: `blank_id`.
+ joiner:
+ It has two inputs with shapes: (N, T, C) and (N, U, C). Its
+ output shape is (N, T, U, C). Note that its output contains
+ unnormalized probs, i.e., not processed by log-softmax.
+ """
+ super().__init__()
+ assert isinstance(encoder, EncoderInterface), type(encoder)
+ assert hasattr(decoder, "blank_id")
+
+ self.encoder = encoder
+ self.decoder = decoder
+ self.joiner = joiner
+
+ def forward(
+ self,
+ x: torch.Tensor,
+ x_lens: torch.Tensor,
+ y: k2.RaggedTensor,
+ prune_range: int = 5,
+ am_scale: float = 0.0,
+ lm_scale: float = 0.0,
+ ) -> torch.Tensor:
+ """
+ Args:
+ x:
+ A 3-D tensor of shape (N, T, C).
+ x_lens:
+ A 1-D tensor of shape (N,). It contains the number of frames in `x`
+ before padding.
+ y:
+ A ragged tensor with 2 axes [utt][label]. It contains labels of each
+ utterance.
+ prune_range:
+ The prune range for the rnnt loss; it specifies how many symbols
+ (context) we consider for each frame when computing the loss.
+ am_scale:
+ The scale used to smooth the loss with the am part (output of the
+ encoder network).
+ lm_scale:
+ The scale used to smooth the loss with the lm part (output of the
+ prediction network).
+ Returns:
+ Return a tuple containing the simple loss and the pruned loss.
+
+ Note:
+ Regarding am_scale & lm_scale, the loss function takes the form:
+ lm_scale * lm_probs + am_scale * am_probs +
+ (1 - lm_scale - am_scale) * combined_probs
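+
+ In train.py these two losses are combined as
+ `loss = simple_loss_scale * simple_loss + pruned_loss`.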
+ """
+ assert x.ndim == 3, x.shape
+ assert x_lens.ndim == 1, x_lens.shape
+ assert y.num_axes == 2, y.num_axes
+
+ assert x.size(0) == x_lens.size(0) == y.dim0
+
+ encoder_out, x_lens = self.encoder(x, x_lens)
+ assert torch.all(x_lens > 0)
+
+ # Now for the decoder, i.e., the prediction network
+ row_splits = y.shape.row_splits(1)
+ y_lens = row_splits[1:] - row_splits[:-1]
+
+ blank_id = self.decoder.blank_id
+ sos_y = add_sos(y, sos_id=blank_id)
+
+ # sos_y_padded: [B, S + 1], start with SOS.
+ sos_y_padded = sos_y.pad(mode="constant", padding_value=blank_id)
+
+ # decoder_out: [B, S + 1, C]
+ decoder_out = self.decoder(sos_y_padded)
+
+ # Note: y does not start with SOS
+ # y_padded : [B, S]
+ y_padded = y.pad(mode="constant", padding_value=0)
+
+ y_padded = y_padded.to(torch.int64)
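+ # Each row of `boundary` has the form
+ # [begin_symbol, begin_frame, end_symbol, end_frame],
+ # i.e., [0, 0, y_len, x_len] here, so that k2 can ignore padding.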
+ boundary = torch.zeros(
+ (x.size(0), 4), dtype=torch.int64, device=x.device
+ )
+ boundary[:, 2] = y_lens
+ boundary[:, 3] = x_lens
+
+ simple_loss, (px_grad, py_grad) = k2.rnnt_loss_smoothed(
+ lm=decoder_out,
+ am=encoder_out,
+ symbols=y_padded,
+ termination_symbol=blank_id,
+ lm_only_scale=lm_scale,
+ am_only_scale=am_scale,
+ boundary=boundary,
+ reduction="sum",
+ return_grad=True,
+ )
+
+ # ranges : [B, T, prune_range]
+ ranges = k2.get_rnnt_prune_ranges(
+ px_grad=px_grad,
+ py_grad=py_grad,
+ boundary=boundary,
+ s_range=prune_range,
+ )
+
+ # am_pruned : [B, T, prune_range, C]
+ # lm_pruned : [B, T, prune_range, C]
+ am_pruned, lm_pruned = k2.do_rnnt_pruning(
+ am=encoder_out, lm=decoder_out, ranges=ranges
+ )
+
+ # logits : [B, T, prune_range, C]
+ logits = self.joiner(am_pruned, lm_pruned)
+
+ pruned_loss = k2.rnnt_loss_pruned(
+ logits=logits,
+ symbols=y_padded,
+ ranges=ranges,
+ termination_symbol=blank_id,
+ boundary=boundary,
+ reduction="sum",
+ )
+
+ return (simple_loss, pruned_loss)
diff --git a/egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/noam.py b/egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/noam.py
new file mode 100644
index 000000000..e46bf35fb
--- /dev/null
+++ b/egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/noam.py
@@ -0,0 +1,104 @@
+# Copyright 2021 University of Chinese Academy of Sciences (author: Han Zhu)
+#
+# See ../../../../LICENSE for clarification regarding multiple authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import torch
+
+
+class Noam(object):
+ """
+ Implements Noam optimizer.
+
+ Proposed in
+ "Attention Is All You Need", https://arxiv.org/pdf/1706.03762.pdf
+
+ Modified from
+ https://github.com/espnet/espnet/blob/master/espnet/nets/pytorch_backend/transformer/optimizer.py # noqa
+
+ Args:
+ params:
+ iterable of parameters to optimize or dicts defining parameter groups
+ model_size:
+ attention dimension of the transformer model
+ factor:
+ learning rate factor
+ warm_step:
+ warmup steps
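+
+ Example (a minimal sketch; the values mirror this recipe's
+ train.py defaults)::
+
+ optimizer = Noam(model.parameters(), model_size=512,
+ factor=5.0, warm_step=80000)
+ loss.backward() # `model` and `loss` are assumed to exist
+ optimizer.step()
+ optimizer.zero_grad()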
+ """
+
+ def __init__(
+ self,
+ params,
+ model_size: int = 256,
+ factor: float = 10.0,
+ warm_step: int = 25000,
+ weight_decay=0,
+ ) -> None:
+ """Construct an Noam object."""
+ self.optimizer = torch.optim.Adam(
+ params, lr=0, betas=(0.9, 0.98), eps=1e-9, weight_decay=weight_decay
+ )
+ self._step = 0
+ self.warmup = warm_step
+ self.factor = factor
+ self.model_size = model_size
+ self._rate = 0
+
+ @property
+ def param_groups(self):
+ """Return param_groups."""
+ return self.optimizer.param_groups
+
+ def step(self):
+ """Update parameters and rate."""
+ self._step += 1
+ rate = self.rate()
+ for p in self.optimizer.param_groups:
+ p["lr"] = rate
+ self._rate = rate
+ self.optimizer.step()
+
+ def rate(self, step=None):
+ """Implement `lrate` above."""
+ if step is None:
+ step = self._step
+ return (
+ self.factor
+ * self.model_size ** (-0.5)
+ * min(step ** (-0.5), step * self.warmup ** (-1.5))
+ )
+
+ def zero_grad(self):
+ """Reset gradient."""
+ self.optimizer.zero_grad()
+
+ def state_dict(self):
+ """Return state_dict."""
+ return {
+ "_step": self._step,
+ "warmup": self.warmup,
+ "factor": self.factor,
+ "model_size": self.model_size,
+ "_rate": self._rate,
+ "optimizer": self.optimizer.state_dict(),
+ }
+
+ def load_state_dict(self, state_dict):
+ """Load state_dict."""
+ for key, value in state_dict.items():
+ if key == "optimizer":
+ self.optimizer.load_state_dict(state_dict["optimizer"])
+ else:
+ setattr(self, key, value)
diff --git a/egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/subsampling.py b/egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/subsampling.py
new file mode 120000
index 000000000..6fee09e58
--- /dev/null
+++ b/egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/subsampling.py
@@ -0,0 +1 @@
+../conformer_ctc/subsampling.py
\ No newline at end of file
diff --git a/egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/test_emformer.py b/egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/test_emformer.py
new file mode 100755
index 000000000..aef506e81
--- /dev/null
+++ b/egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/test_emformer.py
@@ -0,0 +1,142 @@
+#!/usr/bin/env python3
+# Copyright 2022 Xiaomi Corp. (authors: Fangjun Kuang)
+#
+# See ../../../../LICENSE for clarification regarding multiple authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+To run this file, do:
+
+ cd icefall/egs/librispeech/ASR
+ python ./pruned_stateless_emformer_rnnt2/test_emformer.py
+"""
+
+import torch
+from emformer import Emformer, stack_states, unstack_states
+
+
+def test_emformer():
+ N = 3
+ T = 300
+ C = 80
+
+ output_dim = 500
+
+ encoder = Emformer(
+ num_features=C,
+ output_dim=output_dim,
+ d_model=512,
+ nhead=8,
+ dim_feedforward=2048,
+ num_encoder_layers=20,
+ segment_length=16,
+ left_context_length=120,
+ right_context_length=4,
+ vgg_frontend=False,
+ )
+
+ x = torch.rand(N, T, C)
+ x_lens = torch.randint(100, T, (N,))
+ x_lens[0] = T
+
+ y, y_lens = encoder(x, x_lens)
+
+ y_lens = (((x_lens - 1) >> 1) - 1) >> 1
+ assert y.size(0) == x.size(0)
+ assert y.size(1) == max(y_lens)
+ assert y.size(2) == output_dim
+
+ num_param = sum([p.numel() for p in encoder.parameters()])
+ print(f"Number of encoder parameters: {num_param}")
+
+
+def test_emformer_streaming_forward():
+ N = 3
+ C = 80
+
+ output_dim = 500
+
+ encoder = Emformer(
+ num_features=C,
+ output_dim=output_dim,
+ d_model=512,
+ nhead=8,
+ dim_feedforward=2048,
+ num_encoder_layers=20,
+ segment_length=16,
+ left_context_length=120,
+ right_context_length=4,
+ vgg_frontend=False,
+ )
+
+ x = torch.rand(N, 23, C)
+ x_lens = torch.full((N,), 23)
+ y, y_lens, states = encoder.streaming_forward(x=x, x_lens=x_lens)
+
+ state_list = unstack_states(states)
+ states2 = stack_states(state_list)
+
+ for ss, ss2 in zip(states, states2):
+ for s, s2 in zip(ss, ss2):
+ assert torch.allclose(s, s2), f"{s.sum()}, {s2.sum()}"
+
+
+def test_emformer_init_state():
+ num_encoder_layers = 20
+ d_model = 512
+ encoder = Emformer(
+ num_features=80,
+ output_dim=500,
+ d_model=512,
+ nhead=8,
+ dim_feedforward=2048,
+ num_encoder_layers=num_encoder_layers,
+ segment_length=16,
+ left_context_length=120,
+ right_context_length=4,
+ vgg_frontend=False,
+ )
+ init_state = encoder.get_init_state()
+ assert len(init_state) == num_encoder_layers
+ layer0_state = init_state[0]
+ assert len(layer0_state) == 4
+
+ assert layer0_state[0].shape == (
+ 0, # max_memory_size
+ 1, # batch_size
+ d_model, # input_dim
+ )
+
+ assert layer0_state[1].shape == (
+ encoder.model.left_context_length,
+ 1, # batch_size
+ d_model, # input_dim
+ )
+ assert layer0_state[2].shape == layer0_state[1].shape
+ assert layer0_state[3].shape == (
+ 1, # always 1
+ 1, # batch_size
+ )
+
+
+@torch.no_grad()
+def main():
+ test_emformer()
+ test_emformer_streaming_forward()
+ test_emformer_init_state()
+
+
+if __name__ == "__main__":
+ torch.manual_seed(20220329)
+ main()
diff --git a/egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/test_model.py b/egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/test_model.py
new file mode 100755
index 000000000..573817b85
--- /dev/null
+++ b/egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/test_model.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python3
+# Copyright 2022 Xiaomi Corp. (authors: Fangjun Kuang)
+#
+# See ../../../../LICENSE for clarification regarding multiple authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""
+To run this file, do:
+
+ cd icefall/egs/librispeech/ASR
+ python ./pruned_stateless_emformer_rnnt2/test_model.py
+"""
+
+import torch
+from train import get_params, get_transducer_model
+
+
+def test_model():
+ params = get_params()
+ params.vocab_size = 500
+ params.blank_id = 0
+ params.context_size = 2
+ params.unk_id = 2
+
+ params.attention_dim = 512
+ params.nhead = 8
+ params.dim_feedforward = 2048
+ params.num_encoder_layers = 18
+ params.left_context_length = 128
+ params.segment_length = 8
+ params.right_context_length = 4
+ params.memory_size = 0
+
+ model = get_transducer_model(params)
+
+ num_param = sum([p.numel() for p in model.parameters()])
+ print(f"Number of model parameters: {num_param}")
+ model.__class__.forward = torch.jit.ignore(model.__class__.forward)
+ torch.jit.script(model)
+
+
+def main():
+ test_model()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/train.py b/egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/train.py
new file mode 100755
index 000000000..cd62787fa
--- /dev/null
+++ b/egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/train.py
@@ -0,0 +1,1034 @@
+#!/usr/bin/env python3
+# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang,
+# Wei Kang,
+# Mingshuang Luo,
+# Zengwei Yao)
+#
+# See ../../../../LICENSE for clarification regarding multiple authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Usage:
+
+export CUDA_VISIBLE_DEVICES="0,1,2,3"
+
+./pruned_stateless_emformer_rnnt2/train.py \
+ --world-size 4 \
+ --num-epochs 30 \
+ --start-epoch 1 \
+ --exp-dir pruned_stateless_emformer_rnnt2/exp \
+ --full-libri 1 \
+ --max-duration 300
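+
+To resume from a checkpoint saved in the middle of an epoch, use
+--start-batch (a sketch; 8000 is only an example batch index):
+
+./pruned_stateless_emformer_rnnt2/train.py \
+ --world-size 4 \
+ --start-batch 8000 \
+ --exp-dir pruned_stateless_emformer_rnnt2/exp \
+ --full-libri 1 \
+ --max-duration 300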
+"""
+
+
+import argparse
+import copy
+import logging
+import warnings
+from pathlib import Path
+from shutil import copyfile
+from typing import Any, Dict, Optional, Tuple, Union
+
+import k2
+import sentencepiece as spm
+import torch
+import torch.multiprocessing as mp
+import torch.nn as nn
+from asr_datamodule import LibriSpeechAsrDataModule
+from decoder import Decoder
+from emformer import Emformer
+from joiner import Joiner
+from lhotse.cut import Cut
+from lhotse.dataset.sampling.base import CutSampler
+from lhotse.utils import fix_random_seed
+from model import Transducer
+from noam import Noam
+from torch import Tensor
+from torch.cuda.amp import GradScaler
+from torch.nn.parallel import DistributedDataParallel as DDP
+from torch.utils.tensorboard import SummaryWriter
+
+from icefall import diagnostics
+from icefall.checkpoint import load_checkpoint, remove_checkpoints
+from icefall.checkpoint import save_checkpoint as save_checkpoint_impl
+from icefall.checkpoint import (
+ save_checkpoint_with_global_batch_idx,
+ update_averaged_model,
+)
+from icefall.dist import cleanup_dist, setup_dist
+from icefall.env import get_env_info
+from icefall.utils import AttributeDict, MetricsTracker, setup_logger, str2bool
+
+
+def add_model_arguments(parser: argparse.ArgumentParser):
+ parser.add_argument(
+ "--attention-dim",
+ type=int,
+ default=512,
+ help="Attention dim for the Emformer",
+ )
+
+ parser.add_argument(
+ "--nhead",
+ type=int,
+ default=8,
+ help="Number of attention heads for the Emformer",
+ )
+
+ parser.add_argument(
+ "--dim-feedforward",
+ type=int,
+ default=2048,
+ help="Feed-forward dimension for the Emformer",
+ )
+
+ parser.add_argument(
+ "--num-encoder-layers",
+ type=int,
+ default=18,
+ help="Number of encoder layers for the Emformer",
+ )
+
+ parser.add_argument(
+ "--left-context-length",
+ type=int,
+ default=128,
+ help="Number of frames for the left context in the Emformer",
+ )
+
+ parser.add_argument(
+ "--segment-length",
+ type=int,
+ default=8,
+ help="Number of frames for each segment in the Emformer",
+ )
+
+ parser.add_argument(
+ "--right-context-length",
+ type=int,
+ default=4,
+ help="Number of frames for right context in the Emformer",
+ )
+
+ parser.add_argument(
+ "--memory-size",
+ type=int,
+ default=0,
+ help="Number of entries in the memory for the Emformer",
+ )
+
+
+def get_parser():
+ parser = argparse.ArgumentParser(
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter
+ )
+
+ parser.add_argument(
+ "--world-size",
+ type=int,
+ default=1,
+ help="Number of GPUs for DDP training.",
+ )
+
+ parser.add_argument(
+ "--master-port",
+ type=int,
+ default=12354,
+ help="Master port to use for DDP training.",
+ )
+
+ parser.add_argument(
+ "--tensorboard",
+ type=str2bool,
+ default=True,
+ help="Should various information be logged in tensorboard.",
+ )
+
+ parser.add_argument(
+ "--num-epochs",
+ type=int,
+ default=30,
+ help="Number of epochs to train.",
+ )
+
+ parser.add_argument(
+ "--start-epoch",
+ type=int,
+ default=1,
+ help="""Resume training from this epoch. It should be positive.
+ If larger than 1, it will load checkpoint from
+ exp-dir/epoch-{start_epoch-1}.pt
+ """,
+ )
+
+ parser.add_argument(
+ "--start-batch",
+ type=int,
+ default=0,
+ help="""If positive, --start-epoch is ignored and
+ it loads the checkpoint from exp-dir/checkpoint-{start_batch}.pt
+ """,
+ )
+
+ parser.add_argument(
+ "--exp-dir",
+ type=str,
+ default="pruned_stateless_emformer_rnnt/exp",
+ help="""The experiment dir.
+ It specifies the directory where all training related
+ files, e.g., checkpoints, log, etc, are saved
+ """,
+ )
+
+ parser.add_argument(
+ "--bpe-model",
+ type=str,
+ default="data/lang_bpe_500/bpe.model",
+ help="Path to the BPE model",
+ )
+
+ parser.add_argument(
+ "--lr-factor",
+ type=float,
+ default=5.0,
+ help="The lr_factor for Noam optimizer",
+ )
+
+ parser.add_argument(
+ "--context-size",
+ type=int,
+ default=2,
+ help="The context size in the decoder. 1 means bigram; "
+ "2 means tri-gram",
+ )
+
+ parser.add_argument(
+ "--prune-range",
+ type=int,
+ default=5,
+ help="The prune range for rnnt loss, it means how many symbols(context)"
+ "we are using to compute the loss",
+ )
+
+ parser.add_argument(
+ "--lm-scale",
+ type=float,
+ default=0.25,
+ help="The scale to smooth the loss with lm "
+ "(output of prediction network) part.",
+ )
+
+ parser.add_argument(
+ "--am-scale",
+ type=float,
+ default=0.0,
+ help="The scale to smooth the loss with am (output of encoder network)"
+ "part.",
+ )
+
+ parser.add_argument(
+ "--simple-loss-scale",
+ type=float,
+ default=0.5,
+ help="To get pruning ranges, we will calculate a simple version"
+ "loss(joiner is just addition), this simple loss also uses for"
+ "training (as a regularization item). We will scale the simple loss"
+ "with this parameter before adding to the final loss.",
+ )
+
+ parser.add_argument(
+ "--seed",
+ type=int,
+ default=42,
+ help="The seed for random generators intended for reproducibility",
+ )
+
+ parser.add_argument(
+ "--print-diagnostics",
+ type=str2bool,
+ default=False,
+ help="Accumulate stats on activations, print them and exit.",
+ )
+
+ parser.add_argument(
+ "--save-every-n",
+ type=int,
+ default=8000,
+ help="""Save checkpoint after processing this number of batches"
+ periodically. We save checkpoint to exp-dir/ whenever
+ params.batch_idx_train % save_every_n == 0. The checkpoint filename
+ has the form: f'exp-dir/checkpoint-{params.batch_idx_train}.pt'
+ Note: It also saves checkpoint to `exp-dir/epoch-xxx.pt` at the
+ end of each epoch where `xxx` is the epoch number counting from 1.
+ """,
+ )
+
+ parser.add_argument(
+ "--keep-last-k",
+ type=int,
+ default=20,
+ help="""Only keep this number of checkpoints on disk.
+ For instance, if it is 3, there are only 3 checkpoints
+ in the exp-dir with filenames `checkpoint-xxx.pt`.
+ It does not affect checkpoints with name `epoch-xxx.pt`.
+ """,
+ )
+
+ parser.add_argument(
+ "--average-period",
+ type=int,
+ default=100,
+ help="""Update the averaged model, namely `model_avg`, after processing
+ this number of batches. `model_avg` is a separate version of model,
+ in which each floating-point parameter is the average of all the
+ parameters from the start of training. Each time we take the average,
+ we do: `model_avg = model * (average_period / batch_idx_train) +
+ model_avg * ((batch_idx_train - average_period) / batch_idx_train)`.
+ """,
+ )
+
+ parser.add_argument(
+ "--use-fp16",
+ type=str2bool,
+ default=False,
+ help="Whether to use half precision training.",
+ )
+
+ add_model_arguments(parser)
+
+ return parser
+
+
+def get_params() -> AttributeDict:
+ """Return a dict containing training parameters.
+
+ All training related parameters that are not passed from the commandline
+ are saved in the variable `params`.
+
+ Commandline options are merged into `params` after they are parsed, so
+ you can also access them via `params`.
+
+ Explanation of options saved in `params`:
+
+ - best_train_loss: Best training loss so far. It is used to select
+ the model that has the lowest training loss. It is
+ updated during the training.
+
+ - best_valid_loss: Best validation loss so far. It is used to select
+ the model that has the lowest validation loss. It is
+ updated during the training.
+
+ - best_train_epoch: It is the epoch that has the best training loss.
+
+ - best_valid_epoch: It is the epoch that has the best validation loss.
+
+ - batch_idx_train: Used for writing statistics to tensorboard. It
+ contains the number of batches trained so far across
+ epochs.
+
+ - log_interval: Print training loss if `batch_idx % log_interval` is 0
+
+ - reset_interval: Reset statistics if batch_idx % reset_interval is 0
+
+ - valid_interval: Run validation if batch_idx % valid_interval is 0
+
+ - feature_dim: The model input dim. It has to match the one used
+ in computing features.
+
+ - subsampling_factor: The subsampling factor for the model.
+
+ - attention_dim: Hidden dim for multi-head attention model.
+
+ - num_decoder_layers: Number of decoder layers of the transformer decoder.
+
+ - warm_step: The warm_step for Noam optimizer.
+ """
+ params = AttributeDict(
+ {
+ "best_train_loss": float("inf"),
+ "best_valid_loss": float("inf"),
+ "best_train_epoch": -1,
+ "best_valid_epoch": -1,
+ "batch_idx_train": 0,
+ "log_interval": 50,
+ "reset_interval": 200,
+ "valid_interval": 3000, # For the 100h subset, use 800
+ # parameters for Emformer
+ "feature_dim": 80,
+ "subsampling_factor": 4,
+ "vgg_frontend": False,
+ # parameters for decoder
+ "embedding_dim": 512,
+ # parameters for Noam
+ "warm_step": 80000, # For the 100h subset, use 20000
+ "env_info": get_env_info(),
+ }
+ )
+
+ return params
+
+
+def get_encoder_model(params: AttributeDict) -> nn.Module:
+ encoder = Emformer(
+ num_features=params.feature_dim,
+ output_dim=params.vocab_size,
+ subsampling_factor=params.subsampling_factor,
+ d_model=params.attention_dim,
+ nhead=params.nhead,
+ dim_feedforward=params.dim_feedforward,
+ num_encoder_layers=params.num_encoder_layers,
+ vgg_frontend=params.vgg_frontend,
+ left_context_length=params.left_context_length,
+ segment_length=params.segment_length,
+ right_context_length=params.right_context_length,
+ max_memory_size=params.memory_size,
+ )
+ return encoder
+
+
+def get_decoder_model(params: AttributeDict) -> nn.Module:
+ decoder = Decoder(
+ vocab_size=params.vocab_size,
+ embedding_dim=params.embedding_dim,
+ blank_id=params.blank_id,
+ unk_id=params.unk_id,
+ context_size=params.context_size,
+ )
+ return decoder
+
+
+def get_joiner_model(params: AttributeDict) -> nn.Module:
+ joiner = Joiner(
+ input_dim=params.vocab_size,
+ inner_dim=params.embedding_dim,
+ output_dim=params.vocab_size,
+ )
+ return joiner
+
+
+def get_transducer_model(params: AttributeDict) -> nn.Module:
+ encoder = get_encoder_model(params)
+ decoder = get_decoder_model(params)
+ joiner = get_joiner_model(params)
+
+ model = Transducer(
+ encoder=encoder,
+ decoder=decoder,
+ joiner=joiner,
+ )
+ return model
+
+
+def load_checkpoint_if_available(
+ params: AttributeDict,
+ model: nn.Module,
+ model_avg: Optional[nn.Module] = None,
+ optimizer: Optional[torch.optim.Optimizer] = None,
+) -> Optional[Dict[str, Any]]:
+ """Load checkpoint from file.
+
+ If params.start_batch is positive, it will load the checkpoint from
+ `params.exp_dir/checkpoint-{params.start_batch}.pt`. Otherwise, if
+ params.start_epoch is larger than 1, it will load the checkpoint from
+ `params.start_epoch - 1`.
+
+ Apart from loading state dict for `model` and `optimizer` it also updates
+ `best_train_epoch`, `best_train_loss`, `best_valid_epoch`,
+ and `best_valid_loss` in `params`.
+
+ Args:
+ params:
+ The return value of :func:`get_params`.
+ model:
+ The training model.
+ model_avg:
+ The stored model averaged from the start of training.
+ optimizer:
+ The optimizer that we are using.
+ Returns:
+ Return a dict containing previously saved training info.
+ """
+ if params.start_batch > 0:
+ filename = params.exp_dir / f"checkpoint-{params.start_batch}.pt"
+ elif params.start_epoch > 1:
+ filename = params.exp_dir / f"epoch-{params.start_epoch-1}.pt"
+ else:
+ return None
+
+ assert filename.is_file(), f"{filename} does not exist!"
+
+ saved_params = load_checkpoint(
+ filename,
+ model=model,
+ model_avg=model_avg,
+ optimizer=optimizer,
+ )
+
+ keys = [
+ "best_train_epoch",
+ "best_valid_epoch",
+ "batch_idx_train",
+ "best_train_loss",
+ "best_valid_loss",
+ ]
+ for k in keys:
+ params[k] = saved_params[k]
+
+ if params.start_batch > 0:
+ if "cur_epoch" in saved_params:
+ params["start_epoch"] = saved_params["cur_epoch"]
+
+ if "cur_batch_idx" in saved_params:
+ params["cur_batch_idx"] = saved_params["cur_batch_idx"]
+
+ return saved_params
+
+
+def save_checkpoint(
+ params: AttributeDict,
+ model: Union[nn.Module, DDP],
+ model_avg: Optional[nn.Module] = None,
+ optimizer: Optional[torch.optim.Optimizer] = None,
+ sampler: Optional[CutSampler] = None,
+ scaler: Optional[GradScaler] = None,
+ rank: int = 0,
+) -> None:
+ """Save model, optimizer, and training stats to file.
+
+ Args:
+ params:
+ It is returned by :func:`get_params`.
+ model:
+ The training model.
+ model_avg:
+ The stored model averaged from the start of training.
+ optimizer:
+ The optimizer used in the training.
+ sampler:
+ The sampler for the training dataset.
+ scaler:
+ The scaler used for mixed precision training.
+ """
+ if rank != 0:
+ return
+ filename = params.exp_dir / f"epoch-{params.cur_epoch}.pt"
+ save_checkpoint_impl(
+ filename=filename,
+ model=model,
+ model_avg=model_avg,
+ params=params,
+ optimizer=optimizer,
+ sampler=sampler,
+ scaler=scaler,
+ rank=rank,
+ )
+
+ if params.best_train_epoch == params.cur_epoch:
+ best_train_filename = params.exp_dir / "best-train-loss.pt"
+ copyfile(src=filename, dst=best_train_filename)
+
+ if params.best_valid_epoch == params.cur_epoch:
+ best_valid_filename = params.exp_dir / "best-valid-loss.pt"
+ copyfile(src=filename, dst=best_valid_filename)
+
+
+def compute_loss(
+ params: AttributeDict,
+ model: Union[nn.Module, DDP],
+ sp: spm.SentencePieceProcessor,
+ batch: dict,
+ is_training: bool,
+) -> Tuple[Tensor, MetricsTracker]:
+ """
+ Compute pruned RNN-T loss given the model and its inputs.
+
+ Args:
+ params:
+ Parameters for training. See :func:`get_params`.
+ model:
+ The model for training. It is an instance of Transducer in our case.
+ sp:
+ The BPE model.
+ batch:
+ A batch of data. See `lhotse.dataset.K2SpeechRecognitionDataset()`
+ for the content in it.
+ is_training:
+ True for training. False for validation. When it is True, this
+ function enables autograd during computation; when it is False, it
+ disables autograd.
+ """
+ device = (
+ model.device
+ if isinstance(model, DDP)
+ else next(model.parameters()).device
+ )
+ feature = batch["inputs"]
+ # at entry, feature is (N, T, C)
+ assert feature.ndim == 3
+ feature = feature.to(device)
+
+ supervisions = batch["supervisions"]
+ feature_lens = supervisions["num_frames"].to(device)
+
+ texts = batch["supervisions"]["text"]
+ y = sp.encode(texts, out_type=int)
+ y = k2.RaggedTensor(y).to(device)
+
+ with torch.set_grad_enabled(is_training):
+ simple_loss, pruned_loss = model(
+ x=feature,
+ x_lens=feature_lens,
+ y=y,
+ prune_range=params.prune_range,
+ am_scale=params.am_scale,
+ lm_scale=params.lm_scale,
+ )
+ loss = params.simple_loss_scale * simple_loss + pruned_loss
+
+ assert loss.requires_grad == is_training
+
+ info = MetricsTracker()
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore")
+ info["frames"] = (
+ (feature_lens // params.subsampling_factor).sum().item()
+ )
+
+ # Note: We use reduction=sum while computing the loss.
+ info["loss"] = loss.detach().cpu().item()
+ info["simple_loss"] = simple_loss.detach().cpu().item()
+ info["pruned_loss"] = pruned_loss.detach().cpu().item()
+
+ return loss, info
+
+
+def compute_validation_loss(
+ params: AttributeDict,
+ model: Union[nn.Module, DDP],
+ sp: spm.SentencePieceProcessor,
+ valid_dl: torch.utils.data.DataLoader,
+ world_size: int = 1,
+) -> MetricsTracker:
+ """Run the validation process."""
+ model.eval()
+
+ tot_loss = MetricsTracker()
+
+ for batch_idx, batch in enumerate(valid_dl):
+ loss, loss_info = compute_loss(
+ params=params,
+ model=model,
+ sp=sp,
+ batch=batch,
+ is_training=False,
+ )
+ assert loss.requires_grad is False
+ tot_loss = tot_loss + loss_info
+
+ if world_size > 1:
+ tot_loss.reduce(loss.device)
+
+ loss_value = tot_loss["loss"] / tot_loss["frames"]
+ if loss_value < params.best_valid_loss:
+ params.best_valid_epoch = params.cur_epoch
+ params.best_valid_loss = loss_value
+
+ return tot_loss
+
+
+def train_one_epoch(
+ params: AttributeDict,
+ model: Union[nn.Module, DDP],
+ optimizer: torch.optim.Optimizer,
+ sp: spm.SentencePieceProcessor,
+ train_dl: torch.utils.data.DataLoader,
+ valid_dl: torch.utils.data.DataLoader,
+ scaler: GradScaler,
+ model_avg: Optional[nn.Module] = None,
+ tb_writer: Optional[SummaryWriter] = None,
+ world_size: int = 1,
+ rank: int = 0,
+) -> None:
+ """Train the model for one epoch.
+
+ The training loss from the mean of all frames is saved in
+ `params.train_loss`. It runs the validation process every
+ `params.valid_interval` batches.
+
+ Args:
+ params:
+ It is returned by :func:`get_params`.
+ model:
+ The model for training.
+ optimizer:
+ The optimizer we are using.
+ train_dl:
+ Dataloader for the training dataset.
+ valid_dl:
+ Dataloader for the validation dataset.
+ scaler:
+ The scaler used for mixed precision training.
+ model_avg:
+ The stored model averaged from the start of training.
+ tb_writer:
+ Writer to write log messages to tensorboard.
+ world_size:
+ Number of nodes in DDP training. If it is 1, DDP is disabled.
+ rank:
+ The rank of the node in DDP training. If no DDP is used, it should
+ be set to 0.
+ """
+ model.train()
+
+ tot_loss = MetricsTracker()
+
+ cur_batch_idx = params.get("cur_batch_idx", 0)
+
+ for batch_idx, batch in enumerate(train_dl):
+ if batch_idx < cur_batch_idx:
+ continue
+ cur_batch_idx = batch_idx
+
+ params.batch_idx_train += 1
+ batch_size = len(batch["supervisions"]["text"])
+
+ with torch.cuda.amp.autocast(enabled=params.use_fp16):
+ loss, loss_info = compute_loss(
+ params=params,
+ model=model,
+ sp=sp,
+ batch=batch,
+ is_training=True,
+ )
+ # summary stats
+ tot_loss = (tot_loss * (1 - 1 / params.reset_interval)) + loss_info
+
+ # NOTE: We use reduction==sum and loss is computed over utterances
+ # in the batch and there is no normalization to it so far.
+ scaler.scale(loss).backward()
+ scaler.step(optimizer)
+ scaler.update()
+ optimizer.zero_grad()
+
+ if params.print_diagnostics and batch_idx == 30:
+ return
+
+ if (
+ rank == 0
+ and params.batch_idx_train > 0
+ and params.batch_idx_train % params.average_period == 0
+ ):
+ update_averaged_model(
+ params=params,
+ model_cur=model,
+ model_avg=model_avg,
+ )
+
+ if (
+ params.batch_idx_train > 0
+ and params.batch_idx_train % params.save_every_n == 0
+ ):
+ params.cur_batch_idx = batch_idx
+ save_checkpoint_with_global_batch_idx(
+ out_dir=params.exp_dir,
+ global_batch_idx=params.batch_idx_train,
+ model=model,
+ model_avg=model_avg,
+ params=params,
+ optimizer=optimizer,
+ sampler=train_dl.sampler,
+ scaler=scaler,
+ rank=rank,
+ )
+ del params.cur_batch_idx
+ remove_checkpoints(
+ out_dir=params.exp_dir,
+ topk=params.keep_last_k,
+ rank=rank,
+ )
+
+ if batch_idx % params.log_interval == 0:
+ cur_lr = optimizer.rate()
+ logging.info(
+ f"Epoch {params.cur_epoch}, "
+ f"batch {batch_idx}, loss[{loss_info}], "
+ f"tot_loss[{tot_loss}], batch size: {batch_size}, "
+ f"lr: {cur_lr:.2e}"
+ )
+
+ if tb_writer is not None:
+ tb_writer.add_scalar(
+ "train/learning_rate", cur_lr, params.batch_idx_train
+ )
+
+ loss_info.write_summary(
+ tb_writer, "train/current_", params.batch_idx_train
+ )
+ tot_loss.write_summary(
+ tb_writer, "train/tot_", params.batch_idx_train
+ )
+
+ if batch_idx > 0 and batch_idx % params.valid_interval == 0:
+ logging.info("Computing validation loss")
+ valid_info = compute_validation_loss(
+ params=params,
+ model=model,
+ sp=sp,
+ valid_dl=valid_dl,
+ world_size=world_size,
+ )
+ model.train()
+ logging.info(f"Epoch {params.cur_epoch}, validation: {valid_info}")
+ if tb_writer is not None:
+ valid_info.write_summary(
+ tb_writer, "train/valid_", params.batch_idx_train
+ )
+
+ loss_value = tot_loss["loss"] / tot_loss["frames"]
+ params.train_loss = loss_value
+ if params.train_loss < params.best_train_loss:
+ params.best_train_epoch = params.cur_epoch
+ params.best_train_loss = params.train_loss
+
+
+def run(rank, world_size, args):
+ """
+ Args:
+ rank:
+ It is a value between 0 and `world_size-1`, which is
+ passed automatically by `mp.spawn()` in :func:`main`.
+ The node with rank 0 is responsible for saving checkpoint.
+ world_size:
+ Number of GPUs for DDP training.
+ args:
+ The return value of get_parser().parse_args()
+ """
+ params = get_params()
+ params.update(vars(args))
+ if params.full_libri is False:
+ params.valid_interval = 800
+ params.warm_step = 20000
+
+ fix_random_seed(params.seed)
+ if world_size > 1:
+ setup_dist(rank, world_size, params.master_port)
+
+ setup_logger(f"{params.exp_dir}/log/log-train")
+ logging.info("Training started")
+
+ if args.tensorboard and rank == 0:
+ tb_writer = SummaryWriter(log_dir=f"{params.exp_dir}/tensorboard")
+ else:
+ tb_writer = None
+
+ device = torch.device("cpu")
+ if torch.cuda.is_available():
+ device = torch.device("cuda", rank)
+ logging.info(f"Device: {device}")
+
+ sp = spm.SentencePieceProcessor()
+ sp.load(params.bpe_model)
+
+ # <blk> and <unk> are defined in local/train_bpe_model.py
+ params.blank_id = sp.piece_to_id("<blk>")
+ params.unk_id = sp.piece_to_id("<unk>")
+ params.vocab_size = sp.get_piece_size()
+
+ logging.info(params)
+
+ logging.info("About to create model")
+ model = get_transducer_model(params)
+
+ num_param = sum([p.numel() for p in model.parameters()])
+ logging.info(f"Number of model parameters: {num_param}")
+
+ assert params.save_every_n >= params.average_period
+ model_avg: Optional[nn.Module] = None
+ if rank == 0:
+ # model_avg is only used with rank 0
+ model_avg = copy.deepcopy(model)
+
+ assert params.start_epoch > 0, params.start_epoch
+ checkpoints = load_checkpoint_if_available(
+ params=params, model=model, model_avg=model_avg
+ )
+
+ model.to(device)
+ if world_size > 1:
+ logging.info("Using DDP")
+ model = DDP(model, device_ids=[rank])
+
+ optimizer = Noam(
+ model.parameters(),
+ model_size=params.attention_dim,
+ factor=params.lr_factor,
+ warm_step=params.warm_step,
+ )
+
+ if checkpoints and "optimizer" in checkpoints:
+ logging.info("Loading optimizer state dict")
+ optimizer.load_state_dict(checkpoints["optimizer"])
+
+ if params.print_diagnostics:
+ diagnostic = diagnostics.attach_diagnostics(model)
+
+ librispeech = LibriSpeechAsrDataModule(args)
+
+ train_cuts = librispeech.train_clean_100_cuts()
+ if params.full_libri:
+ train_cuts += librispeech.train_clean_360_cuts()
+ train_cuts += librispeech.train_other_500_cuts()
+
+ def remove_short_and_long_utt(c: Cut):
+ # Keep only utterances with duration between 1 second and 20 seconds
+ #
+ # Caution: There is a reason to select 20.0 here. Please see
+ # ../local/display_manifest_statistics.py
+ #
+ # You should use ../local/display_manifest_statistics.py to get
+ # an utterance duration distribution for your dataset to select
+ # the threshold
+ return 1.0 <= c.duration <= 20.0
+
+ train_cuts = train_cuts.filter(remove_short_and_long_utt)
+
+ if params.start_batch > 0 and checkpoints and "sampler" in checkpoints:
+ # We only load the sampler's state dict when it loads a checkpoint
+ # saved in the middle of an epoch
+ sampler_state_dict = checkpoints["sampler"]
+ else:
+ sampler_state_dict = None
+
+ train_dl = librispeech.train_dataloaders(
+ train_cuts, sampler_state_dict=sampler_state_dict
+ )
+
+ valid_cuts = librispeech.dev_clean_cuts()
+ valid_cuts += librispeech.dev_other_cuts()
+ valid_dl = librispeech.valid_dataloaders(valid_cuts)
+
+ if not params.print_diagnostics:
+ scan_pessimistic_batches_for_oom(
+ model=model,
+ train_dl=train_dl,
+ optimizer=optimizer,
+ sp=sp,
+ params=params,
+ )
+
+ scaler = GradScaler(enabled=params.use_fp16)
+ if checkpoints and "grad_scaler" in checkpoints:
+ logging.info("Loading grad scaler state dict")
+ scaler.load_state_dict(checkpoints["grad_scaler"])
+
+ for epoch in range(params.start_epoch, params.num_epochs + 1):
+ fix_random_seed(params.seed + epoch - 1)
+ train_dl.sampler.set_epoch(epoch - 1)
+
+ if tb_writer is not None:
+ tb_writer.add_scalar("train/epoch", epoch, params.batch_idx_train)
+
+ params.cur_epoch = epoch
+
+ train_one_epoch(
+ params=params,
+ model=model,
+ model_avg=model_avg,
+ optimizer=optimizer,
+ sp=sp,
+ train_dl=train_dl,
+ valid_dl=valid_dl,
+ scaler=scaler,
+ tb_writer=tb_writer,
+ world_size=world_size,
+ rank=rank,
+ )
+
+ if params.print_diagnostics:
+ diagnostic.print_diagnostics()
+ break
+
+ save_checkpoint(
+ params=params,
+ model=model,
+ model_avg=model_avg,
+ optimizer=optimizer,
+ sampler=train_dl.sampler,
+ scaler=scaler,
+ rank=rank,
+ )
+
+ logging.info("Done!")
+
+ if world_size > 1:
+ torch.distributed.barrier()
+ cleanup_dist()
+
+
+def scan_pessimistic_batches_for_oom(
+ model: Union[nn.Module, DDP],
+ train_dl: torch.utils.data.DataLoader,
+ optimizer: torch.optim.Optimizer,
+ sp: spm.SentencePieceProcessor,
+ params: AttributeDict,
+):
+ from lhotse.dataset import find_pessimistic_batches
+
+ logging.info(
+ "Sanity check -- see if any of the batches in epoch 1 would cause OOM."
+ )
+ batches, crit_values = find_pessimistic_batches(train_dl.sampler)
+ for criterion, cuts in batches.items():
+ batch = train_dl.dataset[cuts]
+ try:
+ with torch.cuda.amp.autocast(enabled=params.use_fp16):
+ loss, _ = compute_loss(
+ params=params,
+ model=model,
+ sp=sp,
+ batch=batch,
+ is_training=True,
+ )
+ loss.backward()
+ optimizer.step()
+ optimizer.zero_grad()
+ except RuntimeError as e:
+ if "CUDA out of memory" in str(e):
+ logging.error(
+ "Your GPU ran out of memory with the current "
+ "max_duration setting. We recommend decreasing "
+ "max_duration and trying again.\n"
+ f"Failing criterion: {criterion} "
+ f"(={crit_values[criterion]}) ..."
+ )
+ raise
+
+
+def main():
+ parser = get_parser()
+ LibriSpeechAsrDataModule.add_arguments(parser)
+ args = parser.parse_args()
+ args.exp_dir = Path(args.exp_dir)
+
+ world_size = args.world_size
+ assert world_size >= 1
+ if world_size > 1:
+ mp.spawn(run, args=(world_size, args), nprocs=world_size, join=True)
+ else:
+ run(rank=0, world_size=1, args=args)
+
+
+torch.set_num_threads(1)
+torch.set_num_interop_threads(1)
+
+if __name__ == "__main__":
+ main()
diff --git a/egs/librispeech/ASR/pruned_transducer_stateless/joiner.py b/egs/librispeech/ASR/pruned_transducer_stateless/joiner.py
index 7c5a93a86..c9522df8a 100644
--- a/egs/librispeech/ASR/pruned_transducer_stateless/joiner.py
+++ b/egs/librispeech/ASR/pruned_transducer_stateless/joiner.py
@@ -32,13 +32,15 @@ class Joiner(nn.Module):
"""
Args:
encoder_out:
- Output from the encoder. Its shape is (N, T, s_range, C).
+ Output from the encoder. Its shape is (N, T, s_range, C) during
+ training or (N, C) in case of streaming decoding.
decoder_out:
- Output from the decoder. Its shape is (N, T, s_range, C).
+ Output from the decoder. Its shape is (N, T, s_range, C) during
+ training or (N, C) in case of streaming decoding.
Returns:
Return a tensor of shape (N, T, s_range, C).
"""
- assert encoder_out.ndim == decoder_out.ndim == 4
+ assert encoder_out.ndim == decoder_out.ndim
+ assert encoder_out.ndim in (2, 4)
assert encoder_out.shape == decoder_out.shape
logit = encoder_out + decoder_out
diff --git a/egs/librispeech/ASR/pruned_transducer_stateless/train.py b/egs/librispeech/ASR/pruned_transducer_stateless/train.py
index c360d025a..448419759 100755
--- a/egs/librispeech/ASR/pruned_transducer_stateless/train.py
+++ b/egs/librispeech/ASR/pruned_transducer_stateless/train.py
@@ -456,7 +456,7 @@ def compute_loss(
is_training: bool,
) -> Tuple[Tensor, MetricsTracker]:
"""
- Compute CTC loss given the model and its inputs.
+ Compute RNN-T loss given the model and its inputs.
Args:
params:
@@ -807,28 +807,8 @@ def run(rank, world_size, args):
# the threshold
return 1.0 <= c.duration <= 20.0
- num_in_total = len(train_cuts)
-
train_cuts = train_cuts.filter(remove_short_and_long_utt)
- try:
- num_left = len(train_cuts)
- num_removed = num_in_total - num_left
- removed_percent = num_removed / num_in_total * 100
-
- logging.info(
- f"Before removing short and long utterances: {num_in_total}"
- )
- logging.info(f"After removing short and long utterances: {num_left}")
- logging.info(
- f"Removed {num_removed} utterances ({removed_percent:.5f}%)"
- )
- except TypeError as e:
- # You can ignore this error as previous versions of Lhotse work fine
- # for the above code. In recent versions of Lhotse, it uses
- # lazy filter, producing cutsets that don't have the __len__ method
- logging.info(str(e))
-
if params.start_batch > 0 and checkpoints and "sampler" in checkpoints:
# We only load the sampler's state dict when it loads a checkpoint
# saved in the middle of an epoch
diff --git a/egs/librispeech/ASR/pruned_transducer_stateless2/train.py b/egs/librispeech/ASR/pruned_transducer_stateless2/train.py
index eed2df755..36ee7ca74 100755
--- a/egs/librispeech/ASR/pruned_transducer_stateless2/train.py
+++ b/egs/librispeech/ASR/pruned_transducer_stateless2/train.py
@@ -510,7 +510,7 @@ def compute_loss(
warmup: float = 1.0,
) -> Tuple[Tensor, MetricsTracker]:
"""
- Compute CTC loss given the model and its inputs.
+ Compute RNN-T loss given the model and its inputs.
Args:
params:
diff --git a/egs/librispeech/ASR/pruned_transducer_stateless3/asr_datamodule.py b/egs/librispeech/ASR/pruned_transducer_stateless3/asr_datamodule.py
index df1e52202..b54d1aa39 100644
--- a/egs/librispeech/ASR/pruned_transducer_stateless3/asr_datamodule.py
+++ b/egs/librispeech/ASR/pruned_transducer_stateless3/asr_datamodule.py
@@ -22,7 +22,6 @@ from typing import Optional
from lhotse import CutSet, Fbank, FbankConfig
from lhotse.dataset import (
- BucketingSampler,
CutMix,
DynamicBucketingSampler,
K2SpeechRecognitionDataset,
@@ -71,8 +70,7 @@ class AsrDataModule:
"--num-buckets",
type=int,
default=30,
- help="The number of buckets for the BucketingSampler "
- "and DynamicBucketingSampler."
+ help="The number of buckets for the DynamicBucketingSampler. "
"(you might want to increase it for larger datasets).",
)
@@ -152,7 +150,6 @@ class AsrDataModule:
def train_dataloaders(
self,
cuts_train: CutSet,
- dynamic_bucketing: bool,
on_the_fly_feats: bool,
cuts_musan: Optional[CutSet] = None,
) -> DataLoader:
@@ -162,9 +159,6 @@ class AsrDataModule:
Cuts for training.
cuts_musan:
If not None, it is the cuts for mixing.
- dynamic_bucketing:
- True to use DynamicBucketingSampler;
- False to use BucketingSampler.
on_the_fly_feats:
True to use OnTheFlyFeatures;
False to use PrecomputedFeatures.
@@ -230,25 +224,14 @@ class AsrDataModule:
return_cuts=self.args.return_cuts,
)
- if dynamic_bucketing:
- logging.info("Using DynamicBucketingSampler.")
- train_sampler = DynamicBucketingSampler(
- cuts_train,
- max_duration=self.args.max_duration,
- shuffle=self.args.shuffle,
- num_buckets=self.args.num_buckets,
- drop_last=True,
- )
- else:
- logging.info("Using BucketingSampler.")
- train_sampler = BucketingSampler(
- cuts_train,
- max_duration=self.args.max_duration,
- shuffle=self.args.shuffle,
- num_buckets=self.args.num_buckets,
- bucket_method="equal_duration",
- drop_last=True,
- )
+ logging.info("Using DynamicBucketingSampler.")
+ train_sampler = DynamicBucketingSampler(
+ cuts_train,
+ max_duration=self.args.max_duration,
+ shuffle=self.args.shuffle,
+ num_buckets=self.args.num_buckets,
+ drop_last=True,
+ )
logging.info("About to create train dataloader")
train_dl = DataLoader(
@@ -277,10 +260,12 @@ class AsrDataModule:
cut_transforms=transforms,
return_cuts=self.args.return_cuts,
)
- valid_sampler = BucketingSampler(
+ valid_sampler = DynamicBucketingSampler(
cuts_valid,
max_duration=self.args.max_duration,
shuffle=False,
+ num_buckets=self.args.num_buckets,
+ drop_last=False,
)
logging.info("About to create dev dataloader")
valid_dl = DataLoader(
@@ -301,8 +286,12 @@ class AsrDataModule:
else PrecomputedFeatures(),
return_cuts=self.args.return_cuts,
)
- sampler = BucketingSampler(
- cuts, max_duration=self.args.max_duration, shuffle=False
+ sampler = DynamicBucketingSampler(
+ cuts,
+ max_duration=self.args.max_duration,
+ shuffle=False,
+ num_buckets=self.args.num_buckets,
+ drop_last=True,
)
logging.debug("About to create test dataloader")
test_dl = DataLoader(
diff --git a/egs/librispeech/ASR/pruned_transducer_stateless3/gigaspeech.py b/egs/librispeech/ASR/pruned_transducer_stateless3/gigaspeech.py
index 3f8bf3ba9..36f32c6b3 100644
--- a/egs/librispeech/ASR/pruned_transducer_stateless3/gigaspeech.py
+++ b/egs/librispeech/ASR/pruned_transducer_stateless3/gigaspeech.py
@@ -22,7 +22,7 @@ import re
from pathlib import Path
import lhotse
-from lhotse import CutSet, load_manifest
+from lhotse import CutSet, load_manifest_lazy
class GigaSpeech:
@@ -32,13 +32,13 @@ class GigaSpeech:
manifest_dir:
It is expected to contain the following files::
- - XL_split_2000/cuts_XL.*.jsonl.gz
- - cuts_L_raw.jsonl.gz
- - cuts_M_raw.jsonl.gz
- - cuts_S_raw.jsonl.gz
- - cuts_XS_raw.jsonl.gz
- - cuts_DEV_raw.jsonl.gz
- - cuts_TEST_raw.jsonl.gz
+ - gigaspeech_XL_split_2000/gigaspeech_cuts_XL.*.jsonl.gz
+ - gigaspeech_cuts_L_raw.jsonl.gz
+ - gigaspeech_cuts_M_raw.jsonl.gz
+ - gigaspeech_cuts_S_raw.jsonl.gz
+ - gigaspeech_cuts_XS_raw.jsonl.gz
+ - gigaspeech_cuts_DEV_raw.jsonl.gz
+ - gigaspeech_cuts_TEST_raw.jsonl.gz
"""
self.manifest_dir = Path(manifest_dir)
@@ -46,10 +46,12 @@ class GigaSpeech:
logging.info("About to get train-XL cuts")
filenames = list(
- glob.glob(f"{self.manifest_dir}/XL_split_2000/cuts_XL.*.jsonl.gz")
+ glob.glob(
+ f"{self.manifest_dir}/gigaspeech_XL_split_2000/gigaspeech_cuts_XL.*.jsonl.gz" # noqa
+ )
)
- pattern = re.compile(r"cuts_XL.([0-9]+).jsonl.gz")
+ pattern = re.compile(r"gigaspeech_cuts_XL.([0-9]+).jsonl.gz")
idx_filenames = [
(int(pattern.search(f).group(1)), f) for f in filenames
]
@@ -64,31 +66,31 @@ class GigaSpeech:
)
def train_L_cuts(self) -> CutSet:
- f = self.manifest_dir / "cuts_L_raw.jsonl.gz"
+ f = self.manifest_dir / "gigaspeech_cuts_L_raw.jsonl.gz"
logging.info(f"About to get train-L cuts from {f}")
return CutSet.from_jsonl_lazy(f)
def train_M_cuts(self) -> CutSet:
- f = self.manifest_dir / "cuts_M_raw.jsonl.gz"
+ f = self.manifest_dir / "gigaspeech_cuts_M_raw.jsonl.gz"
logging.info(f"About to get train-M cuts from {f}")
return CutSet.from_jsonl_lazy(f)
def train_S_cuts(self) -> CutSet:
- f = self.manifest_dir / "cuts_S_raw.jsonl.gz"
+ f = self.manifest_dir / "gigaspeech_cuts_S_raw.jsonl.gz"
logging.info(f"About to get train-S cuts from {f}")
return CutSet.from_jsonl_lazy(f)
def train_XS_cuts(self) -> CutSet:
- f = self.manifest_dir / "cuts_XS_raw.jsonl.gz"
+ f = self.manifest_dir / "gigaspeech_cuts_XS_raw.jsonl.gz"
logging.info(f"About to get train-XS cuts from {f}")
return CutSet.from_jsonl_lazy(f)
def test_cuts(self) -> CutSet:
- f = self.manifest_dir / "cuts_TEST.jsonl.gz"
+ f = self.manifest_dir / "gigaspeech_cuts_TEST.jsonl.gz"
logging.info(f"About to get TEST cuts from {f}")
- return load_manifest(f)
+ return load_manifest_lazy(f)
def dev_cuts(self) -> CutSet:
- f = self.manifest_dir / "cuts_DEV.jsonl.gz"
+ f = self.manifest_dir / "gigaspeech_cuts_DEV.jsonl.gz"
logging.info(f"About to get DEV cuts from {f}")
- return load_manifest(f)
+ return load_manifest_lazy(f)
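Note on train_XL_cuts() above: the split manifests are globbed and then reordered by their numeric index before being combined. A short sketch of the sorting step in isolation (same pattern and directory layout as in the diff):

    import glob
    import re

    # A plain lexicographic sort would misorder unpadded indices
    # (e.g. "10" before "2"), so extract the integer index and sort
    # on it to get a deterministic, correctly ordered list of shards.
    pattern = re.compile(r"gigaspeech_cuts_XL.([0-9]+).jsonl.gz")
    filenames = glob.glob(
        "data/fbank/gigaspeech_XL_split_2000/gigaspeech_cuts_XL.*.jsonl.gz"
    )
    idx_filenames = sorted(
        (int(pattern.search(f).group(1)), f) for f in filenames
    )
    sorted_filenames = [f for _, f in idx_filenames]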
diff --git a/egs/librispeech/ASR/pruned_transducer_stateless3/librispeech.py b/egs/librispeech/ASR/pruned_transducer_stateless3/librispeech.py
index 00b7c8334..6dba8e9fe 100644
--- a/egs/librispeech/ASR/pruned_transducer_stateless3/librispeech.py
+++ b/egs/librispeech/ASR/pruned_transducer_stateless3/librispeech.py
@@ -18,7 +18,7 @@
import logging
from pathlib import Path
-from lhotse import CutSet, load_manifest
+from lhotse import CutSet, load_manifest_lazy
class LibriSpeech:
@@ -28,47 +28,47 @@ class LibriSpeech:
manifest_dir:
It is expected to contain the following files::
- - cuts_dev-clean.json.gz
- - cuts_dev-other.json.gz
- - cuts_test-clean.json.gz
- - cuts_test-other.json.gz
- - cuts_train-clean-100.json.gz
- - cuts_train-clean-360.json.gz
- - cuts_train-other-500.json.gz
+ - librispeech_cuts_dev-clean.jsonl.gz
+ - librispeech_cuts_dev-other.jsonl.gz
+ - librispeech_cuts_test-clean.jsonl.gz
+ - librispeech_cuts_test-other.jsonl.gz
+ - librispeech_cuts_train-clean-100.jsonl.gz
+ - librispeech_cuts_train-clean-360.jsonl.gz
+ - librispeech_cuts_train-other-500.jsonl.gz
"""
self.manifest_dir = Path(manifest_dir)
def train_clean_100_cuts(self) -> CutSet:
- f = self.manifest_dir / "cuts_train-clean-100.json.gz"
+ f = self.manifest_dir / "librispeech_cuts_train-clean-100.jsonl.gz"
logging.info(f"About to get train-clean-100 cuts from {f}")
- return load_manifest(f)
+ return load_manifest_lazy(f)
def train_clean_360_cuts(self) -> CutSet:
- f = self.manifest_dir / "cuts_train-clean-360.json.gz"
+ f = self.manifest_dir / "librispeech_cuts_train-clean-360.jsonl.gz"
logging.info(f"About to get train-clean-360 cuts from {f}")
- return load_manifest(f)
+ return load_manifest_lazy(f)
def train_other_500_cuts(self) -> CutSet:
- f = self.manifest_dir / "cuts_train-other-500.json.gz"
+ f = self.manifest_dir / "librispeech_cuts_train-other-500.jsonl.gz"
logging.info(f"About to get train-other-500 cuts from {f}")
- return load_manifest(f)
+ return load_manifest_lazy(f)
def test_clean_cuts(self) -> CutSet:
- f = self.manifest_dir / "cuts_test-clean.json.gz"
+ f = self.manifest_dir / "librispeech_cuts_test-clean.jsonl.gz"
logging.info(f"About to get test-clean cuts from {f}")
- return load_manifest(f)
+ return load_manifest_lazy(f)
def test_other_cuts(self) -> CutSet:
- f = self.manifest_dir / "cuts_test-other.json.gz"
+ f = self.manifest_dir / "librispeech_cuts_test-other.jsonl.gz"
logging.info(f"About to get test-other cuts from {f}")
- return load_manifest(f)
+ return load_manifest_lazy(f)
def dev_clean_cuts(self) -> CutSet:
- f = self.manifest_dir / "cuts_dev-clean.json.gz"
+ f = self.manifest_dir / "librispeech_cuts_dev-clean.jsonl.gz"
logging.info(f"About to get dev-clean cuts from {f}")
- return load_manifest(f)
+ return load_manifest_lazy(f)
def dev_other_cuts(self) -> CutSet:
- f = self.manifest_dir / "cuts_dev-other.json.gz"
+ f = self.manifest_dir / "librispeech_cuts_dev-other.jsonl.gz"
logging.info(f"About to get dev-other cuts from {f}")
- return load_manifest(f)
+ return load_manifest_lazy(f)
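Note on the load_manifest -> load_manifest_lazy switch in both manifest classes: lazy loading only works with line-delimited manifests, which is why the files were renamed from .json.gz to .jsonl.gz. A minimal sketch (assuming lhotse's behavior at this version):

    from lhotse import load_manifest_lazy

    # Nothing is materialized at open time; each line of the .jsonl.gz
    # file is parsed into a cut only when the set is iterated.
    cuts = load_manifest_lazy(
        "data/fbank/librispeech_cuts_dev-clean.jsonl.gz"
    )
    for cut in cuts:
        print(cut.id, cut.duration)
        break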
diff --git a/egs/librispeech/ASR/pruned_transducer_stateless3/train.py b/egs/librispeech/ASR/pruned_transducer_stateless3/train.py
index f5a25a226..92eae78d1 100755
--- a/egs/librispeech/ASR/pruned_transducer_stateless3/train.py
+++ b/egs/librispeech/ASR/pruned_transducer_stateless3/train.py
@@ -546,7 +546,7 @@ def compute_loss(
warmup: float = 1.0,
) -> Tuple[Tensor, MetricsTracker]:
"""
- Compute CTC loss given the model and its inputs.
+ Compute RNN-T loss given the model and its inputs.
Args:
params:
@@ -799,7 +799,7 @@ def train_one_epoch(
f"tot_loss[{tot_loss}], "
f"libri_tot_loss[{libri_tot_loss}], "
f"giga_tot_loss[{giga_tot_loss}], "
- f"batch size: {batch_size}"
+ f"batch size: {batch_size}, "
f"lr: {cur_lr:.2e}"
)
@@ -969,7 +969,7 @@ def run(rank, world_size, args):
if args.enable_musan:
cuts_musan = load_manifest(
- Path(args.manifest_dir) / "cuts_musan.json.gz"
+ Path(args.manifest_dir) / "musan_cuts.jsonl.gz"
)
else:
cuts_musan = None
@@ -978,14 +978,12 @@ def run(rank, world_size, args):
train_dl = asr_datamodule.train_dataloaders(
train_cuts,
- dynamic_bucketing=False,
on_the_fly_feats=False,
cuts_musan=cuts_musan,
)
giga_train_dl = asr_datamodule.train_dataloaders(
train_giga_cuts,
- dynamic_bucketing=True,
on_the_fly_feats=False,
cuts_musan=cuts_musan,
)
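Note: the musan manifest is still loaded eagerly here, since it is small and is sampled from repeatedly during augmentation. A sketch of how it is consumed downstream (the CutMix arguments follow the convention used in asr_datamodule.py; treat them as illustrative):

    from lhotse import load_manifest
    from lhotse.dataset import CutMix

    cuts_musan = load_manifest("data/fbank/musan_cuts.jsonl.gz")
    # Mix noise cuts into roughly half of the training cuts
    # at an SNR drawn from 10-20 dB.
    transforms = [CutMix(cuts=cuts_musan, prob=0.5, snr=(10, 20))]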
diff --git a/egs/librispeech/ASR/pruned_transducer_stateless4/decode.py b/egs/librispeech/ASR/pruned_transducer_stateless4/decode.py
index d1af63aaa..70afc3ea3 100755
--- a/egs/librispeech/ASR/pruned_transducer_stateless4/decode.py
+++ b/egs/librispeech/ASR/pruned_transducer_stateless4/decode.py
@@ -128,7 +128,7 @@ def get_parser():
parser.add_argument(
"--use-averaged-model",
type=str2bool,
- default=False,
+ default=True,
help="Whether to load averaged model. Currently it only supports "
"using --epoch. If True, it would decode with the averaged model "
"over the epoch range from `epoch-avg` (excluded) to `epoch`."
diff --git a/egs/librispeech/ASR/pruned_transducer_stateless4/export.py b/egs/librispeech/ASR/pruned_transducer_stateless4/export.py
deleted file mode 120000
index 19c56a722..000000000
--- a/egs/librispeech/ASR/pruned_transducer_stateless4/export.py
+++ /dev/null
@@ -1 +0,0 @@
-../pruned_transducer_stateless2/export.py
\ No newline at end of file
diff --git a/egs/librispeech/ASR/pruned_transducer_stateless4/export.py b/egs/librispeech/ASR/pruned_transducer_stateless4/export.py
new file mode 100755
index 000000000..8f64b5d64
--- /dev/null
+++ b/egs/librispeech/ASR/pruned_transducer_stateless4/export.py
@@ -0,0 +1,273 @@
+#!/usr/bin/env python3
+#
+# Copyright 2021 Xiaomi Corporation (Author: Fangjun Kuang)
+#
+# See ../../../../LICENSE for clarification regarding multiple authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script converts several saved checkpoints
+# to a single one using model averaging.
+"""
+Usage:
+./pruned_transducer_stateless4/export.py \
+ --exp-dir ./pruned_transducer_stateless4/exp \
+ --bpe-model data/lang_bpe_500/bpe.model \
+ --epoch 20 \
+ --avg 10
+
+It will generate a file exp_dir/pretrained.pt
+
+To use the generated file with `pruned_transducer_stateless4/decode.py`,
+you can do:
+
+ cd /path/to/exp_dir
+ ln -s pretrained.pt epoch-9999.pt
+
+ cd /path/to/egs/librispeech/ASR
+ ./pruned_transducer_stateless4/decode.py \
+ --exp-dir ./pruned_transducer_stateless4/exp \
+ --epoch 9999 \
+ --avg 1 \
+ --max-duration 100 \
+ --bpe-model data/lang_bpe_500/bpe.model \
+ --use-averaged-model False
+"""
+
+import argparse
+import logging
+from pathlib import Path
+
+import sentencepiece as spm
+import torch
+from train import get_params, get_transducer_model
+
+from icefall.checkpoint import (
+ average_checkpoints,
+ average_checkpoints_with_averaged_model,
+ find_checkpoints,
+ load_checkpoint,
+)
+from icefall.utils import str2bool
+
+
+def get_parser():
+ parser = argparse.ArgumentParser(
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter
+ )
+
+ parser.add_argument(
+ "--epoch",
+ type=int,
+ default=28,
+ help="""It specifies the checkpoint to use for averaging.
+ Note: Epoch counts from 1.
+ You can specify --avg to use more checkpoints for model averaging.""",
+ )
+
+ parser.add_argument(
+ "--iter",
+ type=int,
+ default=0,
+ help="""If positive, --epoch is ignored and it
+ will use the checkpoint exp_dir/checkpoint-iter.pt.
+ You can specify --avg to use more checkpoints for model averaging.
+ """,
+ )
+
+ parser.add_argument(
+ "--avg",
+ type=int,
+ default=15,
+ help="Number of checkpoints to average. Automatically select "
+ "consecutive checkpoints before the checkpoint specified by "
+ "'--epoch' and '--iter'",
+ )
+
+ parser.add_argument(
+ "--exp-dir",
+ type=str,
+ default="pruned_transducer_stateless2/exp",
+ help="""It specifies the directory where all training related
+ files, e.g., checkpoints, log, etc, are saved
+ """,
+ )
+
+ parser.add_argument(
+ "--bpe-model",
+ type=str,
+ default="data/lang_bpe_500/bpe.model",
+ help="Path to the BPE model",
+ )
+
+ parser.add_argument(
+ "--jit",
+ type=str2bool,
+ default=False,
+ help="""True to save a model after applying torch.jit.script.
+ """,
+ )
+
+ parser.add_argument(
+ "--context-size",
+ type=int,
+ default=2,
+ help="The context size in the decoder. 1 means bigram; "
+ "2 means tri-gram",
+ )
+
+ parser.add_argument(
+ "--use-averaged-model",
+ type=str2bool,
+ default=True,
+ help="Whether to load averaged model. Currently it only supports "
+ "using --epoch. If True, it would decode with the averaged model "
+ "over the epoch range from `epoch-avg` (excluded) to `epoch`."
+ "Actually only the models with epoch number of `epoch-avg` and "
+ "`epoch` are loaded for averaging. ",
+ )
+
+ return parser
+
+
+def main():
+ args = get_parser().parse_args()
+ args.exp_dir = Path(args.exp_dir)
+
+ params = get_params()
+ params.update(vars(args))
+
+ device = torch.device("cpu")
+
+ logging.info(f"device: {device}")
+
+ sp = spm.SentencePieceProcessor()
+ sp.load(params.bpe_model)
+
+ # <blk> is defined in local/train_bpe_model.py
+ params.blank_id = sp.piece_to_id("<blk>")
+ params.vocab_size = sp.get_piece_size()
+
+ logging.info(params)
+
+ logging.info("About to create model")
+ model = get_transducer_model(params)
+
+ model.to(device)
+
+ if not params.use_averaged_model:
+ if params.iter > 0:
+ filenames = find_checkpoints(
+ params.exp_dir, iteration=-params.iter
+ )[: params.avg]
+ if len(filenames) == 0:
+ raise ValueError(
+ f"No checkpoints found for"
+ f" --iter {params.iter}, --avg {params.avg}"
+ )
+ elif len(filenames) < params.avg:
+ raise ValueError(
+ f"Not enough checkpoints ({len(filenames)}) found for"
+ f" --iter {params.iter}, --avg {params.avg}"
+ )
+ logging.info(f"averaging {filenames}")
+ model.to(device)
+ model.load_state_dict(average_checkpoints(filenames, device=device))
+ elif params.avg == 1:
+ load_checkpoint(f"{params.exp_dir}/epoch-{params.epoch}.pt", model)
+ else:
+ start = params.epoch - params.avg + 1
+ filenames = []
+ for i in range(start, params.epoch + 1):
+ if i >= 1:
+ filenames.append(f"{params.exp_dir}/epoch-{i}.pt")
+ logging.info(f"averaging {filenames}")
+ model.to(device)
+ model.load_state_dict(average_checkpoints(filenames, device=device))
+ else:
+ if params.iter > 0:
+ filenames = find_checkpoints(
+ params.exp_dir, iteration=-params.iter
+ )[: params.avg + 1]
+ if len(filenames) == 0:
+ raise ValueError(
+ f"No checkpoints found for"
+ f" --iter {params.iter}, --avg {params.avg}"
+ )
+ elif len(filenames) < params.avg + 1:
+ raise ValueError(
+ f"Not enough checkpoints ({len(filenames)}) found for"
+ f" --iter {params.iter}, --avg {params.avg}"
+ )
+ filename_start = filenames[-1]
+ filename_end = filenames[0]
+ logging.info(
+ "Calculating the averaged model over iteration checkpoints"
+ f" from {filename_start} (excluded) to {filename_end}"
+ )
+ model.to(device)
+ model.load_state_dict(
+ average_checkpoints_with_averaged_model(
+ filename_start=filename_start,
+ filename_end=filename_end,
+ device=device,
+ )
+ )
+ else:
+ assert params.avg > 0, params.avg
+ start = params.epoch - params.avg
+ assert start >= 1, start
+ filename_start = f"{params.exp_dir}/epoch-{start}.pt"
+ filename_end = f"{params.exp_dir}/epoch-{params.epoch}.pt"
+ logging.info(
+ f"Calculating the averaged model over epoch range from "
+ f"{start} (excluded) to {params.epoch}"
+ )
+ model.to(device)
+ model.load_state_dict(
+ average_checkpoints_with_averaged_model(
+ filename_start=filename_start,
+ filename_end=filename_end,
+ device=device,
+ )
+ )
+
+ model.eval()
+
+ if params.jit:
+ # We won't use the forward() method of the model in C++, so just ignore
+ # it here.
+ # Otherwise, one of its arguments is a ragged tensor, which is not
+ # torch scriptable.
+ model.__class__.forward = torch.jit.ignore(model.__class__.forward)
+ logging.info("Using torch.jit.script")
+ model = torch.jit.script(model)
+ filename = params.exp_dir / "cpu_jit.pt"
+ model.save(str(filename))
+ logging.info(f"Saved to {filename}")
+ else:
+ logging.info("Not using torch.jit.script")
+ # Save it using a format so that it can be loaded
+ # by :func:`load_checkpoint`
+ filename = params.exp_dir / "pretrained.pt"
+ torch.save({"model": model.state_dict()}, str(filename))
+ logging.info(f"Saved to {filename}")
+
+
+if __name__ == "__main__":
+ formatter = (
+ "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s"
+ )
+
+ logging.basicConfig(format=formatter, level=logging.INFO)
+ main()
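A minimal sketch of consuming the two artifacts this script can write (paths are the defaults shown above):

    import torch

    # TorchScript export (--jit True): self-contained, loadable without
    # the Python model definition, e.g. from C++ via torch::jit::load.
    scripted = torch.jit.load("pruned_transducer_stateless4/exp/cpu_jit.pt")

    # state_dict export (--jit False): load back into a freshly
    # constructed model of the same architecture.
    ckpt = torch.load(
        "pruned_transducer_stateless4/exp/pretrained.pt",
        map_location="cpu",
    )
    # model.load_state_dict(ckpt["model"])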
diff --git a/egs/librispeech/ASR/pruned_transducer_stateless4/train.py b/egs/librispeech/ASR/pruned_transducer_stateless4/train.py
index ca7207122..48c0e683d 100755
--- a/egs/librispeech/ASR/pruned_transducer_stateless4/train.py
+++ b/egs/librispeech/ASR/pruned_transducer_stateless4/train.py
@@ -536,7 +536,7 @@ def compute_loss(
warmup: float = 1.0,
) -> Tuple[Tensor, MetricsTracker]:
"""
- Compute CTC loss given the model and its inputs.
+ Compute RNN-T loss given the model and its inputs.
Args:
params:
diff --git a/egs/librispeech/ASR/pruned_transducer_stateless5/train.py b/egs/librispeech/ASR/pruned_transducer_stateless5/train.py
index 7252ee436..e77eb19ff 100755
--- a/egs/librispeech/ASR/pruned_transducer_stateless5/train.py
+++ b/egs/librispeech/ASR/pruned_transducer_stateless5/train.py
@@ -577,7 +577,7 @@ def compute_loss(
warmup: float = 1.0,
) -> Tuple[Tensor, MetricsTracker]:
"""
- Compute CTC loss given the model and its inputs.
+ Compute RNN-T loss given the model and its inputs.
Args:
params:
diff --git a/egs/librispeech/ASR/pruned_transducer_stateless6/__init__.py b/egs/librispeech/ASR/pruned_transducer_stateless6/__init__.py
new file mode 120000
index 000000000..b24e5e357
--- /dev/null
+++ b/egs/librispeech/ASR/pruned_transducer_stateless6/__init__.py
@@ -0,0 +1 @@
+../pruned_transducer_stateless2/__init__.py
\ No newline at end of file
diff --git a/egs/librispeech/ASR/pruned_transducer_stateless6/asr_datamodule.py b/egs/librispeech/ASR/pruned_transducer_stateless6/asr_datamodule.py
new file mode 120000
index 000000000..a074d6085
--- /dev/null
+++ b/egs/librispeech/ASR/pruned_transducer_stateless6/asr_datamodule.py
@@ -0,0 +1 @@
+../pruned_transducer_stateless2/asr_datamodule.py
\ No newline at end of file
diff --git a/egs/librispeech/ASR/pruned_transducer_stateless6/beam_search.py b/egs/librispeech/ASR/pruned_transducer_stateless6/beam_search.py
new file mode 120000
index 000000000..8554e44cc
--- /dev/null
+++ b/egs/librispeech/ASR/pruned_transducer_stateless6/beam_search.py
@@ -0,0 +1 @@
+../pruned_transducer_stateless2/beam_search.py
\ No newline at end of file
diff --git a/egs/librispeech/ASR/pruned_transducer_stateless6/conformer.py b/egs/librispeech/ASR/pruned_transducer_stateless6/conformer.py
new file mode 100644
index 000000000..a0781da1f
--- /dev/null
+++ b/egs/librispeech/ASR/pruned_transducer_stateless6/conformer.py
@@ -0,0 +1,1064 @@
+#!/usr/bin/env python3
+# Copyright (c) 2021 University of Chinese Academy of Sciences (author: Han Zhu)
+#
+# See ../../../../LICENSE for clarification regarding multiple authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy
+import math
+import warnings
+from typing import List, Optional, Tuple
+
+import torch
+from encoder_interface import EncoderInterface
+from scaling import (
+ ActivationBalancer,
+ BasicNorm,
+ DoubleSwish,
+ ScaledConv1d,
+ ScaledConv2d,
+ ScaledLinear,
+)
+from torch import Tensor, nn
+
+from icefall.utils import make_pad_mask
+
+
+class Conformer(EncoderInterface):
+ """
+ Args:
+ num_features (int): Number of input features
+ subsampling_factor (int): subsampling factor of encoder (the convolution layers before transformers)
+ d_model (int): attention dimension, also the output dimension
+ nhead (int): number of attention heads
+ dim_feedforward (int): feedforward dimension
+ num_encoder_layers (int): number of encoder layers
+ dropout (float): dropout rate
+ layer_dropout (float): layer-dropout rate.
+ cnn_module_kernel (int): Kernel size of convolution module
+ middle_output_layer (int): 0-based index of an intermediate encoder
+ layer whose output is also returned; None to return only the
+ last layer's output.
+ """
+
+ def __init__(
+ self,
+ num_features: int,
+ subsampling_factor: int = 4,
+ d_model: int = 256,
+ nhead: int = 4,
+ dim_feedforward: int = 2048,
+ num_encoder_layers: int = 12,
+ dropout: float = 0.1,
+ layer_dropout: float = 0.075,
+ cnn_module_kernel: int = 31,
+ middle_output_layer: Optional[int] = None, # 0-based layer index
+ ) -> None:
+ super(Conformer, self).__init__()
+
+ self.num_features = num_features
+ self.subsampling_factor = subsampling_factor
+ if subsampling_factor != 4:
+ raise NotImplementedError("Support only 'subsampling_factor=4'.")
+
+ # self.encoder_embed converts the input of shape (N, T, num_features)
+ # to the shape (N, T//subsampling_factor, d_model).
+ # That is, it does two things simultaneously:
+ # (1) subsampling: T -> T//subsampling_factor
+ # (2) embedding: num_features -> d_model
+ self.encoder_embed = Conv2dSubsampling(num_features, d_model)
+
+ self.encoder_pos = RelPositionalEncoding(d_model, dropout)
+
+ encoder_layer = ConformerEncoderLayer(
+ d_model,
+ nhead,
+ dim_feedforward,
+ dropout,
+ layer_dropout,
+ cnn_module_kernel,
+ )
+
+ output_layers = []
+ if middle_output_layer is not None:
+ assert (
+ middle_output_layer >= 0
+ and middle_output_layer < num_encoder_layers
+ )
+ output_layers.append(middle_output_layer)
+
+ # The last layer is always needed.
+ output_layers.append(num_encoder_layers - 1)
+
+ self.encoder = ConformerEncoder(
+ encoder_layer, num_encoder_layers, output_layers=output_layers
+ )
+
+ def forward(
+ self, x: torch.Tensor, x_lens: torch.Tensor, warmup: float = 1.0
+ ) -> Tuple[List[torch.Tensor], torch.Tensor]:
+ """
+ Args:
+ x:
+ The input tensor. Its shape is (batch_size, seq_len, feature_dim).
+ x_lens:
+ A tensor of shape (batch_size,) containing the number of frames in
+ `x` before padding.
+ warmup:
+ A floating point value that gradually increases from 0 throughout
+ training; when it is >= 1.0 we are "fully warmed up". It is used
+ to turn modules on sequentially.
+ Returns:
+ Return a tuple containing 2 elements:
+ - layer_results: a list of tensors, one per requested output layer,
+ each of shape (batch_size, output_seq_len, d_model)
+ - lengths: a tensor of shape (batch_size,) containing the number
+ of frames in each element of `layer_results` before padding.
+ """
+ x = self.encoder_embed(x)
+ x, pos_emb = self.encoder_pos(x)
+ x = x.permute(1, 0, 2) # (N, T, C) -> (T, N, C)
+
+ # Caution: We assume the subsampling factor is 4!
+
+ # lengths = ((x_lens - 1) // 2 - 1) // 2 # issues a warning
+ #
+ # Note: rounding_mode in torch.div() is available only in torch >= 1.8.0
+ lengths = (((x_lens - 1) >> 1) - 1) >> 1
+
+ assert x.size(0) == lengths.max().item()
+ mask = make_pad_mask(lengths)
+
+ layer_results = self.encoder(
+ x, pos_emb, src_key_padding_mask=mask, warmup=warmup
+ ) # (T, N, C)
+
+ return layer_results, lengths
+
+
+class ConformerEncoderLayer(nn.Module):
+ """
+ ConformerEncoderLayer is made up of self-attn, feedforward and convolution networks.
+ See: "Conformer: Convolution-augmented Transformer for Speech Recognition"
+
+ Args:
+ d_model: the number of expected features in the input (required).
+ nhead: the number of heads in the multiheadattention models (required).
+ dim_feedforward: the dimension of the feedforward network model (default=2048).
+ dropout: the dropout value (default=0.1).
+ cnn_module_kernel (int): Kernel size of convolution module.
+
+ Examples::
+ >>> encoder_layer = ConformerEncoderLayer(d_model=512, nhead=8)
+ >>> src = torch.rand(10, 32, 512)
+ >>> pos_emb = torch.rand(32, 19, 512)
+ >>> out = encoder_layer(src, pos_emb)
+ """
+
+ def __init__(
+ self,
+ d_model: int,
+ nhead: int,
+ dim_feedforward: int = 2048,
+ dropout: float = 0.1,
+ layer_dropout: float = 0.075,
+ cnn_module_kernel: int = 31,
+ ) -> None:
+ super(ConformerEncoderLayer, self).__init__()
+
+ self.layer_dropout = layer_dropout
+
+ self.d_model = d_model
+
+ self.self_attn = RelPositionMultiheadAttention(
+ d_model, nhead, dropout=0.0
+ )
+
+ self.feed_forward = nn.Sequential(
+ ScaledLinear(d_model, dim_feedforward),
+ ActivationBalancer(channel_dim=-1),
+ DoubleSwish(),
+ nn.Dropout(dropout),
+ ScaledLinear(dim_feedforward, d_model, initial_scale=0.25),
+ )
+
+ self.feed_forward_macaron = nn.Sequential(
+ ScaledLinear(d_model, dim_feedforward),
+ ActivationBalancer(channel_dim=-1),
+ DoubleSwish(),
+ nn.Dropout(dropout),
+ ScaledLinear(dim_feedforward, d_model, initial_scale=0.25),
+ )
+
+ self.conv_module = ConvolutionModule(d_model, cnn_module_kernel)
+
+ self.norm_final = BasicNorm(d_model)
+
+ # try to ensure the output is close to zero-mean (or at least, zero-median).
+ self.balancer = ActivationBalancer(
+ channel_dim=-1, min_positive=0.45, max_positive=0.55, max_abs=6.0
+ )
+
+ self.dropout = nn.Dropout(dropout)
+
+ def forward(
+ self,
+ src: Tensor,
+ pos_emb: Tensor,
+ src_mask: Optional[Tensor] = None,
+ src_key_padding_mask: Optional[Tensor] = None,
+ warmup: float = 1.0,
+ ) -> Tensor:
+ """
+ Pass the input through the encoder layer.
+
+ Args:
+ src: the sequence to the encoder layer (required).
+ pos_emb: Positional embedding tensor (required).
+ src_mask: the mask for the src sequence (optional).
+ src_key_padding_mask: the mask for the src keys per batch (optional).
+ warmup: controls selective bypass of layers; if < 1.0, we will
+ bypass layers more frequently.
+
+ Shape:
+ src: (S, N, E).
+ pos_emb: (N, 2*S-1, E)
+ src_mask: (S, S).
+ src_key_padding_mask: (N, S).
+ S is the source sequence length, N is the batch size, E is the feature number
+ """
+ src_orig = src
+
+ warmup_scale = min(0.1 + warmup, 1.0)
+ # alpha = 1.0 means fully use this encoder layer, 0.0 would mean
+ # completely bypass it.
+ if self.training:
+ alpha = (
+ warmup_scale
+ if torch.rand(()).item() <= (1.0 - self.layer_dropout)
+ else 0.1
+ )
+ else:
+ alpha = 1.0
+
+ # macaron style feed forward module
+ src = src + self.dropout(self.feed_forward_macaron(src))
+
+ # multi-headed self-attention module
+ src_att = self.self_attn(
+ src,
+ src,
+ src,
+ pos_emb=pos_emb,
+ attn_mask=src_mask,
+ key_padding_mask=src_key_padding_mask,
+ )[0]
+ src = src + self.dropout(src_att)
+
+ # convolution module
+ src = src + self.dropout(self.conv_module(src))
+
+ # feed forward module
+ src = src + self.dropout(self.feed_forward(src))
+
+ src = self.norm_final(self.balancer(src))
+
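+ # A numeric sketch of the schedule: with warmup = 0.2 early in
+ # training, alpha is 0.3 (warmup_scale) for most batches and 0.1 for
+ # a random `layer_dropout` fraction of them, so the layer is largely
+ # bypassed; once warmup >= 0.9, alpha saturates at 1.0 and the
+ # interpolation below becomes a no-op.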
+ if alpha != 1.0:
+ src = alpha * src + (1 - alpha) * src_orig
+
+ return src
+
+
+class ConformerEncoder(nn.Module):
+ r"""ConformerEncoder is a stack of N encoder layers
+
+ Args:
+ encoder_layer: an instance of the ConformerEncoderLayer() class (required).
+ num_layers: the number of sub-encoder-layers in the encoder (required).
+
+ Examples::
+ >>> encoder_layer = ConformerEncoderLayer(d_model=512, nhead=8)
+ >>> conformer_encoder = ConformerEncoder(encoder_layer, num_layers=6)
+ >>> src = torch.rand(10, 32, 512)
+ >>> pos_emb = torch.rand(32, 19, 512)
+ >>> out = conformer_encoder(src, pos_emb)
+ """
+
+ def __init__(
+ self,
+ encoder_layer: nn.Module,
+ num_layers: int,
+ output_layers: List[int],
+ ) -> None:
+ super().__init__()
+ self.layers = nn.ModuleList(
+ [copy.deepcopy(encoder_layer) for i in range(num_layers)]
+ )
+ self.num_layers = num_layers
+ self.output_layers = output_layers
+
+ def forward(
+ self,
+ src: Tensor,
+ pos_emb: Tensor,
+ mask: Optional[Tensor] = None,
+ src_key_padding_mask: Optional[Tensor] = None,
+ warmup: float = 1.0,
+ ) -> List[Tensor]:
+ r"""Pass the input through the encoder layers in turn.
+
+ Args:
+ src: the sequence to the encoder (required).
+ pos_emb: Positional embedding tensor (required).
+ mask: the mask for the src sequence (optional).
+ src_key_padding_mask: the mask for the src keys per batch (optional).
+
+ Shape:
+ src: (S, N, E).
+ pos_emb: (N, 2*S-1, E)
+ mask: (S, S).
+ src_key_padding_mask: (N, S).
+ S is the source sequence length, N is the batch size, E is the feature number
+
+ """
+ output = src
+
+ layer_results = []
+ for i, mod in enumerate(self.layers):
+ output = mod(
+ output,
+ pos_emb,
+ src_mask=mask,
+ src_key_padding_mask=src_key_padding_mask,
+ warmup=warmup,
+ )
+ if i in self.output_layers:
+ # (T, N, C) --> (N, T, C)
+ layer_results.append(output.permute(1, 0, 2))
+
+ return layer_results
+
+
+class RelPositionalEncoding(torch.nn.Module):
+ """Relative positional encoding module.
+
+ See : Appendix B in "Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context"
+ Modified from https://github.com/espnet/espnet/blob/master/espnet/nets/pytorch_backend/transformer/embedding.py
+
+ Args:
+ d_model: Embedding dimension.
+ dropout_rate: Dropout rate.
+ max_len: Maximum input length.
+
+ """
+
+ def __init__(
+ self, d_model: int, dropout_rate: float, max_len: int = 5000
+ ) -> None:
+ """Construct an PositionalEncoding object."""
+ super(RelPositionalEncoding, self).__init__()
+ self.d_model = d_model
+ self.dropout = torch.nn.Dropout(p=dropout_rate)
+ self.pe = None
+ self.extend_pe(torch.tensor(0.0).expand(1, max_len))
+
+ def extend_pe(self, x: Tensor) -> None:
+ """Reset the positional encodings."""
+ if self.pe is not None:
+ # self.pe contains both positive and negative parts
+ # the length of self.pe is 2 * input_len - 1
+ if self.pe.size(1) >= x.size(1) * 2 - 1:
+ # Note: TorchScript doesn't implement operator== for torch.Device
+ if self.pe.dtype != x.dtype or str(self.pe.device) != str(
+ x.device
+ ):
+ self.pe = self.pe.to(dtype=x.dtype, device=x.device)
+ return
+ # Suppose `i` refers to the position of a query vector and `j` to the
+ # position of a key vector. We use positive relative positions when
+ # keys are to the left (i > j) and negative relative positions
+ # otherwise (i < j).
+ pe_positive = torch.zeros(x.size(1), self.d_model)
+ pe_negative = torch.zeros(x.size(1), self.d_model)
+ position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1)
+ div_term = torch.exp(
+ torch.arange(0, self.d_model, 2, dtype=torch.float32)
+ * -(math.log(10000.0) / self.d_model)
+ )
+ pe_positive[:, 0::2] = torch.sin(position * div_term)
+ pe_positive[:, 1::2] = torch.cos(position * div_term)
+ pe_negative[:, 0::2] = torch.sin(-1 * position * div_term)
+ pe_negative[:, 1::2] = torch.cos(-1 * position * div_term)
+
+ # Reverse the order of the positive indices and concatenate the
+ # positive and negative parts. This supports the shifting trick
+ # as in "Transformer-XL: Attentive Language Models Beyond a
+ # Fixed-Length Context".
+ pe_positive = torch.flip(pe_positive, [0]).unsqueeze(0)
+ pe_negative = pe_negative[1:].unsqueeze(0)
+ pe = torch.cat([pe_positive, pe_negative], dim=1)
+ self.pe = pe.to(device=x.device, dtype=x.dtype)
+
+ def forward(self, x: torch.Tensor) -> Tuple[Tensor, Tensor]:
+ """Add positional encoding.
+
+ Args:
+ x (torch.Tensor): Input tensor (batch, time, `*`).
+
+ Returns:
+ torch.Tensor: Encoded tensor (batch, time, `*`).
+ torch.Tensor: Encoded tensor (batch, 2*time-1, `*`).
+
+ """
+ self.extend_pe(x)
+ pos_emb = self.pe[
+ :,
+ self.pe.size(1) // 2
+ - x.size(1)
+ + 1 : self.pe.size(1) // 2 # noqa E203
+ + x.size(1),
+ ]
+ return self.dropout(x), self.dropout(pos_emb)
+
+
+class RelPositionMultiheadAttention(nn.Module):
+ r"""Multi-Head Attention layer with relative position encoding
+
+ See reference: "Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context"
+
+ Args:
+ embed_dim: total dimension of the model.
+ num_heads: parallel attention heads.
+ dropout: a Dropout layer on attn_output_weights. Default: 0.0.
+
+ Examples::
+
+ >>> rel_pos_multihead_attn = RelPositionMultiheadAttention(embed_dim, num_heads)
+ >>> attn_output, attn_output_weights = multihead_attn(query, key, value, pos_emb)
+ """
+
+ def __init__(
+ self,
+ embed_dim: int,
+ num_heads: int,
+ dropout: float = 0.0,
+ ) -> None:
+ super(RelPositionMultiheadAttention, self).__init__()
+ self.embed_dim = embed_dim
+ self.num_heads = num_heads
+ self.dropout = dropout
+ self.head_dim = embed_dim // num_heads
+ assert (
+ self.head_dim * num_heads == self.embed_dim
+ ), "embed_dim must be divisible by num_heads"
+
+ self.in_proj = ScaledLinear(embed_dim, 3 * embed_dim, bias=True)
+ self.out_proj = ScaledLinear(
+ embed_dim, embed_dim, bias=True, initial_scale=0.25
+ )
+
+ # linear transformation for positional encoding.
+ self.linear_pos = ScaledLinear(embed_dim, embed_dim, bias=False)
+ # these two learnable bias are used in matrix c and matrix d
+ # as described in "Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context" Section 3.3
+ self.pos_bias_u = nn.Parameter(torch.Tensor(num_heads, self.head_dim))
+ self.pos_bias_v = nn.Parameter(torch.Tensor(num_heads, self.head_dim))
+ self.pos_bias_u_scale = nn.Parameter(torch.zeros(()).detach())
+ self.pos_bias_v_scale = nn.Parameter(torch.zeros(()).detach())
+ self._reset_parameters()
+
+ def _pos_bias_u(self):
+ return self.pos_bias_u * self.pos_bias_u_scale.exp()
+
+ def _pos_bias_v(self):
+ return self.pos_bias_v * self.pos_bias_v_scale.exp()
+
+ def _reset_parameters(self) -> None:
+ nn.init.normal_(self.pos_bias_u, std=0.01)
+ nn.init.normal_(self.pos_bias_v, std=0.01)
+
+ def forward(
+ self,
+ query: Tensor,
+ key: Tensor,
+ value: Tensor,
+ pos_emb: Tensor,
+ key_padding_mask: Optional[Tensor] = None,
+ need_weights: bool = True,
+ attn_mask: Optional[Tensor] = None,
+ ) -> Tuple[Tensor, Optional[Tensor]]:
+ r"""
+ Args:
+ query, key, value: map a query and a set of key-value pairs to an output.
+ pos_emb: Positional embedding tensor
+ key_padding_mask: if provided, specified padding elements in the key will
+ be ignored by the attention. When given a binary mask and a value is True,
+ the corresponding value on the attention layer will be ignored. When given
+ a byte mask and a value is non-zero, the corresponding value on the attention
+ layer will be ignored
+ need_weights: output attn_output_weights.
+ attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
+ the batches while a 3D mask allows to specify a different mask for the entries of each batch.
+
+ Shape:
+ - Inputs:
+ - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
+ the embedding dimension.
+ - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
+ the embedding dimension.
+ - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
+ the embedding dimension.
+ - pos_emb: :math:`(N, 2*L-1, E)` where L is the target sequence length, N is the batch size, E is
+ the embedding dimension.
+ - key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
+ If a ByteTensor is provided, the non-zero positions will be ignored while the zero
+ positions will be unchanged. If a BoolTensor is provided, the positions with the
+ value of ``True`` will be ignored while the positions with the value of ``False`` will be unchanged.
+ - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
+ 3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
+ S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked
+ positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
+ while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
+ are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
+ is provided, it will be added to the attention weight.
+
+ - Outputs:
+ - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
+ E is the embedding dimension.
+ - attn_output_weights: :math:`(N, L, S)` where N is the batch size,
+ L is the target sequence length, S is the source sequence length.
+ """
+ return self.multi_head_attention_forward(
+ query,
+ key,
+ value,
+ pos_emb,
+ self.embed_dim,
+ self.num_heads,
+ self.in_proj.get_weight(),
+ self.in_proj.get_bias(),
+ self.dropout,
+ self.out_proj.get_weight(),
+ self.out_proj.get_bias(),
+ training=self.training,
+ key_padding_mask=key_padding_mask,
+ need_weights=need_weights,
+ attn_mask=attn_mask,
+ )
+
+ def rel_shift(self, x: Tensor) -> Tensor:
+ """Compute relative positional encoding.
+
+ Args:
+ x: Input tensor (batch, head, time1, 2*time1-1).
+ time1 means the length of query vector.
+
+ Returns:
+ Tensor: tensor of shape (batch, head, time1, time2)
+ (note: time2 has the same value as time1, but it is for
+ the key, while time1 is for the query).
+ """
+ (batch_size, num_heads, time1, n) = x.shape
+ assert n == 2 * time1 - 1
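+ # A worked sketch of the shift: out[i, j] == x[i, time1 - 1 + j - i],
+ # i.e. the score stored for relative offset (j - i) between query i
+ # and key j. For time1 = 2, row 0 of the output is [x[0, 1], x[0, 2]]
+ # and row 1 is [x[1, 0], x[1, 1]].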
+ # Note: TorchScript requires explicit arg for stride()
+ batch_stride = x.stride(0)
+ head_stride = x.stride(1)
+ time1_stride = x.stride(2)
+ n_stride = x.stride(3)
+ return x.as_strided(
+ (batch_size, num_heads, time1, time1),
+ (batch_stride, head_stride, time1_stride - n_stride, n_stride),
+ storage_offset=n_stride * (time1 - 1),
+ )
+
+ def multi_head_attention_forward(
+ self,
+ query: Tensor,
+ key: Tensor,
+ value: Tensor,
+ pos_emb: Tensor,
+ embed_dim_to_check: int,
+ num_heads: int,
+ in_proj_weight: Tensor,
+ in_proj_bias: Tensor,
+ dropout_p: float,
+ out_proj_weight: Tensor,
+ out_proj_bias: Tensor,
+ training: bool = True,
+ key_padding_mask: Optional[Tensor] = None,
+ need_weights: bool = True,
+ attn_mask: Optional[Tensor] = None,
+ ) -> Tuple[Tensor, Optional[Tensor]]:
+ r"""
+ Args:
+ query, key, value: map a query and a set of key-value pairs to an output.
+ pos_emb: Positional embedding tensor
+ embed_dim_to_check: total dimension of the model.
+ num_heads: parallel attention heads.
+ in_proj_weight, in_proj_bias: input projection weight and bias.
+ dropout_p: probability of an element to be zeroed.
+ out_proj_weight, out_proj_bias: the output projection weight and bias.
+ training: apply dropout if is ``True``.
+ key_padding_mask: if provided, specified padding elements in the key will
+ be ignored by the attention. This is a binary mask. When the value is True,
+ the corresponding value on the attention layer will be filled with -inf.
+ need_weights: output attn_output_weights.
+ attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
+ the batches while a 3D mask allows to specify a different mask for the entries of each batch.
+
+ Shape:
+ Inputs:
+ - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
+ the embedding dimension.
+ - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
+ the embedding dimension.
+ - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
+ the embedding dimension.
+ - pos_emb: :math:`(N, 2*L-1, E)` or :math:`(1, 2*L-1, E)` where L is the target sequence
+ length, N is the batch size, E is the embedding dimension.
+ - key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
+ If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions
+ will be unchanged. If a BoolTensor is provided, the positions with the
+ value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
+ - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
+ 3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
+ S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked
+ positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
+ while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
+ are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
+ is provided, it will be added to the attention weight.
+
+ Outputs:
+ - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
+ E is the embedding dimension.
+ - attn_output_weights: :math:`(N, L, S)` where N is the batch size,
+ L is the target sequence length, S is the source sequence length.
+ """
+
+ tgt_len, bsz, embed_dim = query.size()
+ assert embed_dim == embed_dim_to_check
+ assert key.size(0) == value.size(0) and key.size(1) == value.size(1)
+
+ head_dim = embed_dim // num_heads
+ assert (
+ head_dim * num_heads == embed_dim
+ ), "embed_dim must be divisible by num_heads"
+
+ scaling = float(head_dim) ** -0.5
+
+ if torch.equal(query, key) and torch.equal(key, value):
+ # self-attention
+ q, k, v = nn.functional.linear(
+ query, in_proj_weight, in_proj_bias
+ ).chunk(3, dim=-1)
+
+ elif torch.equal(key, value):
+ # encoder-decoder attention
+ # This is inline in_proj function with in_proj_weight and in_proj_bias
+ _b = in_proj_bias
+ _start = 0
+ _end = embed_dim
+ _w = in_proj_weight[_start:_end, :]
+ if _b is not None:
+ _b = _b[_start:_end]
+ q = nn.functional.linear(query, _w, _b)
+
+ # This is inline in_proj function with in_proj_weight and in_proj_bias
+ _b = in_proj_bias
+ _start = embed_dim
+ _end = None
+ _w = in_proj_weight[_start:, :]
+ if _b is not None:
+ _b = _b[_start:]
+ k, v = nn.functional.linear(key, _w, _b).chunk(2, dim=-1)
+
+ else:
+ # This is inline in_proj function with in_proj_weight and in_proj_bias
+ _b = in_proj_bias
+ _start = 0
+ _end = embed_dim
+ _w = in_proj_weight[_start:_end, :]
+ if _b is not None:
+ _b = _b[_start:_end]
+ q = nn.functional.linear(query, _w, _b)
+
+ # This is inline in_proj function with in_proj_weight and in_proj_bias
+ _b = in_proj_bias
+ _start = embed_dim
+ _end = embed_dim * 2
+ _w = in_proj_weight[_start:_end, :]
+ if _b is not None:
+ _b = _b[_start:_end]
+ k = nn.functional.linear(key, _w, _b)
+
+ # This is inline in_proj function with in_proj_weight and in_proj_bias
+ _b = in_proj_bias
+ _start = embed_dim * 2
+ _end = None
+ _w = in_proj_weight[_start:, :]
+ if _b is not None:
+ _b = _b[_start:]
+ v = nn.functional.linear(value, _w, _b)
+
+ if attn_mask is not None:
+ assert (
+ attn_mask.dtype == torch.float32
+ or attn_mask.dtype == torch.float64
+ or attn_mask.dtype == torch.float16
+ or attn_mask.dtype == torch.uint8
+ or attn_mask.dtype == torch.bool
+ ), "Only float, byte, and bool types are supported for attn_mask, not {}".format(
+ attn_mask.dtype
+ )
+ if attn_mask.dtype == torch.uint8:
+ warnings.warn(
+ "Byte tensor for attn_mask is deprecated. Use bool tensor instead."
+ )
+ attn_mask = attn_mask.to(torch.bool)
+
+ if attn_mask.dim() == 2:
+ attn_mask = attn_mask.unsqueeze(0)
+ if list(attn_mask.size()) != [1, query.size(0), key.size(0)]:
+ raise RuntimeError(
+ "The size of the 2D attn_mask is not correct."
+ )
+ elif attn_mask.dim() == 3:
+ if list(attn_mask.size()) != [
+ bsz * num_heads,
+ query.size(0),
+ key.size(0),
+ ]:
+ raise RuntimeError(
+ "The size of the 3D attn_mask is not correct."
+ )
+ else:
+ raise RuntimeError(
+ "attn_mask's dimension {} is not supported".format(
+ attn_mask.dim()
+ )
+ )
+ # attn_mask's dim is 3 now.
+
+ # convert ByteTensor key_padding_mask to bool
+ if (
+ key_padding_mask is not None
+ and key_padding_mask.dtype == torch.uint8
+ ):
+ warnings.warn(
+ "Byte tensor for key_padding_mask is deprecated. Use bool tensor instead."
+ )
+ key_padding_mask = key_padding_mask.to(torch.bool)
+
+ q = (q * scaling).contiguous().view(tgt_len, bsz, num_heads, head_dim)
+ k = k.contiguous().view(-1, bsz, num_heads, head_dim)
+ v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
+
+ src_len = k.size(0)
+
+ if key_padding_mask is not None:
+ assert key_padding_mask.size(0) == bsz, "{} == {}".format(
+ key_padding_mask.size(0), bsz
+ )
+ assert key_padding_mask.size(1) == src_len, "{} == {}".format(
+ key_padding_mask.size(1), src_len
+ )
+
+ q = q.transpose(0, 1) # (batch, time1, head, d_k)
+
+ pos_emb_bsz = pos_emb.size(0)
+ assert pos_emb_bsz in (1, bsz) # actually it is 1
+ p = self.linear_pos(pos_emb).view(pos_emb_bsz, -1, num_heads, head_dim)
+ p = p.transpose(1, 2) # (batch, head, 2*time1-1, d_k)
+
+ q_with_bias_u = (q + self._pos_bias_u()).transpose(
+ 1, 2
+ ) # (batch, head, time1, d_k)
+
+ q_with_bias_v = (q + self._pos_bias_v()).transpose(
+ 1, 2
+ ) # (batch, head, time1, d_k)
+
+ # compute attention score
+ # first compute matrix a and matrix c
+ # as described in "Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context" Section 3.3
+ k = k.permute(1, 2, 3, 0) # (batch, head, d_k, time2)
+ matrix_ac = torch.matmul(
+ q_with_bias_u, k
+ ) # (batch, head, time1, time2)
+
+ # compute matrix b and matrix d
+ matrix_bd = torch.matmul(
+ q_with_bias_v, p.transpose(-2, -1)
+ ) # (batch, head, time1, 2*time1-1)
+ matrix_bd = self.rel_shift(matrix_bd)
+
+ attn_output_weights = (
+ matrix_ac + matrix_bd
+ ) # (batch, head, time1, time2)
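+ # In the notation of Transformer-XL (Section 3.3), this computes
+ # A(i, j) = (q_i + u)^T k_j (terms a + c, matrix_ac)
+ # + (q_i + v)^T r_{i-j} (terms b + d, matrix_bd)
+ # where u and v are pos_bias_u / pos_bias_v and r is the relative
+ # positional encoding projected by linear_pos.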
+
+ attn_output_weights = attn_output_weights.view(
+ bsz * num_heads, tgt_len, -1
+ )
+
+ assert list(attn_output_weights.size()) == [
+ bsz * num_heads,
+ tgt_len,
+ src_len,
+ ]
+
+ if attn_mask is not None:
+ if attn_mask.dtype == torch.bool:
+ attn_output_weights.masked_fill_(attn_mask, float("-inf"))
+ else:
+ attn_output_weights += attn_mask
+
+ if key_padding_mask is not None:
+ attn_output_weights = attn_output_weights.view(
+ bsz, num_heads, tgt_len, src_len
+ )
+ attn_output_weights = attn_output_weights.masked_fill(
+ key_padding_mask.unsqueeze(1).unsqueeze(2),
+ float("-inf"),
+ )
+ attn_output_weights = attn_output_weights.view(
+ bsz * num_heads, tgt_len, src_len
+ )
+
+ attn_output_weights = nn.functional.softmax(attn_output_weights, dim=-1)
+ attn_output_weights = nn.functional.dropout(
+ attn_output_weights, p=dropout_p, training=training
+ )
+
+ attn_output = torch.bmm(attn_output_weights, v)
+ assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim]
+ attn_output = (
+ attn_output.transpose(0, 1)
+ .contiguous()
+ .view(tgt_len, bsz, embed_dim)
+ )
+ attn_output = nn.functional.linear(
+ attn_output, out_proj_weight, out_proj_bias
+ )
+
+ if need_weights:
+ # average attention weights over heads
+ attn_output_weights = attn_output_weights.view(
+ bsz, num_heads, tgt_len, src_len
+ )
+ return attn_output, attn_output_weights.sum(dim=1) / num_heads
+ else:
+ return attn_output, None
+
+
+class ConvolutionModule(nn.Module):
+ """ConvolutionModule in Conformer model.
+ Modified from https://github.com/espnet/espnet/blob/master/espnet/nets/pytorch_backend/conformer/convolution.py
+
+ Args:
+ channels (int): The number of channels of conv layers.
+ kernel_size (int): Kernel size of conv layers.
+ bias (bool): Whether to use bias in conv layers (default=True).
+
+ """
+
+ def __init__(
+ self, channels: int, kernel_size: int, bias: bool = True
+ ) -> None:
+ """Construct an ConvolutionModule object."""
+ super(ConvolutionModule, self).__init__()
+ # kernel_size should be an odd number for 'SAME' padding
+ assert (kernel_size - 1) % 2 == 0
+
+ self.pointwise_conv1 = ScaledConv1d(
+ channels,
+ 2 * channels,
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ bias=bias,
+ )
+
+ # after pointwise_conv1 we put x through a gated linear unit (nn.functional.glu).
+ # For most layers the normal rms value of channels of x seems to be in the range 1 to 4,
+ # but sometimes, for some reason, for layer 0 the rms ends up being very large,
+ # between 50 and 100 for different channels. This will cause very peaky and
+ # sparse derivatives for the sigmoid gating function, which will tend to make
+ # the loss function not learn effectively. (for most layers the average absolute values
+ # are in the range 0.5..9.0, and the average p(x>0), i.e. positive proportion,
+ # at the output of pointwise_conv1.output is around 0.35 to 0.45 for different
+ # layers, which likely breaks down as 0.5 for the "linear" half and
+ # 0.2 to 0.3 for the part that goes into the sigmoid.) The idea is that if we
+ # constrain the rms values to a reasonable range via a constraint of max_abs=10.0,
+ # it will be in a better position to start learning something, i.e. to latch onto
+ # the correct range.
+ self.deriv_balancer1 = ActivationBalancer(
+ channel_dim=1, max_abs=10.0, min_positive=0.05, max_positive=1.0
+ )
+
+ self.depthwise_conv = ScaledConv1d(
+ channels,
+ channels,
+ kernel_size,
+ stride=1,
+ padding=(kernel_size - 1) // 2,
+ groups=channels,
+ bias=bias,
+ )
+
+ self.deriv_balancer2 = ActivationBalancer(
+ channel_dim=1, min_positive=0.05, max_positive=1.0
+ )
+
+ self.activation = DoubleSwish()
+
+ self.pointwise_conv2 = ScaledConv1d(
+ channels,
+ channels,
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ bias=bias,
+ initial_scale=0.25,
+ )
+
+ def forward(self, x: Tensor) -> Tensor:
+ """Compute convolution module.
+
+ Args:
+ x: Input tensor (#time, batch, channels).
+
+ Returns:
+ Tensor: Output tensor (#time, batch, channels).
+
+ """
+ # exchange the temporal dimension and the feature dimension
+ x = x.permute(1, 2, 0) # (#batch, channels, time).
+
+ # GLU mechanism
+ x = self.pointwise_conv1(x) # (batch, 2*channels, time)
+
+ x = self.deriv_balancer1(x)
+ x = nn.functional.glu(x, dim=1) # (batch, channels, time)
+
+ # 1D Depthwise Conv
+ x = self.depthwise_conv(x)
+
+ x = self.deriv_balancer2(x)
+ x = self.activation(x)
+
+ x = self.pointwise_conv2(x) # (batch, channel, time)
+
+ return x.permute(2, 0, 1)
+
+
+class Conv2dSubsampling(nn.Module):
+ """Convolutional 2D subsampling (to 1/4 length).
+
+ Convert an input of shape (N, T, idim) to an output
+ with shape (N, T', odim), where
+ T' = ((T-1)//2 - 1)//2, which approximates T' == T//4
+
+ It is based on
+ https://github.com/espnet/espnet/blob/master/espnet/nets/pytorch_backend/transformer/subsampling.py # noqa
+ """
+
+ def __init__(
+ self,
+ in_channels: int,
+ out_channels: int,
+ layer1_channels: int = 8,
+ layer2_channels: int = 32,
+ layer3_channels: int = 128,
+ ) -> None:
+ """
+ Args:
+ in_channels:
+ Number of input channels. The input shape is (N, T, in_channels).
+ Caution: It requires: T >= 7, in_channels >= 7
+ out_channels:
+ Output dim. The output shape is (N, ((T-1)//2 - 1)//2, out_channels)
+ layer1_channels:
+ Number of channels in layer1
+ layer2_channels:
+ Number of channels in layer2
+ layer3_channels:
+ Number of channels in layer3
+ """
+ assert in_channels >= 7
+ super().__init__()
+
+ self.conv = nn.Sequential(
+ ScaledConv2d(
+ in_channels=1,
+ out_channels=layer1_channels,
+ kernel_size=3,
+ padding=1,
+ ),
+ ActivationBalancer(channel_dim=1),
+ DoubleSwish(),
+ ScaledConv2d(
+ in_channels=layer1_channels,
+ out_channels=layer2_channels,
+ kernel_size=3,
+ stride=2,
+ ),
+ ActivationBalancer(channel_dim=1),
+ DoubleSwish(),
+ ScaledConv2d(
+ in_channels=layer2_channels,
+ out_channels=layer3_channels,
+ kernel_size=3,
+ stride=2,
+ ),
+ ActivationBalancer(channel_dim=1),
+ DoubleSwish(),
+ )
+ self.out = ScaledLinear(
+ layer3_channels * (((in_channels - 1) // 2 - 1) // 2), out_channels
+ )
+ # set learn_eps=False because out_norm is preceded by `out`, and `out`
+ # itself has learned scale, so the extra degree of freedom is not
+ # needed.
+ self.out_norm = BasicNorm(out_channels, learn_eps=False)
+ # constrain median of output to be close to zero.
+ self.out_balancer = ActivationBalancer(
+ channel_dim=-1, min_positive=0.45, max_positive=0.55
+ )
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ """Subsample x.
+
+ Args:
+ x:
+ Its shape is (N, T, idim).
+
+ Returns:
+ Return a tensor of shape (N, ((T-1)//2 - 1)//2, odim)
+ """
+ # On entry, x is (N, T, idim)
+ x = x.unsqueeze(1) # (N, T, idim) -> (N, 1, T, idim) i.e., (N, C, H, W)
+ x = self.conv(x)
+ # Now x is of shape (N, odim, ((T-1)//2 - 1)//2, ((idim-1)//2 - 1)//2)
+ b, c, t, f = x.size()
+ x = self.out(x.transpose(1, 2).contiguous().view(b, t, c * f))
+ # Now x is of shape (N, ((T-1)//2 - 1)//2, odim)
+ x = self.out_norm(x)
+ x = self.out_balancer(x)
+ return x
+
+
+if __name__ == "__main__":
+ feature_dim = 50
+ c = Conformer(num_features=feature_dim, d_model=128, nhead=4)
+ batch_size = 5
+ seq_len = 20
+ # Just make sure the forward pass runs.
+ f = c(
+ torch.randn(batch_size, seq_len, feature_dim),
+ torch.full((batch_size,), seq_len, dtype=torch.int64),
+ warmup=0.5,
+ )
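A short sketch of the new multi-output interface that the smoke test above exercises (the layer index is illustrative):

    import torch

    # Request layer 5's hidden states in addition to the final layer.
    model = Conformer(
        num_features=80, d_model=256, nhead=4, middle_output_layer=5
    )
    x = torch.randn(2, 100, 80)
    x_lens = torch.full((2,), 100, dtype=torch.int64)
    layer_results, lengths = model(x, x_lens)
    middle_out, final_out = layer_results  # each (N, T', d_model)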
diff --git a/egs/librispeech/ASR/pruned_transducer_stateless6/decode.py b/egs/librispeech/ASR/pruned_transducer_stateless6/decode.py
new file mode 100755
index 000000000..4739a6526
--- /dev/null
+++ b/egs/librispeech/ASR/pruned_transducer_stateless6/decode.py
@@ -0,0 +1,634 @@
+#!/usr/bin/env python3
+#
+# Copyright 2021-2022 Xiaomi Corporation (Author: Fangjun Kuang,
+# Zengwei Yao)
+#
+# See ../../../../LICENSE for clarification regarding multiple authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Usage:
+(1) greedy search
+./pruned_transducer_stateless6/decode.py \
+ --epoch 30 \
+ --avg 15 \
+ --exp-dir ./pruned_transducer_stateless6/exp \
+ --max-duration 600 \
+ --decoding-method greedy_search
+
+(2) beam search (not recommended)
+./pruned_transducer_stateless6/decode.py \
+ --epoch 30 \
+ --avg 15 \
+ --exp-dir ./pruned_transducer_stateless6/exp \
+ --max-duration 600 \
+ --decoding-method beam_search \
+ --beam-size 4
+
+(3) modified beam search
+./pruned_transducer_stateless6/decode.py \
+ --epoch 30 \
+ --avg 15 \
+ --exp-dir ./pruned_transducer_stateless6/exp \
+ --max-duration 600 \
+ --decoding-method modified_beam_search \
+ --beam-size 4
+
+(4) fast beam search
+./pruned_transducer_stateless6/decode.py \
+ --epoch 30 \
+ --avg 15 \
+ --exp-dir ./pruned_transducer_stateless6/exp \
+ --max-duration 600 \
+ --decoding-method fast_beam_search \
+ --beam 4 \
+ --max-contexts 4 \
+ --max-states 8
+"""
+
+
+import argparse
+import logging
+from collections import defaultdict
+from pathlib import Path
+from typing import Dict, List, Optional, Tuple
+
+import k2
+import sentencepiece as spm
+import torch
+import torch.nn as nn
+from asr_datamodule import LibriSpeechAsrDataModule
+from beam_search import (
+ beam_search,
+ fast_beam_search_one_best,
+ greedy_search,
+ greedy_search_batch,
+ modified_beam_search,
+)
+from train import get_params, get_transducer_model
+
+from icefall.checkpoint import (
+ average_checkpoints,
+ average_checkpoints_with_averaged_model,
+ find_checkpoints,
+ load_checkpoint,
+)
+from icefall.utils import (
+ AttributeDict,
+ setup_logger,
+ store_transcripts,
+ str2bool,
+ write_error_stats,
+)
+
+
+def get_parser():
+ parser = argparse.ArgumentParser(
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter
+ )
+
+ parser.add_argument(
+ "--epoch",
+ type=int,
+ default=30,
+ help="""It specifies the checkpoint to use for decoding.
+ Note: Epoch counts from 1.
+ You can specify --avg to use more checkpoints for model averaging.""",
+ )
+
+ parser.add_argument(
+ "--iter",
+ type=int,
+ default=0,
+ help="""If positive, --epoch is ignored and it
+ will use the checkpoint exp_dir/checkpoint-iter.pt.
+ You can specify --avg to use more checkpoints for model averaging.
+ """,
+ )
+
+ parser.add_argument(
+ "--avg",
+ type=int,
+ default=15,
+ help="Number of checkpoints to average. Automatically select "
+ "consecutive checkpoints before the checkpoint specified by "
+ "'--epoch' and '--iter'",
+ )
+
+ parser.add_argument(
+ "--use-averaged-model",
+ type=str2bool,
+ default=False,
+ help="Whether to load averaged model. Currently it only supports "
+ "using --epoch. If True, it would decode with the averaged model "
+ "over the epoch range from `epoch-avg` (excluded) to `epoch`."
+ "Actually only the models with epoch number of `epoch-avg` and "
+ "`epoch` are loaded for averaging. ",
+ )
+
+ parser.add_argument(
+ "--exp-dir",
+ type=str,
+ default="pruned_transducer_stateless6/exp",
+ help="The experiment dir",
+ )
+
+ parser.add_argument(
+ "--bpe-model",
+ type=str,
+ default="data/lang_bpe_500/bpe.model",
+ help="Path to the BPE model",
+ )
+
+ parser.add_argument(
+ "--decoding-method",
+ type=str,
+ default="greedy_search",
+ help="""Possible values are:
+ - greedy_search
+ - beam_search
+ - modified_beam_search
+ - fast_beam_search
+ """,
+ )
+
+ parser.add_argument(
+ "--beam-size",
+ type=int,
+ default=4,
+ help="""An integer indicating how many candidates we will keep for each
+ frame. Used only when --decoding-method is beam_search or
+ modified_beam_search.""",
+ )
+
+ parser.add_argument(
+ "--beam",
+ type=float,
+ default=4,
+ help="""A floating point value to calculate the cutoff score during beam
+ search (i.e., `cutoff = max-score - beam`), which is the same as the
+ `beam` in Kaldi.
+ Used only when --decoding-method is fast_beam_search""",
+ )
+
+ parser.add_argument(
+ "--max-contexts",
+ type=int,
+ default=4,
+ help="""Used only when --decoding-method is
+ fast_beam_search""",
+ )
+
+ parser.add_argument(
+ "--max-states",
+ type=int,
+ default=8,
+ help="""Used only when --decoding-method is
+ fast_beam_search""",
+ )
+
+ parser.add_argument(
+ "--context-size",
+ type=int,
+ default=2,
+ help="The context size in the decoder. 1 means bigram; "
+ "2 means tri-gram",
+ )
+ parser.add_argument(
+ "--max-sym-per-frame",
+ type=int,
+ default=1,
+ help="""Maximum number of symbols per frame.
+ Used only when --decoding_method is greedy_search""",
+ )
+
+ return parser
+
+
+def decode_one_batch(
+ params: AttributeDict,
+ model: nn.Module,
+ sp: spm.SentencePieceProcessor,
+ batch: dict,
+ decoding_graph: Optional[k2.Fsa] = None,
+) -> Dict[str, List[List[str]]]:
+ """Decode one batch and return the result in a dict. The dict has the
+ following format:
+
+ - key: It indicates the setting used for decoding. For example,
+ if greedy_search is used, it would be "greedy_search"
+ If beam search with a beam size of 7 is used, it would be
+ "beam_7"
+ - value: It contains the decoding result. `len(value)` equals the
+ batch size. `value[i]` is the decoding result for the i-th
+ utterance in the given batch.
+ Args:
+ params:
+ It's the return value of :func:`get_params`.
+ model:
+ The neural model.
+ sp:
+ The BPE model.
+ batch:
+ It is the return value from iterating
+ `lhotse.dataset.K2SpeechRecognitionDataset`. See its documentation
+ for the format of the `batch`.
+ decoding_graph:
+ The decoding graph. It can be either a `k2.trivial_graph` or an HLG.
+ Used only when --decoding_method is fast_beam_search.
+ Returns:
+ Return the decoding result. See above description for the format of
+ the returned dict.
+ """
+ device = next(model.parameters()).device
+ feature = batch["inputs"]
+ assert feature.ndim == 3
+
+ feature = feature.to(device)
+ # at entry, feature is (N, T, C)
+
+ supervisions = batch["supervisions"]
+ feature_lens = supervisions["num_frames"].to(device)
+
+ layer_results, encoder_out_lens = model.encoder(
+ x=feature, x_lens=feature_lens
+ )
+ encoder_out = layer_results[-1]
+ hyps = []
+
+ if params.decoding_method == "fast_beam_search":
+ hyp_tokens = fast_beam_search_one_best(
+ model=model,
+ decoding_graph=decoding_graph,
+ encoder_out=encoder_out,
+ encoder_out_lens=encoder_out_lens,
+ beam=params.beam,
+ max_contexts=params.max_contexts,
+ max_states=params.max_states,
+ )
+ for hyp in sp.decode(hyp_tokens):
+ hyps.append(hyp.split())
+ elif (
+ params.decoding_method == "greedy_search"
+ and params.max_sym_per_frame == 1
+ ):
+ hyp_tokens = greedy_search_batch(
+ model=model,
+ encoder_out=encoder_out,
+ encoder_out_lens=encoder_out_lens,
+ )
+ for hyp in sp.decode(hyp_tokens):
+ hyps.append(hyp.split())
+ elif params.decoding_method == "modified_beam_search":
+ hyp_tokens = modified_beam_search(
+ model=model,
+ encoder_out=encoder_out,
+ encoder_out_lens=encoder_out_lens,
+ beam=params.beam_size,
+ )
+ for hyp in sp.decode(hyp_tokens):
+ hyps.append(hyp.split())
+ else:
+ batch_size = encoder_out.size(0)
+
+ for i in range(batch_size):
+ # fmt: off
+ encoder_out_i = encoder_out[i:i + 1, :encoder_out_lens[i]]
+ # fmt: on
+ if params.decoding_method == "greedy_search":
+ hyp = greedy_search(
+ model=model,
+ encoder_out=encoder_out_i,
+ max_sym_per_frame=params.max_sym_per_frame,
+ )
+ elif params.decoding_method == "beam_search":
+ hyp = beam_search(
+ model=model,
+ encoder_out=encoder_out_i,
+ beam=params.beam_size,
+ )
+ else:
+ raise ValueError(
+ f"Unsupported decoding method: {params.decoding_method}"
+ )
+ hyps.append(sp.decode(hyp).split())
+
+ if params.decoding_method == "greedy_search":
+ return {"greedy_search": hyps}
+ elif params.decoding_method == "fast_beam_search":
+ return {
+ (
+ f"beam_{params.beam}_"
+ f"max_contexts_{params.max_contexts}_"
+ f"max_states_{params.max_states}"
+ ): hyps
+ }
+ else:
+ return {f"beam_size_{params.beam_size}": hyps}
+
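+# For reference (an illustrative example, not exhaustive): with
+# --decoding-method greedy_search and a batch of two utterances,
+# decode_one_batch may return
+# {"greedy_search": [["HELLO", "WORLD"], ["GOOD", "MORNING"]]}
+# i.e., one list of words per utterance.
+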
+
+def decode_dataset(
+ dl: torch.utils.data.DataLoader,
+ params: AttributeDict,
+ model: nn.Module,
+ sp: spm.SentencePieceProcessor,
+ decoding_graph: Optional[k2.Fsa] = None,
+) -> Dict[str, List[Tuple[List[str], List[str]]]]:
+ """Decode dataset.
+
+ Args:
+ dl:
+ PyTorch's dataloader containing the dataset to decode.
+ params:
+ It is returned by :func:`get_params`.
+ model:
+ The neural model.
+ sp:
+ The BPE model.
+ decoding_graph:
+ The decoding graph. It can be either a `k2.trivial_graph` or an HLG.
+ Used only when --decoding_method is fast_beam_search.
+ Returns:
+ Return a dict, whose key may be "greedy_search" if greedy search
+ is used, or it may be "beam_size_7" if a beam size of 7 is used.
+ Its value is a list of tuples. Each tuple contains two elements:
+ The first is the reference transcript, and the second is the
+ predicted result.
+ """
+ num_cuts = 0
+
+ try:
+ num_batches = len(dl)
+ except TypeError:
+ num_batches = "?"
+
+ if params.decoding_method == "greedy_search":
+ log_interval = 50
+ else:
+ log_interval = 10
+
+ results = defaultdict(list)
+ for batch_idx, batch in enumerate(dl):
+ texts = batch["supervisions"]["text"]
+
+ hyps_dict = decode_one_batch(
+ params=params,
+ model=model,
+ sp=sp,
+ decoding_graph=decoding_graph,
+ batch=batch,
+ )
+
+ for name, hyps in hyps_dict.items():
+ this_batch = []
+ assert len(hyps) == len(texts)
+ for hyp_words, ref_text in zip(hyps, texts):
+ ref_words = ref_text.split()
+ this_batch.append((ref_words, hyp_words))
+
+ results[name].extend(this_batch)
+
+ num_cuts += len(texts)
+
+ if batch_idx % log_interval == 0:
+ batch_str = f"{batch_idx}/{num_batches}"
+
+ logging.info(
+ f"batch {batch_str}, cuts processed until now is {num_cuts}"
+ )
+ return results
+
+
+def save_results(
+ params: AttributeDict,
+ test_set_name: str,
+ results_dict: Dict[str, List[Tuple[List[int], List[int]]]],
+):
+ test_set_wers = dict()
+ for key, results in results_dict.items():
+ recog_path = (
+ params.res_dir / f"recogs-{test_set_name}-{key}-{params.suffix}.txt"
+ )
+ store_transcripts(filename=recog_path, texts=results)
+ logging.info(f"The transcripts are stored in {recog_path}")
+
+ # The following prints out WERs, per-word error statistics and aligned
+ # ref/hyp pairs.
+ errs_filename = (
+ params.res_dir / f"errs-{test_set_name}-{key}-{params.suffix}.txt"
+ )
+ with open(errs_filename, "w") as f:
+ wer = write_error_stats(
+ f, f"{test_set_name}-{key}", results, enable_log=True
+ )
+ test_set_wers[key] = wer
+
+ logging.info("Wrote detailed error stats to {}".format(errs_filename))
+
+ test_set_wers = sorted(test_set_wers.items(), key=lambda x: x[1])
+ errs_info = (
+ params.res_dir
+ / f"wer-summary-{test_set_name}-{key}-{params.suffix}.txt"
+ )
+ with open(errs_info, "w") as f:
+ print("settings\tWER", file=f)
+ for key, val in test_set_wers:
+ print("{}\t{}".format(key, val), file=f)
+
+ s = "\nFor {}, WER of different settings are:\n".format(test_set_name)
+ note = "\tbest for {}".format(test_set_name)
+ for key, val in test_set_wers:
+ s += "{}\t{}{}\n".format(key, val, note)
+ note = ""
+ logging.info(s)
+
+
+@torch.no_grad()
+def main():
+ parser = get_parser()
+ LibriSpeechAsrDataModule.add_arguments(parser)
+ args = parser.parse_args()
+ args.exp_dir = Path(args.exp_dir)
+
+ params = get_params()
+ params.update(vars(args))
+
+ assert params.decoding_method in (
+ "greedy_search",
+ "beam_search",
+ "fast_beam_search",
+ "modified_beam_search",
+ )
+ params.res_dir = params.exp_dir / params.decoding_method
+
+ if params.iter > 0:
+ params.suffix = f"iter-{params.iter}-avg-{params.avg}"
+ else:
+ params.suffix = f"epoch-{params.epoch}-avg-{params.avg}"
+
+ if "fast_beam_search" in params.decoding_method:
+ params.suffix += f"-beam-{params.beam}"
+ params.suffix += f"-max-contexts-{params.max_contexts}"
+ params.suffix += f"-max-states-{params.max_states}"
+ elif "beam_search" in params.decoding_method:
+ params.suffix += (
+ f"-{params.decoding_method}-beam-size-{params.beam_size}"
+ )
+ else:
+ params.suffix += f"-context-{params.context_size}"
+ params.suffix += f"-max-sym-per-frame-{params.max_sym_per_frame}"
+
+ if params.use_averaged_model:
+ params.suffix += "-use-averaged-model"
+
+ setup_logger(f"{params.res_dir}/log-decode-{params.suffix}")
+ logging.info("Decoding started")
+
+ device = torch.device("cpu")
+ if torch.cuda.is_available():
+ device = torch.device("cuda", 0)
+
+ logging.info(f"Device: {device}")
+
+ sp = spm.SentencePieceProcessor()
+ sp.load(params.bpe_model)
+
+ # <blk> and <unk> are defined in local/train_bpe_model.py
+ params.blank_id = sp.piece_to_id("<blk>")
+ params.unk_id = sp.piece_to_id("<unk>")
+ params.vocab_size = sp.get_piece_size()
+
+ logging.info(params)
+
+ logging.info("About to create model")
+ model = get_transducer_model(params)
+
+ if not params.use_averaged_model:
+ if params.iter > 0:
+ filenames = find_checkpoints(
+ params.exp_dir, iteration=-params.iter
+ )[: params.avg]
+ if len(filenames) == 0:
+ raise ValueError(
+ f"No checkpoints found for"
+ f" --iter {params.iter}, --avg {params.avg}"
+ )
+ elif len(filenames) < params.avg:
+ raise ValueError(
+ f"Not enough checkpoints ({len(filenames)}) found for"
+ f" --iter {params.iter}, --avg {params.avg}"
+ )
+ logging.info(f"averaging {filenames}")
+ model.to(device)
+ model.load_state_dict(average_checkpoints(filenames, device=device))
+ elif params.avg == 1:
+ load_checkpoint(f"{params.exp_dir}/epoch-{params.epoch}.pt", model)
+ else:
+ start = params.epoch - params.avg + 1
+ filenames = []
+ for i in range(start, params.epoch + 1):
+ if i >= 1:
+ filenames.append(f"{params.exp_dir}/epoch-{i}.pt")
+ logging.info(f"averaging {filenames}")
+ model.to(device)
+ model.load_state_dict(average_checkpoints(filenames, device=device))
+ else:
+ if params.iter > 0:
+ filenames = find_checkpoints(
+ params.exp_dir, iteration=-params.iter
+ )[: params.avg + 1]
+ if len(filenames) == 0:
+ raise ValueError(
+ f"No checkpoints found for"
+ f" --iter {params.iter}, --avg {params.avg}"
+ )
+ elif len(filenames) < params.avg + 1:
+ raise ValueError(
+ f"Not enough checkpoints ({len(filenames)}) found for"
+ f" --iter {params.iter}, --avg {params.avg}"
+ )
+ filename_start = filenames[-1]
+ filename_end = filenames[0]
+ logging.info(
+ "Calculating the averaged model over iteration checkpoints"
+ f" from {filename_start} (excluded) to {filename_end}"
+ )
+ model.to(device)
+ model.load_state_dict(
+ average_checkpoints_with_averaged_model(
+ filename_start=filename_start,
+ filename_end=filename_end,
+ device=device,
+ )
+ )
+ else:
+ assert params.avg > 0, params.avg
+ start = params.epoch - params.avg
+ assert start >= 1, start
+ filename_start = f"{params.exp_dir}/epoch-{start}.pt"
+ filename_end = f"{params.exp_dir}/epoch-{params.epoch}.pt"
+ logging.info(
+ f"Calculating the averaged model over epoch range from "
+ f"{start} (excluded) to {params.epoch}"
+ )
+ model.to(device)
+ model.load_state_dict(
+ average_checkpoints_with_averaged_model(
+ filename_start=filename_start,
+ filename_end=filename_end,
+ device=device,
+ )
+ )
+
+ model.to(device)
+ model.eval()
+
+ if params.decoding_method == "fast_beam_search":
+ decoding_graph = k2.trivial_graph(params.vocab_size - 1, device=device)
+ else:
+ decoding_graph = None
+
+ num_param = sum([p.numel() for p in model.parameters()])
+ logging.info(f"Number of model parameters: {num_param}")
+
+ librispeech = LibriSpeechAsrDataModule(args)
+
+ test_clean_cuts = librispeech.test_clean_cuts()
+ test_other_cuts = librispeech.test_other_cuts()
+
+ test_clean_dl = librispeech.test_dataloaders(test_clean_cuts)
+ test_other_dl = librispeech.test_dataloaders(test_other_cuts)
+
+ test_sets = ["test-clean", "test-other"]
+ test_dl = [test_clean_dl, test_other_dl]
+
+ for test_set, test_dl in zip(test_sets, test_dl):
+ results_dict = decode_dataset(
+ dl=test_dl,
+ params=params,
+ model=model,
+ sp=sp,
+ decoding_graph=decoding_graph,
+ )
+
+ save_results(
+ params=params,
+ test_set_name=test_set,
+ results_dict=results_dict,
+ )
+
+ logging.info("Done!")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/egs/librispeech/ASR/pruned_transducer_stateless6/decoder.py b/egs/librispeech/ASR/pruned_transducer_stateless6/decoder.py
new file mode 120000
index 000000000..0793c5709
--- /dev/null
+++ b/egs/librispeech/ASR/pruned_transducer_stateless6/decoder.py
@@ -0,0 +1 @@
+../pruned_transducer_stateless2/decoder.py
\ No newline at end of file
diff --git a/egs/librispeech/ASR/pruned_transducer_stateless6/encoder_interface.py b/egs/librispeech/ASR/pruned_transducer_stateless6/encoder_interface.py
new file mode 120000
index 000000000..b9aa0ae08
--- /dev/null
+++ b/egs/librispeech/ASR/pruned_transducer_stateless6/encoder_interface.py
@@ -0,0 +1 @@
+../pruned_transducer_stateless2/encoder_interface.py
\ No newline at end of file
diff --git a/egs/librispeech/ASR/pruned_transducer_stateless6/export.py b/egs/librispeech/ASR/pruned_transducer_stateless6/export.py
new file mode 100755
index 000000000..cff9c7377
--- /dev/null
+++ b/egs/librispeech/ASR/pruned_transducer_stateless6/export.py
@@ -0,0 +1,217 @@
+#!/usr/bin/env python3
+#
+# Copyright 2021 Xiaomi Corporation (Author: Fangjun Kuang)
+#
+# See ../../../../LICENSE for clarification regarding multiple authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script converts several saved checkpoints
+# to a single one using model averaging.
+"""
+Usage:
+./pruned_transducer_stateless6/export.py \
+ --exp-dir ./pruned_transducer_stateless6/exp \
+ --bpe-model data/lang_bpe_500/bpe.model \
+ --epoch 20 \
+ --avg 10
+
+It will generate a file exp_dir/pretrained.pt
+
+To use the generated file with `pruned_transducer_stateless6/decode.py`,
+you can do:
+
+ cd /path/to/exp_dir
+ ln -s pretrained.pt epoch-9999.pt
+
+ cd /path/to/egs/librispeech/ASR
+ ./pruned_transducer_stateless6/decode.py \
+ --exp-dir ./pruned_transducer_stateless6/exp \
+ --epoch 9999 \
+ --avg 1 \
+ --max-duration 100 \
+ --bpe-model data/lang_bpe_500/bpe.model
+"""
+
+import argparse
+import logging
+from pathlib import Path
+
+import sentencepiece as spm
+import torch
+from train import get_params, get_transducer_model
+
+from icefall.checkpoint import (
+ average_checkpoints,
+ find_checkpoints,
+ load_checkpoint,
+)
+from icefall.utils import str2bool
+
+
+def get_parser():
+ parser = argparse.ArgumentParser(
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter
+ )
+
+ parser.add_argument(
+ "--epoch",
+ type=int,
+ default=28,
+ help="""It specifies the checkpoint to use for averaging.
+ Note: Epoch counts from 1.
+ You can specify --avg to use more checkpoints for model averaging.""",
+ )
+
+ parser.add_argument(
+ "--iter",
+ type=int,
+ default=0,
+ help="""If positive, --epoch is ignored and it
+ will use the checkpoint exp_dir/checkpoint-iter.pt.
+ You can specify --avg to use more checkpoints for model averaging.
+ """,
+ )
+
+ parser.add_argument(
+ "--avg",
+ type=int,
+ default=15,
+ help="Number of checkpoints to average. Automatically select "
+ "consecutive checkpoints before the checkpoint specified by "
+ "'--epoch' and '--iter'",
+ )
+
+ parser.add_argument(
+ "--exp-dir",
+ type=str,
+ default="pruned_transducer_stateless2/exp",
+ help="""It specifies the directory where all training related
+ files, e.g., checkpoints, log, etc, are saved
+ """,
+ )
+
+ parser.add_argument(
+ "--bpe-model",
+ type=str,
+ default="data/lang_bpe_500/bpe.model",
+ help="Path to the BPE model",
+ )
+
+ parser.add_argument(
+ "--jit",
+ type=str2bool,
+ default=False,
+ help="""True to save a model after applying torch.jit.script.
+ """,
+ )
+
+ parser.add_argument(
+ "--context-size",
+ type=int,
+ default=2,
+ help="The context size in the decoder. 1 means bigram; "
+ "2 means tri-gram",
+ )
+
+ return parser
+
+
+def main():
+ args = get_parser().parse_args()
+ args.exp_dir = Path(args.exp_dir)
+
+ params = get_params()
+ params.update(vars(args))
+
+ device = torch.device("cpu")
+ if torch.cuda.is_available():
+ device = torch.device("cuda", 0)
+
+ logging.info(f"device: {device}")
+
+ sp = spm.SentencePieceProcessor()
+ sp.load(params.bpe_model)
+
+ # <blk> is defined in local/train_bpe_model.py
+ params.blank_id = sp.piece_to_id("<blk>")
+ params.vocab_size = sp.get_piece_size()
+
+ logging.info(params)
+
+ logging.info("About to create model")
+ model = get_transducer_model(params)
+
+ model.to(device)
+
+ if params.iter > 0:
+ filenames = find_checkpoints(params.exp_dir, iteration=-params.iter)[
+ : params.avg
+ ]
+ if len(filenames) == 0:
+ raise ValueError(
+ f"No checkpoints found for"
+ f" --iter {params.iter}, --avg {params.avg}"
+ )
+ elif len(filenames) < params.avg:
+ raise ValueError(
+ f"Not enough checkpoints ({len(filenames)}) found for"
+ f" --iter {params.iter}, --avg {params.avg}"
+ )
+ logging.info(f"averaging {filenames}")
+ model.to(device)
+ model.load_state_dict(average_checkpoints(filenames, device=device))
+ elif params.avg == 1:
+ load_checkpoint(f"{params.exp_dir}/epoch-{params.epoch}.pt", model)
+ else:
+ start = params.epoch - params.avg + 1
+ filenames = []
+ for i in range(start, params.epoch + 1):
+ if i >= 1:
+ filenames.append(f"{params.exp_dir}/epoch-{i}.pt")
+ logging.info(f"averaging {filenames}")
+ model.to(device)
+ model.load_state_dict(average_checkpoints(filenames, device=device))
+
+ model.to("cpu")
+ model.eval()
+
+ if params.jit:
+ # We won't use the forward() method of the model in C++, so just ignore
+ # it here.
+ # Otherwise, one of its arguments is a ragged tensor and is not
+ # torch scriptable.
+ model.__class__.forward = torch.jit.ignore(model.__class__.forward)
+ logging.info("Using torch.jit.script")
+ model = torch.jit.script(model)
+ filename = params.exp_dir / "cpu_jit.pt"
+ model.save(str(filename))
+ logging.info(f"Saved to {filename}")
+ else:
+ logging.info("Not using torch.jit.script")
+ # Save it using a format so that it can be loaded
+ # by :func:`load_checkpoint`
+ filename = params.exp_dir / "pretrained.pt"
+ torch.save({"model": model.state_dict()}, str(filename))
+ logging.info(f"Saved to {filename}")
+
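+# To use the exported files later (a sketch; paths are illustrative):
+# cpu_jit.pt: model = torch.jit.load("cpu_jit.pt")
+# pretrained.pt: load via icefall's load_checkpoint after symlinking,
+# e.g. `ln -s pretrained.pt epoch-9999.pt` (see the usage docstring above).
+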
+
+if __name__ == "__main__":
+ formatter = (
+ "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s"
+ )
+
+ logging.basicConfig(format=formatter, level=logging.INFO)
+ main()
diff --git a/egs/librispeech/ASR/pruned_transducer_stateless6/extract_codebook_index.py b/egs/librispeech/ASR/pruned_transducer_stateless6/extract_codebook_index.py
new file mode 100755
index 000000000..c5c172ff2
--- /dev/null
+++ b/egs/librispeech/ASR/pruned_transducer_stateless6/extract_codebook_index.py
@@ -0,0 +1,80 @@
+#!/usr/bin/env python3
+# Copyright 2022 Xiaomi Corporation (Author: Liyong Guo)
+#
+# See ../../../../LICENSE for clarification regarding multiple authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import argparse
+import os
+from pathlib import Path
+
+import torch
+from vq_utils import CodebookIndexExtractor
+from asr_datamodule import LibriSpeechAsrDataModule
+from hubert_xlarge import HubertXlargeFineTuned
+from icefall.utils import AttributeDict
+
+
+def get_parser():
+ parser = argparse.ArgumentParser(
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter
+ )
+ parser.add_argument(
+ "--exp-dir",
+ type=Path,
+ default="pruned_transducer_stateless6/exp/",
+ help="The experiment dir",
+ )
+
+ return parser
+
+
+def get_world_size():
+ warn_message = (
+ "It's better to use a GPU to extract codebook indexes. "
+ "Please set it with commands like: export CUDA_VISIBLE_DEVICES=0,1,2,3"
+ )
+ assert (
+ torch.cuda.is_available() and "CUDA_VISIBLE_DEVICES" in os.environ
+ ), warn_message
+ world_size = len(os.environ["CUDA_VISIBLE_DEVICES"].split(","))
+ assert world_size > 0, warn_message
+ return world_size
+
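+# For example (illustration only): with
+# export CUDA_VISIBLE_DEVICES=0,1,2,3
+# get_world_size() returns 4, the number of visible GPUs.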
+
+def main():
+ world_size = get_world_size()
+ parser = get_parser()
+ LibriSpeechAsrDataModule.add_arguments(parser)
+ HubertXlargeFineTuned.add_arguments(parser)
+ CodebookIndexExtractor.add_arguments(parser)
+
+ args = parser.parse_args()
+ params = AttributeDict()
+ params.update(vars(args))
+
+ # reset some parameters needed by hubert.
+ params.update(HubertXlargeFineTuned.get_params())
+ params.device = torch.device("cuda", 0)
+ params.world_size = world_size
+
+ extractor = CodebookIndexExtractor(params=params)
+ extractor.extract_and_save_embedding()
+ extractor.train_quantizer()
+ extractor.extract_codebook_indexes()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/egs/librispeech/ASR/pruned_transducer_stateless6/hubert_decode.py b/egs/librispeech/ASR/pruned_transducer_stateless6/hubert_decode.py
new file mode 100755
index 000000000..10b0e5edc
--- /dev/null
+++ b/egs/librispeech/ASR/pruned_transducer_stateless6/hubert_decode.py
@@ -0,0 +1,205 @@
+#!/usr/bin/env python3
+# Copyright 2022 Xiaomi Corporation (Author: Liyong Guo)
+#
+# See ../../../../LICENSE for clarification regarding multiple authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import argparse
+import logging
+from collections import defaultdict
+from pathlib import Path
+from typing import Dict, List, Tuple
+
+import torch
+
+from asr_datamodule import LibriSpeechAsrDataModule
+from hubert_xlarge import HubertXlargeFineTuned
+
+from icefall.utils import (
+ AttributeDict,
+ setup_logger,
+ store_transcripts,
+ write_error_stats,
+)
+
+
+def get_parser():
+ parser = argparse.ArgumentParser(
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter
+ )
+
+ parser.add_argument(
+ "--exp-dir",
+ type=Path,
+ default="pruned_transducer_stateless6/exp/",
+ help="The experiment dir",
+ )
+
+ return parser
+
+
+def decode_dataset(
+ dl: torch.utils.data.DataLoader,
+ hubert_model: HubertXlargeFineTuned,
+ params: AttributeDict,
+) -> Dict[str, List[Tuple[List[str], List[str]]]]:
+ """Decode dataset.
+
+ Args:
+ dl:
+ PyTorch's dataloader containing the dataset to decode.
+ hubert_model:
+ The wrapped hubert model used for decoding.
+ params:
+ It is returned by :func:`get_params`.
+
+ Returns:
+ Return a dict, whose key is the decoding method "ctc_greedy_search".
+ Its value is a list of tuples.
+ Each tuple contains two elements:
+ The first is the reference transcript, and the second is the
+ predicted result.
+ """
+
+ num_cuts = 0
+
+ try:
+ num_batches = len(dl)
+ except TypeError:
+ num_batches = "?"
+
+ results = defaultdict(list)
+ for batch_idx, batch in enumerate(dl):
+
+ hyps = hubert_model.ctc_greedy_search(batch)
+
+ texts = batch["supervisions"]["text"]
+ assert len(hyps) == len(texts)
+ this_batch = []
+
+ for hyp_text, ref_text in zip(hyps, texts):
+ ref_words = ref_text.split()
+ hyp_words = hyp_text.split()
+ this_batch.append((ref_words, hyp_words))
+
+ results["ctc_greedy_search"].extend(this_batch)
+
+ num_cuts += len(texts)
+
+ if batch_idx % 20 == 0:
+ batch_str = f"{batch_idx}/{num_batches}"
+
+ logging.info(
+ f"batch {batch_str}, cuts processed until now is {num_cuts}"
+ )
+ return results
+
+
+def save_results(
+ params: AttributeDict,
+ test_set_name: str,
+ results_dict: Dict[str, List[Tuple[List[int], List[int]]]],
+):
+ test_set_wers = dict()
+ for key, results in results_dict.items():
+ recog_path = params.res_dir / f"recogs-{test_set_name}-{key}.txt"
+ store_transcripts(filename=recog_path, texts=results)
+
+ # The following prints out WERs, per-word error statistics and aligned
+ # ref/hyp pairs.
+ errs_filename = params.res_dir / f"errs-{test_set_name}-{key}.txt"
+ with open(errs_filename, "w") as f:
+ wer = write_error_stats(
+ f, f"{test_set_name}-{key}", results, enable_log=True
+ )
+ test_set_wers[key] = wer
+
+ logging.info(
+ "Wrote detailed error stats to {}".format(errs_filename)
+ )
+
+ test_set_wers = sorted(test_set_wers.items(), key=lambda x: x[1])
+ errs_info = params.res_dir / f"wer-summary-{test_set_name}.txt"
+ with open(errs_info, "w") as f:
+ print("settings\tWER", file=f)
+ for key, val in test_set_wers:
+ print("{}\t{}".format(key, val), file=f)
+
+ s = "\nFor {}, WER of different settings are:\n".format(test_set_name)
+ note = "\tbest for {}".format(test_set_name)
+ for key, val in test_set_wers:
+ s += "{}\t{}{}\n".format(key, val, note)
+ note = ""
+ logging.info(s)
+
+
+@torch.no_grad()
+def main():
+ parser = get_parser()
+ LibriSpeechAsrDataModule.add_arguments(parser)
+ HubertXlargeFineTuned.add_arguments(parser)
+ args = parser.parse_args()
+
+ params = AttributeDict()
+ params.update(vars(args))
+ # reset some parameters needed by hubert.
+ params.update(HubertXlargeFineTuned.get_params())
+
+ params.res_dir = (
+ params.exp_dir / f"ctc_greedy_search-{params.teacher_model_id}"
+ )
+
+ setup_logger(f"{params.res_dir}/log/log-ctc_greedy_search")
+ logging.info("Decoding started")
+ logging.info(params)
+
+ device = torch.device("cpu")
+ if torch.cuda.is_available():
+ device = torch.device("cuda", 0)
+
+ logging.info(f"device: {device}")
+ params.device = device
+
+ hubert_model = HubertXlargeFineTuned(params)
+
+ librispeech = LibriSpeechAsrDataModule(params)
+
+ test_clean_cuts = librispeech.test_clean_cuts()
+ test_other_cuts = librispeech.test_other_cuts()
+
+ test_clean_dl = librispeech.test_dataloaders(test_clean_cuts)
+ test_other_dl = librispeech.test_dataloaders(test_other_cuts)
+
+ test_sets = ["test-clean", "test-other"]
+ test_dl = [test_clean_dl, test_other_dl]
+
+ for test_set, test_dl in zip(test_sets, test_dl):
+ results_dict = decode_dataset(
+ dl=test_dl,
+ hubert_model=hubert_model,
+ params=params,
+ )
+
+ save_results(
+ params=params, test_set_name=test_set, results_dict=results_dict
+ )
+
+ logging.info("Done!")
+
+
+torch.set_num_threads(1)
+torch.set_num_interop_threads(1)
+
+if __name__ == "__main__":
+ main()
diff --git a/egs/librispeech/ASR/pruned_transducer_stateless6/hubert_xlarge.py b/egs/librispeech/ASR/pruned_transducer_stateless6/hubert_xlarge.py
new file mode 100644
index 000000000..55ce7b00d
--- /dev/null
+++ b/egs/librispeech/ASR/pruned_transducer_stateless6/hubert_xlarge.py
@@ -0,0 +1,220 @@
+#!/usr/bin/env python3
+# Copyright 2022 Xiaomi Corporation (Author: Liyong Guo)
+#
+# See ../../../../LICENSE for clarification regarding multiple authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import argparse
+import logging
+from pathlib import Path
+from typing import Dict, List, Tuple
+
+import torch
+from fairseq import (
+ checkpoint_utils,
+ tasks,
+ utils,
+)
+from fairseq.data.data_utils import post_process
+from omegaconf import OmegaConf
+
+from icefall.utils import AttributeDict
+
+
+def _load_hubert_model(params: AttributeDict):
+ """
+ Load the hubert model.
+
+ The model loaded is specified by params.hubert_model_dir
+ and params.teacher_model_id.
+
+ The returned model is the hubert model itself, while the processor
+ maps the model's output to human-readable transcripts.
+ """
+ cfg_task = OmegaConf.create(
+ {
+ "_name": "hubert_pretraining",
+ "single_target": True,
+ "fine_tuning": True,
+ "data": str(params.hubert_model_dir),
+ }
+ )
+ model_path = Path(params.hubert_model_dir) / (
+ params.teacher_model_id + ".pt"
+ )
+ task = tasks.setup_task(cfg_task)
+ processor = task.target_dictionary
+ models, saved_cfg = checkpoint_utils.load_model_ensemble(
+ utils.split_paths(str(model_path), separator="\\"),
+ arg_overrides={},
+ strict=True,
+ suffix="",
+ num_shards=1,
+ )
+ model = models[0]
+ model.to(params.device)
+ model.eval()
+
+ num_param = sum([p.numel() for p in model.parameters()])
+ logging.info(f"Number of model parameters: {num_param}")
+
+ return model, processor
+
+
+class HubertXlargeFineTuned:
+ """
+ A wrapper of hubert extra large fine-tuned model.
+
+ A teacher model is responsible for:
+ 1. loading the teacher model;
+ 2. extracting embeddings to train the quantizer;
+ 3. extracting codebook indexes;
+ 4. verifying its performance with the ctc_greedy_search method.
+ """
+
+ def __init__(self, params: AttributeDict):
+ self.model, self.processor = _load_hubert_model(params)
+ self.w2v_model = self.model.w2v_encoder.w2v_model
+ self.params = params
+
+ @staticmethod
+ def get_params() -> AttributeDict:
+ """Return a dict containing parameters defined in other modules.
+
+ Their default values conflict with hubert's requirements, so they are reset as follows.
+ """
+ params = AttributeDict(
+ {
+ # parameters defined in asr_datamodule.py
+ "input_strategy": "AudioSamples",
+ "enable_musan": False,
+ "enable_spec_aug": False,
+ "return_cuts": True,
+ "drop_last": False,
+ # parameters used by quantizer
+ "embedding_dim": 1280,
+ }
+ )
+ return params
+
+ @classmethod
+ def add_arguments(cls, parser: argparse.ArgumentParser):
+ # Options about model loading.
+ parser.add_argument(
+ "--hubert-model-dir",
+ type=Path,
+ default="./pruned_transducer_stateless6/exp/hubert_models/",
+ help="path to save downloaded hubert models.",
+ )
+
+ parser.add_argument(
+ "--teacher-model-id",
+ type=str,
+ default="hubert_xtralarge_ll60k_finetune_ls960",
+ help="""could be one of:
+ [
+ "hubert_xtralarge_ll60k_finetune_ls960", # fine-tuned model.
+ "hubert_xtralarge_ll60k.pt", # pretrained model without fintuing.
+ ]""",
+ )
+ parser.add_argument(
+ "--total-layers",
+ type=int,
+ default=48,
+ )
+
+ # Modified from HubertModel.forward to extract all middle layers output
+ def extract_layers_result(
+ self,
+ batch: Dict,
+ ) -> List[torch.Tensor]:
+ """
+ Extract activations from all layers.
+ """
+ features = batch["inputs"]
+
+ # corresponds to task.normalize in fairseq
+ features = torch.nn.functional.layer_norm(features, features.shape)
+
+ supervisions = batch["supervisions"]
+ num_samples = supervisions["num_samples"]
+ B, T = features.shape
+ padding_mask = torch.arange(0, T).expand(B, T) > num_samples.reshape(
+ [-1, 1]
+ )
+
+ padding_mask = padding_mask.to(self.params.device)
+ features = features.to(self.params.device)
+
+ features = self.w2v_model.forward_features(features)
+
+ features = features.transpose(1, 2)
+ features = self.w2v_model.layer_norm(features)
+
+ padding_mask = self.w2v_model.forward_padding_mask(
+ features, padding_mask
+ )
+
+ if self.w2v_model.post_extract_proj is not None:
+ features = self.w2v_model.post_extract_proj(features)
+
+ _, layer_results = self.w2v_model.encoder(
+ features,
+ padding_mask=padding_mask,
+ )
+ return layer_results
+
+ def extract_embedding(self, batch) -> Tuple[torch.tensor, List[int]]:
+ """
+ Extract embeddings specified by self.params.embedding_layer.
+
+ These embeddings could be used to train quantizer
+ or to extract codebook indexes.
+
+ The returned List[int] contains the valid length of each embedding.
+ We only want to store codebook indexes related to
+ these valid embeddings.
+ """
+ supervisions = batch["supervisions"]
+ cut_list = supervisions["cut"]
+ assert all(c.start == 0 for c in cut_list)
+ layer_results = self.extract_layers_result(batch)
+ embeddings = layer_results[self.params.embedding_layer - 1][0]
+ encoder_embedding = embeddings.transpose(0, 1) # N, T, C
+ N = encoder_embedding.shape[0]
+ assert len(cut_list) == N
+ # 320 is from: 16,000 / 50 = sample_rate / hubert output frame rate
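+ # e.g. (illustration): a 3-second 16 kHz cut has 48,000 samples,
+ # so it yields 48,000 // 320 = 150 teacher frames.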
+ num_frames = (supervisions["num_samples"] // 320).tolist()
+ return encoder_embedding, num_frames
+
+ def ctc_greedy_search(self, batch):
+ """
+ Mainly used to verify that the hubert model is used correctly.
+ """
+ layer_results = self.extract_layers_result(batch=batch)
+ encoder_out = self.w2v_model.encoder.layer_norm(
+ layer_results[self.params.total_layers - 1][0]
+ )
+ encoder_out = self.model.w2v_encoder.proj(encoder_out.transpose(0, 1))
+
+ toks = encoder_out.argmax(dim=-1)
+ blank = 0
+ toks = [tok.unique_consecutive() for tok in toks]
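+ # e.g. (illustration): token ids [1, 1, 0, 2, 2] collapse to [1, 0, 2]
+ # here, and removing the blank (0) below yields [1, 2].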
+ hyps = [
+ self.processor.string(tok[tok != blank].int().cpu()) for tok in toks
+ ]
+ hyps = [post_process(hyp, "letter") for hyp in hyps]
+
+ return hyps
diff --git a/egs/librispeech/ASR/pruned_transducer_stateless6/joiner.py b/egs/librispeech/ASR/pruned_transducer_stateless6/joiner.py
new file mode 120000
index 000000000..815fd4bb6
--- /dev/null
+++ b/egs/librispeech/ASR/pruned_transducer_stateless6/joiner.py
@@ -0,0 +1 @@
+../pruned_transducer_stateless2/joiner.py
\ No newline at end of file
diff --git a/egs/librispeech/ASR/pruned_transducer_stateless6/model.py b/egs/librispeech/ASR/pruned_transducer_stateless6/model.py
new file mode 100644
index 000000000..66bb33e8d
--- /dev/null
+++ b/egs/librispeech/ASR/pruned_transducer_stateless6/model.py
@@ -0,0 +1,249 @@
+# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang, Wei Kang)
+#
+# See ../../../../LICENSE for clarification regarding multiple authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import k2
+import torch
+import torch.nn as nn
+from encoder_interface import EncoderInterface
+from scaling import ScaledLinear
+
+from icefall.utils import add_sos
+
+from quantization.prediction import JointCodebookLoss
+
+
+class Transducer(nn.Module):
+ """It implements https://arxiv.org/pdf/1211.3711.pdf
+ "Sequence Transduction with Recurrent Neural Networks"
+ """
+
+ def __init__(
+ self,
+ encoder: EncoderInterface,
+ decoder: nn.Module,
+ joiner: nn.Module,
+ encoder_dim: int,
+ decoder_dim: int,
+ joiner_dim: int,
+ vocab_size: int,
+ num_codebooks: int = 0,
+ ):
+ """
+ Args:
+ encoder:
+ It is the transcription network in the paper. It accepts
+ two inputs: `x` of (N, T, encoder_dim) and `x_lens` of shape (N,).
+ It returns two tensors: `logits` of shape (N, T, encoder_dim) and
+ `logit_lens` of shape (N,).
+ decoder:
+ It is the prediction network in the paper. Its input shape
+ is (N, U) and its output shape is (N, U, decoder_dim).
+ It should contain one attribute: `blank_id`.
+ joiner:
+ It has two inputs with shapes: (N, T, encoder_dim) and
+ (N, U, decoder_dim).
+ Its output shape is (N, T, U, vocab_size). Note that its output
+ contains unnormalized probs, i.e., not processed by log-softmax.
+ num_codebooks:
+ Used by distillation loss.
+ """
+ super().__init__()
+ assert isinstance(encoder, EncoderInterface), type(encoder)
+ assert hasattr(decoder, "blank_id")
+
+ self.encoder = encoder
+ self.decoder = decoder
+ self.joiner = joiner
+
+ self.simple_am_proj = ScaledLinear(
+ encoder_dim, vocab_size, initial_speed=0.5
+ )
+ self.simple_lm_proj = ScaledLinear(decoder_dim, vocab_size)
+ if num_codebooks > 0:
+ self.codebook_loss_net = JointCodebookLoss(
+ predictor_channels=encoder_dim, num_codebooks=num_codebooks
+ )
+
+ def forward(
+ self,
+ x: torch.Tensor,
+ x_lens: torch.Tensor,
+ y: k2.RaggedTensor,
+ prune_range: int = 5,
+ am_scale: float = 0.0,
+ lm_scale: float = 0.0,
+ warmup: float = 1.0,
+ codebook_indexes: torch.Tensor = None,
+ ) -> torch.Tensor:
+ """
+ Args:
+ x:
+ A 3-D tensor of shape (N, T, C).
+ x_lens:
+ A 1-D tensor of shape (N,). It contains the number of frames in `x`
+ before padding.
+ y:
+ A ragged tensor with 2 axes [utt][label]. It contains labels of each
+ utterance.
+ prune_range:
+ The prune range for rnnt loss; it means how many symbols (context)
+ we consider for each frame when computing the loss.
+ am_scale:
+ The scale to smooth the loss with am (output of encoder network)
+ part
+ lm_scale:
+ The scale to smooth the loss with lm (output of predictor network)
+ part
+ warmup:
+ A value warmup >= 0 that determines which modules are active; for
+ warmup > 1 the model is fully warmed up and all modules are active.
+ codebook_indexes:
+ codebook_indexes extracted from a teacher model.
+ Returns:
+ Return the transducer loss.
+
+ Note:
+ Regarding am_scale & lm_scale, it will make the loss-function one of
+ the form:
+ lm_scale * lm_probs + am_scale * am_probs +
+ (1-lm_scale-am_scale) * combined_probs
+ """
+ assert x.ndim == 3, x.shape
+ assert x_lens.ndim == 1, x_lens.shape
+ assert y.num_axes == 2, y.num_axes
+
+ assert x.size(0) == x_lens.size(0) == y.dim0
+
+ layer_results, x_lens = self.encoder(x, x_lens, warmup=warmup)
+ encoder_out = layer_results[-1]
+ middle_layer_output = layer_results[0]
+ if self.training and codebook_indexes is not None:
+ assert hasattr(self, "codebook_loss_net")
+ if codebook_indexes.shape[1] != middle_layer_output.shape[1]:
+ codebook_indexes = self.concat_successive_codebook_indexes(
+ middle_layer_output, codebook_indexes
+ )
+ codebook_loss = self.codebook_loss_net(
+ middle_layer_output, codebook_indexes
+ )
+ else:
+ # when codebook indexes are not available.
+ codebook_loss = None
+
+ assert torch.all(x_lens > 0)
+
+ # Now for the decoder, i.e., the prediction network
+ row_splits = y.shape.row_splits(1)
+ y_lens = row_splits[1:] - row_splits[:-1]
+
+ blank_id = self.decoder.blank_id
+ sos_y = add_sos(y, sos_id=blank_id)
+
+ # sos_y_padded: [B, S + 1], start with SOS.
+ sos_y_padded = sos_y.pad(mode="constant", padding_value=blank_id)
+
+ # decoder_out: [B, S + 1, decoder_dim]
+ decoder_out = self.decoder(sos_y_padded)
+
+ # Note: y does not start with SOS
+ # y_padded : [B, S]
+ y_padded = y.pad(mode="constant", padding_value=0)
+
+ y_padded = y_padded.to(torch.int64)
+ boundary = torch.zeros(
+ (x.size(0), 4), dtype=torch.int64, device=x.device
+ )
+ boundary[:, 2] = y_lens
+ boundary[:, 3] = x_lens
+
+ lm = self.simple_lm_proj(decoder_out)
+ am = self.simple_am_proj(encoder_out)
+
+ with torch.cuda.amp.autocast(enabled=False):
+ simple_loss, (px_grad, py_grad) = k2.rnnt_loss_smoothed(
+ lm=lm.float(),
+ am=am.float(),
+ symbols=y_padded,
+ termination_symbol=blank_id,
+ lm_only_scale=lm_scale,
+ am_only_scale=am_scale,
+ boundary=boundary,
+ reduction="sum",
+ return_grad=True,
+ )
+
+ # ranges : [B, T, prune_range]
+ ranges = k2.get_rnnt_prune_ranges(
+ px_grad=px_grad,
+ py_grad=py_grad,
+ boundary=boundary,
+ s_range=prune_range,
+ )
+
+ # am_pruned : [B, T, prune_range, encoder_dim]
+ # lm_pruned : [B, T, prune_range, decoder_dim]
+ am_pruned, lm_pruned = k2.do_rnnt_pruning(
+ am=self.joiner.encoder_proj(encoder_out),
+ lm=self.joiner.decoder_proj(decoder_out),
+ ranges=ranges,
+ )
+
+ # logits : [B, T, prune_range, vocab_size]
+
+ # project_input=False since we applied the encoder's and decoder's input
+ # projections prior to do_rnnt_pruning (this is an optimization for speed).
+ logits = self.joiner(am_pruned, lm_pruned, project_input=False)
+
+ with torch.cuda.amp.autocast(enabled=False):
+ pruned_loss = k2.rnnt_loss_pruned(
+ logits=logits.float(),
+ symbols=y_padded,
+ ranges=ranges,
+ termination_symbol=blank_id,
+ boundary=boundary,
+ reduction="sum",
+ )
+
+ return (simple_loss, pruned_loss, codebook_loss)
+
+ @staticmethod
+ def concat_successive_codebook_indexes(
+ middle_layer_output, codebook_indexes
+ ):
+ # The output rate of hubert is 50 frames per second,
+ # while that of the current encoder is 25.
+ # The following code handles two issues:
+ # 1.
+ # Roughly speaking, to generate one extra output frame,
+ # hubert needs two extra input frames,
+ # while the current encoder needs four extra input frames.
+ # If only three extra frames are provided,
+ # hubert will generate another frame while the current encoder does nothing.
+ # 2.
+ # The codebook loss is a frame-wise loss. To enable the 25-frame student
+ # output to learn from the 50-frame teacher output, every two successive
+ # frames of the teacher output are concatenated together.
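+ # A toy example (illustration only): teacher codebook_indexes of shape
+ # (N=1, T=100, C=16) at 50 frames/s with a student middle_layer_output
+ # of 50 frames at 25 frames/s gives t_expected = 50; the first 100
+ # teacher frames are kept and reshaped to (1, 50, 32), pairing teacher
+ # frames (2t, 2t+1) with student frame t.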
+ t_expected = middle_layer_output.shape[1]
+ N, T, C = codebook_indexes.shape
+
+ # Handling issue 1.
+ if T >= t_expected * 2:
+ codebook_indexes = codebook_indexes[:, : t_expected * 2, :]
+ # Handling issue 2.
+ codebook_indexes = codebook_indexes.reshape(N, t_expected, C * 2)
+ assert middle_layer_output.shape[1] == codebook_indexes.shape[1]
+ return codebook_indexes
diff --git a/egs/librispeech/ASR/pruned_transducer_stateless6/optim.py b/egs/librispeech/ASR/pruned_transducer_stateless6/optim.py
new file mode 120000
index 000000000..e2deb4492
--- /dev/null
+++ b/egs/librispeech/ASR/pruned_transducer_stateless6/optim.py
@@ -0,0 +1 @@
+../pruned_transducer_stateless2/optim.py
\ No newline at end of file
diff --git a/egs/librispeech/ASR/pruned_transducer_stateless6/scaling.py b/egs/librispeech/ASR/pruned_transducer_stateless6/scaling.py
new file mode 120000
index 000000000..09d802cc4
--- /dev/null
+++ b/egs/librispeech/ASR/pruned_transducer_stateless6/scaling.py
@@ -0,0 +1 @@
+../pruned_transducer_stateless2/scaling.py
\ No newline at end of file
diff --git a/egs/librispeech/ASR/pruned_transducer_stateless6/test_model.py b/egs/librispeech/ASR/pruned_transducer_stateless6/test_model.py
new file mode 100755
index 000000000..9bd75ba21
--- /dev/null
+++ b/egs/librispeech/ASR/pruned_transducer_stateless6/test_model.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python3
+# Copyright 2022 Xiaomi Corp. (authors: Fangjun Kuang)
+#
+# See ../../../../LICENSE for clarification regarding multiple authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""
+To run this file, do:
+
+ cd icefall/egs/librispeech/ASR
+ python ./pruned_transducer_stateless6/test_model.py
+"""
+
+import torch
+from train import get_params, get_transducer_model
+
+
+def test_model():
+ params = get_params()
+ params.vocab_size = 500
+ params.blank_id = 0
+ params.context_size = 2
+ params.unk_id = 2
+ params.enable_distiallation = False
+
+ model = get_transducer_model(params)
+
+ num_param = sum([p.numel() for p in model.parameters()])
+ print(f"Number of model parameters: {num_param}")
+ model.__class__.forward = torch.jit.ignore(model.__class__.forward)
+ torch.jit.script(model)
+
+
+def main():
+ test_model()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/egs/librispeech/ASR/pruned_transducer_stateless6/train.py b/egs/librispeech/ASR/pruned_transducer_stateless6/train.py
new file mode 100755
index 000000000..315c01c8e
--- /dev/null
+++ b/egs/librispeech/ASR/pruned_transducer_stateless6/train.py
@@ -0,0 +1,1106 @@
+#!/usr/bin/env python3
+# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang,
+# Wei Kang,
+# Mingshuang Luo,
+# Zengwei Yao)
+#
+# See ../../../../LICENSE for clarification regarding multiple authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Usage:
+
+export CUDA_VISIBLE_DEVICES="0,1,2,3"
+
+./pruned_transducer_stateless6/train.py \
+ --world-size 4 \
+ --num-epochs 30 \
+ --start-epoch 1 \
+ --exp-dir pruned_transducer_stateless6/exp \
+ --full-libri 1 \
+ --max-duration 300
+
+# For mixed precision training:
+
+./pruned_transducer_stateless6/train.py \
+ --world-size 4 \
+ --num-epochs 30 \
+ --start-epoch 1 \
+ --use-fp16 1 \
+ --exp-dir pruned_transducer_stateless6/exp \
+ --full-libri 1 \
+ --max-duration 550
+
+# For distillation with codebook_indexes:
+
+./pruned_transducer_stateless6/train.py \
+ --manifest-dir ./data/vq_fbank \
+ --world-size 4 \
+ --num-epochs 30 \
+ --start-epoch 1 \
+ --exp-dir pruned_transducer_stateless6/exp \
+ --full-libri 0 \
+ --max-duration 300
+
+"""
+
+
+import argparse
+import copy
+import logging
+import warnings
+from pathlib import Path
+from shutil import copyfile
+from typing import Any, Dict, Optional, Tuple, Union
+
+import k2
+import optim
+import sentencepiece as spm
+import torch
+import torch.multiprocessing as mp
+import torch.nn as nn
+from asr_datamodule import LibriSpeechAsrDataModule
+from conformer import Conformer
+from decoder import Decoder
+from joiner import Joiner
+from lhotse.cut import Cut, MonoCut
+from lhotse.dataset.sampling.base import CutSampler
+from lhotse.utils import fix_random_seed
+from lhotse.dataset.collation import collate_custom_field
+from model import Transducer
+from optim import Eden, Eve
+from torch import Tensor
+from torch.cuda.amp import GradScaler
+from torch.nn.parallel import DistributedDataParallel as DDP
+from torch.utils.tensorboard import SummaryWriter
+
+from icefall import diagnostics
+from icefall.checkpoint import load_checkpoint, remove_checkpoints
+from icefall.checkpoint import save_checkpoint as save_checkpoint_impl
+from icefall.checkpoint import (
+ save_checkpoint_with_global_batch_idx,
+ update_averaged_model,
+)
+from icefall.dist import cleanup_dist, setup_dist
+from icefall.env import get_env_info
+from icefall.utils import AttributeDict, MetricsTracker, setup_logger, str2bool
+
+LRSchedulerType = Union[
+ torch.optim.lr_scheduler._LRScheduler, optim.LRScheduler
+]
+
+
+def get_parser():
+ parser = argparse.ArgumentParser(
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter
+ )
+
+ parser.add_argument(
+ "--world-size",
+ type=int,
+ default=1,
+ help="Number of GPUs for DDP training.",
+ )
+
+ parser.add_argument(
+ "--master-port",
+ type=int,
+ default=12354,
+ help="Master port to use for DDP training.",
+ )
+
+ parser.add_argument(
+ "--tensorboard",
+ type=str2bool,
+ default=True,
+ help="Should various information be logged in tensorboard.",
+ )
+
+ parser.add_argument(
+ "--num-epochs",
+ type=int,
+ default=30,
+ help="Number of epochs to train.",
+ )
+
+ parser.add_argument(
+ "--start-epoch",
+ type=int,
+ default=1,
+ help="""Resume training from this epoch. It should be positive.
+ If larger than 1, it will load checkpoint from
+ exp-dir/epoch-{start_epoch-1}.pt
+ """,
+ )
+
+ parser.add_argument(
+ "--start-batch",
+ type=int,
+ default=0,
+ help="""If positive, --start-epoch is ignored and
+ it loads the checkpoint from exp-dir/checkpoint-{start_batch}.pt
+ """,
+ )
+
+ parser.add_argument(
+ "--exp-dir",
+ type=str,
+ default="pruned_transducer_stateless6/exp",
+ help="""The experiment dir.
+ It specifies the directory where all training related
+ files, e.g., checkpoints, log, etc, are saved
+ """,
+ )
+
+ parser.add_argument(
+ "--bpe-model",
+ type=str,
+ default="data/lang_bpe_500/bpe.model",
+ help="Path to the BPE model",
+ )
+
+ parser.add_argument(
+ "--initial-lr",
+ type=float,
+ default=0.003,
+ help="""The initial learning rate. This value should not need to be
+ changed.""",
+ )
+
+ parser.add_argument(
+ "--lr-batches",
+ type=float,
+ default=5000,
+ help="""Number of steps that affects how rapidly the learning rate decreases.
+ We suggest not to change this.""",
+ )
+
+ parser.add_argument(
+ "--lr-epochs",
+ type=float,
+ default=6,
+ help="""Number of epochs that affects how rapidly the learning rate decreases.
+ """,
+ )
+
+ parser.add_argument(
+ "--context-size",
+ type=int,
+ default=2,
+ help="The context size in the decoder. 1 means bigram; "
+ "2 means tri-gram",
+ )
+
+ parser.add_argument(
+ "--prune-range",
+ type=int,
+ default=5,
+ help="The prune range for rnnt loss, it means how many symbols(context)"
+ "we are using to compute the loss",
+ )
+
+ parser.add_argument(
+ "--lm-scale",
+ type=float,
+ default=0.25,
+ help="The scale to smooth the loss with lm "
+ "(output of prediction network) part.",
+ )
+
+ parser.add_argument(
+ "--am-scale",
+ type=float,
+ default=0.0,
+ help="The scale to smooth the loss with am (output of encoder network)"
+ "part.",
+ )
+
+ parser.add_argument(
+ "--simple-loss-scale",
+ type=float,
+ default=0.5,
+ help="To get pruning ranges, we will calculate a simple version"
+ "loss(joiner is just addition), this simple loss also uses for"
+ "training (as a regularization item). We will scale the simple loss"
+ "with this parameter before adding to the final loss.",
+ )
+
+ parser.add_argument(
+ "--codebook-loss-scale",
+ type=float,
+ default=0.1,
+ help="The scale of codebook loss.",
+ )
+
+ parser.add_argument(
+ "--seed",
+ type=int,
+ default=42,
+ help="The seed for random generators intended for reproducibility",
+ )
+
+ parser.add_argument(
+ "--print-diagnostics",
+ type=str2bool,
+ default=False,
+ help="Accumulate stats on activations, print them and exit.",
+ )
+
+ parser.add_argument(
+ "--save-every-n",
+ type=int,
+ default=8000,
+ help="""Save checkpoint after processing this number of batches"
+ periodically. We save checkpoint to exp-dir/ whenever
+ params.batch_idx_train % save_every_n == 0. The checkpoint filename
+ has the form: f'exp-dir/checkpoint-{params.batch_idx_train}.pt'
+ Note: It also saves checkpoint to `exp-dir/epoch-xxx.pt` at the
+ end of each epoch where `xxx` is the epoch number counting from 1.
+ """,
+ )
+
+ parser.add_argument(
+ "--keep-last-k",
+ type=int,
+ default=20,
+ help="""Only keep this number of checkpoints on disk.
+ For instance, if it is 3, there are only 3 checkpoints
+ in the exp-dir with filenames `checkpoint-xxx.pt`.
+ It does not affect checkpoints with name `epoch-xxx.pt`.
+ """,
+ )
+
+ parser.add_argument(
+ "--average-period",
+ type=int,
+ default=100,
+ help="""Update the averaged model, namely `model_avg`, after processing
+ this number of batches. `model_avg` is a separate version of model,
+ in which each floating-point parameter is the average of all the
+ parameters from the start of training. Each time we take the average,
+ we do: `model_avg = model * (average_period / batch_idx_train) +
+ model_avg * ((batch_idx_train - average_period) / batch_idx_train)`.
+ """,
+ )
+
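+ # For example (illustration only): with average_period=100 and
+ # batch_idx_train=400, the update above is
+ # model_avg = 0.25 * model + 0.75 * model_avg.
+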
+ parser.add_argument(
+ "--use-fp16",
+ type=str2bool,
+ default=False,
+ help="Whether to use half precision training.",
+ )
+
+ return parser
+
+
+def get_params() -> AttributeDict:
+ """Return a dict containing training parameters.
+
+ All training related parameters that are not passed from the commandline
+ are saved in the variable `params`.
+
+ Commandline options are merged into `params` after they are parsed, so
+ you can also access them via `params`.
+
+ Explanation of options saved in `params`:
+
+ - best_train_loss: Best training loss so far. It is used to select
+ the model that has the lowest training loss. It is
+ updated during the training.
+
+ - best_valid_loss: Best validation loss so far. It is used to select
+ the model that has the lowest validation loss. It is
+ updated during the training.
+
+ - best_train_epoch: It is the epoch that has the best training loss.
+
+ - best_valid_epoch: It is the epoch that has the best validation loss.
+
+ - batch_idx_train: Used to write statistics to tensorboard. It
+ contains the number of batches trained so far across
+ epochs.
+
+ - log_interval: Print training loss if `batch_idx % log_interval` is 0
+
+ - reset_interval: Reset statistics if batch_idx % reset_interval is 0
+
+ - valid_interval: Run validation if batch_idx % valid_interval is 0
+
+ - feature_dim: The model input dim. It has to match the one used
+ in computing features.
+
+ - subsampling_factor: The subsampling factor for the model.
+
+ - encoder_dim: Hidden dim for multi-head attention model.
+
+ - num_decoder_layers: Number of decoder layer of transformer decoder.
+
+ - warm_step: The warm_step for Noam optimizer.
+ """
+ params = AttributeDict(
+ {
+ "best_train_loss": float("inf"),
+ "best_valid_loss": float("inf"),
+ "best_train_epoch": -1,
+ "best_valid_epoch": -1,
+ "batch_idx_train": 0,
+ "log_interval": 50,
+ "reset_interval": 200,
+ "valid_interval": 3000, # For the 100h subset, use 800
+ # parameters for conformer
+ "feature_dim": 80,
+ "subsampling_factor": 4,
+ "encoder_dim": 512,
+ "nhead": 8,
+ "dim_feedforward": 2048,
+ "num_encoder_layers": 12,
+ # parameters for decoder
+ "decoder_dim": 512,
+ # parameters for joiner
+ "joiner_dim": 512,
+ # parameters for Noam
+ "model_warm_step": 3000, # arg given to model, not for lrate
+ "env_info": get_env_info(),
+ # parameters for distillation with codebook indexes.
+ "enable_distiallation": True,
+ "distillation_layer": 5, # 0-based index
+ # Since the output rate of hubert is 50 frames per second, while that
+ # of the encoder is 25, two successive codebook indexes are
+ # concatenated together.
+ # Detailed in the function Transducer::concat_successive_codebook_indexes.
+ "num_codebooks": 16, # used to construct distillation loss
+ }
+ )
+
+ return params
+
+
+def get_encoder_model(params: AttributeDict) -> nn.Module:
+ # TODO: We can add an option to switch between Conformer and Transformer
+ encoder = Conformer(
+ num_features=params.feature_dim,
+ subsampling_factor=params.subsampling_factor,
+ d_model=params.encoder_dim,
+ nhead=params.nhead,
+ dim_feedforward=params.dim_feedforward,
+ num_encoder_layers=params.num_encoder_layers,
+ middle_output_layer=params.distillation_layer
+ if params.enable_distiallation
+ else None,
+ )
+ return encoder
+
+
+def get_decoder_model(params: AttributeDict) -> nn.Module:
+ decoder = Decoder(
+ vocab_size=params.vocab_size,
+ decoder_dim=params.decoder_dim,
+ blank_id=params.blank_id,
+ context_size=params.context_size,
+ )
+ return decoder
+
+
+def get_joiner_model(params: AttributeDict) -> nn.Module:
+ joiner = Joiner(
+ encoder_dim=params.encoder_dim,
+ decoder_dim=params.decoder_dim,
+ joiner_dim=params.joiner_dim,
+ vocab_size=params.vocab_size,
+ )
+ return joiner
+
+
+def get_transducer_model(params: AttributeDict) -> nn.Module:
+ encoder = get_encoder_model(params)
+ decoder = get_decoder_model(params)
+ joiner = get_joiner_model(params)
+
+ model = Transducer(
+ encoder=encoder,
+ decoder=decoder,
+ joiner=joiner,
+ encoder_dim=params.encoder_dim,
+ decoder_dim=params.decoder_dim,
+ joiner_dim=params.joiner_dim,
+ vocab_size=params.vocab_size,
+ num_codebooks=params.num_codebooks
+ if params.enable_distiallation
+ else 0,
+ )
+ return model
+
+
+def load_checkpoint_if_available(
+ params: AttributeDict,
+ model: nn.Module,
+ model_avg: nn.Module = None,
+ optimizer: Optional[torch.optim.Optimizer] = None,
+ scheduler: Optional[LRSchedulerType] = None,
+) -> Optional[Dict[str, Any]]:
+ """Load checkpoint from file.
+
+ If params.start_batch is positive, it will load the checkpoint from
+ `params.exp_dir/checkpoint-{params.start_batch}.pt`. Otherwise, if
+ params.start_epoch is larger than 1, it will load the checkpoint from
+ `params.start_epoch - 1`.
+
+ Apart from loading state dict for `model` and `optimizer` it also updates
+ `best_train_epoch`, `best_train_loss`, `best_valid_epoch`,
+ and `best_valid_loss` in `params`.
+
+ Args:
+ params:
+ The return value of :func:`get_params`.
+ model:
+ The training model.
+ model_avg:
+ The stored model averaged from the start of training.
+ optimizer:
+ The optimizer that we are using.
+ scheduler:
+ The scheduler that we are using.
+ Returns:
+ Return a dict containing previously saved training info.
+ """
+ if params.start_batch > 0:
+ filename = params.exp_dir / f"checkpoint-{params.start_batch}.pt"
+ elif params.start_epoch > 1:
+ filename = params.exp_dir / f"epoch-{params.start_epoch-1}.pt"
+ else:
+ return None
+
+ assert filename.is_file(), f"{filename} does not exist!"
+
+ saved_params = load_checkpoint(
+ filename,
+ model=model,
+ model_avg=model_avg,
+ optimizer=optimizer,
+ scheduler=scheduler,
+ )
+
+ keys = [
+ "best_train_epoch",
+ "best_valid_epoch",
+ "batch_idx_train",
+ "best_train_loss",
+ "best_valid_loss",
+ ]
+ for k in keys:
+ params[k] = saved_params[k]
+
+ if params.start_batch > 0:
+ if "cur_epoch" in saved_params:
+ params["start_epoch"] = saved_params["cur_epoch"]
+
+ if "cur_batch_idx" in saved_params:
+ params["cur_batch_idx"] = saved_params["cur_batch_idx"]
+
+ return saved_params
+
+
+def save_checkpoint(
+ params: AttributeDict,
+ model: Union[nn.Module, DDP],
+ model_avg: Optional[nn.Module] = None,
+ optimizer: Optional[torch.optim.Optimizer] = None,
+ scheduler: Optional[LRSchedulerType] = None,
+ sampler: Optional[CutSampler] = None,
+ scaler: Optional[GradScaler] = None,
+ rank: int = 0,
+) -> None:
+ """Save model, optimizer, scheduler and training stats to file.
+
+ Args:
+ params:
+ It is returned by :func:`get_params`.
+ model:
+ The training model.
+ model_avg:
+ The stored model averaged from the start of training.
+      optimizer:
+        The optimizer used in the training.
+      scheduler:
+        The learning rate scheduler used in the training.
+      sampler:
+        The sampler for the training dataset.
+      scaler:
+        The scaler used for mixed precision training.
+ """
+ if rank != 0:
+ return
+ filename = params.exp_dir / f"epoch-{params.cur_epoch}.pt"
+ save_checkpoint_impl(
+ filename=filename,
+ model=model,
+ model_avg=model_avg,
+ params=params,
+ optimizer=optimizer,
+ scheduler=scheduler,
+ sampler=sampler,
+ scaler=scaler,
+ rank=rank,
+ )
+
+ if params.best_train_epoch == params.cur_epoch:
+ best_train_filename = params.exp_dir / "best-train-loss.pt"
+ copyfile(src=filename, dst=best_train_filename)
+
+ if params.best_valid_epoch == params.cur_epoch:
+ best_valid_filename = params.exp_dir / "best-valid-loss.pt"
+ copyfile(src=filename, dst=best_valid_filename)
+
+
+def extract_codebook_indexes(batch):
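+    """Collate the per-cut codebook indexes of a batch into one padded
+    tensor of shape (N, T, C), returning it with per-utterance lengths."""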
+ cuts = batch["supervisions"]["cut"]
+    # For mixed cuts (e.g. cuts mixed with MUSAN noise), the codebook
+    # indexes are attached to the first track, i.e. the cut before mixing.
+    cuts_pre_mixed = [
+        c if isinstance(c, MonoCut) else c.tracks[0].cut for c in cuts
+    ]
+    # -100 is identical to the ignore value in CE loss computation.
+    codebook_indexes, codebook_indexes_lens = collate_custom_field(
+        cuts_pre_mixed, "codebook_indexes", pad_value=-100
+ )
+ return codebook_indexes, codebook_indexes_lens
+
+
+def compute_loss(
+ params: AttributeDict,
+ model: Union[nn.Module, DDP],
+ sp: spm.SentencePieceProcessor,
+ batch: dict,
+ is_training: bool,
+ warmup: float = 1.0,
+) -> Tuple[Tensor, MetricsTracker]:
+ """
+ Compute RNN-T loss given the model and its inputs.
+
+ Args:
+ params:
+ Parameters for training. See :func:`get_params`.
+      model:
+        The model for training. It is an instance of Transducer in our case.
+      sp:
+        The BPE model.
+ batch:
+ A batch of data. See `lhotse.dataset.K2SpeechRecognitionDataset()`
+ for the content in it.
+ is_training:
+ True for training. False for validation. When it is True, this
+ function enables autograd during computation; when it is False, it
+ disables autograd.
+ warmup: a floating point value which increases throughout training;
+ values >= 1.0 are fully warmed up and have all modules present.
+ """
+ device = (
+ model.device
+ if isinstance(model, DDP)
+ else next(model.parameters()).device
+ )
+ feature = batch["inputs"]
+ # at entry, feature is (N, T, C)
+ assert feature.ndim == 3
+ feature = feature.to(device)
+
+ supervisions = batch["supervisions"]
+ feature_lens = supervisions["num_frames"].to(device)
+
+ texts = batch["supervisions"]["text"]
+ y = sp.encode(texts, out_type=int)
+ y = k2.RaggedTensor(y).to(device)
+
+ info = MetricsTracker()
+ if is_training and params.enable_distiallation:
+ codebook_indexes, _ = extract_codebook_indexes(batch)
+ codebook_indexes = codebook_indexes.to(device)
+ else:
+ codebook_indexes = None
+
+ with torch.set_grad_enabled(is_training):
+ simple_loss, pruned_loss, codebook_loss = model(
+ x=feature,
+ x_lens=feature_lens,
+ y=y,
+ prune_range=params.prune_range,
+ am_scale=params.am_scale,
+ lm_scale=params.lm_scale,
+ warmup=warmup,
+ codebook_indexes=codebook_indexes,
+ )
+ # after the main warmup step, we keep pruned_loss_scale small
+ # for the same amount of time (model_warm_step), to avoid
+ # overwhelming the simple_loss and causing it to diverge,
+ # in case it had not fully learned the alignment yet.
+ pruned_loss_scale = (
+ 0.0
+ if warmup < 1.0
+ else (0.1 if warmup > 1.0 and warmup < 2.0 else 1.0)
+ )
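+        # In other words: pruned_loss_scale is 0.0 while warmup < 1.0,
+        # 0.1 for 1.0 < warmup < 2.0, and 1.0 otherwise.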
+ loss = (
+ params.simple_loss_scale * simple_loss
+ + pruned_loss_scale * pruned_loss
+ )
+ if is_training and params.enable_distiallation:
+ assert codebook_loss is not None
+ loss += params.codebook_loss_scale * codebook_loss
+
+ assert loss.requires_grad == is_training
+
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore")
+ info["frames"] = (
+ (feature_lens // params.subsampling_factor).sum().item()
+ )
+
+ # Note: We use reduction=sum while computing the loss.
+ info["loss"] = loss.detach().cpu().item()
+ info["simple_loss"] = simple_loss.detach().cpu().item()
+ info["pruned_loss"] = pruned_loss.detach().cpu().item()
+ if is_training and params.enable_distiallation:
+ info["codebook_loss"] = codebook_loss.detach().cpu().item()
+
+ return loss, info
+
+
+def compute_validation_loss(
+ params: AttributeDict,
+ model: Union[nn.Module, DDP],
+ sp: spm.SentencePieceProcessor,
+ valid_dl: torch.utils.data.DataLoader,
+ world_size: int = 1,
+) -> MetricsTracker:
+ """Run the validation process."""
+ model.eval()
+
+ tot_loss = MetricsTracker()
+
+ for batch_idx, batch in enumerate(valid_dl):
+ loss, loss_info = compute_loss(
+ params=params,
+ model=model,
+ sp=sp,
+ batch=batch,
+ is_training=False,
+ )
+ assert loss.requires_grad is False
+ tot_loss = tot_loss + loss_info
+
+ if world_size > 1:
+ tot_loss.reduce(loss.device)
+
+ loss_value = tot_loss["loss"] / tot_loss["frames"]
+ if loss_value < params.best_valid_loss:
+ params.best_valid_epoch = params.cur_epoch
+ params.best_valid_loss = loss_value
+
+ return tot_loss
+
+
+def train_one_epoch(
+ params: AttributeDict,
+ model: Union[nn.Module, DDP],
+ optimizer: torch.optim.Optimizer,
+ scheduler: LRSchedulerType,
+ sp: spm.SentencePieceProcessor,
+ train_dl: torch.utils.data.DataLoader,
+ valid_dl: torch.utils.data.DataLoader,
+ scaler: GradScaler,
+ model_avg: Optional[nn.Module] = None,
+ tb_writer: Optional[SummaryWriter] = None,
+ world_size: int = 1,
+ rank: int = 0,
+) -> None:
+ """Train the model for one epoch.
+
+    The training loss, averaged over all frames, is saved in
+ `params.train_loss`. It runs the validation process every
+ `params.valid_interval` batches.
+
+ Args:
+ params:
+ It is returned by :func:`get_params`.
+ model:
+ The model for training.
+ optimizer:
+ The optimizer we are using.
+ scheduler:
+ The learning rate scheduler, we call step() every step.
+ train_dl:
+ Dataloader for the training dataset.
+ valid_dl:
+ Dataloader for the validation dataset.
+ scaler:
+        The scaler used for mixed precision training.
+ model_avg:
+ The stored model averaged from the start of training.
+ tb_writer:
+ Writer to write log messages to tensorboard.
+ world_size:
+        Number of processes (GPUs) in DDP training. If it is 1, DDP is
+        disabled.
+      rank:
+        The rank of this process in DDP training. If no DDP is used, it
+        should be set to 0.
+ """
+ model.train()
+
+ tot_loss = MetricsTracker()
+
+ cur_batch_idx = params.get("cur_batch_idx", 0)
+
+ for batch_idx, batch in enumerate(train_dl):
+ if batch_idx < cur_batch_idx:
+ continue
+ cur_batch_idx = batch_idx
+
+ params.batch_idx_train += 1
+ batch_size = len(batch["supervisions"]["text"])
+
+ with torch.cuda.amp.autocast(enabled=params.use_fp16):
+ loss, loss_info = compute_loss(
+ params=params,
+ model=model,
+ sp=sp,
+ batch=batch,
+ is_training=True,
+ warmup=(params.batch_idx_train / params.model_warm_step),
+ )
+ # summary stats
+ tot_loss = (tot_loss * (1 - 1 / params.reset_interval)) + loss_info
+
+ # NOTE: We use reduction==sum and loss is computed over utterances
+ # in the batch and there is no normalization to it so far.
+ scaler.scale(loss).backward()
+ scheduler.step_batch(params.batch_idx_train)
+ scaler.step(optimizer)
+ scaler.update()
+ optimizer.zero_grad()
+
+ if params.print_diagnostics and batch_idx == 30:
+ return
+
+ if (
+ rank == 0
+ and params.batch_idx_train > 0
+ and params.batch_idx_train % params.average_period == 0
+ ):
+ update_averaged_model(
+ params=params,
+ model_cur=model,
+ model_avg=model_avg,
+ )
+
+ if (
+ params.batch_idx_train > 0
+ and params.batch_idx_train % params.save_every_n == 0
+ ):
+ params.cur_batch_idx = batch_idx
+ save_checkpoint_with_global_batch_idx(
+ out_dir=params.exp_dir,
+ global_batch_idx=params.batch_idx_train,
+ model=model,
+ model_avg=model_avg,
+ params=params,
+ optimizer=optimizer,
+ scheduler=scheduler,
+ sampler=train_dl.sampler,
+ scaler=scaler,
+ rank=rank,
+ )
+ del params.cur_batch_idx
+ remove_checkpoints(
+ out_dir=params.exp_dir,
+ topk=params.keep_last_k,
+ rank=rank,
+ )
+
+ if batch_idx % params.log_interval == 0:
+ cur_lr = scheduler.get_last_lr()[0]
+ logging.info(
+ f"Epoch {params.cur_epoch}, "
+ f"batch {batch_idx}, loss[{loss_info}], "
+ f"tot_loss[{tot_loss}], batch size: {batch_size}, "
+ f"lr: {cur_lr:.2e}"
+ )
+
+ if tb_writer is not None:
+ tb_writer.add_scalar(
+ "train/learning_rate", cur_lr, params.batch_idx_train
+ )
+
+ loss_info.write_summary(
+ tb_writer, "train/current_", params.batch_idx_train
+ )
+ tot_loss.write_summary(
+ tb_writer, "train/tot_", params.batch_idx_train
+ )
+
+ if batch_idx > 0 and batch_idx % params.valid_interval == 0:
+ logging.info("Computing validation loss")
+ valid_info = compute_validation_loss(
+ params=params,
+ model=model,
+ sp=sp,
+ valid_dl=valid_dl,
+ world_size=world_size,
+ )
+ model.train()
+ logging.info(f"Epoch {params.cur_epoch}, validation: {valid_info}")
+ if tb_writer is not None:
+ valid_info.write_summary(
+ tb_writer, "train/valid_", params.batch_idx_train
+ )
+
+ loss_value = tot_loss["loss"] / tot_loss["frames"]
+ params.train_loss = loss_value
+ if params.train_loss < params.best_train_loss:
+ params.best_train_epoch = params.cur_epoch
+ params.best_train_loss = params.train_loss
+
+
+def run(rank, world_size, args):
+ """
+ Args:
+ rank:
+ It is a value between 0 and `world_size-1`, which is
+ passed automatically by `mp.spawn()` in :func:`main`.
+ The node with rank 0 is responsible for saving checkpoint.
+ world_size:
+ Number of GPUs for DDP training.
+ args:
+ The return value of get_parser().parse_args()
+ """
+ params = get_params()
+ params.update(vars(args))
+ if params.full_libri is False:
+ params.valid_interval = 1600
+
+ fix_random_seed(params.seed)
+ if world_size > 1:
+ setup_dist(rank, world_size, params.master_port)
+
+ setup_logger(f"{params.exp_dir}/log/log-train")
+ logging.info("Training started")
+
+ if args.tensorboard and rank == 0:
+ tb_writer = SummaryWriter(log_dir=f"{params.exp_dir}/tensorboard")
+ else:
+ tb_writer = None
+
+ device = torch.device("cpu")
+ if torch.cuda.is_available():
+ device = torch.device("cuda", rank)
+ logging.info(f"Device: {device}")
+
+ sp = spm.SentencePieceProcessor()
+ sp.load(params.bpe_model)
+
+    # <blk> is defined in local/train_bpe_model.py
+    params.blank_id = sp.piece_to_id("<blk>")
+ params.vocab_size = sp.get_piece_size()
+
+ logging.info(params)
+
+ logging.info("About to create model")
+ model = get_transducer_model(params)
+
+ num_param = sum([p.numel() for p in model.parameters()])
+ logging.info(f"Number of model parameters: {num_param}")
+
+ assert params.save_every_n >= params.average_period
+ model_avg: Optional[nn.Module] = None
+ if rank == 0:
+ # model_avg is only used with rank 0
+ model_avg = copy.deepcopy(model)
+
+ assert params.start_epoch > 0, params.start_epoch
+ checkpoints = load_checkpoint_if_available(
+ params=params, model=model, model_avg=model_avg
+ )
+
+ model.to(device)
+ if world_size > 1:
+ logging.info("Using DDP")
+ model = DDP(model, device_ids=[rank])
+
+ optimizer = Eve(model.parameters(), lr=params.initial_lr)
+
+ scheduler = Eden(optimizer, params.lr_batches, params.lr_epochs)
+
+ if checkpoints and "optimizer" in checkpoints:
+ logging.info("Loading optimizer state dict")
+ optimizer.load_state_dict(checkpoints["optimizer"])
+
+ if (
+ checkpoints
+ and "scheduler" in checkpoints
+ and checkpoints["scheduler"] is not None
+ ):
+ logging.info("Loading scheduler state dict")
+ scheduler.load_state_dict(checkpoints["scheduler"])
+
+ if params.print_diagnostics:
+ diagnostic = diagnostics.attach_diagnostics(model)
+
+ librispeech = LibriSpeechAsrDataModule(args)
+
+ train_cuts = librispeech.train_clean_100_cuts()
+ if params.full_libri:
+ train_cuts += librispeech.train_clean_360_cuts()
+ train_cuts += librispeech.train_other_500_cuts()
+
+ def remove_short_and_long_utt(c: Cut):
+ # Keep only utterances with duration between 1 second and 20 seconds
+ #
+ # Caution: There is a reason to select 20.0 here. Please see
+ # ../local/display_manifest_statistics.py
+ #
+ # You should use ../local/display_manifest_statistics.py to get
+ # an utterance duration distribution for your dataset to select
+ # the threshold
+ return 1.0 <= c.duration <= 20.0
+
+ train_cuts = train_cuts.filter(remove_short_and_long_utt)
+
+ if params.start_batch > 0 and checkpoints and "sampler" in checkpoints:
+ # We only load the sampler's state dict when it loads a checkpoint
+ # saved in the middle of an epoch
+ sampler_state_dict = checkpoints["sampler"]
+ else:
+ sampler_state_dict = None
+
+ train_dl = librispeech.train_dataloaders(
+ train_cuts, sampler_state_dict=sampler_state_dict
+ )
+
+ valid_cuts = librispeech.dev_clean_cuts()
+ valid_cuts += librispeech.dev_other_cuts()
+ valid_dl = librispeech.valid_dataloaders(valid_cuts)
+
+ if not params.print_diagnostics:
+ scan_pessimistic_batches_for_oom(
+ model=model,
+ train_dl=train_dl,
+ optimizer=optimizer,
+ sp=sp,
+ params=params,
+ )
+
+ scaler = GradScaler(enabled=params.use_fp16)
+ if checkpoints and "grad_scaler" in checkpoints:
+ logging.info("Loading grad scaler state dict")
+ scaler.load_state_dict(checkpoints["grad_scaler"])
+
+ for epoch in range(params.start_epoch, params.num_epochs + 1):
+ scheduler.step_epoch(epoch - 1)
+ fix_random_seed(params.seed + epoch - 1)
+ train_dl.sampler.set_epoch(epoch - 1)
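+        # Note: `epoch` is 1-based here, while the scheduler's and the
+        # sampler's epoch counters are 0-based, hence `epoch - 1` above.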
+
+ if tb_writer is not None:
+ tb_writer.add_scalar("train/epoch", epoch, params.batch_idx_train)
+
+ params.cur_epoch = epoch
+
+ train_one_epoch(
+ params=params,
+ model=model,
+ model_avg=model_avg,
+ optimizer=optimizer,
+ scheduler=scheduler,
+ sp=sp,
+ train_dl=train_dl,
+ valid_dl=valid_dl,
+ scaler=scaler,
+ tb_writer=tb_writer,
+ world_size=world_size,
+ rank=rank,
+ )
+
+ if params.print_diagnostics:
+ diagnostic.print_diagnostics()
+ break
+
+ save_checkpoint(
+ params=params,
+ model=model,
+ model_avg=model_avg,
+ optimizer=optimizer,
+ scheduler=scheduler,
+ sampler=train_dl.sampler,
+ scaler=scaler,
+ rank=rank,
+ )
+
+ logging.info("Done!")
+
+ if world_size > 1:
+ torch.distributed.barrier()
+ cleanup_dist()
+
+
+def scan_pessimistic_batches_for_oom(
+ model: Union[nn.Module, DDP],
+ train_dl: torch.utils.data.DataLoader,
+ optimizer: torch.optim.Optimizer,
+ sp: spm.SentencePieceProcessor,
+ params: AttributeDict,
+):
+ from lhotse.dataset import find_pessimistic_batches
+
+ logging.info(
+ "Sanity check -- see if any of the batches in epoch 1 would cause OOM."
+ )
+ batches, crit_values = find_pessimistic_batches(train_dl.sampler)
+ for criterion, cuts in batches.items():
+ batch = train_dl.dataset[cuts]
+ try:
+ # warmup = 0.0 is so that the derivs for the pruned loss stay zero
+ # (i.e. are not remembered by the decaying-average in adam), because
+ # we want to avoid these params being subject to shrinkage in adam.
+ with torch.cuda.amp.autocast(enabled=params.use_fp16):
+ loss, _ = compute_loss(
+ params=params,
+ model=model,
+ sp=sp,
+ batch=batch,
+ is_training=True,
+ warmup=0.0,
+ )
+ loss.backward()
+ optimizer.step()
+ optimizer.zero_grad()
+ except RuntimeError as e:
+ if "CUDA out of memory" in str(e):
+ logging.error(
+ "Your GPU ran out of memory with the current "
+ "max_duration setting. We recommend decreasing "
+ "max_duration and trying again.\n"
+ f"Failing criterion: {criterion} "
+ f"(={crit_values[criterion]}) ..."
+ )
+ raise
+
+
+def main():
+ parser = get_parser()
+ LibriSpeechAsrDataModule.add_arguments(parser)
+ args = parser.parse_args()
+ args.exp_dir = Path(args.exp_dir)
+
+ world_size = args.world_size
+ assert world_size >= 1
+ if world_size > 1:
+ mp.spawn(run, args=(world_size, args), nprocs=world_size, join=True)
+ else:
+ run(rank=0, world_size=1, args=args)
+
+
+torch.set_num_threads(1)
+torch.set_num_interop_threads(1)
+
+if __name__ == "__main__":
+ main()
diff --git a/egs/librispeech/ASR/pruned_transducer_stateless6/vq_utils.py b/egs/librispeech/ASR/pruned_transducer_stateless6/vq_utils.py
new file mode 100644
index 000000000..c4935f921
--- /dev/null
+++ b/egs/librispeech/ASR/pruned_transducer_stateless6/vq_utils.py
@@ -0,0 +1,399 @@
+#!/usr/bin/env python3
+# Copyright 2022 Xiaomi Corporation (Author: Liyong Guo)
+#
+# See ../../../../LICENSE for clarification regarding multiple authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import argparse
+import copy
+import glob
+import logging
+import os
+from functools import cached_property
+from pathlib import Path
+
+import numpy as np
+import torch
+import torch.multiprocessing as mp
+import quantization
+
+from asr_datamodule import LibriSpeechAsrDataModule
+from hubert_xlarge import HubertXlargeFineTuned
+from icefall.utils import (
+ AttributeDict,
+ setup_logger,
+)
+from lhotse import CutSet, load_manifest
+from lhotse.features.io import NumpyHdf5Writer
+
+
+class CodebookIndexExtractor:
+ """
+    A wrapper of quantization.Quantizer.
+
+    It is responsible for:
+    1. extracting and saving activations from a teacher model;
+    2. training a quantizer on those activations;
+    3. extracting codebook indexes for the whole training set.
+       Normally this step needs multiple GPUs.
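+
+    A typical workflow, sketched from the methods defined below::
+
+        extractor = CodebookIndexExtractor(params=params)
+        extractor.extract_and_save_embedding()
+        extractor.train_quantizer()
+        extractor.extract_codebook_indexes()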
+ """
+
+ def __init__(self, params: AttributeDict):
+
+ self.params = params
+ params.subsets = ["clean-100"]
+ if self.params.full_libri:
+ self.params.subsets += ["clean-360", "other-500"]
+
+ self.init_dirs()
+ setup_logger(f"{self.vq_dir}/log-vq_extraction")
+
+ def init_dirs(self):
+        # vq_dir is the root dir for the quantizer:
+        # training data / quantizer / extracted codebook indexes
+ self.vq_dir = (
+ self.params.exp_dir / f"vq/{self.params.teacher_model_id}/"
+ )
+ self.vq_dir.mkdir(parents=True, exist_ok=True)
+
+        # manifest_dir holds:
+        # the split original manifests, and
+        # the extracted codebook indexes with their related manifests
+ self.manifest_dir = self.vq_dir / f"splits{self.params.world_size}"
+ self.manifest_dir.mkdir(parents=True, exist_ok=True)
+
+        # It doesn't matter whether ori_manifest_dir is a str or a Path;
+        # set it to Path to be consistent.
+ self.ori_manifest_dir = Path("./data/fbank/")
+ self.dst_manifest_dir = Path("./data/vq_fbank/")
+
+ self.dst_manifest_dir.mkdir(parents=True, exist_ok=True)
+
+ @classmethod
+ def add_arguments(cls, parser: argparse.ArgumentParser):
+        # Options for teacher embedding extraction.
+ parser.add_argument(
+ "--embedding-layer",
+ type=int,
+            help="Layer from which to extract teacher embeddings (1-based).",
+ default=36,
+ )
+
+ parser.add_argument(
+ "--num-utts",
+ type=int,
+ default=1000,
+            help="Number of utterances used to train the quantizer.",
+ )
+
+ parser.add_argument(
+ "--num-codebooks",
+ type=int,
+ default=8,
+            help="""Number of codebooks,
+            i.e. the number of codebook indexes each teacher embedding is
+            compressed into.
+ """,
+ )
+
+ @property
+ def embedding_file_path(self):
+ """
+        Path to the file of saved embeddings used to train the quantizer.
+ """
+ embedding_file_id = (
+ f"num_utts_{self.params.num_utts}"
+ + f"-layer_{self.params.embedding_layer}"
+ + "-embedding_embeddings.h5"
+ )
+
+ embedding_file_path = self.vq_dir / embedding_file_id
+ return embedding_file_path
+
+ @torch.no_grad()
+ def extract_and_save_embedding(self):
+ """
+        Extract embeddings from the teacher model and save them to disk.
+        The saved embeddings are used to train the quantizer.
+ """
+ if self.embedding_file_path.exists():
+ warn_message = (
+ f"{self.embedding_file_path} already exists."
+ + " Skip extracting embeddings from teacher model"
+ )
+            logging.warning(warn_message)
+ return
+
+ total_cuts = 0
+ with NumpyHdf5Writer(self.embedding_file_path) as writer:
+ for batch_idx, batch in enumerate(self.quantizer_train_dl):
+ cut_list = batch["supervisions"]["cut"]
+ (
+ encoder_embedding,
+ num_frames,
+ ) = self.teacher_model.extract_embedding(batch)
+ encoder_embedding = encoder_embedding.cpu().numpy()
+ for idx, cut in enumerate(cut_list):
+ cut.encoder_embedding = writer.store_array(
+ key=cut.id,
+ value=encoder_embedding[idx][: num_frames[idx]],
+ )
+ total_cuts += len(cut_list)
+ logging.info(
+                    f"Processed {total_cuts} out of {self.params.num_utts} cuts."
+ )
+
+ logging.info(f"Processed all {total_cuts} cuts.")
+
+ @property
+ def quantizer_train_dl(self):
+        # Dataloader used to train the quantizer.
+        librispeech = LibriSpeechAsrDataModule(self.params)
+        quantizer_train_cuts = librispeech.train_clean_100_cuts().subset(
+            first=self.params.num_utts
+        )
+        return librispeech.train_dataloaders(quantizer_train_cuts)
+
+ @cached_property
+ def quantizer_file_path(self):
+ quantizer_file_id = (
+ f"num_utts-{self.params.num_utts}"
+ + f"-layer-{self.params.embedding_layer}"
+ + f"-num_codebooks_{self.params.num_codebooks}"
+ + "-quantizer.pt"
+ )
+ quantizer_file_path = Path(self.vq_dir) / quantizer_file_id
+
+ return quantizer_file_path
+
+ def train_quantizer(self):
+ if self.quantizer_file_path.exists():
+ warn_message = (
+ f"{self.quantizer_file_path} already exists."
+                + " Skip training the quantizer."
+ )
+            logging.warning(warn_message)
+ return
+
+ assert self.embedding_file_path.exists()
+ trainer = quantization.QuantizerTrainer(
+ dim=self.params.embedding_dim,
+ bytes_per_frame=self.params.num_codebooks,
+ device=self.params.device,
+ )
+ train, valid = quantization.read_hdf5_data(self.embedding_file_path)
+        # Minibatch size. This is fairly arbitrary; it is close to what
+        # we used when we tuned this method.
+        B = 512
+
+ def minibatch_generator(data: torch.Tensor, repeat: bool):
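+            # Yield minibatches of B consecutive rows. With repeat=True,
+            # cycle over the data indefinitely (wrapping around via the
+            # modulo below); otherwise make a single pass and stop
+            # before an incomplete final batch.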
+ assert 3 * B < data.shape[0]
+ cur_offset = 0
+ while True if repeat else cur_offset + B <= data.shape[0]:
+ start = cur_offset % (data.shape[0] + 1 - B)
+ end = start + B
+ cur_offset += B
+ yield data[start:end, :].to(self.params.device).to(
+ dtype=torch.float
+ )
+
+ for x in minibatch_generator(train, repeat=True):
+ trainer.step(x)
+ if trainer.done():
+ break
+
+ quantizer = trainer.get_quantizer()
+ torch.save(quantizer.state_dict(), self.quantizer_file_path)
+
+ def split_ori_manifests(self):
+ """
+        When multiple GPUs are available, split the original manifests
+        so that codebook indexes can be extracted in parallel.
+ """
+ for subset in self.params.subsets:
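+            # NOTE: "lhotse split" indexes its output files starting
+            # from 1; load_ori_dl() and compute_codebook_indexes_parallel()
+            # below rely on that convention.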
+ logging.info(f"About to split {subset}.")
+ ori_manifest = f"./data/fbank/cuts_train-{subset}.json.gz"
+ split_cmd = f"lhotse split {self.params.world_size} {ori_manifest} {self.manifest_dir}"
+ os.system(f"{split_cmd}")
+
+ def merge_vq_manifests(self):
+ """
+        Merge the generated manifests that include codebook indexes and
+        store them in self.dst_manifest_dir.
+ """
+ for subset in self.params.subsets:
+ vq_manifests = f"{self.manifest_dir}/with_codebook_indexes-cuts_train-{subset}*.json.gz"
+ dst_vq_manifest = (
+ self.dst_manifest_dir / f"cuts_train-{subset}.json.gz"
+ )
+            if self.params.world_size == 1:
+ merge_cmd = f"cp {vq_manifests} {dst_vq_manifest}"
+ else:
+ merge_cmd = f"lhotse combine {vq_manifests} {dst_vq_manifest}"
+ os.system(f"{merge_cmd}")
+
+ def reuse_manifests(self):
+ """
+        Codebook indexes are extracted only for the train-* subsets.
+        The rest of the subsets are just symlinked from ./data/fbank.
+ """
+
+ def is_train(manifest: str) -> bool:
+ for train_subset in ["clean-100", "clean-360", "other-500"]:
+ if train_subset in manifest:
+ return True
+ return False
+
+        # The type of self.ori_manifest_dir is Path,
+        # while the result type of glob.glob is str.
+ reusable_manifests = [
+ manifest
+ for manifest in glob.glob(f"{self.ori_manifest_dir}/*.gz")
+ if not is_train(manifest)
+ ]
+ for manifest_path in reusable_manifests:
+ ori_manifest_path = Path(manifest_path).resolve()
+            # Path cannot be used as a parameter of str.replace,
+            # so cast them to str.
+ dst_manifest_path = Path(
+ manifest_path.replace(
+ str(self.ori_manifest_dir), str(self.dst_manifest_dir)
+ )
+ ).resolve()
+ if not dst_manifest_path.exists():
+ os.symlink(ori_manifest_path, dst_manifest_path)
+
+ def create_vq_fbank(self):
+ self.reuse_manifests()
+ self.merge_vq_manifests()
+
+ @cached_property
+ def teacher_model(self):
+ return HubertXlargeFineTuned(self.params)
+
+ @cached_property
+ def quantizer(self):
+ assert self.quantizer_file_path.exists()
+ quantizer = quantization.Quantizer(
+ dim=self.params.embedding_dim,
+ num_codebooks=self.params.num_codebooks,
+ codebook_size=256,
+ )
+ quantizer.load_state_dict(torch.load(self.quantizer_file_path))
+ quantizer.to(self.params.device)
+ return quantizer
+
+ def load_ori_dl(self, subset):
+ if self.params.world_size == 1:
+ ori_manifest_path = f"./data/fbank/cuts_train-{subset}.json.gz"
+ else:
+ ori_manifest_path = (
+ self.manifest_dir
+ / f"cuts_train-{subset}.{self.params.manifest_index}.json.gz"
+ )
+
+ cuts = load_manifest(ori_manifest_path)
+ dl = LibriSpeechAsrDataModule(self.params).train_dataloaders(cuts)
+ return dl
+
+ def _release_gpu_memory(self):
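+        # teacher_model and quantizer are cached_property attributes, so
+        # their values live in self.__dict__; popping them drops the
+        # cached references and lets their CUDA memory be freed.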
+ self.__dict__.pop("teacher_model", None)
+ self.__dict__.pop("quantizer", None)
+ torch.cuda.empty_cache()
+
+ def extract_codebook_indexes(self):
+ if self.params.world_size == 1:
+ self.extract_codebook_indexes_imp()
+ else:
+ # Since a new extractor will be created for each rank in
+ # compute_codebook_indexes_parallel, it's better to
+            # release the GPU memory occupied by the current extractor.
+ self._release_gpu_memory()
+
+ # Prepare split manifests for each job.
+ self.split_ori_manifests()
+ mp.spawn(
+ compute_codebook_indexes_parallel,
+ args=(self.params,),
+ nprocs=self.params.world_size,
+ join=True,
+ )
+ self.create_vq_fbank()
+
+ @torch.no_grad()
+ def extract_codebook_indexes_imp(self):
+ for subset in self.params.subsets:
+ num_cuts = 0
+ cuts = []
+ if self.params.world_size == 1:
+ manifest_file_id = f"{subset}"
+ else:
+ manifest_file_id = f"{subset}-{self.params.manifest_index}"
+
+ manifest_file_path = self.manifest_dir / manifest_file_id
+ with NumpyHdf5Writer(manifest_file_path) as writer:
+ for batch_idx, batch in enumerate(self.load_ori_dl(subset)):
+ (
+ encoder_embedding,
+ num_frames,
+ ) = self.teacher_model.extract_embedding(batch)
+ codebook_indexes = self.quantizer.encode(encoder_embedding)
+ # [N, T, C]
+ codebook_indexes = codebook_indexes.to("cpu").numpy()
+ assert np.min(codebook_indexes) >= 0
+ assert np.max(codebook_indexes) < 256
+ supervisions = batch["supervisions"]
+ cut_list = supervisions["cut"]
+ assert len(cut_list) == codebook_indexes.shape[0]
+ assert all(c.start == 0 for c in supervisions["cut"])
+
+ for idx, cut in enumerate(cut_list):
+ cut.codebook_indexes = writer.store_array(
+ key=cut.id,
+ value=codebook_indexes[idx][: num_frames[idx]],
+ frame_shift=0.02,
+ temporal_dim=0,
+ start=0,
+ )
+ cuts += cut_list
+ num_cuts += len(cut_list)
+ message = f"Processed {num_cuts} cuts from {subset}"
+ if self.params.world_size > 1:
+ message += f" by job {self.params.manifest_index}"
+ logging.info(f"{message}.")
+
+ json_file_path = (
+ self.manifest_dir
+ / f"with_codebook_indexes-cuts_train-{manifest_file_id}.json.gz"
+ )
+ CutSet.from_cuts(cuts).to_json(json_file_path)
+
+
+@torch.no_grad()
+def compute_codebook_indexes_parallel(
+ rank: int,
+ params,
+) -> None:
+    """Create an extractor for each rank and extract codebook indexes
+    in parallel.
+
+    Normally, this function is called by torch.multiprocessing
+    when multiple GPUs are available.
+    """
+ params = copy.deepcopy(params)
+ device = torch.device("cuda", rank)
+ params.device = device
+
+    # rank is 0-based, while the manifests split by "lhotse split" are 1-based.
+ params.manifest_index = rank + 1
+
+ extractor = CodebookIndexExtractor(params=params)
+ extractor.extract_codebook_indexes_imp()
diff --git a/egs/librispeech/ASR/tdnn_lstm_ctc/asr_datamodule.py b/egs/librispeech/ASR/tdnn_lstm_ctc/asr_datamodule.py
index 8dd1459ca..355ccc99a 100644
--- a/egs/librispeech/ASR/tdnn_lstm_ctc/asr_datamodule.py
+++ b/egs/librispeech/ASR/tdnn_lstm_ctc/asr_datamodule.py
@@ -24,17 +24,20 @@ from pathlib import Path
from typing import Any, Dict, Optional
import torch
-from lhotse import CutSet, Fbank, FbankConfig, load_manifest
-from lhotse.dataset import (
- BucketingSampler,
+from lhotse import CutSet, Fbank, FbankConfig, load_manifest, load_manifest_lazy
+from lhotse.dataset import ( # noqa F401 for PrecomputedFeatures
CutConcatenate,
CutMix,
+ DynamicBucketingSampler,
K2SpeechRecognitionDataset,
PrecomputedFeatures,
SingleCutSampler,
SpecAugment,
)
-from lhotse.dataset.input_strategies import OnTheFlyFeatures
+from lhotse.dataset.input_strategies import ( # noqa F401 For AudioSamples
+ AudioSamples,
+ OnTheFlyFeatures,
+)
from lhotse.utils import fix_random_seed
from torch.utils.data import DataLoader
@@ -110,7 +113,7 @@ class LibriSpeechAsrDataModule:
"--num-buckets",
type=int,
default=30,
- help="The number of buckets for the BucketingSampler"
+ help="The number of buckets for the DynamicBucketingSampler "
"(you might want to increase it for larger datasets).",
)
group.add_argument(
@@ -150,6 +153,12 @@ class LibriSpeechAsrDataModule:
help="When enabled (=default), the examples will be "
"shuffled for each epoch.",
)
+ group.add_argument(
+ "--drop-last",
+ type=str2bool,
+ default=True,
+ help="Whether to drop last batch. Used by sampler.",
+ )
group.add_argument(
"--return-cuts",
type=str2bool,
@@ -192,6 +201,13 @@ class LibriSpeechAsrDataModule:
"with training dataset. ",
)
+ group.add_argument(
+ "--input-strategy",
+ type=str,
+ default="PrecomputedFeatures",
+ help="AudioSamples or PrecomputedFeatures",
+ )
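+    # NOTE: the selected class name is resolved via eval() when the
+    # dataloaders are built, which is why AudioSamples and
+    # PrecomputedFeatures are imported with "# noqa F401" above.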
+
def train_dataloaders(
self,
cuts_train: CutSet,
@@ -209,7 +225,7 @@ class LibriSpeechAsrDataModule:
logging.info("Enable MUSAN")
logging.info("About to get Musan cuts")
cuts_musan = load_manifest(
- self.args.manifest_dir / "cuts_musan.json.gz"
+ self.args.manifest_dir / "musan_cuts.jsonl.gz"
)
transforms.append(
CutMix(
@@ -263,6 +279,7 @@ class LibriSpeechAsrDataModule:
logging.info("About to create train dataset")
train = K2SpeechRecognitionDataset(
+ input_strategy=eval(self.args.input_strategy)(),
cut_transforms=transforms,
input_transforms=input_transforms,
return_cuts=self.args.return_cuts,
@@ -289,14 +306,13 @@ class LibriSpeechAsrDataModule:
)
if self.args.bucketing_sampler:
- logging.info("Using BucketingSampler.")
- train_sampler = BucketingSampler(
+ logging.info("Using DynamicBucketingSampler.")
+ train_sampler = DynamicBucketingSampler(
cuts_train,
max_duration=self.args.max_duration,
shuffle=self.args.shuffle,
num_buckets=self.args.num_buckets,
- bucket_method="equal_duration",
- drop_last=True,
+ drop_last=self.args.drop_last,
)
else:
logging.info("Using SingleCutSampler.")
@@ -350,7 +366,7 @@ class LibriSpeechAsrDataModule:
cut_transforms=transforms,
return_cuts=self.args.return_cuts,
)
- valid_sampler = BucketingSampler(
+ valid_sampler = DynamicBucketingSampler(
cuts_valid,
max_duration=self.args.max_duration,
shuffle=False,
@@ -371,11 +387,13 @@ class LibriSpeechAsrDataModule:
test = K2SpeechRecognitionDataset(
input_strategy=OnTheFlyFeatures(Fbank(FbankConfig(num_mel_bins=80)))
if self.args.on_the_fly_feats
- else PrecomputedFeatures(),
+ else eval(self.args.input_strategy)(),
return_cuts=self.args.return_cuts,
)
- sampler = BucketingSampler(
- cuts, max_duration=self.args.max_duration, shuffle=False
+ sampler = DynamicBucketingSampler(
+ cuts,
+ max_duration=self.args.max_duration,
+ shuffle=False,
)
logging.debug("About to create test dataloader")
test_dl = DataLoader(
@@ -389,40 +407,48 @@ class LibriSpeechAsrDataModule:
@lru_cache()
def train_clean_100_cuts(self) -> CutSet:
logging.info("About to get train-clean-100 cuts")
- return load_manifest(
- self.args.manifest_dir / "cuts_train-clean-100.json.gz"
+ return load_manifest_lazy(
+ self.args.manifest_dir / "librispeech_cuts_train-clean-100.jsonl.gz"
)
@lru_cache()
def train_clean_360_cuts(self) -> CutSet:
logging.info("About to get train-clean-360 cuts")
- return load_manifest(
- self.args.manifest_dir / "cuts_train-clean-360.json.gz"
+ return load_manifest_lazy(
+ self.args.manifest_dir / "librispeech_cuts_train-clean-360.jsonl.gz"
)
@lru_cache()
def train_other_500_cuts(self) -> CutSet:
logging.info("About to get train-other-500 cuts")
- return load_manifest(
- self.args.manifest_dir / "cuts_train-other-500.json.gz"
+ return load_manifest_lazy(
+ self.args.manifest_dir / "librispeech_cuts_train-other-500.jsonl.gz"
)
@lru_cache()
def dev_clean_cuts(self) -> CutSet:
logging.info("About to get dev-clean cuts")
- return load_manifest(self.args.manifest_dir / "cuts_dev-clean.json.gz")
+ return load_manifest_lazy(
+ self.args.manifest_dir / "librispeech_cuts_dev-clean.jsonl.gz"
+ )
@lru_cache()
def dev_other_cuts(self) -> CutSet:
logging.info("About to get dev-other cuts")
- return load_manifest(self.args.manifest_dir / "cuts_dev-other.json.gz")
+ return load_manifest_lazy(
+ self.args.manifest_dir / "librispeech_cuts_dev-other.jsonl.gz"
+ )
@lru_cache()
def test_clean_cuts(self) -> CutSet:
logging.info("About to get test-clean cuts")
- return load_manifest(self.args.manifest_dir / "cuts_test-clean.json.gz")
+ return load_manifest_lazy(
+ self.args.manifest_dir / "librispeech_cuts_test-clean.jsonl.gz"
+ )
@lru_cache()
def test_other_cuts(self) -> CutSet:
logging.info("About to get test-other cuts")
- return load_manifest(self.args.manifest_dir / "cuts_test-other.json.gz")
+ return load_manifest_lazy(
+ self.args.manifest_dir / "librispeech_cuts_test-other.jsonl.gz"
+ )
diff --git a/egs/librispeech/ASR/tdnn_lstm_ctc/train.py b/egs/librispeech/ASR/tdnn_lstm_ctc/train.py
index 8597525ba..827e3ae1f 100755
--- a/egs/librispeech/ASR/tdnn_lstm_ctc/train.py
+++ b/egs/librispeech/ASR/tdnn_lstm_ctc/train.py
@@ -16,6 +16,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+"""
+Usage:
+ export CUDA_VISIBLE_DEVICES="0,1,2,3"
+ ./tdnn_lstm_ctc/train.py \
+ --world-size 4 \
+ --full-libri 1 \
+ --max-duration 300 \
+ --num-epochs 20
+"""
import argparse
import logging
@@ -29,6 +38,7 @@ import torch.multiprocessing as mp
import torch.nn as nn
import torch.optim as optim
from asr_datamodule import LibriSpeechAsrDataModule
+from lhotse.cut import Cut
from lhotse.utils import fix_random_seed
from model import TdnnLstm
from torch import Tensor
@@ -544,10 +554,25 @@ def run(rank, world_size, args):
if params.full_libri:
train_cuts += librispeech.train_clean_360_cuts()
train_cuts += librispeech.train_other_500_cuts()
+
+ def remove_short_and_long_utt(c: Cut):
+ # Keep only utterances with duration between 1 second and 20 seconds
+ #
+ # Caution: There is a reason to select 20.0 here. Please see
+ # ../local/display_manifest_statistics.py
+ #
+ # You should use ../local/display_manifest_statistics.py to get
+ # an utterance duration distribution for your dataset to select
+ # the threshold
+ return 1.0 <= c.duration <= 20.0
+
+ train_cuts = train_cuts.filter(remove_short_and_long_utt)
+
train_dl = librispeech.train_dataloaders(train_cuts)
valid_cuts = librispeech.dev_clean_cuts()
valid_cuts += librispeech.dev_other_cuts()
+
valid_dl = librispeech.valid_dataloaders(valid_cuts)
for epoch in range(params.start_epoch, params.num_epochs):
diff --git a/egs/librispeech/ASR/transducer/train.py b/egs/librispeech/ASR/transducer/train.py
index cbd9259e0..11c72ae4f 100755
--- a/egs/librispeech/ASR/transducer/train.py
+++ b/egs/librispeech/ASR/transducer/train.py
@@ -360,7 +360,7 @@ def compute_loss(
is_training: bool,
) -> Tuple[Tensor, MetricsTracker]:
"""
- Compute CTC loss given the model and its inputs.
+ Compute RNN-T loss given the model and its inputs.
Args:
params:
diff --git a/egs/librispeech/ASR/transducer_lstm/train.py b/egs/librispeech/ASR/transducer_lstm/train.py
index eef4d3430..17ba6143c 100755
--- a/egs/librispeech/ASR/transducer_lstm/train.py
+++ b/egs/librispeech/ASR/transducer_lstm/train.py
@@ -364,7 +364,7 @@ def compute_loss(
is_training: bool,
) -> Tuple[Tensor, MetricsTracker]:
"""
- Compute CTC loss given the model and its inputs.
+ Compute RNN-T loss given the model and its inputs.
Args:
params:
diff --git a/egs/librispeech/ASR/transducer_stateless/test_compute_ali.py b/egs/librispeech/ASR/transducer_stateless/test_compute_ali.py
index 99d5b3788..b00fc34f1 100755
--- a/egs/librispeech/ASR/transducer_stateless/test_compute_ali.py
+++ b/egs/librispeech/ASR/transducer_stateless/test_compute_ali.py
@@ -44,8 +44,8 @@ from pathlib import Path
import sentencepiece as spm
import torch
from alignment import get_word_starting_frames
-from lhotse import CutSet, load_manifest
-from lhotse.dataset import K2SpeechRecognitionDataset, SingleCutSampler
+from lhotse import CutSet, load_manifest_lazy
+from lhotse.dataset import DynamicBucketingSampler, K2SpeechRecognitionDataset
from lhotse.dataset.collation import collate_custom_field
@@ -93,14 +93,15 @@ def main():
sp = spm.SentencePieceProcessor()
sp.load(args.bpe_model)
- cuts_json = args.ali_dir / f"cuts_{args.dataset}.json.gz"
+ cuts_jsonl = args.ali_dir / f"librispeech_cuts_{args.dataset}.jsonl.gz"
- logging.info(f"Loading {cuts_json}")
- cuts = load_manifest(cuts_json)
+ logging.info(f"Loading {cuts_jsonl}")
+ cuts = load_manifest_lazy(cuts_jsonl)
- sampler = SingleCutSampler(
+ sampler = DynamicBucketingSampler(
cuts,
max_duration=30,
+ num_buckets=30,
shuffle=False,
)
diff --git a/egs/librispeech/ASR/transducer_stateless/train.py b/egs/librispeech/ASR/transducer_stateless/train.py
index cb7f08a09..837a9de2d 100755
--- a/egs/librispeech/ASR/transducer_stateless/train.py
+++ b/egs/librispeech/ASR/transducer_stateless/train.py
@@ -381,7 +381,7 @@ def compute_loss(
is_training: bool,
) -> Tuple[Tensor, MetricsTracker]:
"""
- Compute CTC loss given the model and its inputs.
+ Compute RNN-T loss given the model and its inputs.
Args:
params:
diff --git a/egs/librispeech/ASR/transducer_stateless2/train.py b/egs/librispeech/ASR/transducer_stateless2/train.py
index cb13e317c..fe075b073 100755
--- a/egs/librispeech/ASR/transducer_stateless2/train.py
+++ b/egs/librispeech/ASR/transducer_stateless2/train.py
@@ -370,7 +370,7 @@ def compute_loss(
is_training: bool,
) -> Tuple[Tensor, MetricsTracker]:
"""
- Compute CTC loss given the model and its inputs.
+ Compute RNN-T loss given the model and its inputs.
Args:
params:
diff --git a/egs/librispeech/ASR/transducer_stateless_multi_datasets/asr_datamodule.py b/egs/librispeech/ASR/transducer_stateless_multi_datasets/asr_datamodule.py
deleted file mode 100644
index c6cf739fb..000000000
--- a/egs/librispeech/ASR/transducer_stateless_multi_datasets/asr_datamodule.py
+++ /dev/null
@@ -1,333 +0,0 @@
-# Copyright 2021 Piotr Żelasko
-# 2022 Xiaomi Corp. (authors: Fangjun Kuang
-# Mingshuang Luo)
-#
-# See ../../../../LICENSE for clarification regarding multiple authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import argparse
-import inspect
-import logging
-from pathlib import Path
-from typing import Optional
-
-import torch
-from lhotse import CutSet, Fbank, FbankConfig
-from lhotse.dataset import (
- BucketingSampler,
- CutMix,
- DynamicBucketingSampler,
- K2SpeechRecognitionDataset,
- SpecAugment,
-)
-from lhotse.dataset.input_strategies import (
- OnTheFlyFeatures,
- PrecomputedFeatures,
-)
-from lhotse.utils import fix_random_seed
-from torch.utils.data import DataLoader
-
-from icefall.utils import str2bool
-
-
-class _SeedWorkers:
- def __init__(self, seed: int):
- self.seed = seed
-
- def __call__(self, worker_id: int):
- fix_random_seed(self.seed + worker_id)
-
-
-class AsrDataModule:
- def __init__(self, args: argparse.Namespace):
- self.args = args
-
- @classmethod
- def add_arguments(cls, parser: argparse.ArgumentParser):
- group = parser.add_argument_group(
- title="ASR data related options",
- description="These options are used for the preparation of "
- "PyTorch DataLoaders from Lhotse CutSet's -- they control the "
- "effective batch sizes, sampling strategies, applied data "
- "augmentations, etc.",
- )
-
- group.add_argument(
- "--max-duration",
- type=int,
- default=200.0,
- help="Maximum pooled recordings duration (seconds) in a "
- "single batch. You can reduce it if it causes CUDA OOM.",
- )
-
- group.add_argument(
- "--bucketing-sampler",
- type=str2bool,
- default=True,
- help="When enabled, the batches will come from buckets of "
- "similar duration (saves padding frames).",
- )
-
- group.add_argument(
- "--num-buckets",
- type=int,
- default=30,
- help="The number of buckets for the BucketingSampler "
- "and DynamicBucketingSampler."
- "(you might want to increase it for larger datasets).",
- )
-
- group.add_argument(
- "--shuffle",
- type=str2bool,
- default=True,
- help="When enabled (=default), the examples will be "
- "shuffled for each epoch.",
- )
-
- group.add_argument(
- "--return-cuts",
- type=str2bool,
- default=True,
- help="When enabled, each batch will have the "
- "field: batch['supervisions']['cut'] with the cuts that "
- "were used to construct it.",
- )
-
- group.add_argument(
- "--num-workers",
- type=int,
- default=2,
- help="The number of training dataloader workers that "
- "collect the batches.",
- )
-
- group.add_argument(
- "--enable-spec-aug",
- type=str2bool,
- default=True,
- help="When enabled, use SpecAugment for training dataset.",
- )
-
- group.add_argument(
- "--spec-aug-time-warp-factor",
- type=int,
- default=80,
- help="Used only when --enable-spec-aug is True. "
- "It specifies the factor for time warping in SpecAugment. "
- "Larger values mean more warping. "
- "A value less than 1 means to disable time warp.",
- )
-
- group.add_argument(
- "--enable-musan",
- type=str2bool,
- default=True,
- help="When enabled, select noise from MUSAN and mix it"
- "with training dataset. ",
- )
-
- group.add_argument(
- "--manifest-dir",
- type=Path,
- default=Path("data/fbank"),
- help="Path to directory with train/valid/test cuts.",
- )
-
- group.add_argument(
- "--on-the-fly-feats",
- type=str2bool,
- default=False,
- help="When enabled, use on-the-fly cut mixing and feature "
- "extraction. Will drop existing precomputed feature manifests "
- "if available. Used only in dev/test CutSet",
- )
-
- def train_dataloaders(
- self,
- cuts_train: CutSet,
- dynamic_bucketing: bool,
- on_the_fly_feats: bool,
- cuts_musan: Optional[CutSet] = None,
- ) -> DataLoader:
- """
- Args:
- cuts_train:
- Cuts for training.
- cuts_musan:
- If not None, it is the cuts for mixing.
- dynamic_bucketing:
- True to use DynamicBucketingSampler;
- False to use BucketingSampler.
- on_the_fly_feats:
- True to use OnTheFlyFeatures;
- False to use PrecomputedFeatures.
- """
- transforms = []
- if cuts_musan is not None:
- logging.info("Enable MUSAN")
- transforms.append(
- CutMix(
- cuts=cuts_musan, prob=0.5, snr=(10, 20), preserve_id=True
- )
- )
- else:
- logging.info("Disable MUSAN")
-
- input_transforms = []
-
- if self.args.enable_spec_aug:
- logging.info("Enable SpecAugment")
- logging.info(
- f"Time warp factor: {self.args.spec_aug_time_warp_factor}"
- )
- # Set the value of num_frame_masks according to Lhotse's version.
- # In different Lhotse's versions, the default of num_frame_masks is
- # different.
- num_frame_masks = 10
- num_frame_masks_parameter = inspect.signature(
- SpecAugment.__init__
- ).parameters["num_frame_masks"]
- if num_frame_masks_parameter.default == 1:
- num_frame_masks = 2
- logging.info(f"Num frame mask: {num_frame_masks}")
- input_transforms.append(
- SpecAugment(
- time_warp_factor=self.args.spec_aug_time_warp_factor,
- num_frame_masks=num_frame_masks,
- features_mask_size=27,
- num_feature_masks=2,
- frames_mask_size=100,
- )
- )
- else:
- logging.info("Disable SpecAugment")
-
- logging.info("About to create train dataset")
- train = K2SpeechRecognitionDataset(
- cut_transforms=transforms,
- input_transforms=input_transforms,
- return_cuts=self.args.return_cuts,
- )
-
- # NOTE: the PerturbSpeed transform should be added only if we
- # remove it from data prep stage.
- # Add on-the-fly speed perturbation; since originally it would
- # have increased epoch size by 3, we will apply prob 2/3 and use
- # 3x more epochs.
- # Speed perturbation probably should come first before
- # concatenation, but in principle the transforms order doesn't have
- # to be strict (e.g. could be randomized)
- # transforms = [PerturbSpeed(factors=[0.9, 1.1], p=2/3)] + transforms # noqa
- # Drop feats to be on the safe side.
- train = K2SpeechRecognitionDataset(
- cut_transforms=transforms,
- input_strategy=(
- OnTheFlyFeatures(Fbank(FbankConfig(num_mel_bins=80)))
- if on_the_fly_feats
- else PrecomputedFeatures()
- ),
- input_transforms=input_transforms,
- return_cuts=self.args.return_cuts,
- )
-
- if dynamic_bucketing:
- logging.info("Using DynamicBucketingSampler.")
- train_sampler = DynamicBucketingSampler(
- cuts_train,
- max_duration=self.args.max_duration,
- shuffle=self.args.shuffle,
- num_buckets=self.args.num_buckets,
- drop_last=True,
- )
- else:
- logging.info("Using BucketingSampler.")
- train_sampler = BucketingSampler(
- cuts_train,
- max_duration=self.args.max_duration,
- shuffle=self.args.shuffle,
- num_buckets=self.args.num_buckets,
- bucket_method="equal_duration",
- drop_last=True,
- )
-
- logging.info("About to create train dataloader")
-
- # 'seed' is derived from the current random state, which will have
- # previously been set in the main process.
- seed = torch.randint(0, 100000, ()).item()
- worker_init_fn = _SeedWorkers(seed)
-
- train_dl = DataLoader(
- train,
- sampler=train_sampler,
- batch_size=None,
- num_workers=self.args.num_workers,
- persistent_workers=False,
- worker_init_fn=worker_init_fn,
- )
- return train_dl
-
- def valid_dataloaders(self, cuts_valid: CutSet) -> DataLoader:
- transforms = []
-
- logging.info("About to create dev dataset")
- if self.args.on_the_fly_feats:
- validate = K2SpeechRecognitionDataset(
- cut_transforms=transforms,
- input_strategy=OnTheFlyFeatures(
- Fbank(FbankConfig(num_mel_bins=80))
- ),
- return_cuts=self.args.return_cuts,
- )
- else:
- validate = K2SpeechRecognitionDataset(
- cut_transforms=transforms,
- return_cuts=self.args.return_cuts,
- )
- valid_sampler = BucketingSampler(
- cuts_valid,
- max_duration=self.args.max_duration,
- shuffle=False,
- )
- logging.info("About to create dev dataloader")
- valid_dl = DataLoader(
- validate,
- sampler=valid_sampler,
- batch_size=None,
- num_workers=2,
- persistent_workers=False,
- )
-
- return valid_dl
-
- def test_dataloaders(self, cuts: CutSet) -> DataLoader:
- logging.debug("About to create test dataset")
- test = K2SpeechRecognitionDataset(
- input_strategy=OnTheFlyFeatures(Fbank(FbankConfig(num_mel_bins=80)))
- if self.args.on_the_fly_feats
- else PrecomputedFeatures(),
- return_cuts=self.args.return_cuts,
- )
- sampler = BucketingSampler(
- cuts, max_duration=self.args.max_duration, shuffle=False
- )
- logging.debug("About to create test dataloader")
- test_dl = DataLoader(
- test,
- batch_size=None,
- sampler=sampler,
- num_workers=self.args.num_workers,
- )
- return test_dl
diff --git a/egs/librispeech/ASR/transducer_stateless_multi_datasets/asr_datamodule.py b/egs/librispeech/ASR/transducer_stateless_multi_datasets/asr_datamodule.py
new file mode 120000
index 000000000..3ba9ada4f
--- /dev/null
+++ b/egs/librispeech/ASR/transducer_stateless_multi_datasets/asr_datamodule.py
@@ -0,0 +1 @@
+../pruned_transducer_stateless3/asr_datamodule.py
\ No newline at end of file
diff --git a/egs/librispeech/ASR/transducer_stateless_multi_datasets/gigaspeech.py b/egs/librispeech/ASR/transducer_stateless_multi_datasets/gigaspeech.py
deleted file mode 100644
index 286771d7d..000000000
--- a/egs/librispeech/ASR/transducer_stateless_multi_datasets/gigaspeech.py
+++ /dev/null
@@ -1,75 +0,0 @@
-# Copyright 2021 Piotr Żelasko
-# 2022 Xiaomi Corp. (authors: Fangjun Kuang)
-#
-# See ../../../../LICENSE for clarification regarding multiple authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import logging
-from pathlib import Path
-
-from lhotse import CutSet, load_manifest
-
-
-class GigaSpeech:
- def __init__(self, manifest_dir: str):
- """
- Args:
- manifest_dir:
- It is expected to contain the following files::
-
- - cuts_XL_raw.jsonl.gz
- - cuts_L_raw.jsonl.gz
- - cuts_M_raw.jsonl.gz
- - cuts_S_raw.jsonl.gz
- - cuts_XS_raw.jsonl.gz
- - cuts_DEV_raw.jsonl.gz
- - cuts_TEST_raw.jsonl.gz
- """
- self.manifest_dir = Path(manifest_dir)
-
- def train_XL_cuts(self) -> CutSet:
- f = self.manifest_dir / "cuts_XL_raw.jsonl.gz"
- logging.info(f"About to get train-XL cuts from {f}")
- return CutSet.from_jsonl_lazy(f)
-
- def train_L_cuts(self) -> CutSet:
- f = self.manifest_dir / "cuts_L_raw.jsonl.gz"
- logging.info(f"About to get train-L cuts from {f}")
- return CutSet.from_jsonl_lazy(f)
-
- def train_M_cuts(self) -> CutSet:
- f = self.manifest_dir / "cuts_M_raw.jsonl.gz"
- logging.info(f"About to get train-M cuts from {f}")
- return CutSet.from_jsonl_lazy(f)
-
- def train_S_cuts(self) -> CutSet:
- f = self.manifest_dir / "cuts_S_raw.jsonl.gz"
- logging.info(f"About to get train-S cuts from {f}")
- return CutSet.from_jsonl_lazy(f)
-
- def train_XS_cuts(self) -> CutSet:
- f = self.manifest_dir / "cuts_XS_raw.jsonl.gz"
- logging.info(f"About to get train-XS cuts from {f}")
- return CutSet.from_jsonl_lazy(f)
-
- def test_cuts(self) -> CutSet:
- f = self.manifest_dir / "cuts_TEST.jsonl.gz"
- logging.info(f"About to get TEST cuts from {f}")
- return load_manifest(f)
-
- def dev_cuts(self) -> CutSet:
- f = self.manifest_dir / "cuts_DEV.jsonl.gz"
- logging.info(f"About to get DEV cuts from {f}")
- return load_manifest(f)
diff --git a/egs/librispeech/ASR/transducer_stateless_multi_datasets/gigaspeech.py b/egs/librispeech/ASR/transducer_stateless_multi_datasets/gigaspeech.py
new file mode 120000
index 000000000..5242c652a
--- /dev/null
+++ b/egs/librispeech/ASR/transducer_stateless_multi_datasets/gigaspeech.py
@@ -0,0 +1 @@
+../pruned_transducer_stateless3/gigaspeech.py
\ No newline at end of file
diff --git a/egs/librispeech/ASR/transducer_stateless_multi_datasets/librispeech.py b/egs/librispeech/ASR/transducer_stateless_multi_datasets/librispeech.py
deleted file mode 100644
index 00b7c8334..000000000
--- a/egs/librispeech/ASR/transducer_stateless_multi_datasets/librispeech.py
+++ /dev/null
@@ -1,74 +0,0 @@
-# Copyright 2021 Piotr Żelasko
-# 2022 Xiaomi Corp. (authors: Fangjun Kuang)
-#
-# See ../../../../LICENSE for clarification regarding multiple authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-from pathlib import Path
-
-from lhotse import CutSet, load_manifest
-
-
-class LibriSpeech:
- def __init__(self, manifest_dir: str):
- """
- Args:
- manifest_dir:
- It is expected to contain the following files::
-
- - cuts_dev-clean.json.gz
- - cuts_dev-other.json.gz
- - cuts_test-clean.json.gz
- - cuts_test-other.json.gz
- - cuts_train-clean-100.json.gz
- - cuts_train-clean-360.json.gz
- - cuts_train-other-500.json.gz
- """
- self.manifest_dir = Path(manifest_dir)
-
- def train_clean_100_cuts(self) -> CutSet:
- f = self.manifest_dir / "cuts_train-clean-100.json.gz"
- logging.info(f"About to get train-clean-100 cuts from {f}")
- return load_manifest(f)
-
- def train_clean_360_cuts(self) -> CutSet:
- f = self.manifest_dir / "cuts_train-clean-360.json.gz"
- logging.info(f"About to get train-clean-360 cuts from {f}")
- return load_manifest(f)
-
- def train_other_500_cuts(self) -> CutSet:
- f = self.manifest_dir / "cuts_train-other-500.json.gz"
- logging.info(f"About to get train-other-500 cuts from {f}")
- return load_manifest(f)
-
- def test_clean_cuts(self) -> CutSet:
- f = self.manifest_dir / "cuts_test-clean.json.gz"
- logging.info(f"About to get test-clean cuts from {f}")
- return load_manifest(f)
-
- def test_other_cuts(self) -> CutSet:
- f = self.manifest_dir / "cuts_test-other.json.gz"
- logging.info(f"About to get test-other cuts from {f}")
- return load_manifest(f)
-
- def dev_clean_cuts(self) -> CutSet:
- f = self.manifest_dir / "cuts_dev-clean.json.gz"
- logging.info(f"About to get dev-clean cuts from {f}")
- return load_manifest(f)
-
- def dev_other_cuts(self) -> CutSet:
- f = self.manifest_dir / "cuts_dev-other.json.gz"
- logging.info(f"About to get dev-other cuts from {f}")
- return load_manifest(f)
diff --git a/egs/librispeech/ASR/transducer_stateless_multi_datasets/librispeech.py b/egs/librispeech/ASR/transducer_stateless_multi_datasets/librispeech.py
new file mode 120000
index 000000000..b76723bf5
--- /dev/null
+++ b/egs/librispeech/ASR/transducer_stateless_multi_datasets/librispeech.py
@@ -0,0 +1 @@
+../pruned_transducer_stateless3/librispeech.py
\ No newline at end of file
diff --git a/egs/librispeech/ASR/transducer_stateless_multi_datasets/test_asr_datamodule.py b/egs/librispeech/ASR/transducer_stateless_multi_datasets/test_asr_datamodule.py
index e1833b841..ef51a7811 100755
--- a/egs/librispeech/ASR/transducer_stateless_multi_datasets/test_asr_datamodule.py
+++ b/egs/librispeech/ASR/transducer_stateless_multi_datasets/test_asr_datamodule.py
@@ -42,7 +42,7 @@ def test_dataset():
if args.enable_musan:
cuts_musan = load_manifest(
- Path(args.manifest_dir) / "cuts_musan.json.gz"
+ Path(args.manifest_dir) / "musan_cuts.jsonl.gz"
)
else:
cuts_musan = None
@@ -57,14 +57,12 @@ def test_dataset():
libri_train_dl = asr_datamodule.train_dataloaders(
train_clean_100,
- dynamic_bucketing=False,
on_the_fly_feats=False,
cuts_musan=cuts_musan,
)
giga_train_dl = asr_datamodule.train_dataloaders(
train_S,
- dynamic_bucketing=True,
on_the_fly_feats=True,
cuts_musan=cuts_musan,
)
diff --git a/egs/librispeech/ASR/transducer_stateless_multi_datasets/train.py b/egs/librispeech/ASR/transducer_stateless_multi_datasets/train.py
index 5572d3f4c..32ce1032c 100755
--- a/egs/librispeech/ASR/transducer_stateless_multi_datasets/train.py
+++ b/egs/librispeech/ASR/transducer_stateless_multi_datasets/train.py
@@ -425,7 +425,7 @@ def compute_loss(
is_training: bool,
) -> Tuple[Tensor, MetricsTracker]:
"""
- Compute CTC loss given the model and its inputs.
+ Compute RNN-T loss given the model and its inputs.
Args:
params:
@@ -662,19 +662,17 @@ def train_one_epoch(
def filter_short_and_long_utterances(cuts: CutSet) -> CutSet:
def remove_short_and_long_utt(c: Cut):
# Keep only utterances with duration between 1 second and 20 seconds
+ #
+ # Caution: There is a reason to select 20.0 here. Please see
+ # ../local/display_manifest_statistics.py
+ #
+ # You should use ../local/display_manifest_statistics.py to get
+ # an utterance duration distribution for your dataset to select
+ # the threshold
return 1.0 <= c.duration <= 20.0
- num_in_total = len(cuts)
cuts = cuts.filter(remove_short_and_long_utt)
- num_left = len(cuts)
- num_removed = num_in_total - num_left
- removed_percent = num_removed / num_in_total * 100
-
- logging.info(f"Before removing short and long utterances: {num_in_total}")
- logging.info(f"After removing short and long utterances: {num_left}")
- logging.info(f"Removed {num_removed} utterances ({removed_percent:.5f}%)")
-
return cuts
@@ -767,17 +765,18 @@ def run(rank, world_size, args):
# DEV 12 hours
# Test 40 hours
if params.full_libri:
- logging.info("Using the L subset of GigaSpeech (2.5k hours)")
- train_giga_cuts = gigaspeech.train_L_cuts()
+ logging.info("Using the XL subset of GigaSpeech (10k hours)")
+ train_giga_cuts = gigaspeech.train_XL_cuts()
else:
logging.info("Using the S subset of GigaSpeech (250 hours)")
train_giga_cuts = gigaspeech.train_S_cuts()
train_giga_cuts = filter_short_and_long_utterances(train_giga_cuts)
+ train_giga_cuts = train_giga_cuts.repeat(times=None)
if args.enable_musan:
cuts_musan = load_manifest(
- Path(args.manifest_dir) / "cuts_musan.json.gz"
+ Path(args.manifest_dir) / "musan_cuts.jsonl.gz"
)
else:
cuts_musan = None
@@ -786,14 +785,12 @@ def run(rank, world_size, args):
train_dl = asr_datamodule.train_dataloaders(
train_cuts,
- dynamic_bucketing=False,
on_the_fly_feats=False,
cuts_musan=cuts_musan,
)
giga_train_dl = asr_datamodule.train_dataloaders(
train_giga_cuts,
- dynamic_bucketing=True,
on_the_fly_feats=True,
cuts_musan=cuts_musan,
)
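Note the new train_giga_cuts.repeat(times=None) call above: with times=None, lhotse repeats the lazy GigaSpeech CutSet indefinitely, so the auxiliary dataloader never runs dry while it is interleaved with the LibriSpeech one. A minimal sketch, assuming a JSONL cuts manifest at a hypothetical path:

from itertools import islice

from lhotse import load_manifest_lazy

# Hypothetical manifest path; any lazily loaded CutSet behaves the same way.
cuts = load_manifest_lazy("data/fbank/gigaspeech_cuts_S.jsonl.gz")
endless = cuts.repeat(times=None)  # cycles through the manifest forever

# Take a finite slice for demonstration; iterating `endless` directly
# would never terminate.
for cut in islice(endless, 5):
    print(cut.id, cut.duration)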
diff --git a/egs/tedlium3/ASR/local/compute_fbank_tedlium.py b/egs/tedlium3/ASR/local/compute_fbank_tedlium.py
index 14200f34f..e324b5025 100755
--- a/egs/tedlium3/ASR/local/compute_fbank_tedlium.py
+++ b/egs/tedlium3/ASR/local/compute_fbank_tedlium.py
@@ -27,7 +27,7 @@ import os
from pathlib import Path
import torch
-from lhotse import ChunkedLilcomHdf5Writer, CutSet, Fbank, FbankConfig
+from lhotse import CutSet, Fbank, FbankConfig, LilcomChunkyWriter
from lhotse.recipes.utils import read_manifests_if_cached
from icefall.utils import get_executor
@@ -52,8 +52,13 @@ def compute_fbank_tedlium():
"test",
)
+ prefix = "tedlium"
+ suffix = "jsonl.gz"
manifests = read_manifests_if_cached(
- prefix="tedlium", dataset_parts=dataset_parts, output_dir=src_dir
+ dataset_parts=dataset_parts,
+ output_dir=src_dir,
+ prefix=prefix,
+ suffix=suffix,
)
assert manifests is not None
@@ -61,7 +66,7 @@ def compute_fbank_tedlium():
with get_executor() as ex: # Initialize the executor only once.
for partition, m in manifests.items():
- if (output_dir / f"cuts_{partition}.json.gz").is_file():
+ if (output_dir / f"{prefix}_cuts_{partition}.{suffix}").is_file():
logging.info(f"{partition} already exists - skipping.")
continue
logging.info(f"Processing {partition}")
@@ -80,15 +85,15 @@ def compute_fbank_tedlium():
cut_set = cut_set.compute_and_store_features(
extractor=extractor,
- storage_path=f"{output_dir}/feats_{partition}",
+ storage_path=f"{output_dir}/{prefix}_feats_{partition}",
# when an executor is specified, make more partitions
num_jobs=cur_num_jobs,
executor=ex,
- storage_type=ChunkedLilcomHdf5Writer,
+ storage_type=LilcomChunkyWriter,
)
# Split long cuts into many short and un-overlapping cuts
cut_set = cut_set.trim_to_supervisions(keep_overlapping=False)
- cut_set.to_json(output_dir / f"cuts_{partition}.json.gz")
+ cut_set.to_file(output_dir / f"{prefix}_cuts_{partition}.{suffix}")
if __name__ == "__main__":
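LilcomChunkyWriter replaces the deprecated ChunkedLilcomHdf5Writer across this diff: it stores lilcom-compressed features in lhotse's chunky archive format without an HDF5 dependency. A minimal sketch of the storage call, assuming a cuts manifest whose recordings are readable and hypothetical paths modeled on the script above:

from lhotse import CutSet, Fbank, FbankConfig, LilcomChunkyWriter

# Hypothetical input; `cuts` must reference audio that can be read locally.
cuts = CutSet.from_file("data/manifests/tedlium_cuts_dev.jsonl.gz")
cuts = cuts.compute_and_store_features(
    extractor=Fbank(FbankConfig(num_mel_bins=80)),
    storage_path="data/fbank/tedlium_feats_dev",
    num_jobs=4,
    storage_type=LilcomChunkyWriter,  # chunky archive, no HDF5 needed
)
cuts.to_file("data/fbank/tedlium_cuts_dev.jsonl.gz")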
diff --git a/egs/tedlium3/ASR/local/display_manifest_statistics.py b/egs/tedlium3/ASR/local/display_manifest_statistics.py
index 972d03b12..52e152389 100755
--- a/egs/tedlium3/ASR/local/display_manifest_statistics.py
+++ b/egs/tedlium3/ASR/local/display_manifest_statistics.py
@@ -27,15 +27,15 @@ for usage.
"""
-from lhotse import load_manifest
+from lhotse import load_manifest_lazy
def main():
- path = "./data/fbank/cuts_train.json.gz"
- path = "./data/fbank/cuts_dev.json.gz"
- path = "./data/fbank/cuts_test.json.gz"
+ path = "./data/fbank/tedlium_cuts_train.jsonl.gz"
+ path = "./data/fbank/tedlium_cuts_dev.jsonl.gz"
+ path = "./data/fbank/tedlium_cuts_test.jsonl.gz"
- cuts = load_manifest(path)
+ cuts = load_manifest_lazy(path)
cuts.describe()
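load_manifest_lazy opens a JSONL manifest as a stream instead of materializing every cut in memory, which matters once manifests grow to hundreds of hours; describe() then computes count and duration statistics in a single pass. A minimal sketch, assuming the renamed TED-LIUM manifests exist:

from lhotse import load_manifest_lazy

# Paths follow the new <prefix>_cuts_<partition>.jsonl.gz convention.
for path in (
    "./data/fbank/tedlium_cuts_train.jsonl.gz",
    "./data/fbank/tedlium_cuts_dev.jsonl.gz",
    "./data/fbank/tedlium_cuts_test.jsonl.gz",
):
    cuts = load_manifest_lazy(path)  # streams cuts; near-constant memory
    cuts.describe()  # prints the duration statistics for this split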
diff --git a/egs/tedlium3/ASR/transducer_stateless/asr_datamodule.py b/egs/tedlium3/ASR/transducer_stateless/asr_datamodule.py
index a6b986a94..51de46ae8 100644
--- a/egs/tedlium3/ASR/transducer_stateless/asr_datamodule.py
+++ b/egs/tedlium3/ASR/transducer_stateless/asr_datamodule.py
@@ -22,11 +22,11 @@ import logging
from functools import lru_cache
from pathlib import Path
-from lhotse import CutSet, Fbank, FbankConfig, load_manifest
+from lhotse import CutSet, Fbank, FbankConfig, load_manifest, load_manifest_lazy
from lhotse.dataset import (
- BucketingSampler,
CutConcatenate,
CutMix,
+ DynamicBucketingSampler,
K2SpeechRecognitionDataset,
PrecomputedFeatures,
SingleCutSampler,
@@ -92,7 +92,7 @@ class TedLiumAsrDataModule:
"--num-buckets",
type=int,
default=30,
- help="The number of buckets for the BucketingSampler"
+ help="The number of buckets for the DynamicBucketingSampler "
"(you might want to increase it for larger datasets).",
)
group.add_argument(
@@ -180,7 +180,7 @@ class TedLiumAsrDataModule:
if self.args.enable_musan:
logging.info("Enable MUSAN")
cuts_musan = load_manifest(
- self.args.manifest_dir / "cuts_musan.json.gz"
+ self.args.manifest_dir / "musan_cuts.jsonl.gz"
)
transforms.append(
CutMix(
@@ -261,13 +261,12 @@ class TedLiumAsrDataModule:
)
if self.args.bucketing_sampler:
- logging.info("Using BucketingSampler.")
- train_sampler = BucketingSampler(
+ logging.info("Using DynamicBucketingSampler.")
+ train_sampler = DynamicBucketingSampler(
cuts_train,
max_duration=self.args.max_duration,
shuffle=self.args.shuffle,
num_buckets=self.args.num_buckets,
- bucket_method="equal_duration",
drop_last=True,
)
else:
@@ -311,7 +310,7 @@ class TedLiumAsrDataModule:
cut_transforms=transforms,
return_cuts=self.args.return_cuts,
)
- valid_sampler = BucketingSampler(
+ valid_sampler = DynamicBucketingSampler(
cuts_valid,
max_duration=self.args.max_duration,
shuffle=False,
@@ -335,8 +334,10 @@ class TedLiumAsrDataModule:
else PrecomputedFeatures(),
return_cuts=self.args.return_cuts,
)
- sampler = BucketingSampler(
- cuts, max_duration=self.args.max_duration, shuffle=False
+ sampler = DynamicBucketingSampler(
+ cuts,
+ max_duration=self.args.max_duration,
+ shuffle=False,
)
logging.debug("About to create test dataloader")
test_dl = DataLoader(
@@ -350,14 +351,20 @@ class TedLiumAsrDataModule:
@lru_cache()
def train_cuts(self) -> CutSet:
logging.info("About to get train cuts")
- return load_manifest(self.args.manifest_dir / "cuts_train.json.gz")
+ return load_manifest_lazy(
+ self.args.manifest_dir / "tedlium_cuts_train.jsonl.gz"
+ )
@lru_cache()
def dev_cuts(self) -> CutSet:
logging.info("About to get dev cuts")
- return load_manifest(self.args.manifest_dir / "cuts_dev.json.gz")
+ return load_manifest_lazy(
+ self.args.manifest_dir / "tedlium_cuts_dev.jsonl.gz"
+ )
@lru_cache()
def test_cuts(self) -> CutSet:
logging.info("About to get test cuts")
- return load_manifest(self.args.manifest_dir / "cuts_test.json.gz")
+ return load_manifest_lazy(
+ self.args.manifest_dir / "tedlium_cuts_test.jsonl.gz"
+ )
diff --git a/egs/timit/ASR/local/compute_fbank_timit.py b/egs/timit/ASR/local/compute_fbank_timit.py
index 8e3cbac4e..094769c8c 100644
--- a/egs/timit/ASR/local/compute_fbank_timit.py
+++ b/egs/timit/ASR/local/compute_fbank_timit.py
@@ -29,7 +29,7 @@ import os
from pathlib import Path
import torch
-from lhotse import CutSet, Fbank, FbankConfig, LilcomHdf5Writer
+from lhotse import CutSet, Fbank, FbankConfig, LilcomChunkyWriter
from lhotse.recipes.utils import read_manifests_if_cached
from icefall.utils import get_executor
@@ -53,8 +53,13 @@ def compute_fbank_timit():
"DEV",
"TEST",
)
+ prefix = "timit"
+ suffix = "jsonl.gz"
manifests = read_manifests_if_cached(
- prefix="timit", dataset_parts=dataset_parts, output_dir=src_dir
+ dataset_parts=dataset_parts,
+ output_dir=src_dir,
+ prefix=prefix,
+ suffix=suffix,
)
assert manifests is not None
@@ -62,7 +67,8 @@ def compute_fbank_timit():
with get_executor() as ex: # Initialize the executor only once.
for partition, m in manifests.items():
- if (output_dir / f"cuts_{partition}.json.gz").is_file():
+ cuts_file = output_dir / f"{prefix}_cuts_{partition}.{suffix}"
+ if cuts_file.is_file():
logging.info(f"{partition} already exists - skipping.")
continue
logging.info(f"Processing {partition}")
@@ -78,13 +84,13 @@ def compute_fbank_timit():
)
cut_set = cut_set.compute_and_store_features(
extractor=extractor,
- storage_path=f"{output_dir}/feats_{partition}",
+ storage_path=f"{output_dir}/{prefix}_feats_{partition}",
# when an executor is specified, make more partitions
num_jobs=num_jobs if ex is None else 80,
executor=ex,
- storage_type=LilcomHdf5Writer,
+ storage_type=LilcomChunkyWriter,
)
- cut_set.to_json(output_dir / f"cuts_{partition}.json.gz")
+ cut_set.to_file(cuts_file)
if __name__ == "__main__":
diff --git a/egs/timit/ASR/tdnn_lstm_ctc/asr_datamodule.py b/egs/timit/ASR/tdnn_lstm_ctc/asr_datamodule.py
index a7029f514..5e2923fb6 100644
--- a/egs/timit/ASR/tdnn_lstm_ctc/asr_datamodule.py
+++ b/egs/timit/ASR/tdnn_lstm_ctc/asr_datamodule.py
@@ -23,11 +23,11 @@ from functools import lru_cache
from pathlib import Path
from typing import List, Union
-from lhotse import CutSet, Fbank, FbankConfig, load_manifest
+from lhotse import CutSet, Fbank, FbankConfig, load_manifest, load_manifest_lazy
from lhotse.dataset import (
- BucketingSampler,
CutConcatenate,
CutMix,
+ DynamicBucketingSampler,
K2SpeechRecognitionDataset,
PrecomputedFeatures,
SingleCutSampler,
@@ -92,7 +92,7 @@ class TimitAsrDataModule(DataModule):
"--num-buckets",
type=int,
default=30,
- help="The number of buckets for the BucketingSampler"
+ help="The number of buckets for the DynamicBucketingSampler "
"(you might want to increase it for larger datasets).",
)
group.add_argument(
@@ -154,7 +154,9 @@ class TimitAsrDataModule(DataModule):
cuts_train = self.train_cuts()
logging.info("About to get Musan cuts")
- cuts_musan = load_manifest(self.args.feature_dir / "cuts_musan.json.gz")
+ cuts_musan = load_manifest(
+ self.args.feature_dir / "musan_cuts.jsonl.gz"
+ )
logging.info("About to create train dataset")
transforms = [CutMix(cuts=cuts_musan, prob=0.5, snr=(10, 20))]
@@ -218,13 +220,12 @@ class TimitAsrDataModule(DataModule):
)
if self.args.bucketing_sampler:
- logging.info("Using BucketingSampler.")
- train_sampler = BucketingSampler(
+ logging.info("Using DynamicBucketingSampler.")
+ train_sampler = DynamicBucketingSampler(
cuts_train,
max_duration=self.args.max_duration,
shuffle=self.args.shuffle,
num_buckets=self.args.num_buckets,
- bucket_method="equal_duration",
drop_last=True,
)
else:
@@ -322,20 +323,26 @@ class TimitAsrDataModule(DataModule):
@lru_cache()
def train_cuts(self) -> CutSet:
logging.info("About to get train cuts")
- cuts_train = load_manifest(self.args.feature_dir / "cuts_TRAIN.json.gz")
+ cuts_train = load_manifest_lazy(
+ self.args.feature_dir / "timit_cuts_TRAIN.jsonl.gz"
+ )
return cuts_train
@lru_cache()
def valid_cuts(self) -> CutSet:
logging.info("About to get dev cuts")
- cuts_valid = load_manifest(self.args.feature_dir / "cuts_DEV.json.gz")
+ cuts_valid = load_manifest_lazy(
+ self.args.feature_dir / "timit_cuts_DEV.jsonl.gz"
+ )
return cuts_valid
@lru_cache()
def test_cuts(self) -> CutSet:
logging.debug("About to get test cuts")
- cuts_test = load_manifest(self.args.feature_dir / "cuts_TEST.json.gz")
+ cuts_test = load_manifest_lazy(
+ self.args.feature_dir / "timit_cuts_TEST.jsonl.gz"
+ )
return cuts_test
diff --git a/egs/wenetspeech/ASR/local/compute_fbank_wenetspeech_splits.py b/egs/wenetspeech/ASR/local/compute_fbank_wenetspeech_splits.py
index a828bead9..4622bdb55 100755
--- a/egs/wenetspeech/ASR/local/compute_fbank_wenetspeech_splits.py
+++ b/egs/wenetspeech/ASR/local/compute_fbank_wenetspeech_splits.py
@@ -23,10 +23,10 @@ from pathlib import Path
import torch
from lhotse import (
- ChunkedLilcomHdf5Writer,
CutSet,
KaldifeatFbank,
KaldifeatFbankConfig,
+ LilcomChunkyWriter,
set_audio_duration_mismatch_tolerance,
set_caching_enabled,
)
@@ -135,7 +135,7 @@ def compute_fbank_wenetspeech_splits(args):
storage_path=f"{output_dir}/feats_{subset}_{idx}",
num_workers=args.num_workers,
batch_duration=args.batch_duration,
- storage_type=ChunkedLilcomHdf5Writer,
+ storage_type=LilcomChunkyWriter,
)
logging.info("About to split cuts into smaller chunks.")
diff --git a/egs/wenetspeech/ASR/local/display_manifest_statistics.py b/egs/wenetspeech/ASR/local/display_manifest_statistics.py
index 30dc5a5ec..c41445b8d 100644
--- a/egs/wenetspeech/ASR/local/display_manifest_statistics.py
+++ b/egs/wenetspeech/ASR/local/display_manifest_statistics.py
@@ -26,7 +26,7 @@ for usage.
"""
-from lhotse import load_manifest
+from lhotse import load_manifest_lazy
def main():
@@ -40,7 +40,7 @@ def main():
for path in paths:
print(f"Starting display the statistics for {path}")
- cuts = load_manifest(path)
+ cuts = load_manifest_lazy(path)
cuts.describe()
diff --git a/egs/wenetspeech/ASR/local/text2segments.py b/egs/wenetspeech/ASR/local/text2segments.py
index acf6f9698..3df727c67 100644
--- a/egs/wenetspeech/ASR/local/text2segments.py
+++ b/egs/wenetspeech/ASR/local/text2segments.py
@@ -61,8 +61,8 @@ def main():
parser = get_parser()
args = parser.parse_args()
- input_file = args.input
- output_file = args.output
+ input_file = args.input_file
+ output_file = args.output_file
f = open(input_file, "r", encoding="utf-8")
lines = f.readlines()
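The text2segments.py fix above is an argparse detail worth spelling out: a flag named --input-file is stored under the attribute input_file (dashes become underscores), so args.input only works if the option is literally --input. A minimal sketch, with a hypothetical parser matching the flags implied above:

import argparse

parser = argparse.ArgumentParser()
# Hypothetical flags mirroring text2segments.py; argparse converts the
# dashes in "--input-file" to underscores in the attribute name.
parser.add_argument("--input-file", type=str, required=True)
parser.add_argument("--output-file", type=str, required=True)

args = parser.parse_args(["--input-file", "text", "--output-file", "segments"])
assert args.input_file == "text"  # correct attribute name
# args.input would raise AttributeError: no such dest was defined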
diff --git a/egs/wenetspeech/ASR/pruned_transducer_stateless2/asr_datamodule.py b/egs/wenetspeech/ASR/pruned_transducer_stateless2/asr_datamodule.py
index d2f8d85ce..200a694d6 100644
--- a/egs/wenetspeech/ASR/pruned_transducer_stateless2/asr_datamodule.py
+++ b/egs/wenetspeech/ASR/pruned_transducer_stateless2/asr_datamodule.py
@@ -28,6 +28,7 @@ from lhotse import (
Fbank,
FbankConfig,
load_manifest,
+ load_manifest_lazy,
set_caching_enabled,
)
from lhotse.dataset import (
@@ -219,7 +220,7 @@ class WenetSpeechAsrDataModule:
"""
logging.info("About to get Musan cuts")
cuts_musan = load_manifest(
- self.args.manifest_dir / "cuts_musan.json.gz"
+ self.args.manifest_dir / "musan_cuts.jsonl.gz"
)
transforms = []
@@ -435,16 +436,18 @@ class WenetSpeechAsrDataModule:
@lru_cache()
def valid_cuts(self) -> CutSet:
logging.info("About to get dev cuts")
- return load_manifest(self.args.manifest_dir / "cuts_DEV.jsonl.gz")
+ return load_manifest_lazy(self.args.manifest_dir / "cuts_DEV.jsonl.gz")
@lru_cache()
def test_net_cuts(self) -> List[CutSet]:
logging.info("About to get TEST_NET cuts")
- return load_manifest(self.args.manifest_dir / "cuts_TEST_NET.jsonl.gz")
+ return load_manifest_lazy(
+ self.args.manifest_dir / "cuts_TEST_NET.jsonl.gz"
+ )
@lru_cache()
def test_meeting_cuts(self) -> List[CutSet]:
logging.info("About to get TEST_MEETING cuts")
- return load_manifest(
+ return load_manifest_lazy(
self.args.manifest_dir / "cuts_TEST_MEETING.jsonl.gz"
)
diff --git a/egs/wenetspeech/ASR/pruned_transducer_stateless2/decode.py b/egs/wenetspeech/ASR/pruned_transducer_stateless2/decode.py
index be3d01f6a..41e7a0f44 100755
--- a/egs/wenetspeech/ASR/pruned_transducer_stateless2/decode.py
+++ b/egs/wenetspeech/ASR/pruned_transducer_stateless2/decode.py
@@ -63,7 +63,7 @@ import torch.nn as nn
from asr_datamodule import WenetSpeechAsrDataModule
from beam_search import (
beam_search,
- fast_beam_search,
+ fast_beam_search_one_best,
greedy_search,
greedy_search_batch,
modified_beam_search,
@@ -256,7 +256,7 @@ def decode_one_batch(
hyps = []
if params.decoding_method == "fast_beam_search":
- hyp_tokens = fast_beam_search(
+ hyp_tokens = fast_beam_search_one_best(
model=model,
decoding_graph=decoding_graph,
encoder_out=encoder_out,
@@ -274,6 +274,7 @@ def decode_one_batch(
hyp_tokens = greedy_search_batch(
model=model,
encoder_out=encoder_out,
+ encoder_out_lens=encoder_out_lens,
)
for i in range(encoder_out.size(0)):
hyps.append([lexicon.token_table[idx] for idx in hyp_tokens[i]])
@@ -282,6 +283,7 @@ def decode_one_batch(
model=model,
encoder_out=encoder_out,
beam=params.beam_size,
+ encoder_out_lens=encoder_out_lens,
)
for i in range(encoder_out.size(0)):
hyps.append([lexicon.token_table[idx] for idx in hyp_tokens[i]])
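Threading encoder_out_lens into greedy_search_batch and modified_beam_search matters because batched encoder outputs are padded to the longest utterance; without the true lengths, the search would decode padding frames. A toy illustration of the masking the lengths enable (plain PyTorch, not icefall's API):

import torch

# Two utterances padded to 10 encoder frames; the second has only 7 real ones.
encoder_out = torch.randn(2, 10, 4)  # (batch, frames, encoder_dim)
encoder_out_lens = torch.tensor([10, 7])

for i, n in enumerate(encoder_out_lens.tolist()):
    valid = encoder_out[i, :n]  # the frames the search may consume
    print(f"utt {i}: {valid.shape[0]} valid frames of {encoder_out.size(1)}")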
diff --git a/egs/yesno/ASR/local/compute_fbank_yesno.py b/egs/yesno/ASR/local/compute_fbank_yesno.py
index 6922ffe10..fb48b6f8e 100755
--- a/egs/yesno/ASR/local/compute_fbank_yesno.py
+++ b/egs/yesno/ASR/local/compute_fbank_yesno.py
@@ -12,7 +12,7 @@ import os
from pathlib import Path
import torch
-from lhotse import CutSet, Fbank, FbankConfig, LilcomHdf5Writer
+from lhotse import CutSet, Fbank, FbankConfig, LilcomChunkyWriter
from lhotse.recipes.utils import read_manifests_if_cached
from icefall.utils import get_executor
@@ -37,10 +37,13 @@ def compute_fbank_yesno():
"train",
"test",
)
+ prefix = "yesno"
+ suffix = "jsonl.gz"
manifests = read_manifests_if_cached(
dataset_parts=dataset_parts,
output_dir=src_dir,
- prefix="yesno",
+ prefix=prefix,
+ suffix=suffix,
)
assert manifests is not None
@@ -50,7 +53,8 @@ def compute_fbank_yesno():
with get_executor() as ex: # Initialize the executor only once.
for partition, m in manifests.items():
- if (output_dir / f"cuts_{partition}.json.gz").is_file():
+ cuts_file = output_dir / f"{prefix}_cuts_{partition}.{suffix}"
+ if cuts_file.is_file():
logging.info(f"{partition} already exists - skipping.")
continue
logging.info(f"Processing {partition}")
@@ -66,13 +70,13 @@ def compute_fbank_yesno():
)
cut_set = cut_set.compute_and_store_features(
extractor=extractor,
- storage_path=f"{output_dir}/feats_{partition}",
+ storage_path=f"{output_dir}/{prefix}_feats_{partition}",
# when an executor is specified, make more partitions
num_jobs=num_jobs if ex is None else 1, # use one job
executor=ex,
- storage_type=LilcomHdf5Writer,
+ storage_type=LilcomChunkyWriter,
)
- cut_set.to_json(output_dir / f"cuts_{partition}.json.gz")
+ cut_set.to_file(cuts_file)
if __name__ == "__main__":
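All of the compute_fbank_*.py scripts in this diff converge on the same layout: manifests are named <prefix>_cuts_<partition>.<suffix> with suffix="jsonl.gz", which keeps per-dataset files distinguishable when several corpora share one data/fbank directory. A minimal sketch of the convention, with hypothetical values:

from pathlib import Path

output_dir = Path("data/fbank")
prefix = "yesno"     # dataset name
suffix = "jsonl.gz"  # JSONL manifests replace the old .json.gz

for partition in ("train", "test"):
    cuts_file = output_dir / f"{prefix}_cuts_{partition}.{suffix}"
    print(cuts_file)  # data/fbank/yesno_cuts_train.jsonl.gz, ...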
diff --git a/egs/yesno/ASR/tdnn/asr_datamodule.py b/egs/yesno/ASR/tdnn/asr_datamodule.py
index 0a5a42089..85e5f1358 100644
--- a/egs/yesno/ASR/tdnn/asr_datamodule.py
+++ b/egs/yesno/ASR/tdnn/asr_datamodule.py
@@ -20,18 +20,19 @@ from functools import lru_cache
from pathlib import Path
from typing import List
+from lhotse import CutSet, Fbank, FbankConfig, load_manifest_lazy
+from lhotse.dataset import (
+ CutConcatenate,
+ DynamicBucketingSampler,
+ K2SpeechRecognitionDataset,
+ PrecomputedFeatures,
+ SingleCutSampler,
+)
+from lhotse.dataset.input_strategies import OnTheFlyFeatures
from torch.utils.data import DataLoader
from icefall.dataset.datamodule import DataModule
from icefall.utils import str2bool
-from lhotse import CutSet, Fbank, FbankConfig, load_manifest
-from lhotse.dataset import (
- BucketingSampler,
- CutConcatenate,
- K2SpeechRecognitionDataset,
- PrecomputedFeatures,
-)
-from lhotse.dataset.input_strategies import OnTheFlyFeatures
class YesNoAsrDataModule(DataModule):
@@ -84,7 +85,7 @@ class YesNoAsrDataModule(DataModule):
"--num-buckets",
type=int,
default=10,
- help="The number of buckets for the BucketingSampler"
+ help="The number of buckets for the DynamicBucketingSampler "
"(you might want to increase it for larger datasets).",
)
group.add_argument(
@@ -186,18 +187,17 @@ class YesNoAsrDataModule(DataModule):
)
if self.args.bucketing_sampler:
- logging.info("Using BucketingSampler.")
- train_sampler = BucketingSampler(
+ logging.info("Using DynamicBucketingSampler.")
+ train_sampler = DynamicBucketingSampler(
cuts_train,
max_duration=self.args.max_duration,
shuffle=self.args.shuffle,
num_buckets=self.args.num_buckets,
- bucket_method="equal_duration",
drop_last=True,
)
else:
logging.info("Using SingleCutSampler.")
- train_sampler = BucketingSampler(
+ train_sampler = SingleCutSampler(
cuts_train,
max_duration=self.args.max_duration,
shuffle=self.args.shuffle,
@@ -225,8 +225,10 @@ class YesNoAsrDataModule(DataModule):
else PrecomputedFeatures(),
return_cuts=self.args.return_cuts,
)
- sampler = BucketingSampler(
- cuts_test, max_duration=self.args.max_duration, shuffle=False
+ sampler = DynamicBucketingSampler(
+ cuts_test,
+ max_duration=self.args.max_duration,
+ shuffle=False,
)
logging.debug("About to create test dataloader")
test_dl = DataLoader(
@@ -240,11 +242,15 @@ class YesNoAsrDataModule(DataModule):
@lru_cache()
def train_cuts(self) -> CutSet:
logging.info("About to get train cuts")
- cuts_train = load_manifest(self.args.feature_dir / "cuts_train.json.gz")
+ cuts_train = load_manifest_lazy(
+ self.args.feature_dir / "yesno_cuts_train.jsonl.gz"
+ )
return cuts_train
@lru_cache()
def test_cuts(self) -> List[CutSet]:
logging.info("About to get test cuts")
- cuts_test = load_manifest(self.args.feature_dir / "cuts_test.json.gz")
+ cuts_test = load_manifest_lazy(
+ self.args.feature_dir / "yesno_cuts_test.jsonl.gz"
+ )
return cuts_test
diff --git a/icefall/utils.py b/icefall/utils.py
index daccd4346..b38574f0c 100644
--- a/icefall/utils.py
+++ b/icefall/utils.py
@@ -127,7 +127,10 @@ def setup_logger(
level = logging.CRITICAL
logging.basicConfig(
- filename=log_filename, format=formatter, level=level, filemode="w"
+ filename=log_filename,
+ format=formatter,
+ level=level,
+ filemode="w",
)
if use_console:
console = logging.StreamHandler()