Mirror of https://github.com/k2-fsa/icefall.git (synced 2025-09-18 21:44:18 +00:00)

Commit eadd0f6aa7
Merge remote-tracking branch 'k2-fsa/master' into emformer_conv_scale_new
@@ -59,6 +59,8 @@ jobs:
       - name: Install Python dependencies
         run: |
           grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install
+          pip uninstall -y protobuf
+          pip install --no-binary protobuf protobuf
 
       - name: Cache kaldifeat
         id: my-cache

@@ -59,6 +59,8 @@ jobs:
       - name: Install Python dependencies
         run: |
           grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install
+          pip uninstall -y protobuf
+          pip install --no-binary protobuf protobuf
 
       - name: Cache kaldifeat
         id: my-cache

@@ -99,7 +101,7 @@ jobs:
         with:
           path: |
             ~/tmp/fbank-libri
-          key: cache-libri-fbank-test-clean-and-test-other
+          key: cache-libri-fbank-test-clean-and-test-other-v2
 
       - name: Compute fbank for LibriSpeech test-clean and test-other
         if: steps.libri-test-clean-and-test-other-fbank.outputs.cache-hit != 'true'

@@ -59,6 +59,8 @@ jobs:
       - name: Install Python dependencies
         run: |
           grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install
+          pip uninstall -y protobuf
+          pip install --no-binary protobuf protobuf
 
       - name: Cache kaldifeat
         id: my-cache

@@ -99,7 +101,7 @@ jobs:
         with:
           path: |
             ~/tmp/fbank-libri
-          key: cache-libri-fbank-test-clean-and-test-other
+          key: cache-libri-fbank-test-clean-and-test-other-v2
 
       - name: Compute fbank for LibriSpeech test-clean and test-other
         if: steps.libri-test-clean-and-test-other-fbank.outputs.cache-hit != 'true'

@@ -59,6 +59,8 @@ jobs:
       - name: Install Python dependencies
         run: |
           grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install
+          pip uninstall -y protobuf
+          pip install --no-binary protobuf protobuf
 
       - name: Cache kaldifeat
         id: my-cache

@@ -99,7 +101,7 @@ jobs:
         with:
           path: |
             ~/tmp/fbank-libri
-          key: cache-libri-fbank-test-clean-and-test-other
+          key: cache-libri-fbank-test-clean-and-test-other-v2
 
       - name: Compute fbank for LibriSpeech test-clean and test-other
         if: steps.libri-test-clean-and-test-other-fbank.outputs.cache-hit != 'true'

@@ -59,6 +59,8 @@ jobs:
       - name: Install Python dependencies
         run: |
           grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install
+          pip uninstall -y protobuf
+          pip install --no-binary protobuf protobuf
 
       - name: Cache kaldifeat
         id: my-cache

@@ -99,7 +101,7 @@ jobs:
         with:
           path: |
             ~/tmp/fbank-libri
-          key: cache-libri-fbank-test-clean-and-test-other
+          key: cache-libri-fbank-test-clean-and-test-other-v2
 
       - name: Compute fbank for LibriSpeech test-clean and test-other
         if: steps.libri-test-clean-and-test-other-fbank.outputs.cache-hit != 'true'

@@ -59,6 +59,8 @@ jobs:
       - name: Install Python dependencies
         run: |
           grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install
+          pip uninstall -y protobuf
+          pip install --no-binary protobuf protobuf
 
       - name: Cache kaldifeat
         id: my-cache

@@ -99,7 +101,7 @@ jobs:
         with:
           path: |
             ~/tmp/fbank-libri
-          key: cache-libri-fbank-test-clean-and-test-other
+          key: cache-libri-fbank-test-clean-and-test-other-v2
 
       - name: Compute fbank for LibriSpeech test-clean and test-other
         if: steps.libri-test-clean-and-test-other-fbank.outputs.cache-hit != 'true'

@@ -49,6 +49,8 @@ jobs:
       - name: Install Python dependencies
         run: |
           grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install
+          pip uninstall -y protobuf
+          pip install --no-binary protobuf protobuf
 
       - name: Cache kaldifeat
         id: my-cache

@@ -58,6 +58,8 @@ jobs:
       - name: Install Python dependencies
         run: |
           grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install
+          pip uninstall -y protobuf
+          pip install --no-binary protobuf protobuf
 
       - name: Cache kaldifeat
         id: my-cache

@@ -98,7 +100,7 @@ jobs:
         with:
           path: |
             ~/tmp/fbank-libri
-          key: cache-libri-fbank-test-clean-and-test-other
+          key: cache-libri-fbank-test-clean-and-test-other-v2
 
       - name: Compute fbank for LibriSpeech test-clean and test-other
         if: steps.libri-test-clean-and-test-other-fbank.outputs.cache-hit != 'true'

@@ -58,6 +58,8 @@ jobs:
       - name: Install Python dependencies
         run: |
           grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install
+          pip uninstall -y protobuf
+          pip install --no-binary protobuf protobuf
 
       - name: Cache kaldifeat
         id: my-cache

@@ -98,7 +100,7 @@ jobs:
         with:
           path: |
             ~/tmp/fbank-libri
-          key: cache-libri-fbank-test-clean-and-test-other
+          key: cache-libri-fbank-test-clean-and-test-other-v2
 
       - name: Compute fbank for LibriSpeech test-clean and test-other
         if: steps.libri-test-clean-and-test-other-fbank.outputs.cache-hit != 'true'

@@ -49,6 +49,8 @@ jobs:
       - name: Install Python dependencies
         run: |
           grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install
+          pip uninstall -y protobuf
+          pip install --no-binary protobuf protobuf
 
       - name: Cache kaldifeat
         id: my-cache

@@ -49,6 +49,8 @@ jobs:
       - name: Install Python dependencies
         run: |
           grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install
+          pip uninstall -y protobuf
+          pip install --no-binary protobuf protobuf
 
       - name: Cache kaldifeat
         id: my-cache

@@ -58,6 +58,8 @@ jobs:
       - name: Install Python dependencies
         run: |
           grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install
+          pip uninstall -y protobuf
+          pip install --no-binary protobuf protobuf
 
       - name: Cache kaldifeat
         id: my-cache

@@ -98,7 +100,7 @@ jobs:
         with:
           path: |
             ~/tmp/fbank-libri
-          key: cache-libri-fbank-test-clean-and-test-other
+          key: cache-libri-fbank-test-clean-and-test-other-v2
 
       - name: Compute fbank for LibriSpeech test-clean and test-other
         if: steps.libri-test-clean-and-test-other-fbank.outputs.cache-hit != 'true'

@@ -49,6 +49,8 @@ jobs:
       - name: Install Python dependencies
         run: |
           grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install
+          pip uninstall -y protobuf
+          pip install --no-binary protobuf protobuf
 
       - name: Cache kaldifeat
         id: my-cache
.github/workflows/run-yesno-recipe.yml (vendored, 2 changes)

@@ -62,6 +62,8 @@ jobs:
       - name: Install Python dependencies
         run: |
           grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install
+          pip uninstall -y protobuf
+          pip install --no-binary protobuf protobuf
 
       - name: Run yesno recipe
         shell: bash

.github/workflows/test.yml (vendored, 3 changes)

@@ -76,6 +76,9 @@ jobs:
           pip install k2==${{ matrix.k2-version }}+cpu.torch${{ matrix.torch }} -f https://k2-fsa.org/nightly/
           pip install git+https://github.com/lhotse-speech/lhotse
           # icefall requirements
+          pip uninstall -y protobuf
+          pip install --no-binary protobuf protobuf
+
           pip install -r requirements.txt
 
       - name: Install graphviz
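The same two-line change recurs across all of the workflow files above: protobuf is uninstalled and then reinstalled with `--no-binary`, which makes pip build it from the source distribution instead of pulling a prebuilt wheel. A plausible motivation (an assumption here; the commit itself gives no rationale) is to sidestep incompatibilities between the prebuilt wheel's compiled extension and the protobuf runtime expected by other pinned CI dependencies. A minimal sketch for checking which implementation ends up active:

```python
# Minimal sketch, assuming the `protobuf` package is installed. The
# `api_implementation` module is protobuf-internal but long-stable; a
# from-source build typically reports the pure-Python implementation.
from google.protobuf.internal import api_implementation

print(api_implementation.Type())  # e.g. "python", "cpp", or "upb"
```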
README.md (30 changes)

@@ -2,6 +2,14 @@
 <img src="https://raw.githubusercontent.com/k2-fsa/icefall/master/docs/source/_static/logo.png" width=168>
 </div>
 
+## Introduction
+
+icefall contains ASR recipes for various datasets
+using <https://github.com/k2-fsa/k2>.
+
+You can use <https://github.com/k2-fsa/sherpa> to deploy models
+trained with icefall.
+
 ## Installation
 
 Please refer to <https://icefall.readthedocs.io/en/latest/installation/index.html>

@@ -12,7 +20,7 @@ for installation.
 Please refer to <https://icefall.readthedocs.io/en/latest/recipes/index.html>
 for more information.
 
-We provide 6 recipes at present:
+We provide the following recipes:
 
 - [yesno][yesno]
 - [LibriSpeech][librispeech]

@@ -22,6 +30,7 @@ We provide 6 recipes at present:
 - [GigaSpeech][gigaspeech]
 - [Aidatatang_200zh][aidatatang_200zh]
 - [WenetSpeech][wenetspeech]
+- [Alimeeting][alimeeting]
 
 ### yesno
 

@@ -126,7 +135,7 @@ The best CER we currently have is:
 | CER | 4.26 |
 
 
-We provide a Colab notebook to run a pre-trained conformer CTC model: [](https://colab.research.google.com/drive/1WnG17io5HEZ0Gn_cnh_VzK5QYOoiiklC?usp=sharing)
+We provide a Colab notebook to run a pre-trained conformer CTC model: [
 
 #### Transducer Stateless Model
 

@@ -247,6 +256,20 @@ We provide one model for this recipe: [Pruned stateless RNN-T: Conformer encoder
 
 We provide a Colab notebook to run a pre-trained Pruned Transducer Stateless model: [](https://colab.research.google.com/drive/1EV4e1CHa1GZgEF-bZgizqI9RyFFehIiN?usp=sharing)
 
+### Alimeeting
+
+We provide one model for this recipe: [Pruned stateless RNN-T: Conformer encoder + Embedding decoder + k2 pruned RNN-T loss][Alimeeting_pruned_transducer_stateless2].
+
+#### Pruned stateless RNN-T: Conformer encoder + Embedding decoder + k2 pruned RNN-T loss (trained with far subset)
+
+|                      | Eval   | Test-Net |
+|----------------------|--------|----------|
+| greedy search        | 31.77  | 34.66    |
+| fast beam search     | 31.39  | 33.02    |
+| modified beam search | 30.38  | 34.25    |
+
+We provide a Colab notebook to run a pre-trained Pruned Transducer Stateless model: [](https://colab.research.google.com/drive/1tKr3f0mL17uO_ljdHGKtR7HOmthYHwJG?usp=sharing)
+
 ## Deployment with C++
 
 Once you have trained a model in icefall, you may want to deploy it with C++,

@@ -274,6 +297,7 @@ Please see: [
@@ -29,7 +29,7 @@ import os
 from pathlib import Path
 
 import torch
-from lhotse import CutSet, Fbank, FbankConfig, LilcomHdf5Writer
+from lhotse import CutSet, Fbank, FbankConfig, LilcomChunkyWriter
 from lhotse.recipes.utils import read_manifests_if_cached
 
 from icefall.utils import get_executor

@@ -43,7 +43,7 @@ torch.set_num_interop_threads(1)
 
 
 def compute_fbank_aidatatang_200zh(num_mel_bins: int = 80):
-    src_dir = Path("data/manifests/aidatatang_200zh")
+    src_dir = Path("data/manifests")
     output_dir = Path("data/fbank")
     num_jobs = min(15, os.cpu_count())
 

@@ -52,8 +52,13 @@ def compute_fbank_aidatatang_200zh(num_mel_bins: int = 80):
         "dev",
         "test",
     )
+    prefix = "aidatatang"
+    suffix = "jsonl.gz"
     manifests = read_manifests_if_cached(
-        dataset_parts=dataset_parts, output_dir=src_dir
+        dataset_parts=dataset_parts,
+        output_dir=src_dir,
+        prefix=prefix,
+        suffix=suffix,
     )
     assert manifests is not None
 

@@ -61,10 +66,14 @@ def compute_fbank_aidatatang_200zh(num_mel_bins: int = 80):
 
     with get_executor() as ex:  # Initialize the executor only once.
         for partition, m in manifests.items():
-            if (output_dir / f"cuts_{partition}.json.gz").is_file():
+            if (output_dir / f"{prefix}_cuts_{partition}.{suffix}").is_file():
                 logging.info(f"{partition} already exists - skipping.")
                 continue
             logging.info(f"Processing {partition}")
 
+            for sup in m["supervisions"]:
+                sup.custom = {"origin": "aidatatang_200zh"}
+
             cut_set = CutSet.from_manifests(
                 recordings=m["recordings"],
                 supervisions=m["supervisions"],

@@ -77,13 +86,14 @@ def compute_fbank_aidatatang_200zh(num_mel_bins: int = 80):
         )
         cut_set = cut_set.compute_and_store_features(
             extractor=extractor,
-            storage_path=f"{output_dir}/feats_{partition}",
+            storage_path=f"{output_dir}/{prefix}_feats_{partition}",
             # when an executor is specified, make more partitions
             num_jobs=num_jobs if ex is None else 80,
             executor=ex,
-            storage_type=LilcomHdf5Writer,
+            storage_type=LilcomChunkyWriter,
         )
-        cut_set.to_json(output_dir / f"cuts_{partition}.json.gz")
+
+        cut_set.to_file(output_dir / f"{prefix}_cuts_{partition}.{suffix}")
 
 
 def get_args():
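The thread running through these hunks (and the aishell ones below) is a switch to per-dataset manifest names: an explicit `prefix`, a `jsonl.gz` suffix, and `to_file` instead of `to_json`, so several corpora can share `data/fbank` without clashing. A small sketch of the resulting layout (paths illustrative, built from the names in the diff):

```python
# Sketch of the naming scheme adopted in this commit; the printed paths are
# illustrative and nothing is read from disk.
from pathlib import Path

prefix, suffix = "aidatatang", "jsonl.gz"
for partition in ("train", "dev", "test"):
    # e.g. data/fbank/aidatatang_cuts_train.jsonl.gz
    print(Path("data/fbank") / f"{prefix}_cuts_{partition}.{suffix}")
```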
@@ -25,19 +25,19 @@ for usage.
 """
 
 
-from lhotse import load_manifest
+from lhotse import load_manifest_lazy
 
 
 def main():
     paths = [
-        "./data/fbank/cuts_train.json.gz",
-        "./data/fbank/cuts_dev.json.gz",
-        "./data/fbank/cuts_test.json.gz",
+        "./data/fbank/aidatatang_cuts_train.jsonl.gz",
+        "./data/fbank/aidatatang_cuts_dev.jsonl.gz",
+        "./data/fbank/aidatatang_cuts_test.jsonl.gz",
     ]
 
     for path in paths:
         print(f"Starting display the statistics for {path}")
-        cuts = load_manifest(path)
+        cuts = load_manifest_lazy(path)
         cuts.describe()
 
 

@@ -45,7 +45,7 @@ if __name__ == "__main__":
     main()
 
 """
-Starting display the statistics for ./data/fbank/cuts_train.json.gz
+Starting display the statistics for ./data/fbank/aidatatang_cuts_train.jsonl.gz
 Cuts count: 494715
 Total duration (hours): 422.6
 Speech duration (hours): 422.6 (100.0%)

@@ -61,7 +61,7 @@ min 1.0
 99.5% 8.0
 99.9% 9.5
 max 18.1
-Starting display the statistics for ./data/fbank/cuts_dev.json.gz
+Starting display the statistics for ./data/fbank/aidatatang_cuts_dev.jsonl.gz
 Cuts count: 24216
 Total duration (hours): 20.2
 Speech duration (hours): 20.2 (100.0%)

@@ -77,7 +77,7 @@ min 1.2
 99.5% 7.3
 99.9% 8.8
 max 11.3
-Starting display the statistics for ./data/fbank/cuts_test.json.gz
+Starting display the statistics for ./data/fbank/aidatatang_cuts_test.jsonl.gz
 Cuts count: 48144
 Total duration (hours): 40.2
 Speech duration (hours): 40.2 (100.0%)
@@ -28,10 +28,10 @@ from lhotse import (
     Fbank,
     FbankConfig,
     load_manifest,
+    load_manifest_lazy,
     set_caching_enabled,
 )
 from lhotse.dataset import (
-    BucketingSampler,
     CutConcatenate,
     CutMix,
     DynamicBucketingSampler,

@@ -206,7 +206,7 @@ class Aidatatang_200zhAsrDataModule:
         """
         logging.info("About to get Musan cuts")
         cuts_musan = load_manifest(
-            self.args.manifest_dir / "cuts_musan.json.gz"
+            self.args.manifest_dir / "musan_cuts.jsonl.gz"
         )
 
         transforms = []

@@ -290,13 +290,12 @@ class Aidatatang_200zhAsrDataModule:
         )
 
         if self.args.bucketing_sampler:
-            logging.info("Using BucketingSampler.")
-            train_sampler = BucketingSampler(
+            logging.info("Using DynamicBucketingSampler.")
+            train_sampler = DynamicBucketingSampler(
                 cuts_train,
                 max_duration=self.args.max_duration,
                 shuffle=self.args.shuffle,
                 num_buckets=self.args.num_buckets,
-                bucket_method="equal_duration",
                 drop_last=True,
             )
         else:

@@ -402,14 +401,20 @@ class Aidatatang_200zhAsrDataModule:
     @lru_cache()
     def train_cuts(self) -> CutSet:
         logging.info("About to get train cuts")
-        return load_manifest(self.args.manifest_dir / "cuts_train.json.gz")
+        return load_manifest_lazy(
+            self.args.manifest_dir / "aidatatang_cuts_train.jsonl.gz"
+        )
 
     @lru_cache()
     def valid_cuts(self) -> CutSet:
         logging.info("About to get dev cuts")
-        return load_manifest(self.args.manifest_dir / "cuts_dev.json.gz")
+        return load_manifest_lazy(
+            self.args.manifest_dir / "aidatatang_cuts_dev.jsonl.gz"
+        )
 
     @lru_cache()
     def test_cuts(self) -> List[CutSet]:
         logging.info("About to get test cuts")
-        return load_manifest(self.args.manifest_dir / "cuts_test.json.gz")
+        return load_manifest_lazy(
+            self.args.manifest_dir / "aidatatang_cuts_test.jsonl.gz"
+        )
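The pattern above pairs `load_manifest_lazy` (which streams a `.jsonl.gz` manifest instead of materializing it in memory) with `DynamicBucketingSampler` (which estimates duration buckets on the fly and therefore does not need the full cut set up front). A minimal sketch of the combination, using only arguments that appear in this diff:

```python
# Minimal sketch, assuming lhotse is installed and the manifest exists;
# every keyword below also appears in the hunks above.
from lhotse import load_manifest_lazy
from lhotse.dataset import DynamicBucketingSampler

cuts = load_manifest_lazy("data/fbank/aidatatang_cuts_train.jsonl.gz")
sampler = DynamicBucketingSampler(
    cuts,
    max_duration=200.0,  # total seconds of audio per batch
    shuffle=True,
    num_buckets=30,
    drop_last=True,
)
```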
@@ -195,9 +195,9 @@ def get_params() -> AttributeDict:
             "best_train_epoch": -1,
             "best_valid_epoch": -1,
             "batch_idx_train": 0,
-            "log_interval": 10,
+            "log_interval": 50,
             "reset_interval": 200,
-            "valid_interval": 3000,
+            "valid_interval": 2000,
             # parameters for k2.ctc_loss
             "beam_size": 10,
             "reduction": "sum",
egs/aishell/ASR/local/compute_fbank_aidatatang_200zh.py (new executable file, 119 lines)

@@ -0,0 +1,119 @@
+#!/usr/bin/env python3
+# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang)
+#
+# See ../../../../LICENSE for clarification regarding multiple authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""
+This file computes fbank features of the aidatatang_200zh dataset.
+It looks for manifests in the directory data/manifests.
+
+The generated fbank features are saved in data/fbank.
+"""
+
+import argparse
+import logging
+import os
+from pathlib import Path
+
+import torch
+from lhotse import CutSet, Fbank, FbankConfig, LilcomChunkyWriter
+from lhotse.recipes.utils import read_manifests_if_cached
+
+from icefall.utils import get_executor
+
+# Torch's multithreaded behavior needs to be disabled or
+# it wastes a lot of CPU and slow things down.
+# Do this outside of main() in case it needs to take effect
+# even when we are not invoking the main (e.g. when spawning subprocesses).
+torch.set_num_threads(1)
+torch.set_num_interop_threads(1)
+
+
+def compute_fbank_aidatatang_200zh(num_mel_bins: int = 80):
+    src_dir = Path("data/manifests")
+    output_dir = Path("data/fbank")
+    num_jobs = min(15, os.cpu_count())
+
+    dataset_parts = (
+        "train",
+        "test",
+        "dev",
+    )
+    prefix = "aidatatang"
+    suffix = "jsonl.gz"
+    manifests = read_manifests_if_cached(
+        dataset_parts=dataset_parts,
+        output_dir=src_dir,
+        prefix=prefix,
+        suffix=suffix,
+    )
+    assert manifests is not None
+
+    extractor = Fbank(FbankConfig(num_mel_bins=num_mel_bins))
+
+    with get_executor() as ex:  # Initialize the executor only once.
+        for partition, m in manifests.items():
+            if (output_dir / f"{prefix}_cuts_{partition}.{suffix}").is_file():
+                logging.info(f"{partition} already exists - skipping.")
+                continue
+            logging.info(f"Processing {partition}")
+
+            for sup in m["supervisions"]:
+                sup.custom = {"origin": "aidatatang_200zh"}
+
+            cut_set = CutSet.from_manifests(
+                recordings=m["recordings"],
+                supervisions=m["supervisions"],
+            )
+            if "train" in partition:
+                cut_set = (
+                    cut_set
+                    + cut_set.perturb_speed(0.9)
+                    + cut_set.perturb_speed(1.1)
+                )
+            cut_set = cut_set.compute_and_store_features(
+                extractor=extractor,
+                storage_path=f"{output_dir}/{prefix}_feats_{partition}",
+                # when an executor is specified, make more partitions
+                num_jobs=num_jobs if ex is None else 80,
+                executor=ex,
+                storage_type=LilcomChunkyWriter,
+            )
+
+            cut_set.to_file(output_dir / f"{prefix}_cuts_{partition}.{suffix}")
+
+
+def get_args():
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--num-mel-bins",
+        type=int,
+        default=80,
+        help="""The number of mel bins for Fbank""",
+    )
+
+    return parser.parse_args()
+
+
+if __name__ == "__main__":
+    formatter = (
+        "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s"
+    )
+
+    logging.basicConfig(format=formatter, level=logging.INFO)
+
+    args = get_args()
+    compute_fbank_aidatatang_200zh(num_mel_bins=args.num_mel_bins)
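One detail of the new file worth calling out: for the train partition it applies 3-fold speed perturbation before feature extraction, a common augmentation in these recipes. A self-contained sketch of just that step:

```python
# Sketch of the augmentation used for "train" partitions above; assumes
# `cut_set` is a lhotse CutSet. perturb_speed resamples audio lazily and
# rescales supervision boundaries, so this triples the nominal training data.
from lhotse import CutSet


def with_speed_perturb(cut_set: CutSet) -> CutSet:
    return cut_set + cut_set.perturb_speed(0.9) + cut_set.perturb_speed(1.1)
```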
@@ -29,7 +29,7 @@ import os
 from pathlib import Path
 
 import torch
-from lhotse import CutSet, Fbank, FbankConfig, LilcomHdf5Writer
+from lhotse import CutSet, Fbank, FbankConfig, LilcomChunkyWriter
 from lhotse.recipes.utils import read_manifests_if_cached
 
 from icefall.utils import get_executor

@@ -52,8 +52,13 @@ def compute_fbank_aishell(num_mel_bins: int = 80):
         "dev",
         "test",
     )
+    prefix = "aishell"
+    suffix = "jsonl.gz"
     manifests = read_manifests_if_cached(
-        prefix="aishell", dataset_parts=dataset_parts, output_dir=src_dir
+        dataset_parts=dataset_parts,
+        output_dir=src_dir,
+        prefix=prefix,
+        suffix=suffix,
     )
     assert manifests is not None
 

@@ -61,7 +66,7 @@ def compute_fbank_aishell(num_mel_bins: int = 80):
 
     with get_executor() as ex:  # Initialize the executor only once.
         for partition, m in manifests.items():
-            if (output_dir / f"cuts_{partition}.json.gz").is_file():
+            if (output_dir / f"{prefix}_cuts_{partition}.{suffix}").is_file():
                 logging.info(f"{partition} already exists - skipping.")
                 continue
             logging.info(f"Processing {partition}")

@@ -77,13 +82,13 @@ def compute_fbank_aishell(num_mel_bins: int = 80):
         )
         cut_set = cut_set.compute_and_store_features(
             extractor=extractor,
-            storage_path=f"{output_dir}/feats_{partition}",
+            storage_path=f"{output_dir}/{prefix}_feats_{partition}",
             # when an executor is specified, make more partitions
             num_jobs=num_jobs if ex is None else 80,
             executor=ex,
-            storage_type=LilcomHdf5Writer,
+            storage_type=LilcomChunkyWriter,
         )
-        cut_set.to_json(output_dir / f"cuts_{partition}.json.gz")
+        cut_set.to_file(output_dir / f"{prefix}_cuts_{partition}.{suffix}")
 
 
 def get_args():
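Both fbank scripts also swap the feature storage backend from `LilcomHdf5Writer` to `LilcomChunkyWriter`. To my understanding (an assumption, not stated in the diff), the chunky writer concatenates lilcom-compressed arrays in one flat file with byte offsets, dropping the h5py dependency while keeping random access. A minimal usage sketch:

```python
# Minimal sketch, assuming lhotse and numpy are installed; the "feats.lca"
# file name is illustrative. write() returns the storage key that gets
# recorded in the feature manifest.
import numpy as np
from lhotse import LilcomChunkyWriter

with LilcomChunkyWriter("feats.lca") as writer:
    storage_key = writer.write("utt-1", np.random.randn(100, 80).astype(np.float32))
```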
@@ -25,18 +25,18 @@ for usage.
 """
 
 
-from lhotse import load_manifest
+from lhotse import load_manifest_lazy
 
 
 def main():
-    # path = "./data/fbank/cuts_train.json.gz"
-    # path = "./data/fbank/cuts_test.json.gz"
-    # path = "./data/fbank/cuts_dev.json.gz"
-    # path = "./data/fbank/aidatatang_200zh/cuts_train_raw.jsonl.gz"
-    # path = "./data/fbank/aidatatang_200zh/cuts_test_raw.jsonl.gz"
-    path = "./data/fbank/aidatatang_200zh/cuts_dev_raw.jsonl.gz"
+    # path = "./data/fbank/aishell_cuts_train.jsonl.gz"
+    # path = "./data/fbank/aishell_cuts_test.jsonl.gz"
+    path = "./data/fbank/aishell_cuts_dev.jsonl.gz"
+    # path = "./data/fbank/aidatatang_cuts_train.jsonl.gz"
+    # path = "./data/fbank/aidatatang_cuts_test.jsonl.gz"
+    # path = "./data/fbank/aidatatang_cuts_dev.jsonl.gz"
 
-    cuts = load_manifest(path)
+    cuts = load_manifest_lazy(path)
     cuts.describe()
 
 
@@ -1,71 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2022 Xiaomi Corp. (Fangjun Kuang)
-#
-# See ../../../../LICENSE for clarification regarding multiple authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-from pathlib import Path
-
-from lhotse import CutSet
-from lhotse.recipes.utils import read_manifests_if_cached
-
-
-def preprocess_aidatatang_200zh():
-    src_dir = Path("data/manifests/aidatatang_200zh")
-    output_dir = Path("data/fbank/aidatatang_200zh")
-    output_dir.mkdir(exist_ok=True, parents=True)
-
-    dataset_parts = (
-        "train",
-        "test",
-        "dev",
-    )
-
-    logging.info("Loading manifest")
-    manifests = read_manifests_if_cached(
-        dataset_parts=dataset_parts, output_dir=src_dir, prefix="aidatatang"
-    )
-    assert len(manifests) > 0
-
-    for partition, m in manifests.items():
-        logging.info(f"Processing {partition}")
-        raw_cuts_path = output_dir / f"cuts_{partition}_raw.jsonl.gz"
-        if raw_cuts_path.is_file():
-            logging.info(f"{partition} already exists - skipping")
-            continue
-
-        for sup in m["supervisions"]:
-            sup.custom = {"origin": "aidatatang_200zh"}
-
-        cut_set = CutSet.from_manifests(
-            recordings=m["recordings"],
-            supervisions=m["supervisions"],
-        )
-
-        logging.info(f"Saving to {raw_cuts_path}")
-        cut_set.to_file(raw_cuts_path)
-
-
-def main():
-    formatter = (
-        "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s"
-    )
-    logging.basicConfig(format=formatter, level=logging.INFO)
-
-    preprocess_aidatatang_200zh()
-
-
-if __name__ == "__main__":
-    main()
@@ -42,18 +42,18 @@ if [ $stage -le 1 ] && [ $stop_stage -ge 1 ]; then
   log "Stage 1: Prepare manifest"
   # We assume that you have downloaded the aidatatang_200zh corpus
   # to $dl_dir/aidatatang_200zh
-  if [ ! -f data/manifests/aidatatang_200zh/.manifests.done ]; then
-    mkdir -p data/manifests/aidatatang_200zh
-    lhotse prepare aidatatang-200zh $dl_dir data/manifests/aidatatang_200zh
-    touch data/manifests/aidatatang_200zh/.manifests.done
+  if [ ! -f data/manifests/.aidatatang_200zh_manifests.done ]; then
+    mkdir -p data/manifests
+    lhotse prepare aidatatang-200zh $dl_dir data/manifests
+    touch data/manifests/.aidatatang_200zh_manifests.done
   fi
 fi
 
 if [ $stage -le 2 ] && [ $stop_stage -ge 2 ]; then
   log "Stage 2: Process aidatatang_200zh"
-  if [ ! -f data/fbank/aidatatang_200zh/.fbank.done ]; then
-    mkdir -p data/fbank/aidatatang_200zh
-    lhotse prepare aidatatang-200zh $dl_dir data/manifests/aidatatang_200zh
-    touch data/fbank/aidatatang_200zh/.fbank.done
+  if [ ! -f data/fbank/.aidatatang_200zh_fbank.done ]; then
+    mkdir -p data/fbank
+    ./local/compute_fbank_aidatatang_200zh.py
+    touch data/fbank/.aidatatang_200zh_fbank.done
   fi
 fi
@@ -23,11 +23,11 @@ from functools import lru_cache
 from pathlib import Path
 from typing import List
 
-from lhotse import CutSet, Fbank, FbankConfig, load_manifest
+from lhotse import CutSet, Fbank, FbankConfig, load_manifest, load_manifest_lazy
 from lhotse.dataset import (
-    BucketingSampler,
     CutConcatenate,
     CutMix,
+    DynamicBucketingSampler,
     K2SpeechRecognitionDataset,
     PrecomputedFeatures,
     SingleCutSampler,

@@ -93,7 +93,7 @@ class AishellAsrDataModule:
             "--num-buckets",
             type=int,
             default=30,
-            help="The number of buckets for the BucketingSampler"
+            help="The number of buckets for the DynamicBucketingSampler"
             "(you might want to increase it for larger datasets).",
         )
         group.add_argument(

@@ -133,6 +133,12 @@ class AishellAsrDataModule:
             help="When enabled (=default), the examples will be "
             "shuffled for each epoch.",
         )
+        group.add_argument(
+            "--drop-last",
+            type=str2bool,
+            default=True,
+            help="Whether to drop last batch. Used by sampler.",
+        )
         group.add_argument(
             "--return-cuts",
             type=str2bool,

@@ -178,7 +184,7 @@ class AishellAsrDataModule:
     def train_dataloaders(self, cuts_train: CutSet) -> DataLoader:
         logging.info("About to get Musan cuts")
         cuts_musan = load_manifest(
-            self.args.manifest_dir / "cuts_musan.json.gz"
+            self.args.manifest_dir / "musan_cuts.jsonl.gz"
         )
 
         transforms = []

@@ -262,14 +268,13 @@ class AishellAsrDataModule:
         )
 
         if self.args.bucketing_sampler:
-            logging.info("Using BucketingSampler.")
-            train_sampler = BucketingSampler(
+            logging.info("Using DynamicBucketingSampler.")
+            train_sampler = DynamicBucketingSampler(
                 cuts_train,
                 max_duration=self.args.max_duration,
                 shuffle=self.args.shuffle,
                 num_buckets=self.args.num_buckets,
-                bucket_method="equal_duration",
-                drop_last=True,
+                drop_last=self.args.drop_last,
             )
         else:
             logging.info("Using SingleCutSampler.")

@@ -313,7 +318,7 @@ class AishellAsrDataModule:
             cut_transforms=transforms,
             return_cuts=self.args.return_cuts,
         )
-        valid_sampler = BucketingSampler(
+        valid_sampler = DynamicBucketingSampler(
             cuts_valid,
             max_duration=self.args.max_duration,
             shuffle=False,

@@ -337,8 +342,10 @@ class AishellAsrDataModule:
             else PrecomputedFeatures(),
             return_cuts=self.args.return_cuts,
         )
-        sampler = BucketingSampler(
-            cuts, max_duration=self.args.max_duration, shuffle=False
+        sampler = DynamicBucketingSampler(
+            cuts,
+            max_duration=self.args.max_duration,
+            shuffle=False,
         )
         test_dl = DataLoader(
             test,

@@ -351,17 +358,21 @@ class AishellAsrDataModule:
     @lru_cache()
     def train_cuts(self) -> CutSet:
         logging.info("About to get train cuts")
-        cuts_train = load_manifest(
-            self.args.manifest_dir / "cuts_train.json.gz"
+        cuts_train = load_manifest_lazy(
+            self.args.manifest_dir / "aishell_cuts_train.jsonl.gz"
         )
         return cuts_train
 
     @lru_cache()
     def valid_cuts(self) -> CutSet:
         logging.info("About to get dev cuts")
-        return load_manifest(self.args.manifest_dir / "cuts_dev.json.gz")
+        return load_manifest_lazy(
+            self.args.manifest_dir / "aishell_cuts_dev.jsonl.gz"
+        )
 
     @lru_cache()
     def test_cuts(self) -> List[CutSet]:
         logging.info("About to get test cuts")
-        return load_manifest(self.args.manifest_dir / "cuts_test.json.gz")
+        return load_manifest_lazy(
+            self.args.manifest_dir / "aishell_cuts_test.jsonl.gz"
+        )
@@ -15,6 +15,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+"""
+Usage
+export CUDA_VISIBLE_DEVICES="0,1,2,3"
+./tdnn_lstm_ctc/train.py \
+  --world-size 4 \
+  --num-epochs 20 \
+  --max-duration 300
+"""
 
 import argparse
 import logging
@@ -110,9 +110,7 @@ class Conformer(Transformer):
         x = x.permute(1, 0, 2)  # (N, T, C) -> (T, N, C)
 
         # Caution: We assume the subsampling factor is 4!
-        with warnings.catch_warnings():
-            warnings.simplefilter("ignore")
-            lengths = ((x_lens - 1) // 2 - 1) // 2
+        lengths = (((x_lens - 1) >> 1) - 1) >> 1
         assert x.size(0) == lengths.max().item()
         mask = make_pad_mask(lengths)
 
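The rewritten length computation is worth a second look: for non-negative values, `x >> 1` equals `x // 2`, so the new expression is numerically identical to the old one, but it avoids the PyTorch floor-division deprecation warning that the deleted `warnings.catch_warnings()` block appears to have been silencing. A quick plain-Python self-check of the equivalence:

```python
# Quick sketch: the shift form matches the old floor-division form for
# non-negative lengths under the assumed subsampling factor of 4.
for x_lens in range(4, 1000):
    assert ((x_lens - 1) // 2 - 1) // 2 == (((x_lens - 1) >> 1) - 1) >> 1
```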
@@ -21,6 +21,7 @@
 
 import argparse
 import logging
+import warnings
 from pathlib import Path
 from shutil import copyfile
 from typing import Optional, Tuple
@@ -386,7 +387,11 @@ def compute_loss(
     assert loss.requires_grad == is_training
 
     info = MetricsTracker()
-    info["frames"] = (feature_lens // params.subsampling_factor).sum().item()
+    with warnings.catch_warnings():
+        warnings.simplefilter("ignore")
+        info["frames"] = (
+            (feature_lens // params.subsampling_factor).sum().item()
+        )
 
     # Note: We use reduction=sum while computing the loss.
     info["loss"] = loss.detach().cpu().item()
@@ -599,21 +604,18 @@ def run(rank, world_size, args):
     train_cuts = aishell.train_cuts()
 
     def remove_short_and_long_utt(c: Cut):
-        # Keep only utterances with duration between 1 second and 20 seconds
-        return 1.0 <= c.duration <= 20.0
-
-    num_in_total = len(train_cuts)
+        # Keep only utterances with duration between 1 second and 12 seconds
+        #
+        # Caution: There is a reason to select 12.0 here. Please see
+        # ../local/display_manifest_statistics.py
+        #
+        # You should use ../local/display_manifest_statistics.py to get
+        # an utterance duration distribution for your dataset to select
+        # the threshold
+        return 1.0 <= c.duration <= 12.0
 
     train_cuts = train_cuts.filter(remove_short_and_long_utt)
 
-    num_left = len(train_cuts)
-    num_removed = num_in_total - num_left
-    removed_percent = num_removed / num_in_total * 100
-
-    logging.info(f"Before removing short and long utterances: {num_in_total}")
-    logging.info(f"After removing short and long utterances: {num_left}")
-    logging.info(f"Removed {num_removed} utterances ({removed_percent:.5f}%)")
-
     train_dl = aishell.train_dataloaders(train_cuts)
     valid_dl = aishell.valid_dataloaders(aishell.valid_cuts())
 
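Note what disappeared alongside the threshold change: the before/after utterance counting. A likely reason (an inference, not stated in the commit) is that `train_cuts` now comes from `load_manifest_lazy`, so any `len()`-style bookkeeping would force a full pass over the manifest before training starts, while `filter` stays lazy:

```python
# Sketch of the lazy pipeline this hunk moves to; the path matches the
# manifests created earlier in the commit.
from lhotse import load_manifest_lazy

cuts = load_manifest_lazy("data/fbank/aishell_cuts_train.jsonl.gz")
cuts = cuts.filter(lambda c: 1.0 <= c.duration <= 12.0)  # still lazy: no full scan yet
```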
@@ -18,7 +18,7 @@
 import logging
 from pathlib import Path
 
-from lhotse import CutSet, load_manifest
+from lhotse import CutSet, load_manifest_lazy
 
 
 class AIDatatang200zh:

@@ -28,26 +28,26 @@ class AIDatatang200zh:
         manifest_dir:
           It is expected to contain the following files::
 
-            - cuts_dev_raw.jsonl.gz
-            - cuts_train_raw.jsonl.gz
-            - cuts_test_raw.jsonl.gz
+            - aidatatang_cuts_dev.jsonl.gz
+            - aidatatang_cuts_train.jsonl.gz
+            - aidatatang_cuts_test.jsonl.gz
         """
         self.manifest_dir = Path(manifest_dir)
 
     def train_cuts(self) -> CutSet:
-        f = self.manifest_dir / "cuts_train_raw.jsonl.gz"
+        f = self.manifest_dir / "aidatatang_cuts_train.jsonl.gz"
         logging.info(f"About to get train cuts from {f}")
-        cuts_train = load_manifest(f)
+        cuts_train = load_manifest_lazy(f)
         return cuts_train
 
     def valid_cuts(self) -> CutSet:
-        f = self.manifest_dir / "cuts_valid_raw.jsonl.gz"
+        f = self.manifest_dir / "aidatatang_cuts_valid.jsonl.gz"
         logging.info(f"About to get valid cuts from {f}")
-        cuts_valid = load_manifest(f)
+        cuts_valid = load_manifest_lazy(f)
         return cuts_valid
 
     def test_cuts(self) -> CutSet:
-        f = self.manifest_dir / "cuts_test_raw.jsonl.gz"
+        f = self.manifest_dir / "aidatatang_cuts_test.jsonl.gz"
         logging.info(f"About to get test cuts from {f}")
-        cuts_test = load_manifest(f)
+        cuts_test = load_manifest_lazy(f)
         return cuts_test
@@ -18,7 +18,7 @@
 import logging
 from pathlib import Path
 
-from lhotse import CutSet, load_manifest
+from lhotse import CutSet, load_manifest_lazy
 
 
 class AIShell:

@@ -28,26 +28,26 @@ class AIShell:
         manifest_dir:
           It is expected to contain the following files::
 
-            - cuts_dev.json.gz
-            - cuts_train.json.gz
-            - cuts_test.json.gz
+            - aishell_cuts_dev.jsonl.gz
+            - aishell_cuts_train.jsonl.gz
+            - aishell_cuts_test.jsonl.gz
         """
         self.manifest_dir = Path(manifest_dir)
 
     def train_cuts(self) -> CutSet:
-        f = self.manifest_dir / "cuts_train.json.gz"
+        f = self.manifest_dir / "aishell_cuts_train.jsonl.gz"
         logging.info(f"About to get train cuts from {f}")
-        cuts_train = load_manifest(f)
+        cuts_train = load_manifest_lazy(f)
         return cuts_train
 
     def valid_cuts(self) -> CutSet:
-        f = self.manifest_dir / "cuts_dev.json.gz"
+        f = self.manifest_dir / "aishell_cuts_dev.jsonl.gz"
         logging.info(f"About to get valid cuts from {f}")
-        cuts_valid = load_manifest(f)
+        cuts_valid = load_manifest_lazy(f)
         return cuts_valid
 
     def test_cuts(self) -> CutSet:
-        f = self.manifest_dir / "cuts_test.json.gz"
+        f = self.manifest_dir / "aishell_cuts_test.jsonl.gz"
         logging.info(f"About to get test cuts from {f}")
-        cuts_test = load_manifest(f)
+        cuts_test = load_manifest_lazy(f)
         return cuts_test
@@ -24,7 +24,6 @@ from typing import Optional
 
 from lhotse import CutSet, Fbank, FbankConfig
 from lhotse.dataset import (
-    BucketingSampler,
     CutMix,
     DynamicBucketingSampler,
     K2SpeechRecognitionDataset,

@@ -73,8 +72,7 @@ class AsrDataModule:
             "--num-buckets",
             type=int,
             default=30,
-            help="The number of buckets for the BucketingSampler "
-            "and DynamicBucketingSampler."
+            help="The number of buckets for the DynamicBucketingSampler "
             "(you might want to increase it for larger datasets).",
         )
 

@@ -147,7 +145,6 @@ class AsrDataModule:
     def train_dataloaders(
         self,
         cuts_train: CutSet,
-        dynamic_bucketing: bool,
         on_the_fly_feats: bool,
         cuts_musan: Optional[CutSet] = None,
     ) -> DataLoader:

@@ -157,9 +154,6 @@ class AsrDataModule:
             Cuts for training.
           cuts_musan:
             If not None, it is the cuts for mixing.
-          dynamic_bucketing:
-            True to use DynamicBucketingSampler;
-            False to use BucketingSampler.
           on_the_fly_feats:
             True to use OnTheFlyFeatures;
             False to use PrecomputedFeatures.

@@ -232,25 +226,14 @@ class AsrDataModule:
             return_cuts=self.args.return_cuts,
         )
 
-        if dynamic_bucketing:
-            logging.info("Using DynamicBucketingSampler.")
-            train_sampler = DynamicBucketingSampler(
-                cuts_train,
-                max_duration=self.args.max_duration,
-                shuffle=self.args.shuffle,
-                num_buckets=self.args.num_buckets,
-                drop_last=True,
-            )
-        else:
-            logging.info("Using BucketingSampler.")
-            train_sampler = BucketingSampler(
-                cuts_train,
-                max_duration=self.args.max_duration,
-                shuffle=self.args.shuffle,
-                num_buckets=self.args.num_buckets,
-                bucket_method="equal_duration",
-                drop_last=True,
-            )
+        logging.info("Using DynamicBucketingSampler.")
+        train_sampler = DynamicBucketingSampler(
+            cuts_train,
+            max_duration=self.args.max_duration,
+            shuffle=self.args.shuffle,
+            num_buckets=self.args.num_buckets,
+            drop_last=True,
+        )
 
         logging.info("About to create train dataloader")
         train_dl = DataLoader(

@@ -279,7 +262,7 @@ class AsrDataModule:
             cut_transforms=transforms,
             return_cuts=self.args.return_cuts,
         )
-        valid_sampler = BucketingSampler(
+        valid_sampler = DynamicBucketingSampler(
             cuts_valid,
             max_duration=self.args.max_duration,
             shuffle=False,

@@ -303,8 +286,10 @@ class AsrDataModule:
             else PrecomputedFeatures(),
             return_cuts=self.args.return_cuts,
         )
-        sampler = BucketingSampler(
-            cuts, max_duration=self.args.max_duration, shuffle=False
+        sampler = DynamicBucketingSampler(
+            cuts,
+            max_duration=self.args.max_duration,
+            shuffle=False,
         )
         logging.debug("About to create test dataloader")
         test_dl = DataLoader(
@@ -41,6 +41,7 @@ export CUDA_VISIBLE_DEVICES="0,1,2"
 import argparse
 import logging
 import random
+import warnings
 from pathlib import Path
 from shutil import copyfile
 from typing import Optional, Tuple
@@ -446,7 +447,11 @@ def compute_loss(
     assert loss.requires_grad == is_training
 
     info = MetricsTracker()
-    info["frames"] = (feature_lens // params.subsampling_factor).sum().item()
+    with warnings.catch_warnings():
+        warnings.simplefilter("ignore")
+        info["frames"] = (
+            (feature_lens // params.subsampling_factor).sum().item()
+        )
 
     # Note: We use reduction=sum while computing the loss.
     info["loss"] = loss.detach().cpu().item()
@@ -636,19 +641,15 @@ def train_one_epoch(
 def filter_short_and_long_utterances(cuts: CutSet) -> CutSet:
     def remove_short_and_long_utt(c: Cut):
         # Keep only utterances with duration between 1 second and 12 seconds
+        #
+        # Caution: There is a reason to select 12.0 here. Please see
+        # ../local/display_manifest_statistics.py
+        #
+        # You should use ../local/display_manifest_statistics.py to get
+        # an utterance duration distribution for your dataset to select
+        # the threshold
         return 1.0 <= c.duration <= 12.0
 
-    num_in_total = len(cuts)
     cuts = cuts.filter(remove_short_and_long_utt)
 
-    num_left = len(cuts)
-    num_removed = num_in_total - num_left
-    removed_percent = num_removed / num_in_total * 100
-
-    logging.info(f"Before removing short and long utterances: {num_in_total}")
-    logging.info(f"After removing short and long utterances: {num_left}")
-    logging.info(f"Removed {num_removed} utterances ({removed_percent:.5f}%)")
-
     return cuts
 
 
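A standalone sketch of the same duration filter (the bounds mirror the hunk above; the manifest path is illustrative). For lazily-opened manifests, `CutSet.filter` does not materialize the result and `len()` may be unavailable or expensive, which is plausibly why the count-based logging was dropped:

```python
from lhotse import CutSet

cuts = CutSet.from_file("data/fbank/alimeeting_cuts_train.jsonl.gz")

# Keep only utterances between 1 and 12 seconds, as in the hunk above.
cuts = cuts.filter(lambda c: 1.0 <= c.duration <= 12.0)
```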
@@ -728,15 +729,14 @@ def run(rank, world_size, args):
     train_cuts = aishell.train_cuts()
     train_cuts = filter_short_and_long_utterances(train_cuts)
 
-    datatang = AIDatatang200zh(
-        manifest_dir=f"{args.manifest_dir}/aidatatang_200zh"
-    )
+    datatang = AIDatatang200zh(manifest_dir=args.manifest_dir)
     train_datatang_cuts = datatang.train_cuts()
     train_datatang_cuts = filter_short_and_long_utterances(train_datatang_cuts)
+    train_datatang_cuts = train_datatang_cuts.repeat(times=None)
 
     if args.enable_musan:
         cuts_musan = load_manifest(
-            Path(args.manifest_dir) / "cuts_musan.json.gz"
+            Path(args.manifest_dir) / "musan_cuts.jsonl.gz"
         )
     else:
         cuts_musan = None
@@ -745,22 +745,23 @@ def run(rank, world_size, args):
 
     train_dl = asr_datamodule.train_dataloaders(
         train_cuts,
-        dynamic_bucketing=False,
         on_the_fly_feats=False,
         cuts_musan=cuts_musan,
     )
 
     datatang_train_dl = asr_datamodule.train_dataloaders(
         train_datatang_cuts,
-        dynamic_bucketing=True,
-        on_the_fly_feats=True,
+        on_the_fly_feats=False,
         cuts_musan=cuts_musan,
     )
 
     valid_cuts = aishell.valid_cuts()
     valid_dl = asr_datamodule.valid_dataloaders(valid_cuts)
 
-    for dl in [train_dl, datatang_train_dl]:
+    for dl in [
+        train_dl,
+        # datatang_train_dl
+    ]:
         scan_pessimistic_batches_for_oom(
             model=model,
             train_dl=dl,
@@ -37,6 +37,7 @@ export CUDA_VISIBLE_DEVICES="0,1,2"
 
 import argparse
 import logging
+import warnings
 from pathlib import Path
 from shutil import copyfile
 from typing import Optional, Tuple
@@ -411,7 +412,11 @@ def compute_loss(
     assert loss.requires_grad == is_training
 
     info = MetricsTracker()
-    info["frames"] = (feature_lens // params.subsampling_factor).sum().item()
+    with warnings.catch_warnings():
+        warnings.simplefilter("ignore")
+        info["frames"] = (
+            (feature_lens // params.subsampling_factor).sum().item()
+        )
 
     # Note: We use reduction=sum while computing the loss.
     info["loss"] = loss.detach().cpu().item()
@@ -625,20 +630,17 @@ def run(rank, world_size, args):
 
     def remove_short_and_long_utt(c: Cut):
         # Keep only utterances with duration between 1 second and 12 seconds
+        #
+        # Caution: There is a reason to select 12.0 here. Please see
+        # ../local/display_manifest_statistics.py
+        #
+        # You should use ../local/display_manifest_statistics.py to get
+        # an utterance duration distribution for your dataset to select
+        # the threshold
         return 1.0 <= c.duration <= 12.0
 
-    num_in_total = len(train_cuts)
-
     train_cuts = train_cuts.filter(remove_short_and_long_utt)
 
-    num_left = len(train_cuts)
-    num_removed = num_in_total - num_left
-    removed_percent = num_removed / num_in_total * 100
-
-    logging.info(f"Before removing short and long utterances: {num_in_total}")
-    logging.info(f"After removing short and long utterances: {num_left}")
-    logging.info(f"Removed {num_removed} utterances ({removed_percent:.5f}%)")
-
     train_dl = aishell.train_dataloaders(train_cuts)
     valid_dl = aishell.valid_dataloaders(aishell.valid_cuts())
 
19  egs/alimeeting/ASR/README.md  Normal file
@@ -0,0 +1,19 @@
+
+# Introduction
+
+This recipe includes some different ASR models trained with Alimeeting (far).
+
+[./RESULTS.md](./RESULTS.md) contains the latest results.
+
+# Transducers
+
+There are various folders containing the name `transducer` in this folder.
+The following table lists the differences among them.
+
+|                                | Encoder             | Decoder            | Comment                    |
+|--------------------------------|---------------------|--------------------|----------------------------|
+| `pruned_transducer_stateless2` | Conformer(modified) | Embedding + Conv1d | Using k2 pruned RNN-T loss |
+
+The decoder in `transducer_stateless` is modified from the paper
+[Rnn-Transducer with Stateless Prediction Network](https://ieeexplore.ieee.org/document/9054419/).
+We place an additional Conv1d layer right after the input embedding layer.
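A rough sketch of the "Embedding + Conv1d" stateless decoder the table refers to; the dimensions, kernel size, and module layout here are assumptions based on the cited paper, not code from this diff:

```python
import torch
import torch.nn as nn


class StatelessDecoder(nn.Module):
    """Conditions only on the last `context_size` tokens, with no RNN state."""

    def __init__(self, vocab_size: int, embed_dim: int, context_size: int = 2):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embed_dim)
        # A 1-D convolution over the short token context stands in for the
        # recurrent prediction network of a conventional RNN-T.
        self.conv = nn.Conv1d(embed_dim, embed_dim, kernel_size=context_size)

    def forward(self, y: torch.Tensor) -> torch.Tensor:
        # y: (batch, context_size) token IDs
        emb = self.embedding(y).permute(0, 2, 1)  # (batch, embed_dim, context)
        return self.conv(emb).permute(0, 2, 1)  # (batch, 1, embed_dim)


decoder = StatelessDecoder(vocab_size=4336, embed_dim=512)  # sizes illustrative
out = decoder(torch.tensor([[5, 9]]))
print(out.shape)  # torch.Size([1, 1, 512])
```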
71  egs/alimeeting/ASR/RESULTS.md  Normal file
@@ -0,0 +1,71 @@
+## Results
+
+### Alimeeting Char training results (Pruned Transducer Stateless2)
+
+#### 2022-06-01
+
+Using the codes from this PR https://github.com/k2-fsa/icefall/pull/378.
+
+The WERs are
+|                                     | eval  | test  | comment                                   |
+|-------------------------------------|-------|-------|-------------------------------------------|
+| greedy search                       | 31.77 | 34.66 | --epoch 29, --avg 18, --max-duration 100  |
+| modified beam search (beam size 4)  | 30.38 | 33.02 | --epoch 29, --avg 18, --max-duration 100  |
+| fast beam search (set as default)   | 31.39 | 34.25 | --epoch 29, --avg 18, --max-duration 1500 |
+
+The training command for reproducing is given below:
+
+```
+export CUDA_VISIBLE_DEVICES="0,1,2,3"
+
+./pruned_transducer_stateless2/train.py \
+  --world-size 4 \
+  --num-epochs 30 \
+  --start-epoch 0 \
+  --exp-dir pruned_transducer_stateless2/exp \
+  --lang-dir data/lang_char \
+  --max-duration 220 \
+  --save-every-n 1000
+
+```
+
+The tensorboard training log can be found at
+https://tensorboard.dev/experiment/AoqgSvZKTZCJhJbOuG3W6g/#scalars
+
+The decoding command is:
+```
+epoch=29
+avg=18
+
+## greedy search
+./pruned_transducer_stateless2/decode.py \
+  --epoch $epoch \
+  --avg $avg \
+  --exp-dir pruned_transducer_stateless2/exp \
+  --lang-dir ./data/lang_char \
+  --max-duration 100
+
+## modified beam search
+./pruned_transducer_stateless2/decode.py \
+  --epoch $epoch \
+  --avg $avg \
+  --exp-dir pruned_transducer_stateless2/exp \
+  --lang-dir ./data/lang_char \
+  --max-duration 100 \
+  --decoding-method modified_beam_search \
+  --beam-size 4
+
+## fast beam search
+./pruned_transducer_stateless2/decode.py \
+  --epoch $epoch \
+  --avg $avg \
+  --exp-dir ./pruned_transducer_stateless2/exp \
+  --lang-dir ./data/lang_char \
+  --max-duration 1500 \
+  --decoding-method fast_beam_search \
+  --beam 4 \
+  --max-contexts 4 \
+  --max-states 8
+```
+
+A pre-trained model and decoding logs can be found at <https://huggingface.co/luomingshuang/icefall_asr_alimeeting_pruned_transducer_stateless2>
0  egs/alimeeting/ASR/local/__init__.py  Normal file

124  egs/alimeeting/ASR/local/compute_fbank_alimeeting.py  Executable file
@@ -0,0 +1,124 @@
+#!/usr/bin/env python3
+# Copyright    2021  Xiaomi Corp.        (authors: Fangjun Kuang)
+#
+# See ../../../../LICENSE for clarification regarding multiple authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""
+This file computes fbank features of the aishell dataset.
+It looks for manifests in the directory data/manifests.
+
+The generated fbank features are saved in data/fbank.
+"""
+
+import argparse
+import logging
+import os
+from pathlib import Path
+
+import torch
+from lhotse import CutSet, Fbank, FbankConfig, LilcomChunkyWriter
+from lhotse.recipes.utils import read_manifests_if_cached
+
+from icefall.utils import get_executor
+
+# Torch's multithreaded behavior needs to be disabled or
+# it wastes a lot of CPU and slow things down.
+# Do this outside of main() in case it needs to take effect
+# even when we are not invoking the main (e.g. when spawning subprocesses).
+torch.set_num_threads(1)
+torch.set_num_interop_threads(1)
+
+
+def compute_fbank_alimeeting(num_mel_bins: int = 80):
+    src_dir = Path("data/manifests")
+    output_dir = Path("data/fbank")
+    num_jobs = min(15, os.cpu_count())
+
+    dataset_parts = (
+        "train",
+        "eval",
+        "test",
+    )
+
+    prefix = "alimeeting"
+    suffix = "jsonl.gz"
+    manifests = read_manifests_if_cached(
+        dataset_parts=dataset_parts,
+        output_dir=src_dir,
+        prefix=prefix,
+        suffix=suffix,
+    )
+    assert manifests is not None
+
+    extractor = Fbank(FbankConfig(num_mel_bins=num_mel_bins))
+
+    with get_executor() as ex:  # Initialize the executor only once.
+        for partition, m in manifests.items():
+            if (output_dir / f"{prefix}_cuts_{partition}.{suffix}").is_file():
+                logging.info(f"{partition} already exists - skipping.")
+                continue
+            logging.info(f"Processing {partition}")
+            cut_set = CutSet.from_manifests(
+                recordings=m["recordings"],
+                supervisions=m["supervisions"],
+            )
+            if "train" in partition:
+                cut_set = (
+                    cut_set
+                    + cut_set.perturb_speed(0.9)
+                    + cut_set.perturb_speed(1.1)
+                )
+            cur_num_jobs = num_jobs if ex is None else 80
+            cur_num_jobs = min(cur_num_jobs, len(cut_set))
+
+            cut_set = cut_set.compute_and_store_features(
+                extractor=extractor,
+                storage_path=f"{output_dir}/{prefix}_feats_{partition}",
+                # when an executor is specified, make more partitions
+                num_jobs=cur_num_jobs,
+                executor=ex,
+                storage_type=LilcomChunkyWriter,
+            )
+
+            logging.info("About splitting cuts into smaller chunks")
+            cut_set = cut_set.trim_to_supervisions(
+                keep_overlapping=False,
+                min_duration=None,
+            )
+            cut_set.to_file(output_dir / f"{prefix}_cuts_{partition}.{suffix}")
+
+
+def get_args():
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--num-mel-bins",
+        type=int,
+        default=80,
+        help="""The number of mel bins for Fbank""",
+    )
+
+    return parser.parse_args()
+
+
+if __name__ == "__main__":
+    formatter = (
+        "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s"
+    )
+
+    logging.basicConfig(format=formatter, level=logging.INFO)
+
+    args = get_args()
+    compute_fbank_alimeeting(num_mel_bins=args.num_mel_bins)
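Assuming the alimeeting manifests have already been prepared under `data/manifests`, the script above would typically be run from the recipe directory as follows (a usage sketch, not a command taken from this commit):

```
cd egs/alimeeting/ASR
python3 ./local/compute_fbank_alimeeting.py --num-mel-bins 80
```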
1  egs/alimeeting/ASR/local/compute_fbank_musan.py  Symbolic link
@@ -0,0 +1 @@
+../../../librispeech/ASR/local/compute_fbank_musan.py
96  egs/alimeeting/ASR/local/display_manifest_statistics.py  Normal file
@@ -0,0 +1,96 @@
+# Copyright    2021  Xiaomi Corp.        (authors: Fangjun Kuang
+#                                                  Mingshuang Luo)
+#
+# See ../../../../LICENSE for clarification regarding multiple authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+This file displays duration statistics of utterances in a manifest.
+You can use the displayed value to choose minimum/maximum duration
+to remove short and long utterances during the training.
+See the function `remove_short_and_long_utt()`
+in ../../../librispeech/ASR/transducer/train.py
+for usage.
+"""
+
+
+from lhotse import load_manifest_lazy
+
+
+def main():
+    paths = [
+        "./data/fbank/alimeeting_cuts_train.jsonl.gz",
+        "./data/fbank/alimeeting_cuts_eval.jsonl.gz",
+        "./data/fbank/alimeeting_cuts_test.jsonl.gz",
+    ]
+
+    for path in paths:
+        print(f"Starting display the statistics for {path}")
+        cuts = load_manifest_lazy(path)
+        cuts.describe()
+
+
+if __name__ == "__main__":
+    main()
+
+"""
+Starting display the statistics for ./data/fbank/alimeeting_cuts_train.jsonl.gz
+Cuts count: 559092
+Total duration (hours): 424.6
+Speech duration (hours): 424.6 (100.0%)
+***
+Duration statistics (seconds):
+mean    2.7
+std     3.0
+min     0.0
+25%     0.7
+50%     1.7
+75%     3.6
+99%     13.6
+99.5%   14.7
+99.9%   16.2
+max     284.3
+Starting display the statistics for ./data/fbank/alimeeting_cuts_eval.jsonl.gz
+Cuts count: 6457
+Total duration (hours): 4.9
+Speech duration (hours): 4.9 (100.0%)
+***
+Duration statistics (seconds):
+mean    2.7
+std     3.1
+min     0.1
+25%     0.6
+50%     1.6
+75%     3.5
+99%     13.6
+99.5%   14.1
+99.9%   14.7
+max     15.8
+Starting display the statistics for ./data/fbank/alimeeting_cuts_test.jsonl.gz
+Cuts count: 16358
+Total duration (hours): 12.5
+Speech duration (hours): 12.5 (100.0%)
+***
+Duration statistics (seconds):
+mean    2.7
+std     2.9
+min     0.1
+25%     0.7
+50%     1.7
+75%     3.5
+99%     13.7
+99.5%   14.2
+99.9%   14.8
+max     15.7
+"""
248  egs/alimeeting/ASR/local/prepare_char.py  Executable file
@@ -0,0 +1,248 @@
+#!/usr/bin/env python3
+# Copyright    2021  Xiaomi Corp.        (authors: Fangjun Kuang,
+#                                                  Wei Kang)
+#
+# See ../../../../LICENSE for clarification regarding multiple authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""
+
+This script takes as input `lang_dir`, which should contain::
+
+    - lang_dir/text,
+    - lang_dir/words.txt
+
+and generates the following files in the directory `lang_dir`:
+
+    - lexicon.txt
+    - lexicon_disambig.txt
+    - L.pt
+    - L_disambig.pt
+    - tokens.txt
+"""
+
+import re
+from pathlib import Path
+from typing import Dict, List
+
+import k2
+import torch
+from prepare_lang import (
+    Lexicon,
+    add_disambig_symbols,
+    add_self_loops,
+    write_lexicon,
+    write_mapping,
+)
+
+
+def lexicon_to_fst_no_sil(
+    lexicon: Lexicon,
+    token2id: Dict[str, int],
+    word2id: Dict[str, int],
+    need_self_loops: bool = False,
+) -> k2.Fsa:
+    """Convert a lexicon to an FST (in k2 format).
+
+    Args:
+      lexicon:
+        The input lexicon. See also :func:`read_lexicon`
+      token2id:
+        A dict mapping tokens to IDs.
+      word2id:
+        A dict mapping words to IDs.
+      need_self_loops:
+        If True, add self-loop to states with non-epsilon output symbols
+        on at least one arc out of the state. The input label for this
+        self loop is `token2id["#0"]` and the output label is `word2id["#0"]`.
+    Returns:
+      Return an instance of `k2.Fsa` representing the given lexicon.
+    """
+    loop_state = 0  # words enter and leave from here
+    next_state = 1  # the next un-allocated state, will be incremented as we go
+
+    arcs = []
+
+    # The blank symbol <blk> is defined in local/train_bpe_model.py
+    assert token2id["<blk>"] == 0
+    assert word2id["<eps>"] == 0
+
+    eps = 0
+
+    for word, pieces in lexicon:
+        assert len(pieces) > 0, f"{word} has no pronunciations"
+        cur_state = loop_state
+
+        word = word2id[word]
+        pieces = [
+            token2id[i] if i in token2id else token2id["<unk>"] for i in pieces
+        ]
+
+        for i in range(len(pieces) - 1):
+            w = word if i == 0 else eps
+            arcs.append([cur_state, next_state, pieces[i], w, 0])
+
+            cur_state = next_state
+            next_state += 1
+
+        # now for the last piece of this word
+        i = len(pieces) - 1
+        w = word if i == 0 else eps
+        arcs.append([cur_state, loop_state, pieces[i], w, 0])
+
+    if need_self_loops:
+        disambig_token = token2id["#0"]
+        disambig_word = word2id["#0"]
+        arcs = add_self_loops(
+            arcs,
+            disambig_token=disambig_token,
+            disambig_word=disambig_word,
+        )
+
+    final_state = next_state
+    arcs.append([loop_state, final_state, -1, -1, 0])
+    arcs.append([final_state])
+
+    arcs = sorted(arcs, key=lambda arc: arc[0])
+    arcs = [[str(i) for i in arc] for arc in arcs]
+    arcs = [" ".join(arc) for arc in arcs]
+    arcs = "\n".join(arcs)
+
+    fsa = k2.Fsa.from_str(arcs, acceptor=False)
+    return fsa
+
+
+def contain_oov(token_sym_table: Dict[str, int], tokens: List[str]) -> bool:
+    """Check if all the given tokens are in token symbol table.
+
+    Args:
+      token_sym_table:
+        Token symbol table that contains all the valid tokens.
+      tokens:
+        A list of tokens.
+    Returns:
+      Return True if there is any token not in the token_sym_table,
+      otherwise False.
+    """
+    for tok in tokens:
+        if tok not in token_sym_table:
+            return True
+    return False
+
+
+def generate_lexicon(
+    token_sym_table: Dict[str, int], words: List[str]
+) -> Lexicon:
+    """Generate a lexicon from a word list and token_sym_table.
+
+    Args:
+      token_sym_table:
+        Token symbol table that mapping token to token ids.
+      words:
+        A list of strings representing words.
+    Returns:
+      Return a dict whose keys are words and values are the corresponding
+      tokens.
+    """
+    lexicon = []
+    for word in words:
+        chars = list(word.strip(" \t"))
+        if contain_oov(token_sym_table, chars):
+            continue
+        lexicon.append((word, chars))
+
+    # The OOV word is <UNK>
+    lexicon.append(("<UNK>", ["<unk>"]))
+    return lexicon
+
+
+def generate_tokens(text_file: str) -> Dict[str, int]:
+    """Generate tokens from the given text file.
+
+    Args:
+      text_file:
+        A file that contains text lines to generate tokens.
+    Returns:
+      Return a dict whose keys are tokens and values are token ids ranged
+      from 0 to len(keys) - 1.
+    """
+    tokens: Dict[str, int] = dict()
+    tokens["<blk>"] = 0
+    tokens["<sos/eos>"] = 1
+    tokens["<unk>"] = 2
+    whitespace = re.compile(r"([ \t\r\n]+)")
+    with open(text_file, "r", encoding="utf-8") as f:
+        for line in f:
+            line = re.sub(whitespace, "", line)
+            chars = list(line)
+            for char in chars:
+                if char not in tokens:
+                    tokens[char] = len(tokens)
+    return tokens
+
+
+def main():
+    lang_dir = Path("data/lang_char")
+    text_file = lang_dir / "text"
+
+    word_sym_table = k2.SymbolTable.from_file(lang_dir / "words.txt")
+
+    words = word_sym_table.symbols
+
+    excluded = ["<eps>", "!SIL", "<SPOKEN_NOISE>", "<UNK>", "#0", "<s>", "</s>"]
+    for w in excluded:
+        if w in words:
+            words.remove(w)
+
+    token_sym_table = generate_tokens(text_file)
+
+    lexicon = generate_lexicon(token_sym_table, words)
+
+    lexicon_disambig, max_disambig = add_disambig_symbols(lexicon)
+
+    next_token_id = max(token_sym_table.values()) + 1
+    for i in range(max_disambig + 1):
+        disambig = f"#{i}"
+        assert disambig not in token_sym_table
+        token_sym_table[disambig] = next_token_id
+        next_token_id += 1
+
+    word_sym_table.add("#0")
+    word_sym_table.add("<s>")
+    word_sym_table.add("</s>")
+
+    write_mapping(lang_dir / "tokens.txt", token_sym_table)
+
+    write_lexicon(lang_dir / "lexicon.txt", lexicon)
+    write_lexicon(lang_dir / "lexicon_disambig.txt", lexicon_disambig)
+
+    L = lexicon_to_fst_no_sil(
+        lexicon,
+        token2id=token_sym_table,
+        word2id=word_sym_table,
+    )
+
+    L_disambig = lexicon_to_fst_no_sil(
+        lexicon_disambig,
+        token2id=token_sym_table,
+        word2id=word_sym_table,
+        need_self_loops=True,
+    )
+    torch.save(L.as_dict(), lang_dir / "L.pt")
+    torch.save(L_disambig.as_dict(), lang_dir / "L_disambig.pt")
+
+
+if __name__ == "__main__":
+    main()
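To make `generate_lexicon` above concrete, a toy invocation (it assumes the recipe's `local/` directory is importable and its dependencies are installed; the token table below is hand-built in the shape `generate_tokens` produces):

```python
from prepare_char import generate_lexicon

# Reserved symbols occupy IDs 0-2, followed by characters from the text.
token_sym_table = {"<blk>": 0, "<sos/eos>": 1, "<unk>": 2, "你": 3, "好": 4}

lexicon = generate_lexicon(token_sym_table, ["你好", "你"])
print(lexicon)
# [('你好', ['你', '好']), ('你', ['你']), ('<UNK>', ['<unk>'])]
```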
390  egs/alimeeting/ASR/local/prepare_lang.py  Executable file
@@ -0,0 +1,390 @@
+#!/usr/bin/env python3
+# Copyright    2021  Xiaomi Corp.        (authors: Fangjun Kuang)
+#
+# See ../../../../LICENSE for clarification regarding multiple authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""
+This script takes as input a lexicon file "data/lang_phone/lexicon.txt"
+consisting of words and tokens (i.e., phones) and does the following:
+
+1. Add disambiguation symbols to the lexicon and generate lexicon_disambig.txt
+
+2. Generate tokens.txt, the token table mapping a token to a unique integer.
+
+3. Generate words.txt, the word table mapping a word to a unique integer.
+
+4. Generate L.pt, in k2 format. It can be loaded by
+
+     d = torch.load("L.pt")
+     lexicon = k2.Fsa.from_dict(d)
+
+5. Generate L_disambig.pt, in k2 format.
+"""
+import argparse
+import math
+from collections import defaultdict
+from pathlib import Path
+from typing import Any, Dict, List, Tuple
+
+import k2
+import torch
+
+from icefall.lexicon import read_lexicon, write_lexicon
+
+Lexicon = List[Tuple[str, List[str]]]
+
+
+def write_mapping(filename: str, sym2id: Dict[str, int]) -> None:
+    """Write a symbol to ID mapping to a file.
+
+    Note:
+      No need to implement `read_mapping` as it can be done
+      through :func:`k2.SymbolTable.from_file`.
+
+    Args:
+      filename:
+        Filename to save the mapping.
+      sym2id:
+        A dict mapping symbols to IDs.
+    Returns:
+      Return None.
+    """
+    with open(filename, "w", encoding="utf-8") as f:
+        for sym, i in sym2id.items():
+            f.write(f"{sym} {i}\n")
+
+
+def get_tokens(lexicon: Lexicon) -> List[str]:
+    """Get tokens from a lexicon.
+
+    Args:
+      lexicon:
+        It is the return value of :func:`read_lexicon`.
+    Returns:
+      Return a list of unique tokens.
+    """
+    ans = set()
+    for _, tokens in lexicon:
+        ans.update(tokens)
+    sorted_ans = sorted(list(ans))
+    return sorted_ans
+
+
+def get_words(lexicon: Lexicon) -> List[str]:
+    """Get words from a lexicon.
+
+    Args:
+      lexicon:
+        It is the return value of :func:`read_lexicon`.
+    Returns:
+      Return a list of unique words.
+    """
+    ans = set()
+    for word, _ in lexicon:
+        ans.add(word)
+    sorted_ans = sorted(list(ans))
+    return sorted_ans
+
+
+def add_disambig_symbols(lexicon: Lexicon) -> Tuple[Lexicon, int]:
+    """It adds pseudo-token disambiguation symbols #1, #2 and so on
+    at the ends of tokens to ensure that all pronunciations are different,
+    and that none is a prefix of another.
+
+    See also add_lex_disambig.pl from kaldi.
+
+    Args:
+      lexicon:
+        It is returned by :func:`read_lexicon`.
+    Returns:
+      Return a tuple with two elements:
+
+        - The output lexicon with disambiguation symbols
+        - The ID of the max disambiguation symbol that appears
+          in the lexicon
+    """
+
+    # (1) Work out the count of each token-sequence in the
+    # lexicon.
+    count = defaultdict(int)
+    for _, tokens in lexicon:
+        count[" ".join(tokens)] += 1
+
+    # (2) For each left sub-sequence of each token-sequence, note down
+    # that it exists (for identifying prefixes of longer strings).
+    issubseq = defaultdict(int)
+    for _, tokens in lexicon:
+        tokens = tokens.copy()
+        tokens.pop()
+        while tokens:
+            issubseq[" ".join(tokens)] = 1
+            tokens.pop()
+
+    # (3) For each entry in the lexicon:
+    # if the token sequence is unique and is not a
+    # prefix of another word, no disambig symbol.
+    # Else output #1, or #2, #3, ... if the same token-seq
+    # has already been assigned a disambig symbol.
+    ans = []
+
+    # We start with #1 since #0 has its own purpose
+    first_allowed_disambig = 1
+    max_disambig = first_allowed_disambig - 1
+    last_used_disambig_symbol_of = defaultdict(int)
+
+    for word, tokens in lexicon:
+        tokenseq = " ".join(tokens)
+        assert tokenseq != ""
+        if issubseq[tokenseq] == 0 and count[tokenseq] == 1:
+            ans.append((word, tokens))
+            continue
+
+        cur_disambig = last_used_disambig_symbol_of[tokenseq]
+        if cur_disambig == 0:
+            cur_disambig = first_allowed_disambig
+        else:
+            cur_disambig += 1
+
+        if cur_disambig > max_disambig:
+            max_disambig = cur_disambig
+        last_used_disambig_symbol_of[tokenseq] = cur_disambig
+        tokenseq += f" #{cur_disambig}"
+        ans.append((word, tokenseq.split()))
+    return ans, max_disambig
+
+
+def generate_id_map(symbols: List[str]) -> Dict[str, int]:
+    """Generate ID maps, i.e., map a symbol to a unique ID.
+
+    Args:
+      symbols:
+        A list of unique symbols.
+    Returns:
+      A dict containing the mapping between symbols and IDs.
+    """
+    return {sym: i for i, sym in enumerate(symbols)}
+
+
+def add_self_loops(
+    arcs: List[List[Any]], disambig_token: int, disambig_word: int
+) -> List[List[Any]]:
+    """Adds self-loops to states of an FST to propagate disambiguation symbols
+    through it. They are added on each state with non-epsilon output symbols
+    on at least one arc out of the state.
+
+    See also fstaddselfloops.pl from Kaldi. One difference is that
+    Kaldi uses OpenFst style FSTs and it has multiple final states.
+    This function uses k2 style FSTs and it does not need to add self-loops
+    to the final state.
+
+    The input label of a self-loop is `disambig_token`, while the output
+    label is `disambig_word`.
+
+    Args:
+      arcs:
+        A list-of-list. The sublist contains
+        `[src_state, dest_state, label, aux_label, score]`
+      disambig_token:
+        It is the token ID of the symbol `#0`.
+      disambig_word:
+        It is the word ID of the symbol `#0`.
+
+    Return:
+      Return new `arcs` containing self-loops.
+    """
+    states_needs_self_loops = set()
+    for arc in arcs:
+        src, dst, ilabel, olabel, score = arc
+        if olabel != 0:
+            states_needs_self_loops.add(src)
+
+    ans = []
+    for s in states_needs_self_loops:
+        ans.append([s, s, disambig_token, disambig_word, 0])
+
+    return arcs + ans
+
+
+def lexicon_to_fst(
+    lexicon: Lexicon,
+    token2id: Dict[str, int],
+    word2id: Dict[str, int],
+    sil_token: str = "SIL",
+    sil_prob: float = 0.5,
+    need_self_loops: bool = False,
+) -> k2.Fsa:
+    """Convert a lexicon to an FST (in k2 format) with optional silence at
+    the beginning and end of each word.
+
+    Args:
+      lexicon:
+        The input lexicon. See also :func:`read_lexicon`
+      token2id:
+        A dict mapping tokens to IDs.
+      word2id:
+        A dict mapping words to IDs.
+      sil_token:
+        The silence token.
+      sil_prob:
+        The probability for adding a silence at the beginning and end
+        of the word.
+      need_self_loops:
+        If True, add self-loop to states with non-epsilon output symbols
+        on at least one arc out of the state. The input label for this
+        self loop is `token2id["#0"]` and the output label is `word2id["#0"]`.
+    Returns:
+      Return an instance of `k2.Fsa` representing the given lexicon.
+    """
+    assert sil_prob > 0.0 and sil_prob < 1.0
+    # CAUTION: we use score, i.e, negative cost.
+    sil_score = math.log(sil_prob)
+    no_sil_score = math.log(1.0 - sil_prob)
+
+    start_state = 0
+    loop_state = 1  # words enter and leave from here
+    sil_state = 2  # words terminate here when followed by silence; this state
+    # has a silence transition to loop_state.
+    next_state = 3  # the next un-allocated state, will be incremented as we go.
+    arcs = []
+
+    assert token2id["<eps>"] == 0
+    assert word2id["<eps>"] == 0
+
+    eps = 0
+
+    sil_token = token2id[sil_token]
+
+    arcs.append([start_state, loop_state, eps, eps, no_sil_score])
+    arcs.append([start_state, sil_state, eps, eps, sil_score])
+    arcs.append([sil_state, loop_state, sil_token, eps, 0])
+
+    for word, tokens in lexicon:
+        assert len(tokens) > 0, f"{word} has no pronunciations"
+        cur_state = loop_state
+
+        word = word2id[word]
+        tokens = [token2id[i] for i in tokens]
+
+        for i in range(len(tokens) - 1):
+            w = word if i == 0 else eps
+            arcs.append([cur_state, next_state, tokens[i], w, 0])
+
+            cur_state = next_state
+            next_state += 1
+
+        # now for the last token of this word
+        # It has two out-going arcs, one to the loop state,
+        # the other one to the sil_state.
+        i = len(tokens) - 1
+        w = word if i == 0 else eps
+        arcs.append([cur_state, loop_state, tokens[i], w, no_sil_score])
+        arcs.append([cur_state, sil_state, tokens[i], w, sil_score])
+
+    if need_self_loops:
+        disambig_token = token2id["#0"]
+        disambig_word = word2id["#0"]
+        arcs = add_self_loops(
+            arcs,
+            disambig_token=disambig_token,
+            disambig_word=disambig_word,
+        )
+
+    final_state = next_state
+    arcs.append([loop_state, final_state, -1, -1, 0])
+    arcs.append([final_state])
+
+    arcs = sorted(arcs, key=lambda arc: arc[0])
+    arcs = [[str(i) for i in arc] for arc in arcs]
+    arcs = [" ".join(arc) for arc in arcs]
+    arcs = "\n".join(arcs)
+
+    fsa = k2.Fsa.from_str(arcs, acceptor=False)
+    return fsa
+
+
+def get_args():
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--lang-dir", type=str, help="The lang dir, data/lang_phone"
+    )
+    return parser.parse_args()
+
+
+def main():
+    out_dir = Path(get_args().lang_dir)
+    lexicon_filename = out_dir / "lexicon.txt"
+    sil_token = "SIL"
+    sil_prob = 0.5
+
+    lexicon = read_lexicon(lexicon_filename)
+    tokens = get_tokens(lexicon)
+    words = get_words(lexicon)
+
+    lexicon_disambig, max_disambig = add_disambig_symbols(lexicon)
+
+    for i in range(max_disambig + 1):
+        disambig = f"#{i}"
+        assert disambig not in tokens
+        tokens.append(f"#{i}")
+
+    assert "<eps>" not in tokens
+    tokens = ["<eps>"] + tokens
+
+    assert "<eps>" not in words
+    assert "#0" not in words
+    assert "<s>" not in words
+    assert "</s>" not in words
+
+    words = ["<eps>"] + words + ["#0", "<s>", "</s>"]
+
+    token2id = generate_id_map(tokens)
+    word2id = generate_id_map(words)
+
+    write_mapping(out_dir / "tokens.txt", token2id)
+    write_mapping(out_dir / "words.txt", word2id)
+    write_lexicon(out_dir / "lexicon_disambig.txt", lexicon_disambig)
+
+    L = lexicon_to_fst(
+        lexicon,
+        token2id=token2id,
+        word2id=word2id,
+        sil_token=sil_token,
+        sil_prob=sil_prob,
+    )
+
+    L_disambig = lexicon_to_fst(
+        lexicon_disambig,
+        token2id=token2id,
+        word2id=word2id,
+        sil_token=sil_token,
+        sil_prob=sil_prob,
+        need_self_loops=True,
+    )
+    torch.save(L.as_dict(), out_dir / "L.pt")
+    torch.save(L_disambig.as_dict(), out_dir / "L_disambig.pt")
+
+    if False:
+        # Just for debugging, will remove it
+        L.labels_sym = k2.SymbolTable.from_file(out_dir / "tokens.txt")
+        L.aux_labels_sym = k2.SymbolTable.from_file(out_dir / "words.txt")
+        L_disambig.labels_sym = L.labels_sym
+        L_disambig.aux_labels_sym = L.aux_labels_sym
+        L.draw(out_dir / "L.png", title="L")
+        L_disambig.draw(out_dir / "L_disambig.png", title="L_disambig")
+
+
+if __name__ == "__main__":
+    main()
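A toy run of `add_disambig_symbols` above, showing why both a shared pronunciation and a prefix relation trigger disambiguation symbols (assumes `prepare_lang.py` is importable):

```python
from prepare_lang import add_disambig_symbols

lexicon = [
    ("foo", ["f", "o", "o"]),
    ("food", ["f", "o", "o", "d"]),
    ("food2", ["f", "o", "o", "d"]),
]

# "foo" is a prefix of "food", and "food"/"food2" share a pronunciation,
# so all three entries receive a disambiguation symbol.
lexicon_disambig, max_disambig = add_disambig_symbols(lexicon)
print(lexicon_disambig)
# [('foo', ['f', 'o', 'o', '#1']),
#  ('food', ['f', 'o', 'o', 'd', '#1']),
#  ('food2', ['f', 'o', 'o', 'd', '#2'])]
print(max_disambig)  # 2
```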
84  egs/alimeeting/ASR/local/prepare_words.py  Executable file
@@ -0,0 +1,84 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright    2021  Xiaomi Corp.        (authors: Mingshuang Luo)
+#
+# See ../../../../LICENSE for clarification regarding multiple authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""
+This script takes as input words.txt without ids:
+    - words_no_ids.txt
+and generates the new words.txt with related ids.
+    - words.txt
+"""
+
+
+import argparse
+import logging
+
+from tqdm import tqdm
+
+
+def get_parser():
+    parser = argparse.ArgumentParser(
+        description="Prepare words.txt",
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+    )
+    parser.add_argument(
+        "--input-file",
+        default="data/lang_char/words_no_ids.txt",
+        type=str,
+        help="the words file without ids for WenetSpeech",
+    )
+    parser.add_argument(
+        "--output-file",
+        default="data/lang_char/words.txt",
+        type=str,
+        help="the words file with ids for WenetSpeech",
+    )
+
+    return parser
+
+
+def main():
+    parser = get_parser()
+    args = parser.parse_args()
+
+    input_file = args.input_file
+    output_file = args.output_file
+
+    f = open(input_file, "r", encoding="utf-8")
+    lines = f.readlines()
+    new_lines = []
+    add_words = ["<eps> 0", "!SIL 1", "<SPOKEN_NOISE> 2", "<UNK> 3"]
+    new_lines.extend(add_words)
+
+    logging.info("Starting reading the input file")
+    for i in tqdm(range(len(lines))):
+        x = lines[i]
+        idx = 4 + i
+        new_line = str(x.strip("\n")) + " " + str(idx)
+        new_lines.append(new_line)
+
+    logging.info("Starting writing the words.txt")
+    f_out = open(output_file, "w", encoding="utf-8")
+    for line in new_lines:
+        f_out.write(line)
+        f_out.write("\n")
+
+
+if __name__ == "__main__":
+    main()
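A usage sketch for the script above, spelling out its own default paths:

```
python3 ./local/prepare_words.py \
  --input-file data/lang_char/words_no_ids.txt \
  --output-file data/lang_char/words.txt
```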
106  egs/alimeeting/ASR/local/test_prepare_lang.py  Executable file
@@ -0,0 +1,106 @@
+#!/usr/bin/env python3
+# Copyright    2021  Xiaomi Corp.        (authors: Fangjun Kuang)
+#
+# See ../../../../LICENSE for clarification regarding multiple authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Copyright (c) 2021 Xiaomi Corporation (authors: Fangjun Kuang)
+
+import os
+import tempfile
+
+import k2
+from prepare_lang import (
+    add_disambig_symbols,
+    generate_id_map,
+    get_phones,
+    get_words,
+    lexicon_to_fst,
+    read_lexicon,
+    write_lexicon,
+    write_mapping,
+)
+
+
+def generate_lexicon_file() -> str:
+    fd, filename = tempfile.mkstemp()
+    os.close(fd)
+    s = """
+    !SIL SIL
+    <SPOKEN_NOISE> SPN
+    <UNK> SPN
+    f f
+    a a
+    foo f o o
+    bar b a r
+    bark b a r k
+    food f o o d
+    food2 f o o d
+    fo f o
+    """.strip()
+    with open(filename, "w") as f:
+        f.write(s)
+    return filename
+
+
+def test_read_lexicon(filename: str):
+    lexicon = read_lexicon(filename)
+    phones = get_phones(lexicon)
+    words = get_words(lexicon)
+    print(lexicon)
+    print(phones)
+    print(words)
+    lexicon_disambig, max_disambig = add_disambig_symbols(lexicon)
+    print(lexicon_disambig)
+    print("max disambig:", f"#{max_disambig}")
+
+    phones = ["<eps>", "SIL", "SPN"] + phones
+    for i in range(max_disambig + 1):
+        phones.append(f"#{i}")
+    words = ["<eps>"] + words
+
+    phone2id = generate_id_map(phones)
+    word2id = generate_id_map(words)
+
+    print(phone2id)
+    print(word2id)
+
+    write_mapping("phones.txt", phone2id)
+    write_mapping("words.txt", word2id)
+
+    write_lexicon("a.txt", lexicon)
+    write_lexicon("a_disambig.txt", lexicon_disambig)
+
+    fsa = lexicon_to_fst(lexicon, phone2id=phone2id, word2id=word2id)
+    fsa.labels_sym = k2.SymbolTable.from_file("phones.txt")
+    fsa.aux_labels_sym = k2.SymbolTable.from_file("words.txt")
+    fsa.draw("L.pdf", title="L")
+
+    fsa_disambig = lexicon_to_fst(
+        lexicon_disambig, phone2id=phone2id, word2id=word2id
+    )
+    fsa_disambig.labels_sym = k2.SymbolTable.from_file("phones.txt")
+    fsa_disambig.aux_labels_sym = k2.SymbolTable.from_file("words.txt")
+    fsa_disambig.draw("L_disambig.pdf", title="L_disambig")
+
+
+def main():
+    filename = generate_lexicon_file()
+    test_read_lexicon(filename)
+    os.remove(filename)
+
+
+if __name__ == "__main__":
+    main()
83  egs/alimeeting/ASR/local/text2segments.py  Normal file
@@ -0,0 +1,83 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright    2021  Xiaomi Corp.        (authors: Mingshuang Luo)
+#
+# See ../../../../LICENSE for clarification regarding multiple authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""
+This script takes as input "text", which refers to the transcript file for
+WenetSpeech:
+    - text
+and generates the output file text_word_segmentation which is implemented
+with word segmenting:
+    - text_words_segmentation
+"""
+
+
+import argparse
+
+import jieba
+from tqdm import tqdm
+
+jieba.enable_paddle()
+
+
+def get_parser():
+    parser = argparse.ArgumentParser(
+        description="Chinese Word Segmentation for text",
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+    )
+    parser.add_argument(
+        "--input-file",
+        default="data/lang_char/text",
+        type=str,
+        help="the input text file for WenetSpeech",
+    )
+    parser.add_argument(
+        "--output-file",
+        default="data/lang_char/text_words_segmentation",
+        type=str,
+        help="the text implemented with words segmenting for WenetSpeech",
+    )
+
+    return parser
+
+
+def main():
+    parser = get_parser()
+    args = parser.parse_args()
+
+    input_file = args.input_file
+    output_file = args.output_file
+
+    f = open(input_file, "r", encoding="utf-8")
+    lines = f.readlines()
+    new_lines = []
+    for i in tqdm(range(len(lines))):
+        x = lines[i].rstrip()
+        seg_list = jieba.cut(x, use_paddle=True)
+        new_line = " ".join(seg_list)
+        new_lines.append(new_line)
+
+    f_new = open(output_file, "w", encoding="utf-8")
+    for line in new_lines:
+        f_new.write(line)
+        f_new.write("\n")
+
+
+if __name__ == "__main__":
+    main()
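A usage sketch with the script's default paths (jieba and its paddle backend must be installed for `jieba.enable_paddle()` to succeed):

```
python3 ./local/text2segments.py \
  --input-file data/lang_char/text \
  --output-file data/lang_char/text_words_segmentation
```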
195
egs/alimeeting/ASR/local/text2token.py
Executable file
195
egs/alimeeting/ASR/local/text2token.py
Executable file
@ -0,0 +1,195 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
# Copyright 2017 Johns Hopkins University (authors: Shinji Watanabe)
|
||||||
|
# 2022 Xiaomi Corp. (authors: Mingshuang Luo)
|
||||||
|
#
|
||||||
|
# See ../../../../LICENSE for clarification regarding multiple authors
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import codecs
|
||||||
|
import re
|
||||||
|
import sys
|
||||||
|
from typing import List
|
||||||
|
|
||||||
|
from pypinyin import lazy_pinyin, pinyin
|
||||||
|
|
||||||
|
is_python2 = sys.version_info[0] == 2
|
||||||
|
|
||||||
|
|
||||||
|
def exist_or_not(i, match_pos):
|
||||||
|
start_pos = None
|
||||||
|
end_pos = None
|
||||||
|
for pos in match_pos:
|
||||||
|
if pos[0] <= i < pos[1]:
|
||||||
|
start_pos = pos[0]
|
||||||
|
end_pos = pos[1]
|
||||||
|
break
|
||||||
|
|
||||||
|
return start_pos, end_pos
|
||||||
|
|
||||||
|
|
||||||
|
def get_parser():
|
||||||
|
parser = argparse.ArgumentParser(
|
||||||
|
description="convert raw text to tokenized text",
|
||||||
|
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--nchar",
|
||||||
|
"-n",
|
||||||
|
default=1,
|
||||||
|
type=int,
|
||||||
|
help="number of characters to split, i.e., \
|
||||||
|
aabb -> a a b b with -n 1 and aa bb with -n 2",
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--skip-ncols", "-s", default=0, type=int, help="skip first n columns"
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--space", default="<space>", type=str, help="space symbol"
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--non-lang-syms",
|
||||||
|
"-l",
|
||||||
|
default=None,
|
||||||
|
type=str,
|
||||||
|
help="list of non-linguistic symobles, e.g., <NOISE> etc.",
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"text", type=str, default=False, nargs="?", help="input text"
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--trans_type",
|
||||||
|
"-t",
|
||||||
|
type=str,
|
||||||
|
default="char",
|
||||||
|
choices=["char", "pinyin", "lazy_pinyin"],
|
||||||
|
help="""Transcript type. char/pinyin/lazy_pinyin""",
|
||||||
|
)
|
||||||
|
return parser
|
||||||
|
|
||||||
|
|
||||||
|
def token2id(
|
||||||
|
texts, token_table, token_type: str = "lazy_pinyin", oov: str = "<unk>"
|
||||||
|
) -> List[List[int]]:
|
||||||
|
"""Convert token to id.
|
||||||
|
Args:
|
||||||
|
texts:
|
||||||
|
The input texts, it refers to the chinese text here.
|
||||||
|
token_table:
|
||||||
|
The token table is built based on "data/lang_xxx/token.txt"
|
||||||
|
token_type:
|
||||||
|
The type of token, such as "pinyin" and "lazy_pinyin".
|
||||||
|
oov:
|
||||||
|
Out of vocabulary token. When a word(token) in the transcript
|
||||||
|
does not exist in the token list, it is replaced with `oov`.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
The list of ids for the input texts.
|
||||||
|
"""
|
||||||
|
if texts is None:
|
||||||
|
raise ValueError("texts can't be None!")
|
||||||
|
else:
|
||||||
|
oov_id = token_table[oov]
|
||||||
|
ids: List[List[int]] = []
|
||||||
|
for text in texts:
|
||||||
|
chars_list = list(str(text))
|
||||||
|
if token_type == "lazy_pinyin":
|
||||||
|
text = lazy_pinyin(chars_list)
|
||||||
|
sub_ids = [
|
||||||
|
token_table[txt] if txt in token_table else oov_id
|
||||||
|
for txt in text
|
||||||
|
]
|
||||||
|
ids.append(sub_ids)
|
||||||
|
else: # token_type = "pinyin"
|
||||||
|
text = pinyin(chars_list)
|
||||||
|
sub_ids = [
|
||||||
|
token_table[txt[0]] if txt[0] in token_table else oov_id
|
||||||
|
for txt in text
|
||||||
|
]
|
||||||
|
ids.append(sub_ids)
|
||||||
|
return ids
|
||||||

def main():
    parser = get_parser()
    args = parser.parse_args()

    rs = []
    if args.non_lang_syms is not None:
        with codecs.open(args.non_lang_syms, "r", encoding="utf-8") as f:
            nls = [x.rstrip() for x in f.readlines()]
            rs = [re.compile(re.escape(x)) for x in nls]

    if args.text:
        f = codecs.open(args.text, encoding="utf-8")
    else:
        f = codecs.getreader("utf-8")(
            sys.stdin if is_python2 else sys.stdin.buffer
        )

    sys.stdout = codecs.getwriter("utf-8")(
        sys.stdout if is_python2 else sys.stdout.buffer
    )
    line = f.readline()
    n = args.nchar
    while line:
        x = line.split()
        print(" ".join(x[: args.skip_ncols]), end=" ")
        a = " ".join(x[args.skip_ncols :])  # noqa E203

        # get all matched positions
        match_pos = []
        for r in rs:
            i = 0
            while i >= 0:
                m = r.search(a, i)
                if m:
                    match_pos.append([m.start(), m.end()])
                    i = m.end()
                else:
                    break
        if len(match_pos) > 0:
            chars = []
            i = 0
            while i < len(a):
                start_pos, end_pos = exist_or_not(i, match_pos)
                if start_pos is not None:
                    chars.append(a[start_pos:end_pos])
                    i = end_pos
                else:
                    chars.append(a[i])
                    i += 1
            a = chars

        if args.trans_type == "pinyin":
            a = pinyin(list(str(a)))
            a = [one[0] for one in a]

        if args.trans_type == "lazy_pinyin":
            a = lazy_pinyin(list(str(a)))

        a = [a[j : j + n] for j in range(0, len(a), n)]  # noqa E203

        a_flat = []
        for z in a:
            a_flat.append("".join(z))

        a_chars = "".join(a_flat)
        print(a_chars)
        line = f.readline()


if __name__ == "__main__":
    main()
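# A minimal sketch of how this script is invoked in the recipe (it mirrors the
# pipeline in prepare.sh, stage 6; the manifest path is illustrative):
#
#   gunzip -c data/manifests/alimeeting/supervisions_train.jsonl.gz \
#     | jq ".text" | sed 's/"//g' \
#     | ./local/text2token.py -t "char" > data/lang_char/text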
133
egs/alimeeting/ASR/prepare.sh
Executable file
@@ -0,0 +1,133 @@
#!/usr/bin/env bash

set -eou pipefail

stage=-1
stop_stage=100

# We assume dl_dir (download dir) contains the following
# directories and files. If not, they will be downloaded
# by this script automatically.
#
#  - $dl_dir/alimeeting
#      This directory contains the following files downloaded from
#      https://openslr.org/62/
#
#      - Train_Ali_far.tar.gz
#      - Train_Ali_near.tar.gz
#      - Test_Ali.tar.gz
#      - Eval_Ali.tar.gz
#
#  - $dl_dir/musan
#      This directory contains the following directories downloaded from
#      http://www.openslr.org/17/
#
#      - music
#      - noise
#      - speech

dl_dir=$PWD/download

. shared/parse_options.sh || exit 1

# All files generated by this script are saved in "data".
# You can safely remove "data" and rerun this script to regenerate it.
mkdir -p data

log() {
  # This function is from espnet
  local fname=${BASH_SOURCE[1]##*/}
  echo -e "$(date '+%Y-%m-%d %H:%M:%S') (${fname}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*"
}

log "dl_dir: $dl_dir"

if [ $stage -le 0 ] && [ $stop_stage -ge 0 ]; then
  log "Stage 0: Download data"

  if [ ! -f $dl_dir/alimeeting/Train_Ali_far.tar.gz ]; then
    lhotse download ali-meeting $dl_dir/alimeeting
  fi
fi

if [ $stage -le 1 ] && [ $stop_stage -ge 1 ]; then
  log "Stage 1: Prepare alimeeting manifest"
  # We assume that you have downloaded the alimeeting corpus
  # to $dl_dir/alimeeting
  if [ ! -f data/manifests/alimeeting/.manifests.done ]; then
    mkdir -p data/manifests/alimeeting
    lhotse prepare ali-meeting $dl_dir/alimeeting data/manifests/alimeeting
    touch data/manifests/alimeeting/.manifests.done
  fi
fi

if [ $stage -le 2 ] && [ $stop_stage -ge 2 ]; then
  log "Stage 2: Process alimeeting"
  # Note: this stage re-runs the manifest preparation; fbank features for
  # alimeeting themselves are computed in stage 5.
  if [ ! -f data/fbank/alimeeting/.fbank.done ]; then
    mkdir -p data/fbank/alimeeting
    lhotse prepare ali-meeting $dl_dir/alimeeting data/manifests/alimeeting
  fi
fi

if [ $stage -le 3 ] && [ $stop_stage -ge 3 ]; then
  log "Stage 3: Prepare musan manifest"
  # We assume that you have downloaded the musan corpus
  # to $dl_dir/musan
  if [ ! -f data/manifests/.musan_manifests.done ]; then
    log "It may take 6 minutes"
    mkdir -p data/manifests
    lhotse prepare musan $dl_dir/musan data/manifests
    touch data/manifests/.musan_manifests.done
  fi
fi

if [ $stage -le 4 ] && [ $stop_stage -ge 4 ]; then
  log "Stage 4: Compute fbank for musan"
  if [ ! -f data/fbank/.musan.done ]; then
    mkdir -p data/fbank
    ./local/compute_fbank_musan.py
    touch data/fbank/.musan.done
  fi
fi

if [ $stage -le 5 ] && [ $stop_stage -ge 5 ]; then
  log "Stage 5: Compute fbank for alimeeting"
  if [ ! -f data/fbank/.alimeeting.done ]; then
    mkdir -p data/fbank
    ./local/compute_fbank_alimeeting.py
    touch data/fbank/.alimeeting.done
  fi
fi

if [ $stage -le 6 ] && [ $stop_stage -ge 6 ]; then
  log "Stage 6: Prepare char based lang"
  lang_char_dir=data/lang_char
  mkdir -p $lang_char_dir

  # Prepare text.
  # Note: in Linux, you can install jq with the following command:
  #   wget -O jq https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64
  gunzip -c data/manifests/alimeeting/supervisions_train.jsonl.gz \
    | jq ".text" | sed 's/"//g' \
    | ./local/text2token.py -t "char" > $lang_char_dir/text

  # Prepare word segments
  python ./local/text2segments.py \
    --input $lang_char_dir/text \
    --output $lang_char_dir/text_words_segmentation

  cat $lang_char_dir/text_words_segmentation | sed "s/ /\n/g" \
    | sort -u | sed "/^$/d" \
    | uniq > $lang_char_dir/words_no_ids.txt

  # Prepare words.txt
  if [ ! -f $lang_char_dir/words.txt ]; then
    ./local/prepare_words.py \
      --input-file $lang_char_dir/words_no_ids.txt \
      --output-file $lang_char_dir/words.txt
  fi

  if [ ! -f $lang_char_dir/L_disambig.pt ]; then
    ./local/prepare_char.py
  fi
fi
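
# Each stage above is guarded by [ $stage -le N ] && [ $stop_stage -ge N ], so
# any contiguous subset of the pipeline can be re-run by overriding the two
# variables on the command line (they are parsed by shared/parse_options.sh),
# e.g.:
#
#   ./prepare.sh --stage 4 --stop-stage 5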
@@ -0,0 +1,421 @@
# Copyright 2021 Piotr Żelasko
#
# See ../../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import argparse
import inspect
import logging
from functools import lru_cache
from pathlib import Path
from typing import Any, Dict, List, Optional

import torch
from lhotse import (
    CutSet,
    Fbank,
    FbankConfig,
    load_manifest,
    load_manifest_lazy,
    set_caching_enabled,
)
from lhotse.dataset import (
    CutConcatenate,
    CutMix,
    DynamicBucketingSampler,
    K2SpeechRecognitionDataset,
    PrecomputedFeatures,
    SingleCutSampler,
    SpecAugment,
)
from lhotse.dataset.input_strategies import OnTheFlyFeatures
from lhotse.utils import fix_random_seed
from torch.utils.data import DataLoader

from icefall.utils import str2bool

set_caching_enabled(False)
torch.set_num_threads(1)


class _SeedWorkers:
    def __init__(self, seed: int):
        self.seed = seed

    def __call__(self, worker_id: int):
        fix_random_seed(self.seed + worker_id)


class AlimeetingAsrDataModule:
    """
    DataModule for k2 ASR experiments.
    It assumes there is always one train and valid dataloader,
    but there can be multiple test dataloaders (e.g. LibriSpeech test-clean
    and test-other).
    It contains all the common data pipeline modules used in ASR
    experiments, e.g.:
    - dynamic batch size,
    - bucketing samplers,
    - cut concatenation,
    - augmentation,
    - on-the-fly feature extraction
    This class should be derived for specific corpora used in ASR tasks.
    """

    def __init__(self, args: argparse.Namespace):
        self.args = args

    @classmethod
    def add_arguments(cls, parser: argparse.ArgumentParser):
        group = parser.add_argument_group(
            title="ASR data related options",
            description="These options are used for the preparation of "
            "PyTorch DataLoaders from Lhotse CutSet's -- they control the "
            "effective batch sizes, sampling strategies, applied data "
            "augmentations, etc.",
        )
        group.add_argument(
            "--manifest-dir",
            type=Path,
            default=Path("data/fbank"),
            help="Path to directory with train/dev/test cuts.",
        )
        group.add_argument(
            "--max-duration",
            type=int,
            default=200.0,
            help="Maximum pooled recordings duration (seconds) in a "
            "single batch. You can reduce it if it causes CUDA OOM.",
        )
        group.add_argument(
            "--bucketing-sampler",
            type=str2bool,
            default=True,
            help="When enabled, the batches will come from buckets of "
            "similar duration (saves padding frames).",
        )
        group.add_argument(
            "--num-buckets",
            type=int,
            default=300,
            help="The number of buckets for the DynamicBucketingSampler "
            "(you might want to increase it for larger datasets).",
        )
        group.add_argument(
            "--concatenate-cuts",
            type=str2bool,
            default=False,
            help="When enabled, utterances (cuts) will be concatenated "
            "to minimize the amount of padding.",
        )
        group.add_argument(
            "--duration-factor",
            type=float,
            default=1.0,
            help="Determines the maximum duration of a concatenated cut "
            "relative to the duration of the longest cut in a batch.",
        )
        group.add_argument(
            "--gap",
            type=float,
            default=1.0,
            help="The amount of padding (in seconds) inserted between "
            "concatenated cuts. This padding is filled with noise when "
            "noise augmentation is used.",
        )
        group.add_argument(
            "--on-the-fly-feats",
            type=str2bool,
            default=False,
            help="When enabled, use on-the-fly cut mixing and feature "
            "extraction. Will drop existing precomputed feature manifests "
            "if available.",
        )
        group.add_argument(
            "--shuffle",
            type=str2bool,
            default=True,
            help="When enabled (=default), the examples will be "
            "shuffled for each epoch.",
        )
        group.add_argument(
            "--return-cuts",
            type=str2bool,
            default=True,
            help="When enabled, each batch will have the "
            "field: batch['supervisions']['cut'] with the cuts that "
            "were used to construct it.",
        )

        group.add_argument(
            "--num-workers",
            type=int,
            default=2,
            help="The number of training dataloader workers that "
            "collect the batches.",
        )

        group.add_argument(
            "--enable-spec-aug",
            type=str2bool,
            default=True,
            help="When enabled, use SpecAugment for training dataset.",
        )

        group.add_argument(
            "--spec-aug-time-warp-factor",
            type=int,
            default=80,
            help="Used only when --enable-spec-aug is True. "
            "It specifies the factor for time warping in SpecAugment. "
            "Larger values mean more warping. "
            "A value less than 1 means to disable time warp.",
        )

        group.add_argument(
            "--enable-musan",
            type=str2bool,
            default=True,
            help="When enabled, select noise from MUSAN and mix it "
            "with the training dataset.",
        )

    def train_dataloaders(
        self,
        cuts_train: CutSet,
        sampler_state_dict: Optional[Dict[str, Any]] = None,
    ) -> DataLoader:
        """
        Args:
          cuts_train:
            CutSet for training.
          sampler_state_dict:
            The state dict for the training sampler.
        """
        logging.info("About to get Musan cuts")
        cuts_musan = load_manifest(
            self.args.manifest_dir / "musan_cuts.jsonl.gz"
        )

        transforms = []
        if self.args.enable_musan:
            logging.info("Enable MUSAN")
            transforms.append(
                CutMix(
                    cuts=cuts_musan, prob=0.5, snr=(10, 20), preserve_id=True
                )
            )
        else:
            logging.info("Disable MUSAN")

        if self.args.concatenate_cuts:
            logging.info(
                f"Using cut concatenation with duration factor "
                f"{self.args.duration_factor} and gap {self.args.gap}."
            )
            # Cut concatenation should be the first transform in the list,
            # so that if we e.g. mix noise in, it will fill the gaps between
            # different utterances.
            transforms = [
                CutConcatenate(
                    duration_factor=self.args.duration_factor, gap=self.args.gap
                )
            ] + transforms

        input_transforms = []
        if self.args.enable_spec_aug:
            logging.info("Enable SpecAugment")
            logging.info(
                f"Time warp factor: {self.args.spec_aug_time_warp_factor}"
            )
            # Set the value of num_frame_masks according to Lhotse's version.
            # In different Lhotse's versions, the default of num_frame_masks is
            # different.
            num_frame_masks = 10
            num_frame_masks_parameter = inspect.signature(
                SpecAugment.__init__
            ).parameters["num_frame_masks"]
            if num_frame_masks_parameter.default == 1:
                num_frame_masks = 2
            logging.info(f"Num frame mask: {num_frame_masks}")
            input_transforms.append(
                SpecAugment(
                    time_warp_factor=self.args.spec_aug_time_warp_factor,
                    num_frame_masks=num_frame_masks,
                    features_mask_size=27,
                    num_feature_masks=2,
                    frames_mask_size=100,
                )
            )
        else:
            logging.info("Disable SpecAugment")

        logging.info("About to create train dataset")
        train = K2SpeechRecognitionDataset(
            cut_transforms=transforms,
            input_transforms=input_transforms,
            return_cuts=self.args.return_cuts,
        )

        if self.args.on_the_fly_feats:
            # NOTE: the PerturbSpeed transform should be added only if we
            # remove it from data prep stage.
            # Add on-the-fly speed perturbation; since originally it would
            # have increased epoch size by 3, we will apply prob 2/3 and use
            # 3x more epochs.
            # Speed perturbation probably should come first before
            # concatenation, but in principle the transforms order doesn't have
            # to be strict (e.g. could be randomized)
            # transforms = [PerturbSpeed(factors=[0.9, 1.1], p=2/3)] + transforms  # noqa
            # Drop feats to be on the safe side.
            train = K2SpeechRecognitionDataset(
                cut_transforms=transforms,
                input_strategy=OnTheFlyFeatures(
                    Fbank(FbankConfig(num_mel_bins=80))
                ),
                input_transforms=input_transforms,
                return_cuts=self.args.return_cuts,
            )

        if self.args.bucketing_sampler:
            logging.info("Using DynamicBucketingSampler.")
            train_sampler = DynamicBucketingSampler(
                cuts_train,
                max_duration=self.args.max_duration,
                shuffle=self.args.shuffle,
                num_buckets=self.args.num_buckets,
                buffer_size=30000,
                drop_last=True,
            )
        else:
            logging.info("Using SingleCutSampler.")
            train_sampler = SingleCutSampler(
                cuts_train,
                max_duration=self.args.max_duration,
                shuffle=self.args.shuffle,
            )
        logging.info("About to create train dataloader")

        # 'seed' is derived from the current random state, which will have
        # previously been set in the main process.
        seed = torch.randint(0, 100000, ()).item()
        worker_init_fn = _SeedWorkers(seed)

        train_dl = DataLoader(
            train,
            sampler=train_sampler,
            batch_size=None,
            num_workers=self.args.num_workers,
            persistent_workers=False,
            worker_init_fn=worker_init_fn,
        )

        if sampler_state_dict is not None:
            logging.info("Loading sampler state dict")
            train_dl.sampler.load_state_dict(sampler_state_dict)

        return train_dl

    def valid_dataloaders(self, cuts_valid: CutSet) -> DataLoader:
        transforms = []
        if self.args.concatenate_cuts:
            transforms = [
                CutConcatenate(
                    duration_factor=self.args.duration_factor, gap=self.args.gap
                )
            ] + transforms

        logging.info("About to create dev dataset")
        if self.args.on_the_fly_feats:
            validate = K2SpeechRecognitionDataset(
                cut_transforms=transforms,
                input_strategy=OnTheFlyFeatures(
                    Fbank(FbankConfig(num_mel_bins=80))
                ),
                return_cuts=self.args.return_cuts,
            )
        else:
            validate = K2SpeechRecognitionDataset(
                cut_transforms=transforms,
                return_cuts=self.args.return_cuts,
            )
        valid_sampler = DynamicBucketingSampler(
            cuts_valid,
            max_duration=self.args.max_duration,
            shuffle=False,
        )
        logging.info("About to create dev dataloader")

        from lhotse.dataset.iterable_dataset import IterableDatasetWrapper

        dev_iter_dataset = IterableDatasetWrapper(
            dataset=validate,
            sampler=valid_sampler,
        )
        valid_dl = DataLoader(
            dev_iter_dataset,
            batch_size=None,
            num_workers=self.args.num_workers,
            persistent_workers=False,
        )

        return valid_dl

    def test_dataloaders(self, cuts: CutSet) -> DataLoader:
        logging.debug("About to create test dataset")
        test = K2SpeechRecognitionDataset(
            input_strategy=OnTheFlyFeatures(Fbank(FbankConfig(num_mel_bins=80)))
            if self.args.on_the_fly_feats
            else PrecomputedFeatures(),
            return_cuts=self.args.return_cuts,
        )
        sampler = DynamicBucketingSampler(
            cuts,
            max_duration=self.args.max_duration,
            shuffle=False,
        )
        from lhotse.dataset.iterable_dataset import IterableDatasetWrapper

        test_iter_dataset = IterableDatasetWrapper(
            dataset=test,
            sampler=sampler,
        )
        test_dl = DataLoader(
            test_iter_dataset,
            batch_size=None,
            num_workers=self.args.num_workers,
        )
        return test_dl

    @lru_cache()
    def train_cuts(self) -> CutSet:
        logging.info("About to get train cuts")
        return load_manifest_lazy(
            self.args.manifest_dir / "alimeeting_cuts_train.jsonl.gz"
        )

    @lru_cache()
    def valid_cuts(self) -> CutSet:
        logging.info("About to get dev cuts")
        return load_manifest_lazy(
            self.args.manifest_dir / "alimeeting_cuts_eval.jsonl.gz"
        )

    @lru_cache()
    def test_cuts(self) -> List[CutSet]:
        logging.info("About to get test cuts")
        return load_manifest_lazy(
            self.args.manifest_dir / "alimeeting_cuts_test.jsonl.gz"
        )
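
# A minimal sketch of how this data module is typically wired into a training
# or decoding script (argument values come from the command line; compare
# decode.py below):
#
#   parser = argparse.ArgumentParser()
#   AlimeetingAsrDataModule.add_arguments(parser)
#   args = parser.parse_args()
#
#   alimeeting = AlimeetingAsrDataModule(args)
#   train_dl = alimeeting.train_dataloaders(alimeeting.train_cuts())
#   valid_dl = alimeeting.valid_dataloaders(alimeeting.valid_cuts())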
1
egs/alimeeting/ASR/pruned_transducer_stateless2/beam_search.py
Symbolic link
@@ -0,0 +1 @@
../../../librispeech/ASR/pruned_transducer_stateless2/beam_search.py
1
egs/alimeeting/ASR/pruned_transducer_stateless2/conformer.py
Symbolic link
@@ -0,0 +1 @@
../../../librispeech/ASR/pruned_transducer_stateless2/conformer.py
615
egs/alimeeting/ASR/pruned_transducer_stateless2/decode.py
Executable file
@@ -0,0 +1,615 @@
#!/usr/bin/env python3
#
# Copyright 2021 Xiaomi Corporation (Author: Fangjun Kuang)
#
# See ../../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Usage (the model is trained with the far data):
(1) greedy search
./pruned_transducer_stateless2/decode.py \
        --epoch 29 \
        --avg 18 \
        --exp-dir ./pruned_transducer_stateless2/exp \
        --lang-dir data/lang_char \
        --max-duration 100 \
        --decoding-method greedy_search

(2) modified beam search
./pruned_transducer_stateless2/decode.py \
        --epoch 29 \
        --avg 18 \
        --exp-dir ./pruned_transducer_stateless2/exp \
        --lang-dir data/lang_char \
        --max-duration 100 \
        --decoding-method modified_beam_search \
        --beam-size 4

(3) fast beam search
./pruned_transducer_stateless2/decode.py \
        --epoch 29 \
        --avg 18 \
        --exp-dir ./pruned_transducer_stateless2/exp \
        --lang-dir data/lang_char \
        --max-duration 1500 \
        --decoding-method fast_beam_search \
        --beam 4 \
        --max-contexts 4 \
        --max-states 8
"""


import argparse
import logging
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Optional, Tuple

import k2
import torch
import torch.nn as nn
from asr_datamodule import AlimeetingAsrDataModule
from beam_search import (
    beam_search,
    fast_beam_search_one_best,
    greedy_search,
    greedy_search_batch,
    modified_beam_search,
)
from lhotse.cut import Cut
from train import get_params, get_transducer_model

from icefall.checkpoint import (
    average_checkpoints,
    find_checkpoints,
    load_checkpoint,
)
from icefall.lexicon import Lexicon
from icefall.utils import (
    AttributeDict,
    setup_logger,
    store_transcripts,
    write_error_stats,
)


def get_parser():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )

    parser.add_argument(
        "--epoch",
        type=int,
        default=28,
        help="It specifies the checkpoint to use for decoding. "
        "Note: Epoch counts from 0.",
    )

    parser.add_argument(
        "--batch",
        type=int,
        default=None,
        help="It specifies the batch checkpoint to use for decoding. "
        "Note: it refers to exp_dir/checkpoint-{batch}.pt.",
    )

    parser.add_argument(
        "--avg",
        type=int,
        default=15,
        help="Number of checkpoints to average. Automatically select "
        "consecutive checkpoints before the checkpoint specified by "
        "'--epoch'. ",
    )

    parser.add_argument(
        "--avg-last-n",
        type=int,
        default=0,
        help="""If positive, --epoch and --avg are ignored and it
        will use the last n checkpoints exp_dir/checkpoint-xxx.pt
        where xxx is the number of processed batches while
        saving that checkpoint.
        """,
    )

    parser.add_argument(
        "--exp-dir",
        type=str,
        default="pruned_transducer_stateless2/exp",
        help="The experiment dir",
    )

    parser.add_argument(
        "--lang-dir",
        type=str,
        default="data/lang_char",
        help="""The lang dir
        It contains language related input files such as
        "lexicon.txt"
        """,
    )

    parser.add_argument(
        "--decoding-method",
        type=str,
        default="greedy_search",
        help="""Possible values are:
          - greedy_search
          - beam_search
          - modified_beam_search
          - fast_beam_search
        """,
    )

    parser.add_argument(
        "--beam-size",
        type=int,
        default=4,
        help="""An integer indicating how many candidates we will keep for each
        frame. Used only when --decoding-method is beam_search or
        modified_beam_search.""",
    )

    parser.add_argument(
        "--beam",
        type=float,
        default=4,
        help="""A floating point value to calculate the cutoff score during beam
        search (i.e., `cutoff = max-score - beam`), which is the same as the
        `beam` in Kaldi.
        Used only when --decoding-method is fast_beam_search""",
    )

    parser.add_argument(
        "--max-contexts",
        type=int,
        default=4,
        help="""Used only when --decoding-method is
        fast_beam_search""",
    )

    parser.add_argument(
        "--max-states",
        type=int,
        default=8,
        help="""Used only when --decoding-method is
        fast_beam_search""",
    )

    parser.add_argument(
        "--context-size",
        type=int,
        default=2,
        help="The context size in the decoder. 1 means bigram; "
        "2 means trigram",
    )
    parser.add_argument(
        "--max-sym-per-frame",
        type=int,
        default=1,
        help="""Maximum number of symbols per frame.
        Used only when --decoding_method is greedy_search""",
    )

    return parser


def decode_one_batch(
    params: AttributeDict,
    model: nn.Module,
    lexicon: Lexicon,
    batch: dict,
    decoding_graph: Optional[k2.Fsa] = None,
) -> Dict[str, List[List[str]]]:
    """Decode one batch and return the result in a dict. The dict has the
    following format:

    - key: It indicates the setting used for decoding. For example,
      if greedy_search is used, it would be "greedy_search".
      If beam search with a beam size of 7 is used, it would be
      "beam_7".
    - value: It contains the decoding result. `len(value)` equals to
      batch size. `value[i]` is the decoding result for the i-th
      utterance in the given batch.
    Args:
      params:
        It's the return value of :func:`get_params`.
      model:
        The neural model.
      batch:
        It is the return value from iterating
        `lhotse.dataset.K2SpeechRecognitionDataset`. See its documentation
        for the format of the `batch`.
      decoding_graph:
        The decoding graph. Can be either a `k2.trivial_graph` or HLG. Used
        only when --decoding_method is fast_beam_search.
    Returns:
      Return the decoding result. See above description for the format of
      the returned dict.
    """
    device = model.device
    feature = batch["inputs"]
    assert feature.ndim == 3

    feature = feature.to(device)
    # at entry, feature is (N, T, C)

    supervisions = batch["supervisions"]
    feature_lens = supervisions["num_frames"].to(device)
    encoder_out, encoder_out_lens = model.encoder(
        x=feature, x_lens=feature_lens
    )
    hyps = []

    if params.decoding_method == "fast_beam_search":
        hyp_tokens = fast_beam_search_one_best(
            model=model,
            decoding_graph=decoding_graph,
            encoder_out=encoder_out,
            encoder_out_lens=encoder_out_lens,
            beam=params.beam,
            max_contexts=params.max_contexts,
            max_states=params.max_states,
        )
        for i in range(encoder_out.size(0)):
            hyps.append([lexicon.token_table[idx] for idx in hyp_tokens[i]])
    elif (
        params.decoding_method == "greedy_search"
        and params.max_sym_per_frame == 1
    ):
        hyp_tokens = greedy_search_batch(
            model=model,
            encoder_out=encoder_out,
            encoder_out_lens=encoder_out_lens,
        )
        for i in range(encoder_out.size(0)):
            hyps.append([lexicon.token_table[idx] for idx in hyp_tokens[i]])
    elif params.decoding_method == "modified_beam_search":
        hyp_tokens = modified_beam_search(
            model=model,
            encoder_out=encoder_out,
            encoder_out_lens=encoder_out_lens,
            beam=params.beam_size,
        )
        for i in range(encoder_out.size(0)):
            hyps.append([lexicon.token_table[idx] for idx in hyp_tokens[i]])
    else:
        batch_size = encoder_out.size(0)

        for i in range(batch_size):
            # fmt: off
            encoder_out_i = encoder_out[i:i+1, :encoder_out_lens[i]]
            # fmt: on
            if params.decoding_method == "greedy_search":
                hyp = greedy_search(
                    model=model,
                    encoder_out=encoder_out_i,
                    max_sym_per_frame=params.max_sym_per_frame,
                )
            elif params.decoding_method == "beam_search":
                hyp = beam_search(
                    model=model,
                    encoder_out=encoder_out_i,
                    beam=params.beam_size,
                )
            else:
                raise ValueError(
                    f"Unsupported decoding method: {params.decoding_method}"
                )
            hyps.append([lexicon.token_table[idx] for idx in hyp])

    if params.decoding_method == "greedy_search":
        return {"greedy_search": hyps}
    elif params.decoding_method == "fast_beam_search":
        return {
            (
                f"beam_{params.beam}_"
                f"max_contexts_{params.max_contexts}_"
                f"max_states_{params.max_states}"
            ): hyps
        }
    else:
        return {f"beam_size_{params.beam_size}": hyps}


def decode_dataset(
    dl: torch.utils.data.DataLoader,
    params: AttributeDict,
    model: nn.Module,
    lexicon: Lexicon,
    decoding_graph: Optional[k2.Fsa] = None,
) -> Dict[str, List[Tuple[List[str], List[str]]]]:
    """Decode dataset.

    Args:
      dl:
        PyTorch's dataloader containing the dataset to decode.
      params:
        It is returned by :func:`get_params`.
      model:
        The neural model.
      decoding_graph:
        The decoding graph. Can be either a `k2.trivial_graph` or HLG. Used
        only when --decoding_method is fast_beam_search.
    Returns:
      Return a dict, whose key may be "greedy_search" if greedy search
      is used, or it may be "beam_7" if beam size of 7 is used.
      Its value is a list of tuples. Each tuple contains two elements:
      The first is the reference transcript, and the second is the
      predicted result.
    """
    num_cuts = 0

    try:
        num_batches = len(dl)
    except TypeError:
        num_batches = "?"

    if params.decoding_method == "greedy_search":
        log_interval = 100
    else:
        log_interval = 50

    results = defaultdict(list)
    for batch_idx, batch in enumerate(dl):
        texts = batch["supervisions"]["text"]
        texts = [list(str(text).replace(" ", "")) for text in texts]

        hyps_dict = decode_one_batch(
            params=params,
            model=model,
            lexicon=lexicon,
            decoding_graph=decoding_graph,
            batch=batch,
        )

        for name, hyps in hyps_dict.items():
            this_batch = []
            assert len(hyps) == len(texts)
            for hyp_words, ref_text in zip(hyps, texts):
                this_batch.append((ref_text, hyp_words))

            results[name].extend(this_batch)

        num_cuts += len(texts)

        if batch_idx % log_interval == 0:
            batch_str = f"{batch_idx}/{num_batches}"

            logging.info(
                f"batch {batch_str}, cuts processed until now is {num_cuts}"
            )
    return results


def save_results(
    params: AttributeDict,
    test_set_name: str,
    results_dict: Dict[str, List[Tuple[List[int], List[int]]]],
):
    test_set_wers = dict()
    for key, results in results_dict.items():
        recog_path = (
            params.res_dir / f"recogs-{test_set_name}-{key}-{params.suffix}.txt"
        )
        store_transcripts(filename=recog_path, texts=results)
        logging.info(f"The transcripts are stored in {recog_path}")

        # The following prints out WERs, per-word error statistics and aligned
        # ref/hyp pairs.
        errs_filename = (
            params.res_dir / f"errs-{test_set_name}-{key}-{params.suffix}.txt"
        )
        with open(errs_filename, "w") as f:
            wer = write_error_stats(
                f, f"{test_set_name}-{key}", results, enable_log=True
            )
            test_set_wers[key] = wer

        logging.info("Wrote detailed error stats to {}".format(errs_filename))

    test_set_wers = sorted(test_set_wers.items(), key=lambda x: x[1])
    errs_info = (
        params.res_dir
        / f"wer-summary-{test_set_name}-{key}-{params.suffix}.txt"
    )
    with open(errs_info, "w") as f:
        print("settings\tWER", file=f)
        for key, val in test_set_wers:
            print("{}\t{}".format(key, val), file=f)

    s = "\nFor {}, WER of different settings are:\n".format(test_set_name)
    note = "\tbest for {}".format(test_set_name)
    for key, val in test_set_wers:
        s += "{}\t{}{}\n".format(key, val, note)
        note = ""
    logging.info(s)


@torch.no_grad()
def main():
    parser = get_parser()
    AlimeetingAsrDataModule.add_arguments(parser)
    args = parser.parse_args()
    args.exp_dir = Path(args.exp_dir)

    params = get_params()
    params.update(vars(args))

    assert params.decoding_method in (
        "greedy_search",
        "beam_search",
        "fast_beam_search",
        "modified_beam_search",
    )
    params.res_dir = params.exp_dir / params.decoding_method

    params.suffix = f"epoch-{params.epoch}-avg-{params.avg}"
    if "fast_beam_search" in params.decoding_method:
        params.suffix += f"-beam-{params.beam}"
        params.suffix += f"-max-contexts-{params.max_contexts}"
        params.suffix += f"-max-states-{params.max_states}"
    elif "beam_search" in params.decoding_method:
        params.suffix += f"-beam-{params.beam_size}"
    else:
        params.suffix += f"-context-{params.context_size}"
        params.suffix += f"-max-sym-per-frame-{params.max_sym_per_frame}"

    setup_logger(f"{params.res_dir}/log-decode-{params.suffix}")
    logging.info("Decoding started")

    device = torch.device("cpu")
    if torch.cuda.is_available():
        device = torch.device("cuda", 0)

    logging.info(f"Device: {device}")

    lexicon = Lexicon(params.lang_dir)
    params.blank_id = lexicon.token_table["<blk>"]
    params.vocab_size = max(lexicon.tokens) + 1

    logging.info(params)

    logging.info("About to create model")
    model = get_transducer_model(params)

    if params.avg_last_n > 0:
        filenames = find_checkpoints(params.exp_dir)[: params.avg_last_n]
        logging.info(f"averaging {filenames}")
        model.to(device)
        model.load_state_dict(average_checkpoints(filenames, device=device))
    elif params.avg == 1:
        load_checkpoint(f"{params.exp_dir}/epoch-{params.epoch}.pt", model)
    elif params.batch is not None:
        filenames = f"{params.exp_dir}/checkpoint-{params.batch}.pt"
        logging.info(f"averaging {filenames}")
        model.to(device)
        model.load_state_dict(average_checkpoints([filenames], device=device))
    else:
        start = params.epoch - params.avg + 1
        filenames = []
        for i in range(start, params.epoch + 1):
            if start >= 0:
                filenames.append(f"{params.exp_dir}/epoch-{i}.pt")
        logging.info(f"averaging {filenames}")
        model.to(device)
        model.load_state_dict(average_checkpoints(filenames, device=device))

        average = average_checkpoints(filenames, device=device)
        checkpoint = {"model": average}
        torch.save(
            checkpoint,
            "pruned_transducer_stateless2/exp/pretrained_epoch_29_avg_18.pt",
        )

    model.to(device)
    model.eval()
    model.device = device

    if params.decoding_method == "fast_beam_search":
        decoding_graph = k2.trivial_graph(params.vocab_size - 1, device=device)
    else:
        decoding_graph = None

    num_param = sum([p.numel() for p in model.parameters()])
    logging.info(f"Number of model parameters: {num_param}")

    # Note: Please use "pip install webdataset==0.1.103"
    # for installing the webdataset.
    import glob
    import os

    from lhotse import CutSet
    from lhotse.dataset.webdataset import export_to_webdataset

    alimeeting = AlimeetingAsrDataModule(args)

    dev = "eval"
    test = "test"

    if not os.path.exists(f"{dev}/shared-0.tar"):
        os.makedirs(dev)
        dev_cuts = alimeeting.valid_cuts()
        export_to_webdataset(
            dev_cuts,
            output_path=f"{dev}/shared-%d.tar",
            shard_size=300,
        )

    if not os.path.exists(f"{test}/shared-0.tar"):
        os.makedirs(test)
        test_cuts = alimeeting.test_cuts()
        export_to_webdataset(
            test_cuts,
            output_path=f"{test}/shared-%d.tar",
            shard_size=300,
        )

    dev_shards = [
        str(path)
        for path in sorted(glob.glob(os.path.join(dev, "shared-*.tar")))
    ]
    cuts_dev_webdataset = CutSet.from_webdataset(
        dev_shards,
        split_by_worker=True,
        split_by_node=True,
        shuffle_shards=True,
    )

    test_shards = [
        str(path)
        for path in sorted(glob.glob(os.path.join(test, "shared-*.tar")))
    ]
    cuts_test_webdataset = CutSet.from_webdataset(
        test_shards,
        split_by_worker=True,
        split_by_node=True,
        shuffle_shards=True,
    )

    def remove_short_and_long_utt(c: Cut):
        # Note: as written, this keeps every cut of at least 1 second;
        # no upper duration bound is applied.
        return 1.0 <= c.duration

    cuts_dev_webdataset = cuts_dev_webdataset.filter(remove_short_and_long_utt)
    cuts_test_webdataset = cuts_test_webdataset.filter(
        remove_short_and_long_utt
    )

    dev_dl = alimeeting.valid_dataloaders(cuts_dev_webdataset)
    test_dl = alimeeting.test_dataloaders(cuts_test_webdataset)

    test_sets = ["dev", "test"]
    test_dls = [dev_dl, test_dl]

    for test_set, test_dl in zip(test_sets, test_dls):
        results_dict = decode_dataset(
            dl=test_dl,
            params=params,
            model=model,
            lexicon=lexicon,
            decoding_graph=decoding_graph,
        )
        save_results(
            params=params,
            test_set_name=test_set,
            results_dict=results_dict,
        )

    logging.info("Done!")


if __name__ == "__main__":
    main()
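
# Decoding artifacts are written under {exp_dir}/{decoding_method}/ with the
# epoch/avg and search-specific settings encoded in the file names; e.g. for
# greedy search with --epoch 29 --avg 18 (illustrative):
#
#   recogs-test-greedy_search-epoch-29-avg-18-context-2-max-sym-per-frame-1.txt
#   errs-test-greedy_search-epoch-29-avg-18-context-2-max-sym-per-frame-1.txt
#   wer-summary-test-greedy_search-epoch-29-avg-18-context-2-max-sym-per-frame-1.txt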
1
egs/alimeeting/ASR/pruned_transducer_stateless2/decoder.py
Symbolic link
@@ -0,0 +1 @@
../../../librispeech/ASR/pruned_transducer_stateless2/decoder.py
1
egs/alimeeting/ASR/pruned_transducer_stateless2/encoder_interface.py
Symbolic link
@@ -0,0 +1 @@
../../../librispeech/ASR/transducer_stateless/encoder_interface.py
178
egs/alimeeting/ASR/pruned_transducer_stateless2/export.py
Normal file
@@ -0,0 +1,178 @@
# Copyright 2021 Xiaomi Corporation (Author: Fangjun Kuang)
#
# See ../../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This script converts several saved checkpoints
# to a single one using model averaging.
"""
Usage:
./pruned_transducer_stateless2/export.py \
  --exp-dir ./pruned_transducer_stateless2/exp \
  --lang-dir data/lang_char \
  --epoch 29 \
  --avg 18

It will generate a file exp_dir/pretrained.pt

To use the generated file with `pruned_transducer_stateless2/decode.py`,
you can do:

    cd /path/to/exp_dir
    ln -s pretrained.pt epoch-9999.pt

    cd /path/to/egs/alimeeting/ASR
    ./pruned_transducer_stateless2/decode.py \
        --exp-dir ./pruned_transducer_stateless2/exp \
        --epoch 9999 \
        --avg 1 \
        --max-duration 100 \
        --lang-dir data/lang_char
"""

import argparse
import logging
from pathlib import Path

import torch
from train import get_params, get_transducer_model

from icefall.checkpoint import average_checkpoints, load_checkpoint
from icefall.lexicon import Lexicon
from icefall.utils import str2bool


def get_parser():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )

    parser.add_argument(
        "--epoch",
        type=int,
        default=28,
        help="It specifies the checkpoint to use for decoding. "
        "Note: Epoch counts from 0.",
    )

    parser.add_argument(
        "--avg",
        type=int,
        default=15,
        help="Number of checkpoints to average. Automatically select "
        "consecutive checkpoints before the checkpoint specified by "
        "'--epoch'. ",
    )

    parser.add_argument(
        "--exp-dir",
        type=str,
        default="pruned_transducer_stateless2/exp",
        help="""It specifies the directory where all training related
        files, e.g., checkpoints, log, etc, are saved
        """,
    )

    parser.add_argument(
        "--lang-dir",
        type=str,
        default="data/lang_char",
        help="The lang dir",
    )

    parser.add_argument(
        "--jit",
        type=str2bool,
        default=False,
        help="""True to save a model after applying torch.jit.script.
        """,
    )

    parser.add_argument(
        "--context-size",
        type=int,
        default=2,
        help="The context size in the decoder. 1 means bigram; "
        "2 means trigram",
    )

    return parser


def main():
    args = get_parser().parse_args()
    args.exp_dir = Path(args.exp_dir)

    assert args.jit is False, "Support for torchscript will be added later"

    params = get_params()
    params.update(vars(args))

    device = torch.device("cpu")
    if torch.cuda.is_available():
        device = torch.device("cuda", 0)

    logging.info(f"device: {device}")

    lexicon = Lexicon(params.lang_dir)

    params.blank_id = 0
    params.vocab_size = max(lexicon.tokens) + 1

    logging.info(params)

    logging.info("About to create model")
    model = get_transducer_model(params)

    model.to(device)

    if params.avg == 1:
        load_checkpoint(f"{params.exp_dir}/epoch-{params.epoch}.pt", model)
    else:
        start = params.epoch - params.avg + 1
        filenames = []
        for i in range(start, params.epoch + 1):
            if start >= 0:
                filenames.append(f"{params.exp_dir}/epoch-{i}.pt")
        logging.info(f"averaging {filenames}")
        model.to(device)
        model.load_state_dict(average_checkpoints(filenames, device=device))

    model.eval()

    model.to("cpu")
    model.eval()

    if params.jit:
        logging.info("Using torch.jit.script")
        model = torch.jit.script(model)
        filename = params.exp_dir / "cpu_jit.pt"
        model.save(str(filename))
        logging.info(f"Saved to {filename}")
    else:
        logging.info("Not using torch.jit.script")
        # Save it using a format so that it can be loaded
        # by :func:`load_checkpoint`
        filename = params.exp_dir / "pretrained.pt"
        torch.save({"model": model.state_dict()}, str(filename))
        logging.info(f"Saved to {filename}")


if __name__ == "__main__":
    formatter = (
        "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s"
    )

    logging.basicConfig(format=formatter, level=logging.INFO)
    main()
1
egs/alimeeting/ASR/pruned_transducer_stateless2/joiner.py
Symbolic link
@@ -0,0 +1 @@
../../../librispeech/ASR/pruned_transducer_stateless2/joiner.py
1
egs/alimeeting/ASR/pruned_transducer_stateless2/model.py
Symbolic link
@@ -0,0 +1 @@
../../../librispeech/ASR/pruned_transducer_stateless2/model.py
1
egs/alimeeting/ASR/pruned_transducer_stateless2/optim.py
Symbolic link
@@ -0,0 +1 @@
../../../librispeech/ASR/pruned_transducer_stateless2/optim.py
347
egs/alimeeting/ASR/pruned_transducer_stateless2/pretrained.py
Normal file
@@ -0,0 +1,347 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang)
|
||||||
|
# 2022 Xiaomi Crop. (authors: Mingshuang Luo)
|
||||||
|
#
|
||||||
|
# See ../../../../LICENSE for clarification regarding multiple authors
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
"""
|
||||||
|
Here, the far data is used for training, usage:
|
||||||
|
|
||||||
|
(1) greedy search
|
||||||
|
./pruned_transducer_stateless2/pretrained.py \
|
||||||
|
--checkpoint ./pruned_transducer_stateless2/exp/pretrained.pt \
|
||||||
|
--lang-dir ./data/lang_char \
|
||||||
|
--decoding-method greedy_search \
|
||||||
|
--max-sym-per-frame 1 \
|
||||||
|
/path/to/foo.wav \
|
||||||
|
/path/to/bar.wav
|
||||||
|
|
||||||
|
(2) modified beam search
|
||||||
|
./pruned_transducer_stateless2/pretrained.py \
|
||||||
|
--checkpoint ./pruned_transducer_stateless2/exp/pretrained.pt \
|
||||||
|
--lang-dir ./data/lang_char \
|
||||||
|
--decoding-method modified_beam_search \
|
||||||
|
--beam-size 4 \
|
||||||
|
/path/to/foo.wav \
|
||||||
|
/path/to/bar.wav
|
||||||
|
|
||||||
|
(3) fast beam search
|
||||||
|
./pruned_transducer_stateless2/pretrained.py \
|
||||||
|
--checkpoint ./pruned_transducer_stateless/exp/pretrained.pt \
|
||||||
|
--lang-dir ./data/lang_char \
|
||||||
|
--decoding-method fast_beam_search \
|
||||||
|
--beam 4 \
|
||||||
|
--max-contexts 4 \
|
||||||
|
--max-states 8 \
|
||||||
|
/path/to/foo.wav \
|
||||||
|
/path/to/bar.wav
|
||||||
|
|
||||||
|
You can also use `./pruned_transducer_stateless2/exp/epoch-xx.pt`.
|
||||||
|
|
||||||
|
Note: ./pruned_transducer_stateless2/exp/pretrained.pt is generated by
|
||||||
|
./pruned_transducer_stateless2/export.py
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import logging
|
||||||
|
import math
|
||||||
|
from typing import List
|
||||||
|
|
||||||
|
import k2
|
||||||
|
import kaldifeat
|
||||||
|
import torch
|
||||||
|
import torchaudio
|
||||||
|
from beam_search import (
|
||||||
|
beam_search,
|
||||||
|
fast_beam_search_one_best,
|
||||||
|
greedy_search,
|
||||||
|
greedy_search_batch,
|
||||||
|
modified_beam_search,
|
||||||
|
)
|
||||||
|
from torch.nn.utils.rnn import pad_sequence
|
||||||
|
from train import get_params, get_transducer_model
|
||||||
|
|
||||||
|
from icefall.lexicon import Lexicon
|
||||||
|
|
||||||
|
|
||||||
|
def get_parser():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )

    parser.add_argument(
        "--checkpoint",
        type=str,
        required=True,
        help="Path to the checkpoint. "
        "The checkpoint is assumed to be saved by "
        "icefall.checkpoint.save_checkpoint().",
    )

    parser.add_argument(
        "--lang-dir",
        type=str,
        help="""Path to the lang dir, e.g., data/lang_char.""",
    )

    parser.add_argument(
        "--decoding-method",
        type=str,
        default="greedy_search",
        help="""Possible values are:
          - greedy_search
          - modified_beam_search
          - fast_beam_search
        """,
    )

    parser.add_argument(
        "sound_files",
        type=str,
        nargs="+",
        help="The input sound file(s) to transcribe. "
        "Supported formats are those supported by torchaudio.load(). "
        "For example, wav and flac are supported. "
        "The sample rate has to be 16kHz.",
    )

    parser.add_argument(
        "--sample-rate",
        type=int,
        default=16000,
        help="The sample rate of the input sound file",
    )

    parser.add_argument(
        "--beam-size",
        type=int,
        default=4,
        help="Used only when --decoding-method is beam_search "
        "or modified_beam_search.",
    )

    parser.add_argument(
        "--beam",
        type=float,
        default=4,
        help="""A floating point value to calculate the cutoff score during beam
        search (i.e., `cutoff = max-score - beam`), which is the same as the
        `beam` in Kaldi.
        Used only when --decoding-method is fast_beam_search""",
    )

    parser.add_argument(
        "--max-contexts",
        type=int,
        default=4,
        help="""Used only when --decoding-method is
        fast_beam_search""",
    )

    parser.add_argument(
        "--max-states",
        type=int,
        default=8,
        help="""Used only when --decoding-method is
        fast_beam_search""",
    )

    parser.add_argument(
        "--context-size",
        type=int,
        default=2,
        help="The context size in the decoder. 1 means bigram; "
        "2 means trigram",
    )

    parser.add_argument(
        "--max-sym-per-frame",
        type=int,
        default=1,
        help="""Maximum number of symbols per frame. Used only when
        --decoding-method is greedy_search.
        """,
    )

    return parser
def read_sound_files(
    filenames: List[str], expected_sample_rate: float
) -> List[torch.Tensor]:
    """Read a list of sound files into a list of 1-D float32 torch tensors.
    Args:
      filenames:
        A list of sound filenames.
      expected_sample_rate:
        The expected sample rate of the sound files.
    Returns:
      Return a list of 1-D float32 torch tensors.
    """
    ans = []
    for f in filenames:
        wave, sample_rate = torchaudio.load(f)
        assert sample_rate == expected_sample_rate, (
            f"expected sample rate: {expected_sample_rate}. "
            f"Given: {sample_rate}"
        )
        # We use only the first channel
        ans.append(wave[0])
    return ans
@torch.no_grad()
def main():
    parser = get_parser()
    args = parser.parse_args()

    params = get_params()

    params.update(vars(args))

    lexicon = Lexicon(params.lang_dir)
    params.blank_id = lexicon.token_table["<blk>"]
    params.vocab_size = max(lexicon.tokens) + 1

    logging.info(f"{params}")

    device = torch.device("cpu")
    if torch.cuda.is_available():
        device = torch.device("cuda", 0)

    logging.info(f"device: {device}")

    logging.info("Creating model")
    model = get_transducer_model(params)

    checkpoint = torch.load(args.checkpoint, map_location="cpu")
    model.load_state_dict(checkpoint["model"], strict=False)
    model.to(device)
    model.eval()
    model.device = device

    if params.decoding_method == "fast_beam_search":
        decoding_graph = k2.trivial_graph(params.vocab_size - 1, device=device)
    else:
        decoding_graph = None

    logging.info("Constructing Fbank computer")
    opts = kaldifeat.FbankOptions()
    opts.device = device
    opts.frame_opts.dither = 0
    opts.frame_opts.snip_edges = False
    opts.frame_opts.samp_freq = params.sample_rate
    opts.mel_opts.num_bins = params.feature_dim

    fbank = kaldifeat.Fbank(opts)

    logging.info(f"Reading sound files: {params.sound_files}")
    waves = read_sound_files(
        filenames=params.sound_files, expected_sample_rate=params.sample_rate
    )
    waves = [w.to(device) for w in waves]

    logging.info("Decoding started")
    features = fbank(waves)
    feature_lengths = [f.size(0) for f in features]

    features = pad_sequence(
        features, batch_first=True, padding_value=math.log(1e-10)
    )

    feature_lengths = torch.tensor(feature_lengths, device=device)

    with torch.no_grad():
        encoder_out, encoder_out_lens = model.encoder(
            x=features, x_lens=feature_lengths
        )

    hyps = []
    msg = f"Using {params.decoding_method}"
    logging.info(msg)

    if params.decoding_method == "fast_beam_search":
        hyp_tokens = fast_beam_search_one_best(
            model=model,
            decoding_graph=decoding_graph,
            encoder_out=encoder_out,
            encoder_out_lens=encoder_out_lens,
            beam=params.beam,
            max_contexts=params.max_contexts,
            max_states=params.max_states,
        )
        for i in range(encoder_out.size(0)):
            hyps.append([lexicon.token_table[idx] for idx in hyp_tokens[i]])
    elif (
        params.decoding_method == "greedy_search"
        and params.max_sym_per_frame == 1
    ):
        hyp_tokens = greedy_search_batch(
            model=model,
            encoder_out=encoder_out,
            encoder_out_lens=encoder_out_lens,
        )
        for i in range(encoder_out.size(0)):
            hyps.append([lexicon.token_table[idx] for idx in hyp_tokens[i]])
    elif params.decoding_method == "modified_beam_search":
        hyp_tokens = modified_beam_search(
            model=model,
            encoder_out=encoder_out,
            encoder_out_lens=encoder_out_lens,
            beam=params.beam_size,
        )
        for i in range(encoder_out.size(0)):
            hyps.append([lexicon.token_table[idx] for idx in hyp_tokens[i]])
    else:
        batch_size = encoder_out.size(0)

        for i in range(batch_size):
            # fmt: off
            encoder_out_i = encoder_out[i:i+1, :encoder_out_lens[i]]
            # fmt: on
            if params.decoding_method == "greedy_search":
                hyp = greedy_search(
                    model=model,
                    encoder_out=encoder_out_i,
                    max_sym_per_frame=params.max_sym_per_frame,
                )
            elif params.decoding_method == "beam_search":
                hyp = beam_search(
                    model=model,
                    encoder_out=encoder_out_i,
                    beam=params.beam_size,
                )
            else:
                raise ValueError(
                    f"Unsupported decoding method: {params.decoding_method}"
                )
            hyps.append([lexicon.token_table[idx] for idx in hyp])

    s = "\n"
    for filename, hyp in zip(params.sound_files, hyps):
        words = " ".join(hyp)
        s += f"{filename}:\n{words}\n\n"
    logging.info(s)

    logging.info("Decoding Done")


if __name__ == "__main__":
    formatter = (
        "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s"
    )

    logging.basicConfig(format=formatter, level=logging.INFO)
    main()
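
The help text above requires input at 16 kHz. As a minimal sketch (not part of the recipe), assuming torchaudio is available, a clip recorded at another rate could be resampled before being passed to `pretrained.py`; the file names here are hypothetical placeholders.

```python
# Minimal sketch, not part of the recipe: resample a clip to the 16 kHz
# expected by pretrained.py. File names are hypothetical placeholders.
import torchaudio
import torchaudio.functional as F

wave, sr = torchaudio.load("input_48k.wav")  # e.g., a 48 kHz recording
if sr != 16000:
    wave = F.resample(wave, orig_freq=sr, new_freq=16000)
torchaudio.save("foo.wav", wave, sample_rate=16000)
```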
1  egs/alimeeting/ASR/pruned_transducer_stateless2/scaling.py  Symbolic link
@ -0,0 +1 @@
../../../librispeech/ASR/pruned_transducer_stateless2/scaling.py

972  egs/alimeeting/ASR/pruned_transducer_stateless2/train.py  Normal file
@ -0,0 +1,972 @@
#!/usr/bin/env python3
# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang,
#                                        Wei Kang
#                                        Mingshuang Luo)
#
# See ../../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Usage:

export CUDA_VISIBLE_DEVICES="0,1,2,3"
./pruned_transducer_stateless2/train.py \
  --world-size 4 \
  --num-epochs 30 \
  --start-epoch 0 \
  --exp-dir pruned_transducer_stateless2/exp \
  --lang-dir data/lang_char \
  --max-duration 220 \
  --save-every-n 1000

# For mixed precision training:

./pruned_transducer_stateless2/train.py \
  --world-size 4 \
  --num-epochs 30 \
  --start-epoch 0 \
  --exp-dir pruned_transducer_stateless2/exp \
  --lang-dir data/lang_char \
  --max-duration 220 \
  --save-every-n 1000 \
  --use-fp16 True

"""
import argparse
import logging
import os
import warnings
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, Optional, Tuple, Union

import k2
import optim
import torch
import torch.multiprocessing as mp
import torch.nn as nn
from asr_datamodule import AlimeetingAsrDataModule
from conformer import Conformer
from decoder import Decoder
from joiner import Joiner
from lhotse.cut import Cut
from lhotse.dataset.sampling.base import CutSampler
from lhotse.utils import fix_random_seed
from model import Transducer
from optim import Eden, Eve
from torch import Tensor
from torch.cuda.amp import GradScaler
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.tensorboard import SummaryWriter

from icefall import diagnostics
from icefall.char_graph_compiler import CharCtcTrainingGraphCompiler
from icefall.checkpoint import load_checkpoint, remove_checkpoints
from icefall.checkpoint import save_checkpoint as save_checkpoint_impl
from icefall.checkpoint import save_checkpoint_with_global_batch_idx
from icefall.dist import cleanup_dist, setup_dist
from icefall.env import get_env_info
from icefall.lexicon import Lexicon
from icefall.utils import AttributeDict, MetricsTracker, setup_logger, str2bool

LRSchedulerType = Union[
    torch.optim.lr_scheduler._LRScheduler, optim.LRScheduler
]

os.environ["CUDA_LAUNCH_BLOCKING"] = "1"
def get_parser():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )

    parser.add_argument(
        "--world-size",
        type=int,
        default=1,
        help="Number of GPUs for DDP training.",
    )

    parser.add_argument(
        "--master-port",
        type=int,
        default=12359,
        help="Master port to use for DDP training.",
    )

    parser.add_argument(
        "--tensorboard",
        type=str2bool,
        default=True,
        help="Should various information be logged in tensorboard.",
    )

    parser.add_argument(
        "--num-epochs",
        type=int,
        default=30,
        help="Number of epochs to train.",
    )

    parser.add_argument(
        "--start-epoch",
        type=int,
        default=0,
        help="""Resume training from this epoch.
        If it is positive, it will load the checkpoint from
        transducer_stateless2/exp/epoch-{start_epoch-1}.pt
        """,
    )

    parser.add_argument(
        "--start-batch",
        type=int,
        default=0,
        help="""If positive, --start-epoch is ignored and
        it loads the checkpoint from exp-dir/checkpoint-{start_batch}.pt
        """,
    )

    parser.add_argument(
        "--exp-dir",
        type=str,
        default="pruned_transducer_stateless2/exp",
        help="""The experiment dir.
        It specifies the directory where all training related
        files, e.g., checkpoints, log, etc, are saved
        """,
    )

    parser.add_argument(
        "--lang-dir",
        type=str,
        default="data/lang_char",
        help="""The lang dir.
        It contains language related input files such as
        "lexicon.txt"
        """,
    )

    parser.add_argument(
        "--initial-lr",
        type=float,
        default=0.003,
        help="The initial learning rate. This value should not need to be changed.",
    )

    parser.add_argument(
        "--lr-batches",
        type=float,
        default=5000,
        help="""Number of steps that affects how rapidly the learning rate decreases.
        We suggest not to change this.""",
    )

    parser.add_argument(
        "--lr-epochs",
        type=float,
        default=6,
        help="""Number of epochs that affects how rapidly the learning rate decreases.
        """,
    )

    parser.add_argument(
        "--context-size",
        type=int,
        default=2,
        help="The context size in the decoder. 1 means bigram; "
        "2 means trigram",
    )

    parser.add_argument(
        "--prune-range",
        type=int,
        default=5,
        help="The prune range for rnnt loss; it means how many symbols (context) "
        "we are using to compute the loss",
    )

    parser.add_argument(
        "--lm-scale",
        type=float,
        default=0.25,
        help="The scale to smooth the loss with lm "
        "(output of prediction network) part.",
    )

    parser.add_argument(
        "--am-scale",
        type=float,
        default=0.0,
        help="The scale to smooth the loss with am (output of encoder network) "
        "part.",
    )

    parser.add_argument(
        "--simple-loss-scale",
        type=float,
        default=0.5,
        help="To get pruning ranges, we will calculate a simple version of the "
        "loss (where the joiner is just an addition); this simple loss is also "
        "used for training (as a regularization term). We scale the simple loss "
        "with this parameter before adding it to the final loss.",
    )

    parser.add_argument(
        "--seed",
        type=int,
        default=42,
        help="The seed for random generators intended for reproducibility",
    )

    parser.add_argument(
        "--print-diagnostics",
        type=str2bool,
        default=False,
        help="Accumulate stats on activations, print them and exit.",
    )

    parser.add_argument(
        "--save-every-n",
        type=int,
        default=8000,
        help="""Save a checkpoint after processing this number of batches,
        periodically. We save a checkpoint to exp-dir/ whenever
        params.batch_idx_train % save_every_n == 0. The checkpoint filename
        has the form: f'exp-dir/checkpoint-{params.batch_idx_train}.pt'
        Note: It also saves checkpoint to `exp-dir/epoch-xxx.pt` at the
        end of each epoch where `xxx` is the epoch number counting from 0.
        """,
    )

    parser.add_argument(
        "--keep-last-k",
        type=int,
        default=20,
        help="""Only keep this number of checkpoints on disk.
        For instance, if it is 3, there are only 3 checkpoints
        in the exp-dir with filenames `checkpoint-xxx.pt`.
        It does not affect checkpoints with name `epoch-xxx.pt`.
        """,
    )

    parser.add_argument(
        "--use-fp16",
        type=str2bool,
        default=False,
        help="Whether to use half precision training.",
    )

    return parser
def get_params() -> AttributeDict:
    """Return a dict containing training parameters.

    All training related parameters that are not passed from the commandline
    are saved in the variable `params`.

    Commandline options are merged into `params` after they are parsed, so
    you can also access them via `params`.

    Explanation of options saved in `params`:

        - best_train_loss: Best training loss so far. It is used to select
                           the model that has the lowest training loss. It is
                           updated during the training.

        - best_valid_loss: Best validation loss so far. It is used to select
                           the model that has the lowest validation loss. It is
                           updated during the training.

        - best_train_epoch: It is the epoch that has the best training loss.

        - best_valid_epoch: It is the epoch that has the best validation loss.

        - batch_idx_train: Used to write statistics to tensorboard. It
                           contains the number of batches trained so far across
                           epochs.

        - log_interval: Print training loss if batch_idx % log_interval is 0

        - reset_interval: Reset statistics if batch_idx % reset_interval is 0

        - valid_interval: Run validation if batch_idx % valid_interval is 0

        - feature_dim: The model input dim. It has to match the one used
                       in computing features.

        - subsampling_factor: The subsampling factor for the model.

        - encoder_dim: Hidden dim for the multi-head attention model.

        - num_decoder_layers: Number of decoder layers of the transformer decoder.

        - warm_step: The warm_step for the Noam optimizer.
    """
    params = AttributeDict(
        {
            "best_train_loss": float("inf"),
            "best_valid_loss": float("inf"),
            "best_train_epoch": -1,
            "best_valid_epoch": -1,
            "batch_idx_train": 10,
            "log_interval": 1,
            "reset_interval": 200,
            "valid_interval": 400,
            # parameters for conformer
            "feature_dim": 80,
            "subsampling_factor": 4,
            "encoder_dim": 512,
            "nhead": 8,
            "dim_feedforward": 2048,
            "num_encoder_layers": 12,
            # parameters for decoder
            "decoder_dim": 512,
            # parameters for joiner
            "joiner_dim": 512,
            # parameters for Noam
            "model_warm_step": 200,
            "env_info": get_env_info(),
        }
    )

    return params
def get_encoder_model(params: AttributeDict) -> nn.Module:
    # TODO: We can add an option to switch between Conformer and Transformer
    encoder = Conformer(
        num_features=params.feature_dim,
        subsampling_factor=params.subsampling_factor,
        d_model=params.encoder_dim,
        nhead=params.nhead,
        dim_feedforward=params.dim_feedforward,
        num_encoder_layers=params.num_encoder_layers,
    )
    return encoder


def get_decoder_model(params: AttributeDict) -> nn.Module:
    decoder = Decoder(
        vocab_size=params.vocab_size,
        decoder_dim=params.decoder_dim,
        blank_id=params.blank_id,
        context_size=params.context_size,
    )
    return decoder


def get_joiner_model(params: AttributeDict) -> nn.Module:
    joiner = Joiner(
        encoder_dim=params.encoder_dim,
        decoder_dim=params.decoder_dim,
        joiner_dim=params.joiner_dim,
        vocab_size=params.vocab_size,
    )
    return joiner


def get_transducer_model(params: AttributeDict) -> nn.Module:
    encoder = get_encoder_model(params)
    decoder = get_decoder_model(params)
    joiner = get_joiner_model(params)

    model = Transducer(
        encoder=encoder,
        decoder=decoder,
        joiner=joiner,
        encoder_dim=params.encoder_dim,
        decoder_dim=params.decoder_dim,
        joiner_dim=params.joiner_dim,
        vocab_size=params.vocab_size,
    )
    return model
def load_checkpoint_if_available(
    params: AttributeDict,
    model: nn.Module,
    optimizer: Optional[torch.optim.Optimizer] = None,
    scheduler: Optional[LRSchedulerType] = None,
) -> Optional[Dict[str, Any]]:
    """Load checkpoint from file.

    If params.start_batch is positive, it will load the checkpoint from
    `params.exp_dir/checkpoint-{params.start_batch}.pt`. Otherwise, if
    params.start_epoch is positive, it will load the checkpoint from
    `params.exp_dir/epoch-{params.start_epoch - 1}.pt`.

    Apart from loading state dict for `model` and `optimizer` it also updates
    `best_train_epoch`, `best_train_loss`, `best_valid_epoch`,
    and `best_valid_loss` in `params`.

    Args:
      params:
        The return value of :func:`get_params`.
      model:
        The training model.
      optimizer:
        The optimizer that we are using.
      scheduler:
        The scheduler that we are using.
    Returns:
      Return a dict containing previously saved training info.
    """
    if params.start_batch > 0:
        filename = params.exp_dir / f"checkpoint-{params.start_batch}.pt"
    elif params.start_epoch > 0:
        filename = params.exp_dir / f"epoch-{params.start_epoch-1}.pt"
    else:
        return None

    assert filename.is_file(), f"{filename} does not exist!"

    saved_params = load_checkpoint(
        filename,
        model=model,
        optimizer=optimizer,
        scheduler=scheduler,
    )

    keys = [
        "best_train_epoch",
        "best_valid_epoch",
        "batch_idx_train",
        "best_train_loss",
        "best_valid_loss",
    ]
    for k in keys:
        params[k] = saved_params[k]

    if params.start_batch > 0:
        if "cur_epoch" in saved_params:
            params["start_epoch"] = saved_params["cur_epoch"]

    return saved_params
def save_checkpoint(
    params: AttributeDict,
    model: nn.Module,
    optimizer: Optional[torch.optim.Optimizer] = None,
    scheduler: Optional[LRSchedulerType] = None,
    sampler: Optional[CutSampler] = None,
    scaler: Optional[GradScaler] = None,
    rank: int = 0,
) -> None:
    """Save model, optimizer, scheduler and training stats to file.

    Args:
      params:
        It is returned by :func:`get_params`.
      model:
        The training model.
      optimizer:
        The optimizer used in the training.
      sampler:
        The sampler for the training dataset.
      scaler:
        The scaler used for mixed precision training.
    """
    if rank != 0:
        return
    filename = params.exp_dir / f"epoch-{params.cur_epoch}.pt"
    save_checkpoint_impl(
        filename=filename,
        model=model,
        params=params,
        optimizer=optimizer,
        scheduler=scheduler,
        sampler=sampler,
        scaler=scaler,
        rank=rank,
    )

    if params.best_train_epoch == params.cur_epoch:
        best_train_filename = params.exp_dir / "best-train-loss.pt"
        copyfile(src=filename, dst=best_train_filename)

    if params.best_valid_epoch == params.cur_epoch:
        best_valid_filename = params.exp_dir / "best-valid-loss.pt"
        copyfile(src=filename, dst=best_valid_filename)
def compute_loss(
    params: AttributeDict,
    model: nn.Module,
    graph_compiler: CharCtcTrainingGraphCompiler,
    batch: dict,
    is_training: bool,
    warmup: float = 1.0,
) -> Tuple[Tensor, MetricsTracker]:
    """
    Compute the transducer loss given the model and its inputs.

    Args:
      params:
        Parameters for training. See :func:`get_params`.
      model:
        The model for training. It is an instance of Conformer in our case.
      batch:
        A batch of data. See `lhotse.dataset.K2SpeechRecognitionDataset()`
        for the content in it.
      is_training:
        True for training. False for validation. When it is True, this
        function enables autograd during computation; when it is False, it
        disables autograd.
      warmup: a floating point value which increases throughout training;
        values >= 1.0 are fully warmed up and have all modules present.
    """
    device = model.device
    feature = batch["inputs"]
    # at entry, feature is (N, T, C)
    assert feature.ndim == 3
    feature = feature.to(device)

    supervisions = batch["supervisions"]
    feature_lens = supervisions["num_frames"].to(device)

    texts = batch["supervisions"]["text"]

    y = graph_compiler.texts_to_ids(texts)
    if isinstance(y, list):
        y = k2.RaggedTensor(y).to(device)
    else:
        y = y.to(device)

    with torch.set_grad_enabled(is_training):
        simple_loss, pruned_loss = model(
            x=feature,
            x_lens=feature_lens,
            y=y,
            prune_range=params.prune_range,
            am_scale=params.am_scale,
            lm_scale=params.lm_scale,
            warmup=warmup,
        )
        # after the main warmup step, we keep pruned_loss_scale small
        # for the same amount of time (model_warm_step), to avoid
        # overwhelming the simple_loss and causing it to diverge,
        # in case it had not fully learned the alignment yet.
        pruned_loss_scale = (
            0.0
            if warmup < 1.0
            else (0.1 if warmup > 1.0 and warmup < 2.0 else 1.0)
        )
        loss = (
            params.simple_loss_scale * simple_loss
            + pruned_loss_scale * pruned_loss
        )
    assert loss.requires_grad == is_training

    info = MetricsTracker()
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        info["frames"] = (
            (feature_lens // params.subsampling_factor).sum().item()
        )

    # Note: We use reduction=sum while computing the loss.
    info["loss"] = loss.detach().cpu().item()
    info["simple_loss"] = simple_loss.detach().cpu().item()
    info["pruned_loss"] = pruned_loss.detach().cpu().item()

    return loss, info
def compute_validation_loss(
    params: AttributeDict,
    model: nn.Module,
    graph_compiler: CharCtcTrainingGraphCompiler,
    valid_dl: torch.utils.data.DataLoader,
    world_size: int = 1,
) -> MetricsTracker:
    """Run the validation process."""
    model.eval()

    tot_loss = MetricsTracker()

    for batch_idx, batch in enumerate(valid_dl):
        loss, loss_info = compute_loss(
            params=params,
            model=model,
            graph_compiler=graph_compiler,
            batch=batch,
            is_training=False,
        )
        assert loss.requires_grad is False
        tot_loss = tot_loss + loss_info

    if world_size > 1:
        tot_loss.reduce(loss.device)

    loss_value = tot_loss["loss"] / tot_loss["frames"]
    if loss_value < params.best_valid_loss:
        params.best_valid_epoch = params.cur_epoch
        params.best_valid_loss = loss_value

    return tot_loss
def train_one_epoch(
    params: AttributeDict,
    model: nn.Module,
    optimizer: torch.optim.Optimizer,
    scheduler: LRSchedulerType,
    graph_compiler: CharCtcTrainingGraphCompiler,
    train_dl: torch.utils.data.DataLoader,
    valid_dl: torch.utils.data.DataLoader,
    scaler: GradScaler,
    tb_writer: Optional[SummaryWriter] = None,
    world_size: int = 1,
    rank: int = 0,
) -> None:
    """Train the model for one epoch.

    The training loss from the mean of all frames is saved in
    `params.train_loss`. It runs the validation process every
    `params.valid_interval` batches.

    Args:
      params:
        It is returned by :func:`get_params`.
      model:
        The model for training.
      optimizer:
        The optimizer we are using.
      scheduler:
        The learning rate scheduler; we call step() every step.
      train_dl:
        Dataloader for the training dataset.
      valid_dl:
        Dataloader for the validation dataset.
      scaler:
        The scaler used for mixed precision training.
      tb_writer:
        Writer to write log messages to tensorboard.
      world_size:
        Number of nodes in DDP training. If it is 1, DDP is disabled.
      rank:
        The rank of the node in DDP training. If no DDP is used, it should
        be set to 0.
    """
    model.train()

    tot_loss = MetricsTracker()

    for batch_idx, batch in enumerate(train_dl):
        params.batch_idx_train += 1
        batch_size = len(batch["supervisions"]["text"])

        with torch.cuda.amp.autocast(enabled=params.use_fp16):
            loss, loss_info = compute_loss(
                params=params,
                model=model,
                graph_compiler=graph_compiler,
                batch=batch,
                is_training=True,
                warmup=(params.batch_idx_train / params.model_warm_step),
            )
        # summary stats
        tot_loss = (tot_loss * (1 - 1 / params.reset_interval)) + loss_info

        # NOTE: We use reduction==sum and loss is computed over utterances
        # in the batch and there is no normalization to it so far.
        scaler.scale(loss).backward()
        scheduler.step_batch(params.batch_idx_train)
        scaler.step(optimizer)
        scaler.update()
        optimizer.zero_grad()

        if params.print_diagnostics and batch_idx == 5:
            return

        if (
            params.batch_idx_train > 0
            and params.batch_idx_train % params.save_every_n == 0
        ):
            save_checkpoint_with_global_batch_idx(
                out_dir=params.exp_dir,
                global_batch_idx=params.batch_idx_train,
                model=model,
                params=params,
                optimizer=optimizer,
                scheduler=scheduler,
                sampler=train_dl.sampler,
                scaler=scaler,
                rank=rank,
            )
            remove_checkpoints(
                out_dir=params.exp_dir,
                topk=params.keep_last_k,
                rank=rank,
            )

        if batch_idx % params.log_interval == 0:
            cur_lr = scheduler.get_last_lr()[0]
            logging.info(
                f"Epoch {params.cur_epoch}, "
                f"batch {batch_idx}, loss[{loss_info}], "
                f"tot_loss[{tot_loss}], batch size: {batch_size}, "
                f"lr: {cur_lr:.2e}"
            )

            if tb_writer is not None:
                tb_writer.add_scalar(
                    "train/learning_rate", cur_lr, params.batch_idx_train
                )

                loss_info.write_summary(
                    tb_writer, "train/current_", params.batch_idx_train
                )
                tot_loss.write_summary(
                    tb_writer, "train/tot_", params.batch_idx_train
                )

        if batch_idx > 0 and batch_idx % params.valid_interval == 0:
            logging.info("Computing validation loss")
            valid_info = compute_validation_loss(
                params=params,
                model=model,
                graph_compiler=graph_compiler,
                valid_dl=valid_dl,
                world_size=world_size,
            )
            model.train()
            logging.info(f"Epoch {params.cur_epoch}, validation: {valid_info}")
            if tb_writer is not None:
                valid_info.write_summary(
                    tb_writer, "train/valid_", params.batch_idx_train
                )

    loss_value = tot_loss["loss"] / tot_loss["frames"]
    params.train_loss = loss_value
    if params.train_loss < params.best_train_loss:
        params.best_train_epoch = params.cur_epoch
        params.best_train_loss = params.train_loss
def run(rank, world_size, args):
    """
    Args:
      rank:
        It is a value between 0 and `world_size-1`, which is
        passed automatically by `mp.spawn()` in :func:`main`.
        The node with rank 0 is responsible for saving checkpoint.
      world_size:
        Number of GPUs for DDP training.
      args:
        The return value of get_parser().parse_args()
    """
    params = get_params()
    params.update(vars(args))

    fix_random_seed(params.seed)
    if world_size > 1:
        setup_dist(rank, world_size, params.master_port)

    setup_logger(f"{params.exp_dir}/log/log-train")
    logging.info("Training started")

    if args.tensorboard and rank == 0:
        tb_writer = SummaryWriter(log_dir=f"{params.exp_dir}/tensorboard")
    else:
        tb_writer = None

    device = torch.device("cpu")
    if torch.cuda.is_available():
        device = torch.device("cuda", rank)
    logging.info(f"Device: {device}")

    lexicon = Lexicon(params.lang_dir)
    graph_compiler = CharCtcTrainingGraphCompiler(
        lexicon=lexicon,
        device=device,
    )

    params.blank_id = lexicon.token_table["<blk>"]
    params.vocab_size = max(lexicon.tokens) + 1

    logging.info(params)

    logging.info("About to create model")
    model = get_transducer_model(params)

    num_param = sum([p.numel() for p in model.parameters()])
    logging.info(f"Number of model parameters: {num_param}")

    checkpoints = load_checkpoint_if_available(params=params, model=model)

    model.to(device)
    if world_size > 1:
        logging.info("Using DDP")
        model = DDP(model, device_ids=[rank])
    model.device = device

    optimizer = Eve(model.parameters(), lr=params.initial_lr)

    scheduler = Eden(optimizer, params.lr_batches, params.lr_epochs)

    if checkpoints and "optimizer" in checkpoints:
        logging.info("Loading optimizer state dict")
        optimizer.load_state_dict(checkpoints["optimizer"])

    if (
        checkpoints
        and "scheduler" in checkpoints
        and checkpoints["scheduler"] is not None
    ):
        logging.info("Loading scheduler state dict")
        scheduler.load_state_dict(checkpoints["scheduler"])

    if params.print_diagnostics:
        opts = diagnostics.TensorDiagnosticOptions(
            2 ** 22
        )  # allow 4 megabytes per sub-module
        diagnostic = diagnostics.attach_diagnostics(model, opts)

    alimeeting = AlimeetingAsrDataModule(args)

    train_cuts = alimeeting.train_cuts()
    valid_cuts = alimeeting.valid_cuts()

    def remove_short_and_long_utt(c: Cut):
        # Keep only utterances with duration between 1.0 and 15.0 seconds.
        #
        # Caution: There is a reason to select 15.0 here. Please see
        # ../local/display_manifest_statistics.py
        #
        # You should use ../local/display_manifest_statistics.py to get
        # an utterance duration distribution for your dataset to select
        # the threshold
        return 1.0 <= c.duration <= 15.0

    train_cuts = train_cuts.filter(remove_short_and_long_utt)

    valid_dl = alimeeting.valid_dataloaders(valid_cuts)

    if params.start_batch > 0 and checkpoints and "sampler" in checkpoints:
        # We only load the sampler's state dict when it loads a checkpoint
        # saved in the middle of an epoch
        sampler_state_dict = checkpoints["sampler"]
    else:
        sampler_state_dict = None

    train_dl = alimeeting.train_dataloaders(
        train_cuts, sampler_state_dict=sampler_state_dict
    )

    if not params.print_diagnostics and params.start_batch == 0:
        scan_pessimistic_batches_for_oom(
            model=model,
            train_dl=train_dl,
            optimizer=optimizer,
            graph_compiler=graph_compiler,
            params=params,
        )

    scaler = GradScaler(enabled=params.use_fp16)
    if checkpoints and "grad_scaler" in checkpoints:
        logging.info("Loading grad scaler state dict")
        scaler.load_state_dict(checkpoints["grad_scaler"])

    for epoch in range(params.start_epoch, params.num_epochs):
        scheduler.step_epoch(epoch)
        fix_random_seed(params.seed + epoch)
        train_dl.sampler.set_epoch(epoch)

        if tb_writer is not None:
            tb_writer.add_scalar("train/epoch", epoch, params.batch_idx_train)

        params.cur_epoch = epoch

        train_one_epoch(
            params=params,
            model=model,
            optimizer=optimizer,
            scheduler=scheduler,
            graph_compiler=graph_compiler,
            train_dl=train_dl,
            valid_dl=valid_dl,
            scaler=scaler,
            tb_writer=tb_writer,
            world_size=world_size,
            rank=rank,
        )

        if params.print_diagnostics:
            diagnostic.print_diagnostics()
            break

        save_checkpoint(
            params=params,
            model=model,
            optimizer=optimizer,
            scheduler=scheduler,
            sampler=train_dl.sampler,
            scaler=scaler,
            rank=rank,
        )

    logging.info("Done!")

    if world_size > 1:
        torch.distributed.barrier()
        cleanup_dist()
def scan_pessimistic_batches_for_oom(
    model: nn.Module,
    train_dl: torch.utils.data.DataLoader,
    optimizer: torch.optim.Optimizer,
    graph_compiler: CharCtcTrainingGraphCompiler,
    params: AttributeDict,
):
    from lhotse.dataset import find_pessimistic_batches

    logging.info(
        "Sanity check -- see if any of the batches in epoch 0 would cause OOM."
    )
    batches, crit_values = find_pessimistic_batches(train_dl.sampler)
    for criterion, cuts in batches.items():
        batch = train_dl.dataset[cuts]
        try:
            # warmup = 0.0 is so that the derivs for the pruned loss stay zero
            # (i.e. are not remembered by the decaying-average in adam), because
            # we want to avoid these params being subject to shrinkage in adam.
            with torch.cuda.amp.autocast(enabled=params.use_fp16):
                loss, _ = compute_loss(
                    params=params,
                    model=model,
                    graph_compiler=graph_compiler,
                    batch=batch,
                    is_training=True,
                    warmup=0.0,
                )
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
        except RuntimeError as e:
            if "CUDA out of memory" in str(e):
                logging.error(
                    "Your GPU ran out of memory with the current "
                    "max_duration setting. We recommend decreasing "
                    "max_duration and trying again.\n"
                    f"Failing criterion: {criterion} "
                    f"(={crit_values[criterion]}) ..."
                )
            raise
def main():
    parser = get_parser()
    AlimeetingAsrDataModule.add_arguments(parser)
    args = parser.parse_args()
    args.lang_dir = Path(args.lang_dir)
    args.exp_dir = Path(args.exp_dir)

    world_size = args.world_size
    assert world_size >= 1
    if world_size > 1:
        mp.spawn(run, args=(world_size, args), nprocs=world_size, join=True)
    else:
        run(rank=0, world_size=1, args=args)


torch.set_num_threads(1)
torch.set_num_interop_threads(1)

if __name__ == "__main__":
    main()
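
For reference, the warmup-dependent scaling inside compute_loss() above can be read off as a small pure function. The sketch below is an illustrative restatement under the defaults in get_params() (model_warm_step=200), not an extra module in the recipe.

```python
# Illustrative restatement of the pruned_loss_scale schedule in compute_loss().
def pruned_loss_scale(warmup: float) -> float:
    # warmup is batch_idx_train / model_warm_step in train_one_epoch()
    if warmup < 1.0:
        return 0.0  # rely on simple_loss until the alignment is learned
    if 1.0 < warmup < 2.0:
        return 0.1  # phase the pruned loss in gently
    return 1.0      # fully warmed up (also at warmup == 1.0, as in the source)

# With model_warm_step=200: batch 100 -> 0.0, batch 300 -> 0.1, batch 500 -> 1.0
assert [pruned_loss_scale(b / 200) for b in (100, 300, 500)] == [0.0, 0.1, 1.0]
```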
1  egs/alimeeting/ASR/shared  Symbolic link
@ -0,0 +1 @@
../../../egs/aishell/ASR/shared
@ -20,9 +20,8 @@ import logging
 from functools import lru_cache
 from pathlib import Path

-from lhotse import CutSet, Fbank, FbankConfig, load_manifest
+from lhotse import CutSet, Fbank, FbankConfig, load_manifest, load_manifest_lazy
 from lhotse.dataset import (
-    BucketingSampler,
     CutConcatenate,
     CutMix,
     DynamicBucketingSampler,
@ -191,7 +190,7 @@ class GigaSpeechAsrDataModule:
     def train_dataloaders(self, cuts_train: CutSet) -> DataLoader:
         logging.info("About to get Musan cuts")
         cuts_musan = load_manifest(
-            self.args.manifest_dir / "cuts_musan.json.gz"
+            self.args.manifest_dir / "musan_cuts.jsonl.gz"
         )

         transforms = []
@ -315,7 +314,7 @@ class GigaSpeechAsrDataModule:
             cut_transforms=transforms,
             return_cuts=self.args.return_cuts,
         )
-        valid_sampler = BucketingSampler(
+        valid_sampler = DynamicBucketingSampler(
             cuts_valid,
             max_duration=self.args.max_duration,
             shuffle=False,
@ -339,8 +338,10 @@ class GigaSpeechAsrDataModule:
             else PrecomputedFeatures(),
             return_cuts=self.args.return_cuts,
         )
-        sampler = BucketingSampler(
-            cuts, max_duration=self.args.max_duration, shuffle=False
+        sampler = DynamicBucketingSampler(
+            cuts,
+            max_duration=self.args.max_duration,
+            shuffle=False,
         )
         logging.debug("About to create test dataloader")
         test_dl = DataLoader(
@ -361,7 +362,9 @@ class GigaSpeechAsrDataModule:
     @lru_cache()
     def dev_cuts(self) -> CutSet:
         logging.info("About to get dev cuts")
-        cuts_valid = load_manifest(self.args.manifest_dir / "cuts_DEV.jsonl.gz")
+        cuts_valid = load_manifest_lazy(
+            self.args.manifest_dir / "cuts_DEV.jsonl.gz"
+        )
         if self.args.small_dev:
             return cuts_valid.subset(first=1000)
         else:
@ -370,4 +373,4 @@ class GigaSpeechAsrDataModule:
     @lru_cache()
     def test_cuts(self) -> CutSet:
         logging.info("About to get test cuts")
-        return load_manifest(self.args.manifest_dir / "cuts_TEST.jsonl.gz")
+        return load_manifest_lazy(self.args.manifest_dir / "cuts_TEST.jsonl.gz")
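
The hunks above replace `BucketingSampler` with `DynamicBucketingSampler`. Below is a hedged sketch of the standalone usage, assuming lhotse is installed and `cuts` is an existing CutSet (e.g., the validation cuts); the apparent motivation is that dynamic bucketing sizes its buckets on the fly rather than requiring all cut durations up front.

```python
# Sketch only: standalone use of the sampler the diff switches to.
from lhotse import CutSet
from lhotse.dataset import DynamicBucketingSampler

def make_valid_sampler(cuts: CutSet) -> DynamicBucketingSampler:
    # Mirrors the valid_sampler construction in the hunk above.
    return DynamicBucketingSampler(
        cuts,
        max_duration=200.0,  # total seconds of audio per batch
        shuffle=False,
    )
```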
@ -1,103 +0,0 @@
#!/usr/bin/env python3
# Copyright 2021 Johns Hopkins University (Piotr Żelasko)
# Copyright 2021 Xiaomi Corp. (Fangjun Kuang)
#
# See ../../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
from pathlib import Path

import torch
from lhotse import (
    CutSet,
    KaldifeatFbank,
    KaldifeatFbankConfig,
    combine,
)
from lhotse.recipes.utils import read_manifests_if_cached

# Torch's multithreaded behavior needs to be disabled or
# it wastes a lot of CPU and slow things down.
# Do this outside of main() in case it needs to take effect
# even when we are not invoking the main (e.g. when spawning subprocesses).
torch.set_num_threads(1)
torch.set_num_interop_threads(1)


def compute_fbank_musan():
    src_dir = Path("data/manifests")
    output_dir = Path("data/fbank")

    # number of workers in dataloader
    num_workers = 10

    # number of seconds in a batch
    batch_duration = 600

    dataset_parts = (
        "music",
        "speech",
        "noise",
    )

    manifests = read_manifests_if_cached(
        prefix="musan", dataset_parts=dataset_parts, output_dir=src_dir
    )
    assert manifests is not None

    musan_cuts_path = output_dir / "cuts_musan.json.gz"

    if musan_cuts_path.is_file():
        logging.info(f"{musan_cuts_path} already exists - skipping")
        return

    logging.info("Extracting features for Musan")

    device = torch.device("cpu")
    if torch.cuda.is_available():
        device = torch.device("cuda", 0)
    extractor = KaldifeatFbank(KaldifeatFbankConfig(device=device))

    logging.info(f"device: {device}")

    musan_cuts = (
        CutSet.from_manifests(
            recordings=combine(
                part["recordings"] for part in manifests.values()
            )
        )
        .cut_into_windows(10.0)
        .filter(lambda c: c.duration > 5)
        .compute_and_store_features_batch(
            extractor=extractor,
            storage_path=f"{output_dir}/feats_musan",
            num_workers=num_workers,
            batch_duration=batch_duration,
        )
    )
    musan_cuts.to_json(musan_cuts_path)


def main():
    formatter = (
        "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s"
    )
    logging.basicConfig(format=formatter, level=logging.INFO)

    compute_fbank_musan()


if __name__ == "__main__":
    main()

1  egs/gigaspeech/ASR/local/compute_fbank_musan.py  Symbolic link
@ -0,0 +1 @@
../../../librispeech/ASR/local/compute_fbank_musan.py
@ -23,9 +23,8 @@ from pathlib import Path
 from typing import Any, Dict, Optional

 import torch
-from lhotse import CutSet, Fbank, FbankConfig, load_manifest
+from lhotse import CutSet, Fbank, FbankConfig, load_manifest, load_manifest_lazy
 from lhotse.dataset import (
-    BucketingSampler,
     CutConcatenate,
     CutMix,
     DynamicBucketingSampler,
@ -218,7 +217,7 @@ class GigaSpeechAsrDataModule:
             logging.info("Enable MUSAN")
             logging.info("About to get Musan cuts")
             cuts_musan = load_manifest(
-                self.args.manifest_dir / "cuts_musan.json.gz"
+                self.args.manifest_dir / "musan_cuts.jsonl.gz"
             )
             transforms.append(
                 CutMix(
@ -358,7 +357,7 @@ class GigaSpeechAsrDataModule:
             cut_transforms=transforms,
             return_cuts=self.args.return_cuts,
         )
-        valid_sampler = BucketingSampler(
+        valid_sampler = DynamicBucketingSampler(
             cuts_valid,
             max_duration=self.args.max_duration,
             shuffle=False,
@ -382,8 +381,10 @@ class GigaSpeechAsrDataModule:
             else PrecomputedFeatures(),
             return_cuts=self.args.return_cuts,
         )
-        sampler = BucketingSampler(
-            cuts, max_duration=self.args.max_duration, shuffle=False
+        sampler = DynamicBucketingSampler(
+            cuts,
+            max_duration=self.args.max_duration,
+            shuffle=False,
         )
         logging.debug("About to create test dataloader")
         test_dl = DataLoader(
@ -404,7 +405,9 @@ class GigaSpeechAsrDataModule:
     @lru_cache()
     def dev_cuts(self) -> CutSet:
         logging.info("About to get dev cuts")
-        cuts_valid = load_manifest(self.args.manifest_dir / "cuts_DEV.jsonl.gz")
+        cuts_valid = load_manifest_lazy(
+            self.args.manifest_dir / "cuts_DEV.jsonl.gz"
+        )
         if self.args.small_dev:
             return cuts_valid.subset(first=1000)
         else:
@ -413,4 +416,4 @@ class GigaSpeechAsrDataModule:
     @lru_cache()
     def test_cuts(self) -> CutSet:
         logging.info("About to get test cuts")
-        return load_manifest(self.args.manifest_dir / "cuts_TEST.jsonl.gz")
+        return load_manifest_lazy(self.args.manifest_dir / "cuts_TEST.jsonl.gz")
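
Both datamodule diffs also move the dev/test manifests from `load_manifest` to `load_manifest_lazy`. As a hedged sketch (the path is hypothetical), the lazy variant streams cuts from the JSONL file on demand instead of parsing everything up front, while still supporting CutSet operations such as the `subset(first=1000)` call in dev_cuts():

```python
# Hedged sketch: eager vs. lazy manifest loading in lhotse; path is hypothetical.
from lhotse import load_manifest, load_manifest_lazy

eager = load_manifest("data/fbank/cuts_DEV.jsonl.gz")      # parses all cuts up front
lazy = load_manifest_lazy("data/fbank/cuts_DEV.jsonl.gz")  # streams cuts on demand

small_dev = lazy.subset(first=1000)  # as used in dev_cuts() above
```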
@ -21,6 +21,8 @@ The following table lists the differences among them.
 | `pruned_transducer_stateless3` | Conformer(modified) | Embedding + Conv1d | Using k2 pruned RNN-T loss + using GigaSpeech as extra training data |
 | `pruned_transducer_stateless4` | Conformer(modified) | Embedding + Conv1d | same as pruned_transducer_stateless2 + save averaged models periodically during training |
 | `pruned_transducer_stateless5` | Conformer(modified) | Embedding + Conv1d | same as pruned_transducer_stateless4 + more layers + random combiner |
+| `pruned_transducer_stateless6` | Conformer(modified) | Embedding + Conv1d | same as pruned_transducer_stateless4 + distillation with hubert |
+| `pruned_stateless_emformer_rnnt2` | Emformer(from torchaudio) | Embedding + Conv1d | Using Emformer from torchaudio for streaming ASR |

 The decoder in `transducer_stateless` is modified from the paper
@ -3,6 +3,31 @@
 This page shows the WERs for test-clean/test-other using only
 train-clean-100 subset as training data.

+## Distillation with hubert
+
+### 2022-05-27
+
+Related models/log/tensorboard:
+https://huggingface.co/GuoLiyong/stateless6_baseline_vs_disstillation
+
+The following results are obtained by ./distillation_with_hubert.sh
+
+The only difference is in pruned_transducer_stateless6/train.py.
+
+For the baseline: set enable_distillation=False
+
+For distillation: set enable_distillation=True (the default)
+
+The decoding method is modified beam search.
+
+| | test-clean | test-other | comment |
+|-------------------------------------|------------|------------|------------------------------------------|
+| baseline no vq distillation | 7.09 | 18.88 | --epoch 20, --avg 10, --max-duration 200 |
+| baseline no vq distillation | 6.83 | 18.19 | --epoch 30, --avg 10, --max-duration 200 |
+| baseline no vq distillation | 6.73 | 17.79 | --epoch 40, --avg 10, --max-duration 200 |
+| baseline no vq distillation | 6.75 | 17.68 | --epoch 50, --avg 10, --max-duration 200 |
+| distillation with hubert | 5.82 | 15.98 | --epoch 20, --avg 10, --max-duration 200 |
+| distillation with hubert | 5.52 | 15.15 | --epoch 30, --avg 10, --max-duration 200 |
+| distillation with hubert | 5.45 | 14.94 | --epoch 40, --avg 10, --max-duration 200 |
+| distillation with hubert | 5.50 | 14.77 | --epoch 50, --avg 10, --max-duration 200 |
+
 ## Conformer encoder + embedding decoder

 ### 2022-02-21
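
As a worked reading of the distillation table in the hunk above (the epoch 30, avg 10 rows), the relative WER reduction can be computed directly; this snippet is illustrative only:

```python
# Relative WER reduction from the table above at --epoch 30, --avg 10.
baseline = {"test-clean": 6.83, "test-other": 18.19}
distilled = {"test-clean": 5.52, "test-other": 15.15}

for split, wer in baseline.items():
    rel = 100 * (wer - distilled[split]) / wer
    print(f"{split}: {rel:.1f}% relative WER reduction")
# test-clean: 19.2% relative WER reduction
# test-other: 16.7% relative WER reduction
```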
@@ -1,5 +1,73 @@
 ## Results
 
+### LibriSpeech BPE training results (Pruned Stateless Emformer RNN-T)
+
+[pruned_stateless_emformer_rnnt2](./pruned_stateless_emformer_rnnt2)
+
+Use <https://github.com/k2-fsa/icefall/pull/390>.
+
+Use [Emformer](https://arxiv.org/abs/2010.10759) from [torchaudio](https://github.com/pytorch/audio)
+for streaming ASR. The Emformer model is imported from torchaudio without modifications.
+
+You can use <https://github.com/k2-fsa/sherpa> to deploy it.
+
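As an aside, the imported model is torchaudio's `Emformer`. A minimal sketch of instantiating it with the context/segment sizes used by the training command below; `input_dim`, `num_heads`, and `ffn_dim` here are illustrative assumptions:

```python
import torch
from torchaudio.models import Emformer

# segment/context sizes match the training command below
encoder = Emformer(
    input_dim=512,
    num_heads=8,
    ffn_dim=2048,
    num_layers=18,
    segment_length=8,
    left_context_length=128,
    right_context_length=4,
)

# Non-streaming (training-style) forward: the input is right-padded
# with `right_context_length` extra frames.
x = torch.rand(2, 32 + 4, 512)
lengths = torch.tensor([32, 32])
output, output_lengths = encoder(x, lengths)
```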
+|                                     | test-clean | test-other | comment                                |
+|-------------------------------------|------------|------------|----------------------------------------|
+| greedy search (max sym per frame 1) | 4.28       | 11.42      | --epoch 39 --avg 6 --max-duration 600  |
+| modified beam search                | 4.22       | 11.16      | --epoch 39 --avg 6 --max-duration 600  |
+| fast beam search                    | 4.29       | 11.26      | --epoch 39 --avg 6 --max-duration 600  |
+
+The training commands are:
+
+```bash
+export CUDA_VISIBLE_DEVICES="0,1,2,3,4,5,6,7"
+
+./pruned_stateless_emformer_rnnt2/train.py \
+  --world-size 8 \
+  --num-epochs 40 \
+  --start-epoch 1 \
+  --exp-dir pruned_stateless_emformer_rnnt2/exp-full \
+  --full-libri 1 \
+  --use-fp16 0 \
+  --max-duration 200 \
+  --prune-range 5 \
+  --lm-scale 0.25 \
+  --master-port 12358 \
+  --num-encoder-layers 18 \
+  --left-context-length 128 \
+  --segment-length 8 \
+  --right-context-length 4
+```
+
+The tensorboard log can be found at
+<https://tensorboard.dev/experiment/ZyiqhAhmRjmr49xml4ofLw/>
+
+The decoding commands are:
+
+```bash
+for m in greedy_search fast_beam_search modified_beam_search; do
+  for epoch in 39; do
+    for avg in 6; do
+      ./pruned_stateless_emformer_rnnt2/decode.py \
+        --epoch $epoch \
+        --avg $avg \
+        --use-averaged-model 1 \
+        --exp-dir pruned_stateless_emformer_rnnt2/exp-full \
+        --max-duration 50 \
+        --decoding-method $m \
+        --num-encoder-layers 18 \
+        --left-context-length 128 \
+        --segment-length 8 \
+        --right-context-length 4
+    done
+  done
+done
+```
+
+You can find a pretrained model, training logs, decoding logs, and decoding
+results at:
+<https://huggingface.co/csukuangfj/icefall-asr-librispeech-pruned-stateless-emformer-rnnt2-2022-06-01>
+
+
 ### LibriSpeech BPE training results (Pruned Stateless Transducer 5)
 
 [pruned_transducer_stateless5](./pruned_transducer_stateless5)
@@ -193,6 +261,126 @@ You can find a pretrained model, training logs, decoding logs, and decoding
 results at:
 <https://huggingface.co/csukuangfj/icefall-asr-librispeech-pruned-transducer-stateless5-narrower-2022-05-13>
 
 
+### LibriSpeech BPE training results (Pruned Stateless Transducer 4)
+
+[pruned_transducer_stateless4](./pruned_transducer_stateless4)
+
+This version saves the averaged model during training and decodes with the averaged model.
+
+See <https://github.com/k2-fsa/icefall/issues/337> for details about the idea of model averaging.
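The gist of that idea, sketched under the assumption that each checkpoint stores a running average of all model snapshots seen so far together with a snapshot count (this mirrors, but is not, the exact icefall implementation):

```python
from typing import Dict

import torch


def average_over_range(
    avg_start: Dict[str, torch.Tensor],
    count_start: int,
    avg_end: Dict[str, torch.Tensor],
    count_end: int,
) -> Dict[str, torch.Tensor]:
    """Recover the mean of the snapshots seen in (start, end] from two
    running averages: avg_end * count_end - avg_start * count_start is
    the sum over the interval; dividing by the interval size gives its
    mean, so only two checkpoints need to be loaded."""
    n = count_end - count_start
    return {
        k: (avg_end[k] * count_end - avg_start[k] * count_start) / n
        for k in avg_end
    }
```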
+
+#### Training on full librispeech
+
+See <https://github.com/k2-fsa/icefall/pull/344>
+
+Using commit `ec0b0e92297cc03fdb09f48cd235e84d2c04156b`.
+
+The WERs are:
+
+|                                     | test-clean | test-other | comment                                        |
+|-------------------------------------|------------|------------|------------------------------------------------|
+| greedy search (max sym per frame 1) | 2.75       | 6.74       | --epoch 30 --avg 6 --use_averaged_model False  |
+| greedy search (max sym per frame 1) | 2.69       | 6.64       | --epoch 30 --avg 6 --use_averaged_model True   |
+| fast beam search                    | 2.72       | 6.67       | --epoch 30 --avg 6 --use_averaged_model False  |
+| fast beam search                    | 2.66       | 6.6        | --epoch 30 --avg 6 --use_averaged_model True   |
+| modified beam search                | 2.67       | 6.68       | --epoch 30 --avg 6 --use_averaged_model False  |
+| modified beam search                | 2.62       | 6.57       | --epoch 30 --avg 6 --use_averaged_model True   |
+
+The training command is:
+
+```bash
+./pruned_transducer_stateless4/train.py \
+  --world-size 6 \
+  --num-epochs 30 \
+  --start-epoch 1 \
+  --exp-dir pruned_transducer_stateless4/exp \
+  --full-libri 1 \
+  --max-duration 300 \
+  --save-every-n 8000 \
+  --keep-last-k 20 \
+  --average-period 100
+```
+
+The tensorboard log can be found at
+<https://tensorboard.dev/experiment/QOGSPBgsR8KzcRMmie9JGw/>
+
+The decoding command using greedy search is:
+
+```bash
+./pruned_transducer_stateless4/decode.py \
+  --epoch 30 \
+  --avg 6 \
+  --exp-dir pruned_transducer_stateless4/exp \
+  --max-duration 300 \
+  --decoding-method greedy_search \
+  --use-averaged-model True
+```
+
+The decoding command using fast beam search is:
+
+```bash
+./pruned_transducer_stateless4/decode.py \
+  --epoch 30 \
+  --avg 6 \
+  --exp-dir pruned_transducer_stateless4/exp \
+  --max-duration 300 \
+  --decoding-method fast_beam_search \
+  --use-averaged-model True \
+  --beam 4 \
+  --max-contexts 4 \
+  --max-states 8
+```
+
+The decoding command using modified beam search is:
+
+```bash
+./pruned_transducer_stateless4/decode.py \
+  --epoch 30 \
+  --avg 6 \
+  --exp-dir pruned_transducer_stateless4/exp \
+  --max-duration 300 \
+  --decoding-method modified_beam_search \
+  --use-averaged-model True \
+  --beam-size 4
+```
+
+Pretrained models, training logs, decoding logs, and decoding results
+are available at
+<https://huggingface.co/Zengwei/icefall-asr-librispeech-pruned-transducer-stateless4-2022-06-03>
+
+#### Training on train-clean-100
+
+See <https://github.com/k2-fsa/icefall/pull/344>
+
+Using commit `ec0b0e92297cc03fdb09f48cd235e84d2c04156b`.
+
+The WERs are:
+
+|                                     | test-clean | test-other | comment                                        |
+|-------------------------------------|------------|------------|------------------------------------------------|
+| greedy search (max sym per frame 1) | 7.0        | 18.95      | --epoch 30 --avg 10 --use_averaged_model False |
+| greedy search (max sym per frame 1) | 6.92       | 18.65      | --epoch 30 --avg 10 --use_averaged_model True  |
+| fast beam search                    | 6.82       | 18.47      | --epoch 30 --avg 10 --use_averaged_model False |
+| fast beam search                    | 6.74       | 18.2       | --epoch 30 --avg 10 --use_averaged_model True  |
+| modified beam search                | 6.74       | 18.39      | --epoch 30 --avg 10 --use_averaged_model False |
+| modified beam search                | 6.74       | 18.12      | --epoch 30 --avg 10 --use_averaged_model True  |
+
+The training command is:
+
+```bash
+./pruned_transducer_stateless4/train.py \
+  --world-size 3 \
+  --num-epochs 30 \
+  --start-epoch 1 \
+  --exp-dir pruned_transducer_stateless4/exp \
+  --full-libri 0 \
+  --max-duration 300 \
+  --save-every-n 8000 \
+  --keep-last-k 20 \
+  --average-period 100
+```
+
+The tensorboard log can be found at
+<https://tensorboard.dev/experiment/YVYHq1irQS69s9bW1vQ06Q/>
+
 ### LibriSpeech BPE training results (Pruned Stateless Transducer 3, 2022-04-29)
 
 [pruned_transducer_stateless3](./pruned_transducer_stateless3)
@@ -96,14 +96,14 @@ def get_parser():
 
         - labels_xxx.h5
         - aux_labels_xxx.h5
-        - cuts_xxx.json.gz
+        - librispeech_cuts_xxx.jsonl.gz
 
     where xxx is the value of `--dataset`. For instance, if
     `--dataset` is `train-clean-100`, it will contain 3 files:
 
         - `labels_train-clean-100.h5`
         - `aux_labels_train-clean-100.h5`
-        - `cuts_train-clean-100.json.gz`
+        - `librispeech_cuts_train-clean-100.jsonl.gz`
 
     Note: Both labels_xxx.h5 and aux_labels_xxx.h5 contain framewise
     alignment. The difference is that labels_xxx.h5 contains repeats.
@@ -289,7 +289,9 @@ def main():
 
     out_labels_ali_filename = out_dir / f"labels_{params.dataset}.h5"
     out_aux_labels_ali_filename = out_dir / f"aux_labels_{params.dataset}.h5"
-    out_manifest_filename = out_dir / f"cuts_{params.dataset}.json.gz"
+    out_manifest_filename = (
+        out_dir / f"librispeech_cuts_{params.dataset}.jsonl.gz"
+    )
 
     for f in (
         out_labels_ali_filename,
@@ -17,6 +17,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+"""
+Usage:
+  export CUDA_VISIBLE_DEVICES="0,1,2,3"
+
+  ./conformer_ctc/train.py \
+    --exp-dir ./conformer_ctc/exp \
+    --world-size 4 \
+    --full-libri 1 \
+    --max-duration 200 \
+    --num-epochs 20
+"""
+
 import argparse
 import logging
 from pathlib import Path
@@ -29,6 +40,7 @@ import torch.multiprocessing as mp
 import torch.nn as nn
 from asr_datamodule import LibriSpeechAsrDataModule
 from conformer import Conformer
+from lhotse.cut import Cut
 from lhotse.utils import fix_random_seed
 from torch import Tensor
 from torch.nn.parallel import DistributedDataParallel as DDP
@@ -676,6 +688,20 @@ def run(rank, world_size, args):
     if params.full_libri:
         train_cuts += librispeech.train_clean_360_cuts()
         train_cuts += librispeech.train_other_500_cuts()
 
+    def remove_short_and_long_utt(c: Cut):
+        # Keep only utterances with duration between 1 second and 20 seconds.
+        #
+        # Caution: There is a reason to select 20.0 here. Please see
+        # ../local/display_manifest_statistics.py
+        #
+        # You should use ../local/display_manifest_statistics.py to get
+        # an utterance duration distribution for your dataset to select
+        # the threshold.
+        return 1.0 <= c.duration <= 20.0
+
+    train_cuts = train_cuts.filter(remove_short_and_long_utt)
+
     train_dl = librispeech.train_dataloaders(train_cuts)
 
     valid_cuts = librispeech.dev_clean_cuts()
egs/librispeech/ASR/distillation_with_hubert.sh (new file, 144 lines)
@@ -0,0 +1,144 @@
# A short introduction to the distillation framework.
#
# A typical traditional distillation method is
#   Loss(teacher embedding, student embedding).
#
# Compared to that, the proposed distillation framework contains two main steps:
#   codebook indexes = quantizer.encode(teacher embedding)
#   Loss(codebook indexes, student embedding)
#
# Things worth mentioning:
# 1. The float-type teacher embedding is quantized into a sequence of
#    8-bit integer codebook indexes.
# 2. A middle layer, 36 (1-based) out of 48 total layers, is used to extract
#    teacher embeddings.
# 3. A middle layer, 6 (1-based) out of 6 total layers, is used to extract
#    student embeddings.

# This is an example of distillation with the librispeech clean-100 subset.
# Run with the command:
#   bash distillation_with_hubert.sh [0|1|2|3|4]
#
# For example, the command
#   bash distillation_with_hubert.sh 0
# will download the hubert model.
stage=$1

# Set the GPUs available.
# This script requires at least one GPU.
# You MUST set the environment variable "CUDA_VISIBLE_DEVICES",
# even if you only have ONE GPU. It is needed by CodebookIndexExtractor
# to determine the number of jobs used to extract codebook indexes in parallel.

# Suppose only one GPU exists:
#   export CUDA_VISIBLE_DEVICES="0"
#
# Suppose GPUs 2,3,4,5 are available.
export CUDA_VISIBLE_DEVICES="2,3,4,5"


if [ $stage -eq 0 ]; then
  # Preparation stage.

  # Install fairseq according to:
  #   https://github.com/pytorch/fairseq
  # When testing this code,
  # commit 806855bf660ea748ed7ffb42fe8dcc881ca3aca0 was used.
  has_fairseq=$(python3 -c "import importlib; print(importlib.util.find_spec('fairseq') is not None)")
  if [ $has_fairseq == 'False' ]; then
    echo "Please install fairseq before running the following stages"
    exit 1
  fi

  # Install the quantization toolkit:
  #   pip install git+https://github.com/danpovey/quantization.git@master
  # When testing this code,
  # commit c17ffe67aa2e6ca6b6855c50fde812f2eed7870b was used.
  has_quantization=$(python3 -c "import importlib; print(importlib.util.find_spec('quantization') is not None)")
  if [ $has_quantization == 'False' ]; then
    echo "Please install quantization before running the following stages"
    exit 1
  fi

  echo "Download hubert model."
  # Parameters about the model.
  exp_dir=./pruned_transducer_stateless6/exp/
  model_id=hubert_xtralarge_ll60k_finetune_ls960
  hubert_model_dir=${exp_dir}/hubert_models
  hubert_model=${hubert_model_dir}/${model_id}.pt
  mkdir -p ${hubert_model_dir}
  # For more models, refer to: https://github.com/pytorch/fairseq/tree/main/examples/hubert
  if [ -f ${hubert_model} ]; then
    echo "hubert model already exists."
  else
    wget -c https://dl.fbaipublicfiles.com/hubert/${model_id}.pt -P ${hubert_model_dir}
    wget -c https://dl.fbaipublicfiles.com/fairseq/wav2vec/dict.ltr.txt -P ${hubert_model_dir}
  fi
fi

if [ ! -d ./data/fbank ]; then
  echo "This script assumes ./data/fbank is already generated by prepare.sh"
  exit 1
fi

if [ $stage -eq 1 ]; then
  # This stage is not directly used by codebook index extraction.
  # It is a way to "prove" that the downloaded hubert model is
  # inferenced correctly: the WERs should look normal.
  # Expected WERs:
  # [test-clean-ctc_greedy_search] %WER 2.04% [1075 / 52576, 92 ins, 104 del, 879 sub ]
  # [test-other-ctc_greedy_search] %WER 3.71% [1942 / 52343, 152 ins, 126 del, 1664 sub ]
  ./pruned_transducer_stateless6/hubert_decode.py
fi

if [ $stage -eq 2 ]; then
  # Analysis of disk usage:
  # With num_codebooks==8, each teacher embedding is quantized into
  # a sequence of eight 8-bit integers, i.e. only eight bytes are needed.
  # The training dataset, clean-100 with speed perturbation 0.9 and 1.1, has 300 hours.
  # The output frame rate of hubert is 50 frames per second.
  # Theoretically, 412M = 300 * 3600 * 50 * 8 / 1024 / 1024 is needed.
  # The actual size of all "*.h5" files storing codebook indexes is 450M;
  # the extra ~48M is presumably meta information.

  # Time consumption analysis:
  # For the quantizer's training data (teacher embeddings), only 1000 utts from clean-100 are used.
  # Together with quantizer training, this takes no more than 20 minutes.
  #
  # For codebook index extraction,
  # with two NVIDIA A100 GPUs, around three hours are needed to process the
  # 300 hours of training data, i.e. clean-100 with speed perturbation 0.9 and 1.1.

  # GPU usage:
  # During extraction of the quantizer's training data (teacher embeddings)
  # and during its training, only the first GPU is used.
  # During codebook index extraction, ALL GPUs set by CUDA_VISIBLE_DEVICES are used.
  ./pruned_transducer_stateless6/extract_codebook_index.py \
    --full-libri False
fi

if [ $stage -eq 3 ]; then
  # Example training script.
  # Note: it's better to set spec-aug-time-warp-factor=-1.
  WORLD_SIZE=$(echo ${CUDA_VISIBLE_DEVICES} | awk '{n=split($1, _, ","); print n}')
  ./pruned_transducer_stateless6/train.py \
    --manifest-dir ./data/vq_fbank \
    --master-port 12359 \
    --full-libri False \
    --spec-aug-time-warp-factor -1 \
    --max-duration 300 \
    --world-size ${WORLD_SIZE} \
    --num-epochs 20
fi

if [ $stage -eq 4 ]; then
  # Results should be similar to:
  # errs-test-clean-beam_size_4-epoch-20-avg-10-beam-4.txt: %WER = 5.67
  # errs-test-other-beam_size_4-epoch-20-avg-10-beam-4.txt: %WER = 15.60
  ./pruned_transducer_stateless6/decode.py \
    --decoding-method "modified_beam_search" \
    --epoch 20 \
    --avg 10 \
    --max-duration 200 \
    --exp-dir ./pruned_transducer_stateless6/exp
fi
@@ -20,11 +20,7 @@ import logging
 from pathlib import Path
 
 import torch
-from lhotse import (
-    CutSet,
-    KaldifeatFbank,
-    KaldifeatFbankConfig,
-)
+from lhotse import CutSet, KaldifeatFbank, KaldifeatFbankConfig
 
 # Torch's multithreaded behavior needs to be disabled or
 # it wastes a lot of CPU and slow things down.
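For reference, the disabling mentioned in that comment is conventionally a pair of calls like the following; the exact lines sit outside this hunk, so this is a sketch:

```python
import torch

# Limit the intra-op and inter-op thread pools to one thread each, so
# feature extraction workers do not oversubscribe the CPU.
torch.set_num_threads(1)
torch.set_num_interop_threads(1)
```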
@ -51,13 +47,16 @@ def compute_fbank_gigaspeech_dev_test():
|
|||||||
|
|
||||||
logging.info(f"device: {device}")
|
logging.info(f"device: {device}")
|
||||||
|
|
||||||
|
prefix = "gigaspeech"
|
||||||
|
suffix = "jsonl.gz"
|
||||||
|
|
||||||
for partition in subsets:
|
for partition in subsets:
|
||||||
cuts_path = in_out_dir / f"cuts_{partition}.jsonl.gz"
|
cuts_path = in_out_dir / f"{prefix}_cuts_{partition}.{suffix}"
|
||||||
if cuts_path.is_file():
|
if cuts_path.is_file():
|
||||||
logging.info(f"{cuts_path} exists - skipping")
|
logging.info(f"{cuts_path} exists - skipping")
|
||||||
continue
|
continue
|
||||||
|
|
||||||
raw_cuts_path = in_out_dir / f"cuts_{partition}_raw.jsonl.gz"
|
raw_cuts_path = in_out_dir / f"{prefix}_cuts_{partition}_raw.{suffix}"
|
||||||
|
|
||||||
logging.info(f"Loading {raw_cuts_path}")
|
logging.info(f"Loading {raw_cuts_path}")
|
||||||
cut_set = CutSet.from_file(raw_cuts_path)
|
cut_set = CutSet.from_file(raw_cuts_path)
|
||||||
@ -66,7 +65,7 @@ def compute_fbank_gigaspeech_dev_test():
|
|||||||
|
|
||||||
cut_set = cut_set.compute_and_store_features_batch(
|
cut_set = cut_set.compute_and_store_features_batch(
|
||||||
extractor=extractor,
|
extractor=extractor,
|
||||||
storage_path=f"{in_out_dir}/feats_{partition}",
|
storage_path=f"{in_out_dir}/{prefix}_feats_{partition}",
|
||||||
num_workers=num_workers,
|
num_workers=num_workers,
|
||||||
batch_duration=batch_duration,
|
batch_duration=batch_duration,
|
||||||
)
|
)
|
||||||
|
@ -77,7 +77,7 @@ def get_parser():
|
|||||||
|
|
||||||
def compute_fbank_gigaspeech_splits(args):
|
def compute_fbank_gigaspeech_splits(args):
|
||||||
num_splits = args.num_splits
|
num_splits = args.num_splits
|
||||||
output_dir = f"data/fbank/XL_split_{num_splits}"
|
output_dir = f"data/fbank/gigaspeech_XL_split_{num_splits}"
|
||||||
output_dir = Path(output_dir)
|
output_dir = Path(output_dir)
|
||||||
assert output_dir.exists(), f"{output_dir} does not exist!"
|
assert output_dir.exists(), f"{output_dir} does not exist!"
|
||||||
|
|
||||||
@ -96,17 +96,19 @@ def compute_fbank_gigaspeech_splits(args):
|
|||||||
extractor = KaldifeatFbank(KaldifeatFbankConfig(device=device))
|
extractor = KaldifeatFbank(KaldifeatFbankConfig(device=device))
|
||||||
logging.info(f"device: {device}")
|
logging.info(f"device: {device}")
|
||||||
|
|
||||||
|
prefix = "gigaspeech"
|
||||||
|
|
||||||
num_digits = 8 # num_digits is fixed by lhotse split-lazy
|
num_digits = 8 # num_digits is fixed by lhotse split-lazy
|
||||||
for i in range(start, stop):
|
for i in range(start, stop):
|
||||||
idx = f"{i + 1}".zfill(num_digits)
|
idx = f"{i + 1}".zfill(num_digits)
|
||||||
logging.info(f"Processing {idx}/{num_splits}")
|
logging.info(f"Processing {idx}/{num_splits}")
|
||||||
|
|
||||||
cuts_path = output_dir / f"cuts_XL.{idx}.jsonl.gz"
|
cuts_path = output_dir / f"{prefix}_cuts_XL.{idx}.jsonl.gz"
|
||||||
if cuts_path.is_file():
|
if cuts_path.is_file():
|
||||||
logging.info(f"{cuts_path} exists - skipping")
|
logging.info(f"{cuts_path} exists - skipping")
|
||||||
continue
|
continue
|
||||||
|
|
||||||
raw_cuts_path = output_dir / f"cuts_XL_raw.{idx}.jsonl.gz"
|
raw_cuts_path = output_dir / f"{prefix}_cuts_XL_raw.{idx}.jsonl.gz"
|
||||||
if not raw_cuts_path.is_file():
|
if not raw_cuts_path.is_file():
|
||||||
logging.info(f"{raw_cuts_path} does not exist - skipping it")
|
logging.info(f"{raw_cuts_path} does not exist - skipping it")
|
||||||
continue
|
continue
|
||||||
@ -115,13 +117,13 @@ def compute_fbank_gigaspeech_splits(args):
|
|||||||
cut_set = CutSet.from_file(raw_cuts_path)
|
cut_set = CutSet.from_file(raw_cuts_path)
|
||||||
|
|
||||||
logging.info("Computing features")
|
logging.info("Computing features")
|
||||||
if (output_dir / f"feats_XL_{idx}.lca").exists():
|
if (output_dir / f"{prefix}_feats_XL_{idx}.lca").exists():
|
||||||
logging.info(f"Removing {output_dir}/feats_XL_{idx}.lca")
|
logging.info(f"Removing {output_dir}/{prefix}_feats_XL_{idx}.lca")
|
||||||
os.remove(output_dir / f"feats_XL_{idx}.lca")
|
os.remove(output_dir / f"{prefix}_feats_XL_{idx}.lca")
|
||||||
|
|
||||||
cut_set = cut_set.compute_and_store_features_batch(
|
cut_set = cut_set.compute_and_store_features_batch(
|
||||||
extractor=extractor,
|
extractor=extractor,
|
||||||
storage_path=f"{output_dir}/feats_XL_{idx}",
|
storage_path=f"{output_dir}/{prefix}_feats_XL_{idx}",
|
||||||
num_workers=args.num_workers,
|
num_workers=args.num_workers,
|
||||||
batch_duration=args.batch_duration,
|
batch_duration=args.batch_duration,
|
||||||
)
|
)
|
||||||
@@ -28,7 +28,7 @@ import os
 from pathlib import Path
 
 import torch
-from lhotse import ChunkedLilcomHdf5Writer, CutSet, Fbank, FbankConfig
+from lhotse import CutSet, Fbank, FbankConfig, LilcomChunkyWriter
 from lhotse.recipes.utils import read_manifests_if_cached
 
 from icefall.utils import get_executor
@@ -56,8 +56,13 @@ def compute_fbank_librispeech():
         "train-clean-360",
         "train-other-500",
     )
+    prefix = "librispeech"
+    suffix = "jsonl.gz"
     manifests = read_manifests_if_cached(
-        prefix="librispeech", dataset_parts=dataset_parts, output_dir=src_dir
+        dataset_parts=dataset_parts,
+        output_dir=src_dir,
+        prefix=prefix,
+        suffix=suffix,
     )
     assert manifests is not None
 
@@ -65,7 +70,8 @@ def compute_fbank_librispeech():
 
     with get_executor() as ex:  # Initialize the executor only once.
         for partition, m in manifests.items():
-            if (output_dir / f"cuts_{partition}.json.gz").is_file():
+            cuts_filename = f"{prefix}_cuts_{partition}.{suffix}"
+            if (output_dir / cuts_filename).is_file():
                 logging.info(f"{partition} already exists - skipping.")
                 continue
             logging.info(f"Processing {partition}")
@@ -81,13 +87,13 @@ def compute_fbank_librispeech():
             )
             cut_set = cut_set.compute_and_store_features(
                 extractor=extractor,
-                storage_path=f"{output_dir}/feats_{partition}",
+                storage_path=f"{output_dir}/{prefix}_feats_{partition}",
                 # when an executor is specified, make more partitions
                 num_jobs=num_jobs if ex is None else 80,
                 executor=ex,
-                storage_type=ChunkedLilcomHdf5Writer,
+                storage_type=LilcomChunkyWriter,
             )
-            cut_set.to_json(output_dir / f"cuts_{partition}.json.gz")
+            cut_set.to_file(output_dir / cuts_filename)
 
 
 if __name__ == "__main__":
@@ -28,7 +28,7 @@ import os
 from pathlib import Path
 
 import torch
-from lhotse import ChunkedLilcomHdf5Writer, CutSet, Fbank, FbankConfig, combine
+from lhotse import CutSet, Fbank, FbankConfig, LilcomChunkyWriter, combine
 from lhotse.recipes.utils import read_manifests_if_cached
 
 from icefall.utils import get_executor
@@ -52,12 +52,22 @@ def compute_fbank_musan():
         "speech",
         "noise",
     )
+    prefix = "musan"
+    suffix = "jsonl.gz"
     manifests = read_manifests_if_cached(
-        prefix="musan", dataset_parts=dataset_parts, output_dir=src_dir
+        dataset_parts=dataset_parts,
+        output_dir=src_dir,
+        prefix=prefix,
+        suffix=suffix,
     )
     assert manifests is not None
 
-    musan_cuts_path = output_dir / "cuts_musan.json.gz"
+    assert len(manifests) == len(dataset_parts), (
+        len(manifests),
+        len(dataset_parts),
+    )
+
+    musan_cuts_path = output_dir / "musan_cuts.jsonl.gz"
 
     if musan_cuts_path.is_file():
         logging.info(f"{musan_cuts_path} already exists - skipping")
@@ -79,13 +89,13 @@ def compute_fbank_musan():
         .filter(lambda c: c.duration > 5)
         .compute_and_store_features(
             extractor=extractor,
-            storage_path=f"{output_dir}/feats_musan",
+            storage_path=f"{output_dir}/musan_feats",
             num_jobs=num_jobs if ex is None else 80,
             executor=ex,
-            storage_type=ChunkedLilcomHdf5Writer,
+            storage_type=LilcomChunkyWriter,
         )
     )
-    musan_cuts.to_json(musan_cuts_path)
+    musan_cuts.to_file(musan_cuts_path)
 
 
 if __name__ == "__main__":
@@ -25,19 +25,19 @@ for usage.
 """
 
 
-from lhotse import load_manifest
+from lhotse import load_manifest_lazy
 
 
 def main():
-    path = "./data/fbank/cuts_train-clean-100.json.gz"
-    path = "./data/fbank/cuts_train-clean-360.json.gz"
-    path = "./data/fbank/cuts_train-other-500.json.gz"
-    path = "./data/fbank/cuts_dev-clean.json.gz"
-    path = "./data/fbank/cuts_dev-other.json.gz"
-    path = "./data/fbank/cuts_test-clean.json.gz"
-    path = "./data/fbank/cuts_test-other.json.gz"
+    # path = "./data/fbank/librispeech_cuts_train-clean-100.jsonl.gz"
+    # path = "./data/fbank/librispeech_cuts_train-clean-360.jsonl.gz"
+    # path = "./data/fbank/librispeech_cuts_train-other-500.jsonl.gz"
+    # path = "./data/fbank/librispeech_cuts_dev-clean.jsonl.gz"
+    # path = "./data/fbank/librispeech_cuts_dev-other.jsonl.gz"
+    # path = "./data/fbank/librispeech_cuts_test-clean.jsonl.gz"
+    path = "./data/fbank/librispeech_cuts_test-other.jsonl.gz"
 
-    cuts = load_manifest(path)
+    cuts = load_manifest_lazy(path)
     cuts.describe()
 
@@ -58,17 +58,19 @@ def preprocess_giga_speech():
     )
 
     logging.info("Loading manifest (may take 4 minutes)")
+    prefix = "gigaspeech"
+    suffix = "jsonl.gz"
     manifests = read_manifests_if_cached(
         dataset_parts=dataset_parts,
         output_dir=src_dir,
-        prefix="gigaspeech",
-        suffix="jsonl.gz",
+        prefix=prefix,
+        suffix=suffix,
     )
     assert manifests is not None
 
     for partition, m in manifests.items():
         logging.info(f"Processing {partition}")
-        raw_cuts_path = output_dir / f"cuts_{partition}_raw.jsonl.gz"
+        raw_cuts_path = output_dir / f"{prefix}_cuts_{partition}_raw.{suffix}"
         if raw_cuts_path.is_file():
             logging.info(f"{partition} already exists - skipping")
             continue
@@ -25,7 +25,7 @@ We will add more checks later if needed.
 Usage example:
 
     python3 ./local/validate_manifest.py \
-            ./data/fbank/cuts_train-clean-100.json.gz
+            ./data/fbank/librispeech_cuts_train-clean-100.jsonl.gz
 
 """
 
@@ -33,7 +33,7 @@ import argparse
 import logging
 from pathlib import Path
 
-from lhotse import load_manifest, CutSet
+from lhotse import CutSet, load_manifest_lazy
 from lhotse.cut import Cut
 
 
@@ -76,7 +76,7 @@ def main():
     logging.info(f"Validating {manifest}")
 
     assert manifest.is_file(), f"{manifest} does not exist"
-    cut_set = load_manifest(manifest)
+    cut_set = load_manifest_lazy(manifest)
     assert isinstance(cut_set, CutSet)
 
     for c in cut_set:
@@ -40,9 +40,9 @@ dl_dir=$PWD/download
 # It will generate data/lang_bpe_xxx,
 # data/lang_bpe_yyy if the array contains xxx, yyy
 vocab_sizes=(
-  5000
-  2000
-  1000
+  # 5000
+  # 2000
+  # 1000
   500
 )
 
@@ -132,7 +132,7 @@ if [ $stage -le 3 ] && [ $stop_stage -ge 3 ]; then
   )
   for part in ${parts[@]}; do
     python3 ./local/validate_manifest.py \
-      data/fbank/cuts_${part}.json.gz
+      data/fbank/librispeech_cuts_${part}.jsonl.gz
   done
   touch data/fbank/.librispeech-validated.done
 fi
@@ -124,9 +124,9 @@ fi
 
 if [ $stage -le 4 ] && [ $stop_stage -ge 4 ]; then
   log "Stage 4: Split XL subset into ${num_splits} pieces"
-  split_dir=data/fbank/XL_split_${num_splits}
+  split_dir=data/fbank/gigaspeech_XL_split_${num_splits}
   if [ ! -f $split_dir/.split_completed ]; then
-    lhotse split-lazy ./data/fbank/cuts_XL_raw.jsonl.gz $split_dir $chunk_size
+    lhotse split-lazy ./data/fbank/gigaspeech_cuts_XL_raw.jsonl.gz $split_dir $chunk_size
     touch $split_dir/.split_completed
   fi
 fi
egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/asr_datamodule.py (new symbolic link)
@@ -0,0 +1 @@
../pruned_transducer_stateless/asr_datamodule.py

egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/beam_search.py (new symbolic link)
@@ -0,0 +1 @@
../pruned_transducer_stateless/beam_search.py

egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/decode.py (new executable file, 645 lines)
@ -0,0 +1,645 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
#
|
||||||
|
# Copyright 2021-2022 Xiaomi Corporation (Author: Fangjun Kuang,
|
||||||
|
# Zengwei Yao)
|
||||||
|
#
|
||||||
|
# See ../../../../LICENSE for clarification regarding multiple authors
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
"""
|
||||||
|
Usage:
|
||||||
|
(1) greedy search
|
||||||
|
./pruned_stateless_emformer_rnnt/decode.py \
|
||||||
|
--epoch 30 \
|
||||||
|
--avg 15 \
|
||||||
|
--exp-dir ./pruned_stateless_emformer_rnnt/exp \
|
||||||
|
--max-duration 600 \
|
||||||
|
--decoding-method greedy_search
|
||||||
|
|
||||||
|
(2) beam search (not recommended)
|
||||||
|
./pruned_stateless_emformer_rnnt/decode.py \
|
||||||
|
--epoch 30 \
|
||||||
|
--avg 15 \
|
||||||
|
--exp-dir ./pruned_stateless_emformer_rnnt/exp \
|
||||||
|
--max-duration 600 \
|
||||||
|
--decoding-method beam_search \
|
||||||
|
--beam-size 4
|
||||||
|
|
||||||
|
(3) modified beam search
|
||||||
|
./pruned_stateless_emformer_rnnt/decode.py \
|
||||||
|
--epoch 30 \
|
||||||
|
--avg 15 \
|
||||||
|
--exp-dir ./pruned_stateless_emformer_rnnt/exp \
|
||||||
|
--max-duration 600 \
|
||||||
|
--decoding-method modified_beam_search \
|
||||||
|
--beam-size 4
|
||||||
|
|
||||||
|
(4) fast beam search
|
||||||
|
./pruned_stateless_emformer_rnnt/decode.py \
|
||||||
|
--epoch 30 \
|
||||||
|
--avg 15 \
|
||||||
|
--exp-dir ./pruned_stateless_emformer_rnnt/exp \
|
||||||
|
--max-duration 600 \
|
||||||
|
--decoding-method fast_beam_search \
|
||||||
|
--beam 4 \
|
||||||
|
--max-contexts 4 \
|
||||||
|
--max-states 8
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import logging
|
||||||
|
import math
|
||||||
|
from collections import defaultdict
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Dict, List, Optional, Tuple
|
||||||
|
|
||||||
|
import k2
|
||||||
|
import sentencepiece as spm
|
||||||
|
import torch
|
||||||
|
import torch.nn as nn
|
||||||
|
from asr_datamodule import LibriSpeechAsrDataModule
|
||||||
|
from beam_search import (
|
||||||
|
beam_search,
|
||||||
|
fast_beam_search_one_best,
|
||||||
|
greedy_search,
|
||||||
|
greedy_search_batch,
|
||||||
|
modified_beam_search,
|
||||||
|
)
|
||||||
|
from train import add_model_arguments, get_params, get_transducer_model
|
||||||
|
|
||||||
|
from icefall.checkpoint import (
|
||||||
|
average_checkpoints,
|
||||||
|
average_checkpoints_with_averaged_model,
|
||||||
|
find_checkpoints,
|
||||||
|
load_checkpoint,
|
||||||
|
)
|
||||||
|
from icefall.utils import (
|
||||||
|
AttributeDict,
|
||||||
|
setup_logger,
|
||||||
|
store_transcripts,
|
||||||
|
str2bool,
|
||||||
|
write_error_stats,
|
||||||
|
)
|
||||||
|
|
||||||
|
LOG_EPS = math.log(1e-10)
|
||||||
|
|
||||||
|
|
||||||
|
def get_parser():
|
||||||
|
parser = argparse.ArgumentParser(
|
||||||
|
formatter_class=argparse.ArgumentDefaultsHelpFormatter
|
||||||
|
)
|
||||||
|
|
||||||
|
parser.add_argument(
|
||||||
|
"--epoch",
|
||||||
|
type=int,
|
||||||
|
default=30,
|
||||||
|
help="""It specifies the checkpoint to use for decoding.
|
||||||
|
Note: Epoch counts from 1.
|
||||||
|
You can specify --avg to use more checkpoints for model averaging.""",
|
||||||
|
)
|
||||||
|
|
||||||
|
parser.add_argument(
|
||||||
|
"--iter",
|
||||||
|
type=int,
|
||||||
|
default=0,
|
||||||
|
help="""If positive, --epoch is ignored and it
|
||||||
|
will use the checkpoint exp_dir/checkpoint-iter.pt.
|
||||||
|
You can specify --avg to use more checkpoints for model averaging.
|
||||||
|
""",
|
||||||
|
)
|
||||||
|
|
||||||
|
parser.add_argument(
|
||||||
|
"--avg",
|
||||||
|
type=int,
|
||||||
|
default=15,
|
||||||
|
help="Number of checkpoints to average. Automatically select "
|
||||||
|
"consecutive checkpoints before the checkpoint specified by "
|
||||||
|
"'--epoch' and '--iter'",
|
||||||
|
)
|
||||||
|
|
||||||
|
parser.add_argument(
|
||||||
|
"--use-averaged-model",
|
||||||
|
type=str2bool,
|
||||||
|
default=False,
|
||||||
|
help="Whether to load averaged model. Currently it only supports "
|
||||||
|
"using --epoch. If True, it would decode with the averaged model "
|
||||||
|
"over the epoch range from `epoch-avg` (excluded) to `epoch`."
|
||||||
|
"Actually only the models with epoch number of `epoch-avg` and "
|
||||||
|
"`epoch` are loaded for averaging. ",
|
||||||
|
)
|
||||||
|
|
||||||
|
parser.add_argument(
|
||||||
|
"--exp-dir",
|
||||||
|
type=str,
|
||||||
|
default="pruned_stateless_emformer_rnnt/exp",
|
||||||
|
help="The experiment dir",
|
||||||
|
)
|
||||||
|
|
||||||
|
parser.add_argument(
|
||||||
|
"--bpe-model",
|
||||||
|
type=str,
|
||||||
|
default="data/lang_bpe_500/bpe.model",
|
||||||
|
help="Path to the BPE model",
|
||||||
|
)
|
||||||
|
|
||||||
|
parser.add_argument(
|
||||||
|
"--decoding-method",
|
||||||
|
type=str,
|
||||||
|
default="greedy_search",
|
||||||
|
help="""Possible values are:
|
||||||
|
- greedy_search
|
||||||
|
- beam_search
|
||||||
|
- modified_beam_search
|
||||||
|
- fast_beam_search
|
||||||
|
""",
|
||||||
|
)
|
||||||
|
|
||||||
|
parser.add_argument(
|
||||||
|
"--beam-size",
|
||||||
|
type=int,
|
||||||
|
default=4,
|
||||||
|
help="""An integer indicating how many candidates we will keep for each
|
||||||
|
frame. Used only when --decoding-method is beam_search or
|
||||||
|
modified_beam_search.""",
|
||||||
|
)
|
||||||
|
|
||||||
|
parser.add_argument(
|
||||||
|
"--beam",
|
||||||
|
type=float,
|
||||||
|
default=4,
|
||||||
|
help="""A floating point value to calculate the cutoff score during beam
|
||||||
|
search (i.e., `cutoff = max-score - beam`), which is the same as the
|
||||||
|
`beam` in Kaldi.
|
||||||
|
Used only when --decoding-method is fast_beam_search""",
|
||||||
|
)
|
||||||
|
|
||||||
|
parser.add_argument(
|
||||||
|
"--max-contexts",
|
||||||
|
type=int,
|
||||||
|
default=4,
|
||||||
|
help="""Used only when --decoding-method is
|
||||||
|
fast_beam_search""",
|
||||||
|
)
|
||||||
|
|
||||||
|
parser.add_argument(
|
||||||
|
"--max-states",
|
||||||
|
type=int,
|
||||||
|
default=8,
|
||||||
|
help="""Used only when --decoding-method is
|
||||||
|
fast_beam_search""",
|
||||||
|
)
|
||||||
|
|
||||||
|
parser.add_argument(
|
||||||
|
"--context-size",
|
||||||
|
type=int,
|
||||||
|
default=2,
|
||||||
|
help="The context size in the decoder. 1 means bigram; "
|
||||||
|
"2 means tri-gram",
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--max-sym-per-frame",
|
||||||
|
type=int,
|
||||||
|
default=1,
|
||||||
|
help="""Maximum number of symbols per frame.
|
||||||
|
Used only when --decoding_method is greedy_search""",
|
||||||
|
)
|
||||||
|
|
||||||
|
add_model_arguments(parser)
|
||||||
|
|
||||||
|
return parser
|
||||||
|
|
||||||
|
|
||||||
|
def decode_one_batch(
|
||||||
|
params: AttributeDict,
|
||||||
|
model: nn.Module,
|
||||||
|
sp: spm.SentencePieceProcessor,
|
||||||
|
batch: dict,
|
||||||
|
decoding_graph: Optional[k2.Fsa] = None,
|
||||||
|
) -> Dict[str, List[List[str]]]:
|
||||||
|
"""Decode one batch and return the result in a dict. The dict has the
|
||||||
|
following format:
|
||||||
|
|
||||||
|
- key: It indicates the setting used for decoding. For example,
|
||||||
|
if greedy_search is used, it would be "greedy_search"
|
||||||
|
If beam search with a beam size of 7 is used, it would be
|
||||||
|
"beam_7"
|
||||||
|
- value: It contains the decoding result. `len(value)` equals to
|
||||||
|
batch size. `value[i]` is the decoding result for the i-th
|
||||||
|
utterance in the given batch.
|
||||||
|
Args:
|
||||||
|
params:
|
||||||
|
It's the return value of :func:`get_params`.
|
||||||
|
model:
|
||||||
|
The neural model.
|
||||||
|
sp:
|
||||||
|
The BPE model.
|
||||||
|
batch:
|
||||||
|
It is the return value from iterating
|
||||||
|
`lhotse.dataset.K2SpeechRecognitionDataset`. See its documentation
|
||||||
|
for the format of the `batch`.
|
||||||
|
decoding_graph:
|
||||||
|
The decoding graph. Can be either a `k2.trivial_graph` or HLG, Used
|
||||||
|
only when --decoding_method is fast_beam_search.
|
||||||
|
Returns:
|
||||||
|
Return the decoding result. See above description for the format of
|
||||||
|
the returned dict.
|
||||||
|
"""
|
||||||
|
device = next(model.parameters()).device
|
||||||
|
feature = batch["inputs"]
|
||||||
|
assert feature.ndim == 3
|
||||||
|
|
||||||
|
feature = feature.to(device)
|
||||||
|
# at entry, feature is (N, T, C)
|
||||||
|
|
||||||
|
supervisions = batch["supervisions"]
|
||||||
|
feature_lens = supervisions["num_frames"].to(device)
|
||||||
|
|
||||||
|
feature_lens += params.left_context_length
|
||||||
|
feature = torch.nn.functional.pad(
|
||||||
|
feature,
|
||||||
|
pad=(0, 0, 0, params.left_context_length),
|
||||||
|
value=LOG_EPS,
|
||||||
|
)
|
||||||
|
|
||||||
|
encoder_out, encoder_out_lens = model.encoder(
|
||||||
|
x=feature, x_lens=feature_lens
|
||||||
|
)
|
||||||
|
hyps = []
|
||||||
|
|
||||||
|
if params.decoding_method == "fast_beam_search":
|
||||||
|
hyp_tokens = fast_beam_search_one_best(
|
||||||
|
model=model,
|
||||||
|
decoding_graph=decoding_graph,
|
||||||
|
encoder_out=encoder_out,
|
||||||
|
encoder_out_lens=encoder_out_lens,
|
||||||
|
beam=params.beam,
|
||||||
|
max_contexts=params.max_contexts,
|
||||||
|
max_states=params.max_states,
|
||||||
|
)
|
||||||
|
for hyp in sp.decode(hyp_tokens):
|
||||||
|
hyps.append(hyp.split())
|
||||||
|
elif (
|
||||||
|
params.decoding_method == "greedy_search"
|
||||||
|
and params.max_sym_per_frame == 1
|
||||||
|
):
|
||||||
|
hyp_tokens = greedy_search_batch(
|
||||||
|
model=model,
|
||||||
|
encoder_out=encoder_out,
|
||||||
|
encoder_out_lens=encoder_out_lens,
|
||||||
|
)
|
||||||
|
for hyp in sp.decode(hyp_tokens):
|
||||||
|
hyps.append(hyp.split())
|
||||||
|
elif params.decoding_method == "modified_beam_search":
|
||||||
|
hyp_tokens = modified_beam_search(
|
||||||
|
model=model,
|
||||||
|
encoder_out=encoder_out,
|
||||||
|
encoder_out_lens=encoder_out_lens,
|
||||||
|
beam=params.beam_size,
|
||||||
|
)
|
||||||
|
for hyp in sp.decode(hyp_tokens):
|
||||||
|
hyps.append(hyp.split())
|
||||||
|
else:
|
||||||
|
batch_size = encoder_out.size(0)
|
||||||
|
|
||||||
|
for i in range(batch_size):
|
||||||
|
# fmt: off
|
||||||
|
encoder_out_i = encoder_out[i:i + 1, :encoder_out_lens[i]]
|
||||||
|
# fmt: on
|
||||||
|
if params.decoding_method == "greedy_search":
|
||||||
|
hyp = greedy_search(
|
||||||
|
model=model,
|
||||||
|
encoder_out=encoder_out_i,
|
||||||
|
max_sym_per_frame=params.max_sym_per_frame,
|
||||||
|
)
|
||||||
|
elif params.decoding_method == "beam_search":
|
||||||
|
hyp = beam_search(
|
||||||
|
model=model,
|
||||||
|
encoder_out=encoder_out_i,
|
||||||
|
beam=params.beam_size,
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
raise ValueError(
|
||||||
|
f"Unsupported decoding method: {params.decoding_method}"
|
||||||
|
)
|
||||||
|
hyps.append(sp.decode(hyp).split())
|
||||||
|
|
||||||
|
if params.decoding_method == "greedy_search":
|
||||||
|
return {"greedy_search": hyps}
|
||||||
|
elif params.decoding_method == "fast_beam_search":
|
||||||
|
return {
|
||||||
|
(
|
||||||
|
f"beam_{params.beam}_"
|
||||||
|
f"max_contexts_{params.max_contexts}_"
|
||||||
|
f"max_states_{params.max_states}"
|
||||||
|
): hyps
|
||||||
|
}
|
||||||
|
else:
|
||||||
|
return {f"beam_size_{params.beam_size}": hyps}
|
||||||
|
|
||||||
|
|
||||||
|
def decode_dataset(
|
||||||
|
dl: torch.utils.data.DataLoader,
|
||||||
|
params: AttributeDict,
|
||||||
|
model: nn.Module,
|
||||||
|
sp: spm.SentencePieceProcessor,
|
||||||
|
decoding_graph: Optional[k2.Fsa] = None,
|
||||||
|
) -> Dict[str, List[Tuple[List[str], List[str]]]]:
|
||||||
|
"""Decode dataset.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
dl:
|
||||||
|
PyTorch's dataloader containing the dataset to decode.
|
||||||
|
params:
|
||||||
|
It is returned by :func:`get_params`.
|
||||||
|
model:
|
||||||
|
The neural model.
|
||||||
|
sp:
|
||||||
|
The BPE model.
|
||||||
|
decoding_graph:
|
||||||
|
The decoding graph. Can be either a `k2.trivial_graph` or HLG, Used
|
||||||
|
only when --decoding_method is fast_beam_search.
|
||||||
|
Returns:
|
||||||
|
Return a dict, whose key may be "greedy_search" if greedy search
|
||||||
|
is used, or it may be "beam_7" if beam size of 7 is used.
|
||||||
|
Its value is a list of tuples. Each tuple contains two elements:
|
||||||
|
The first is the reference transcript, and the second is the
|
||||||
|
predicted result.
|
||||||
|
"""
|
||||||
|
num_cuts = 0
|
||||||
|
|
||||||
|
try:
|
||||||
|
num_batches = len(dl)
|
||||||
|
except TypeError:
|
||||||
|
num_batches = "?"
|
||||||
|
|
||||||
|
if params.decoding_method == "greedy_search":
|
||||||
|
log_interval = 50
|
||||||
|
else:
|
||||||
|
log_interval = 10
|
||||||
|
|
||||||
|
results = defaultdict(list)
|
||||||
|
for batch_idx, batch in enumerate(dl):
|
||||||
|
texts = batch["supervisions"]["text"]
|
||||||
|
|
||||||
|
hyps_dict = decode_one_batch(
|
||||||
|
params=params,
|
||||||
|
model=model,
|
||||||
|
sp=sp,
|
||||||
|
decoding_graph=decoding_graph,
|
||||||
|
batch=batch,
|
||||||
|
)
|
||||||
|
|
||||||
|
for name, hyps in hyps_dict.items():
|
||||||
|
this_batch = []
|
||||||
|
assert len(hyps) == len(texts)
|
||||||
|
for hyp_words, ref_text in zip(hyps, texts):
|
||||||
|
ref_words = ref_text.split()
|
||||||
|
this_batch.append((ref_words, hyp_words))
|
||||||
|
|
||||||
|
results[name].extend(this_batch)
|
||||||
|
|
||||||
|
num_cuts += len(texts)
|
||||||
|
|
||||||
|
if batch_idx % log_interval == 0:
|
||||||
|
batch_str = f"{batch_idx}/{num_batches}"
|
||||||
|
|
||||||
|
logging.info(
|
||||||
|
f"batch {batch_str}, cuts processed until now is {num_cuts}"
|
||||||
|
)
|
||||||
|
return results
|
||||||
|
|
||||||
|
|
||||||
|
def save_results(
|
||||||
|
params: AttributeDict,
|
||||||
|
test_set_name: str,
|
||||||
|
results_dict: Dict[str, List[Tuple[List[int], List[int]]]],
|
||||||
|
):
|
||||||
|
test_set_wers = dict()
|
||||||
|
for key, results in results_dict.items():
|
||||||
|
recog_path = (
|
||||||
|
params.res_dir / f"recogs-{test_set_name}-{key}-{params.suffix}.txt"
|
||||||
|
)
|
||||||
|
store_transcripts(filename=recog_path, texts=results)
|
||||||
|
logging.info(f"The transcripts are stored in {recog_path}")
|
||||||
|
|
||||||
|
# The following prints out WERs, per-word error statistics and aligned
|
||||||
|
# ref/hyp pairs.
|
||||||
|
errs_filename = (
|
||||||
|
params.res_dir / f"errs-{test_set_name}-{key}-{params.suffix}.txt"
|
||||||
|
)
|
||||||
|
with open(errs_filename, "w") as f:
|
||||||
|
wer = write_error_stats(
|
||||||
|
f, f"{test_set_name}-{key}", results, enable_log=True
|
||||||
|
)
|
||||||
|
test_set_wers[key] = wer
|
||||||
|
|
||||||
|
logging.info("Wrote detailed error stats to {}".format(errs_filename))
|
||||||
|
|
||||||
|
test_set_wers = sorted(test_set_wers.items(), key=lambda x: x[1])
|
||||||
|
errs_info = (
|
||||||
|
params.res_dir
|
||||||
|
/ f"wer-summary-{test_set_name}-{key}-{params.suffix}.txt"
|
||||||
|
)
|
||||||
|
with open(errs_info, "w") as f:
|
||||||
|
print("settings\tWER", file=f)
|
||||||
|
for key, val in test_set_wers:
|
||||||
|
print("{}\t{}".format(key, val), file=f)
|
||||||
|
|
||||||
|
s = "\nFor {}, WER of different settings are:\n".format(test_set_name)
|
||||||
|
note = "\tbest for {}".format(test_set_name)
|
||||||
|
for key, val in test_set_wers:
|
||||||
|
s += "{}\t{}{}\n".format(key, val, note)
|
||||||
|
note = ""
|
||||||
|
logging.info(s)
|
||||||
|
|
||||||
|
|
||||||
|
@torch.no_grad()
|
||||||
|
def main():
|
||||||
|
parser = get_parser()
|
||||||
|
LibriSpeechAsrDataModule.add_arguments(parser)
|
||||||
|
args = parser.parse_args()
|
||||||
|
args.exp_dir = Path(args.exp_dir)
|
||||||
|
|
||||||
|
params = get_params()
|
||||||
|
params.update(vars(args))
|
||||||
|
|
||||||
|
assert params.decoding_method in (
|
||||||
|
"greedy_search",
|
||||||
|
"beam_search",
|
||||||
|
"fast_beam_search",
|
||||||
|
"modified_beam_search",
|
||||||
|
)
|
||||||
|
params.res_dir = params.exp_dir / params.decoding_method
|
||||||
|
|
||||||
|
if params.iter > 0:
|
||||||
|
params.suffix = f"iter-{params.iter}-avg-{params.avg}"
|
||||||
|
else:
|
||||||
|
params.suffix = f"epoch-{params.epoch}-avg-{params.avg}"
|
||||||
|
|
||||||
|
if "fast_beam_search" in params.decoding_method:
|
||||||
|
params.suffix += f"-beam-{params.beam}"
|
||||||
|
params.suffix += f"-max-contexts-{params.max_contexts}"
|
||||||
|
params.suffix += f"-max-states-{params.max_states}"
|
||||||
|
elif "beam_search" in params.decoding_method:
|
||||||
|
params.suffix += (
|
||||||
|
f"-{params.decoding_method}-beam-size-{params.beam_size}"
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
params.suffix += f"-context-{params.context_size}"
|
||||||
|
params.suffix += f"-max-sym-per-frame-{params.max_sym_per_frame}"
|
||||||
|
|
||||||
|
if params.use_averaged_model:
|
||||||
|
params.suffix += "-use-averaged-model"
|
||||||
|
|
||||||
|
setup_logger(f"{params.res_dir}/log-decode-{params.suffix}")
|
||||||
|
logging.info("Decoding started")
|
||||||
|
|
||||||
|
device = torch.device("cpu")
|
||||||
|
if torch.cuda.is_available():
|
||||||
|
device = torch.device("cuda", 0)
|
||||||
|
|
||||||
|
logging.info(f"Device: {device}")
|
||||||
|
|
||||||
|
sp = spm.SentencePieceProcessor()
|
||||||
|
sp.load(params.bpe_model)
|
||||||
|
|
||||||
|
# <blk> and <unk> are defined in local/train_bpe_model.py
|
||||||
|
params.blank_id = sp.piece_to_id("<blk>")
|
||||||
|
params.unk_id = sp.piece_to_id("<unk>")
|
||||||
|
params.vocab_size = sp.get_piece_size()
|
||||||
|
|
||||||
|
logging.info(params)
|
||||||
|
|
||||||
|
logging.info("About to create model")
|
||||||
|
model = get_transducer_model(params)
|
||||||
|
|
||||||
|
if not params.use_averaged_model:
|
||||||
|
if params.iter > 0:
|
||||||
|
filenames = find_checkpoints(
|
||||||
|
params.exp_dir, iteration=-params.iter
|
||||||
|
)[: params.avg]
|
||||||
|
if len(filenames) == 0:
|
||||||
|
raise ValueError(
|
||||||
|
f"No checkpoints found for"
|
||||||
|
f" --iter {params.iter}, --avg {params.avg}"
|
||||||
|
)
|
||||||
|
elif len(filenames) < params.avg:
|
||||||
|
raise ValueError(
|
||||||
|
f"Not enough checkpoints ({len(filenames)}) found for"
|
||||||
|
f" --iter {params.iter}, --avg {params.avg}"
|
||||||
|
)
|
||||||
|
logging.info(f"averaging {filenames}")
|
||||||
|
model.to(device)
|
||||||
|
model.load_state_dict(average_checkpoints(filenames, device=device))
|
||||||
|
elif params.avg == 1:
|
||||||
|
load_checkpoint(f"{params.exp_dir}/epoch-{params.epoch}.pt", model)
|
||||||
|
else:
|
||||||
|
start = params.epoch - params.avg + 1
|
||||||
|
filenames = []
|
||||||
|
for i in range(start, params.epoch + 1):
|
||||||
|
if i >= 1:
|
||||||
|
filenames.append(f"{params.exp_dir}/epoch-{i}.pt")
|
||||||
|
logging.info(f"averaging {filenames}")
|
||||||
|
model.to(device)
|
||||||
|
model.load_state_dict(average_checkpoints(filenames, device=device))
|
||||||
|
else:
|
||||||
|
if params.iter > 0:
|
||||||
|
filenames = find_checkpoints(
|
||||||
|
params.exp_dir, iteration=-params.iter
|
||||||
|
)[: params.avg + 1]
|
||||||
|
if len(filenames) == 0:
|
||||||
|
raise ValueError(
|
||||||
|
f"No checkpoints found for"
|
||||||
|
f" --iter {params.iter}, --avg {params.avg}"
|
||||||
|
)
|
||||||
|
elif len(filenames) < params.avg + 1:
|
||||||
|
raise ValueError(
|
||||||
|
f"Not enough checkpoints ({len(filenames)}) found for"
|
||||||
|
f" --iter {params.iter}, --avg {params.avg}"
|
||||||
|
)
|
||||||
|
filename_start = filenames[-1]
|
||||||
|
filename_end = filenames[0]
|
||||||
|
logging.info(
|
||||||
|
"Calculating the averaged model over iteration checkpoints"
|
||||||
|
f" from {filename_start} (excluded) to {filename_end}"
|
||||||
|
)
|
||||||
|
model.to(device)
|
||||||
|
model.load_state_dict(
|
||||||
|
average_checkpoints_with_averaged_model(
|
||||||
|
filename_start=filename_start,
|
||||||
|
filename_end=filename_end,
|
||||||
|
device=device,
|
||||||
|
)
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
assert params.avg > 0, params.avg
|
||||||
|
start = params.epoch - params.avg
|
||||||
|
assert start >= 1, start
|
||||||
|
filename_start = f"{params.exp_dir}/epoch-{start}.pt"
|
||||||
|
filename_end = f"{params.exp_dir}/epoch-{params.epoch}.pt"
|
||||||
|
logging.info(
|
||||||
|
f"Calculating the averaged model over epoch range from "
|
||||||
|
f"{start} (excluded) to {params.epoch}"
|
||||||
|
)
|
||||||
|
model.to(device)
|
||||||
|
model.load_state_dict(
|
||||||
|
average_checkpoints_with_averaged_model(
|
||||||
|
filename_start=filename_start,
|
||||||
|
filename_end=filename_end,
|
||||||
|
device=device,
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
|
model.to(device)
|
||||||
|
model.eval()
|
||||||
|
|
||||||
|
if params.decoding_method == "fast_beam_search":
|
||||||
|
decoding_graph = k2.trivial_graph(params.vocab_size - 1, device=device)
|
||||||
|
else:
|
||||||
|
decoding_graph = None
|
||||||
|
|
||||||
|
num_param = sum([p.numel() for p in model.parameters()])
|
||||||
|
logging.info(f"Number of model parameters: {num_param}")
|
||||||
|
|
||||||
|
librispeech = LibriSpeechAsrDataModule(args)
|
||||||
|
|
||||||
|
test_clean_cuts = librispeech.test_clean_cuts()
|
||||||
|
test_other_cuts = librispeech.test_other_cuts()
|
||||||
|
|
||||||
|
test_clean_dl = librispeech.test_dataloaders(test_clean_cuts)
|
||||||
|
test_other_dl = librispeech.test_dataloaders(test_other_cuts)
|
||||||
|
|
||||||
|
test_sets = ["test-clean", "test-other"]
|
||||||
|
test_dl = [test_clean_dl, test_other_dl]
|
||||||
|
|
||||||
|
for test_set, test_dl in zip(test_sets, test_dl):
|
||||||
|
results_dict = decode_dataset(
|
||||||
|
dl=test_dl,
|
||||||
|
params=params,
|
||||||
|
model=model,
|
||||||
|
sp=sp,
|
||||||
|
decoding_graph=decoding_graph,
|
||||||
|
)
|
||||||
|
|
||||||
|
save_results(
|
||||||
|
params=params,
|
||||||
|
test_set_name=test_set,
|
||||||
|
results_dict=results_dict,
|
||||||
|
)
|
||||||
|
|
||||||
|
logging.info("Done!")
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
main()
|
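For reference, the epoch-range averaging invoked above can be read as a weighted difference of two running parameter averages. A minimal sketch of the idea (an illustration only, assuming each `epoch-*.pt` checkpoint stores a running average under "model_avg" and the global batch count under "batch_idx_train"; it is not the library function itself):

import torch


def average_over_range(filename_start: str, filename_end: str) -> dict:
    """Sketch: recover the parameter average over the range (start, end]."""
    ckpt_start = torch.load(filename_start, map_location="cpu")
    ckpt_end = torch.load(filename_end, map_location="cpu")

    n_start = ckpt_start["batch_idx_train"]
    n_end = ckpt_end["batch_idx_train"]

    avg = {}
    for name, p_end in ckpt_end["model_avg"].items():
        p_start = ckpt_start["model_avg"][name]
        # avg = (n_end * avg_end - n_start * avg_start) / (n_end - n_start)
        avg[name] = (p_end * n_end - p_start * n_start) / (n_end - n_start)
    return avg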
1 egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/decoder.py Symbolic link
@@ -0,0 +1 @@
../pruned_transducer_stateless/decoder.py
315 egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/emformer.py Normal file
@@ -0,0 +1,315 @@
# Copyright 2022 Xiaomi Corporation (Author: Mingshuang Luo)
#
# See ../../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
from typing import List, Optional, Tuple

import torch
import torch.nn as nn
from encoder_interface import EncoderInterface
from subsampling import Conv2dSubsampling, VggSubsampling

try:
    from torchaudio.models import Emformer as _Emformer
except ImportError:
    import torchaudio

    print(
        "Please install torchaudio >= 0.11.0. "
        f"Current version: {torchaudio.__version__}"
    )
    raise


def unstack_states(
    states: List[List[torch.Tensor]],
) -> List[List[List[torch.Tensor]]]:
"""Unstack the emformer state corresponding to a batch of utterances
|
||||||
|
into a list of states, were the i-th entry is the state from the i-th
|
||||||
|
utterance in the batch.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
states:
|
||||||
|
A list-of-list of tensors. ``len(states)`` equals to number of
|
||||||
|
layers in the emformer. ``states[i]]`` contains the states for
|
||||||
|
the i-th layer. ``states[i][k]`` is either a 3-D tensor of shape
|
||||||
|
``(T, N, C)`` or a 2-D tensor of shape ``(C, N)``
|
||||||
|
"""
    batch_size = states[0][0].size(1)
    num_layers = len(states)

    ans = [None] * batch_size
    for i in range(batch_size):
        ans[i] = [[] for _ in range(num_layers)]

    for li, layer in enumerate(states):
        for s in layer:
            s_list = s.unbind(dim=1)
            # We will use stack(dim=1) later in stack_states()
            for bi, b in enumerate(ans):
                b[li].append(s_list[bi])
    return ans


def stack_states(
    state_list: List[List[List[torch.Tensor]]],
) -> List[List[torch.Tensor]]:
    """Stack list of emformer states that correspond to separate utterances
    into a single emformer state so that it can be used as an input for
    emformer when those utterances are formed into a batch.

    Note:
      It is the inverse of :func:`unstack_states`.

    Args:
      state_list:
        Each element in state_list corresponds to the internal state
        of the emformer model for a single utterance.
    Returns:
      Return a new state corresponding to a batch of utterances.
      See the input argument of :func:`unstack_states` for the meaning
      of the returned tensor.
    """
    batch_size = len(state_list)
    ans = []
    for layer in state_list[0]:
        # layer is a list of tensors
        if batch_size > 1:
            ans.append([[s] for s in layer])
            # Note: We will stack ans[layer][s][] later to get ans[layer][s]
        else:
            ans.append([s.unsqueeze(1) for s in layer])

    for b, states in enumerate(state_list[1:], 1):
        for li, layer in enumerate(states):
            for si, s in enumerate(layer):
                ans[li][si].append(s)
                if b == batch_size - 1:
                    ans[li][si] = torch.stack(ans[li][si], dim=1)
                    # We will use unbind(dim=1) later in unstack_states()
    return ans


class Emformer(EncoderInterface):
    """This is just a simple wrapper around torchaudio.models.Emformer.
    We may replace it with our own implementation some time later.
    """

    def __init__(
        self,
        num_features: int,
        output_dim: int,
        d_model: int,
        nhead: int,
        dim_feedforward: int,
        num_encoder_layers: int,
        segment_length: int,
        left_context_length: int,
        right_context_length: int,
        max_memory_size: int = 0,
        dropout: float = 0.1,
        subsampling_factor: int = 4,
        vgg_frontend: bool = False,
    ) -> None:
        """
        Args:
          num_features:
            The input dimension of the model.
          output_dim:
            The output dimension of the model.
          d_model:
            Attention dimension.
          nhead:
            Number of heads in multi-head attention.
          dim_feedforward:
            The output dimension of the feedforward layers in encoder.
          num_encoder_layers:
            Number of encoder layers.
          segment_length:
            Number of frames per segment before subsampling.
          left_context_length:
            Number of frames in the left context before subsampling.
          right_context_length:
            Number of frames in the right context before subsampling.
          max_memory_size:
            TODO.
          dropout:
            Dropout in encoder.
          subsampling_factor:
            Number of output frames is num_in_frames // subsampling_factor.
            Currently, subsampling_factor MUST be 4.
          vgg_frontend:
            True to use vgg style frontend for subsampling.
        """
        super().__init__()

        self.subsampling_factor = subsampling_factor
        if subsampling_factor != 4:
            raise NotImplementedError("Support only 'subsampling_factor=4'.")

        # self.encoder_embed converts the input of shape (N, T, num_features)
        # to the shape (N, T//subsampling_factor, d_model).
        # That is, it does two things simultaneously:
        #   (1) subsampling: T -> T//subsampling_factor
        #   (2) embedding: num_features -> d_model
        if vgg_frontend:
            self.encoder_embed = VggSubsampling(num_features, d_model)
        else:
            self.encoder_embed = Conv2dSubsampling(num_features, d_model)

        self.segment_length = segment_length  # before subsampling
        self.right_context_length = right_context_length

        assert right_context_length % subsampling_factor == 0
        assert segment_length % subsampling_factor == 0
        assert left_context_length % subsampling_factor == 0

        left_context_length = left_context_length // subsampling_factor
        right_context_length = right_context_length // subsampling_factor
        segment_length = segment_length // subsampling_factor

        self.model = _Emformer(
            input_dim=d_model,
            num_heads=nhead,
            ffn_dim=dim_feedforward,
            num_layers=num_encoder_layers,
            segment_length=segment_length,
            dropout=dropout,
            activation="relu",
            left_context_length=left_context_length,
            right_context_length=right_context_length,
            max_memory_size=max_memory_size,
            weight_init_scale_strategy="depthwise",
            tanh_on_mem=False,
            negative_inf=-1e8,
        )

        self.encoder_output_layer = nn.Sequential(
            nn.Dropout(p=dropout), nn.Linear(d_model, output_dim)
        )
        self.log_eps = math.log(1e-10)

        self._has_init_state = False
        self._init_state = torch.jit.Attribute([], List[List[torch.Tensor]])

    def forward(
        self,
        x: torch.Tensor,
        x_lens: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
"""
|
||||||
|
Args:
|
||||||
|
x:
|
||||||
|
Input features of shape (N, T, C).
|
||||||
|
x_lens:
|
||||||
|
A int32 tensor of shape (N,) containing valid frames in `x` before
|
||||||
|
padding. We have `x.size(1) == x_lens.max()`
|
||||||
|
Returns:
|
||||||
|
Return a tuple containing two tensors:
|
||||||
|
|
||||||
|
- encoder_out, a tensor of shape (N, T', C)
|
||||||
|
- encoder_out_lens, a int32 tensor of shape (N,) containing the
|
||||||
|
valid frames in `encoder_out` before padding
|
||||||
|
"""
        x = nn.functional.pad(
            x,
            # (left, right, top, bottom)
            # left/right are for the channel dimension, i.e., axis 2
            # top/bottom are for the time dimension, i.e., axis 1
            (0, 0, 0, self.right_context_length),
            value=self.log_eps,
        )  # (N, T, C) -> (N, T+right_context_length, C)

        x = self.encoder_embed(x)

        # Caution: We assume the subsampling factor is 4!
        x_lens = (((x_lens - 1) >> 1) - 1) >> 1

        emformer_out, emformer_out_lens = self.model(x, x_lens)
        logits = self.encoder_output_layer(emformer_out)

        return logits, emformer_out_lens

    @torch.jit.export
    def streaming_forward(
        self,
        x: torch.Tensor,
        x_lens: torch.Tensor,
        states: Optional[List[List[torch.Tensor]]] = None,
    ):
        """
        Args:
          x:
            A 3-D tensor of shape (N, T, C). Note: x also contains right
            context frames.
          x_lens:
            A 1-D tensor of shape (N,) containing the number of valid frames
            for each element in `x` before padding. Note: It also counts
            right context frames.
          states:
            Internal states of the model.
        Returns:
          Return a tuple containing 3 tensors:
            - encoder_out, a 3-D tensor of shape (N, T, C)
            - encoder_out_lens: a 1-D tensor of shape (N,)
            - next_state, internal model states for the next invocation
        """
        x = self.encoder_embed(x)

        # Caution: We assume the subsampling factor is 4!
        x_lens = (((x_lens - 1) >> 1) - 1) >> 1

        emformer_out, emformer_out_lens, states = self.model.infer(
            x, x_lens, states
        )

        if x.size(1) != (
            self.model.segment_length + self.model.right_context_length
        ):
            raise ValueError(
                "Incorrect input shape."
                f"{x.size(1)} vs {self.model.segment_length} + "
                f"{self.model.right_context_length}"
            )

        logits = self.encoder_output_layer(emformer_out)

        return logits, emformer_out_lens, states

    @torch.jit.export
    def get_init_state(self, device: torch.device) -> List[List[torch.Tensor]]:
        """Return the initial state of each layer.

        Returns:
          Return the initial state of each layer. NOTE: the returned
          tensors are on the given device. `len(ans) == num_emformer_layers`.
        """
        if self._has_init_state:
            # Note(fangjun): It is OK to share the init state as it is
            # not going to be modified by the model
            return self._init_state

        batch_size = 1

        ans: List[List[torch.Tensor]] = []
        for layer in self.model.emformer_layers:
            s = layer._init_state(batch_size=batch_size, device=device)
            ans.append(s)

        self._has_init_state = True
        self._init_state = ans

        return ans
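To make the streaming API concrete, here is a minimal chunk-by-chunk loop (a sketch under stated assumptions, not recipe code: a chunk of 23 input frames subsamples to (((23 - 1) >> 1) - 1) >> 1 = 5 frames, i.e. segment_length/4 + right_context_length/4 for segment_length=16 and right_context_length=4; the hop of 16 raw frames, one segment, is this sketch's assumption about how consecutive chunks overlap):

import torch


@torch.no_grad()
def run_streaming(encoder: "Emformer", features: torch.Tensor) -> torch.Tensor:
    """Sketch: process one utterance chunk by chunk, carrying the states.

    features: a (T, C) tensor of fbank frames for a single utterance.
    """
    chunk = 23  # yields segment (4) + right context (1) subsampled frames
    hop = 16  # advance by one pre-subsampling segment (assumption)
    states = encoder.get_init_state(device=features.device)
    outputs = []
    for start in range(0, features.size(0) - chunk + 1, hop):
        x = features[start : start + chunk].unsqueeze(0)  # (1, 23, C)
        x_lens = torch.full((1,), chunk, dtype=torch.int32)
        y, y_lens, states = encoder.streaming_forward(x, x_lens, states)
        outputs.append(y)
    return torch.cat(outputs, dim=1)  # (1, T', output_dim)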
1 egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/encoder_interface.py Symbolic link
@@ -0,0 +1 @@
../pruned_transducer_stateless/encoder_interface.py
281 egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/export.py Executable file
@@ -0,0 +1,281 @@
#!/usr/bin/env python3
#
# Copyright 2021 Xiaomi Corporation (Author: Fangjun Kuang)
#
# See ../../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This script converts several saved checkpoints
# to a single one using model averaging.
"""
|
||||||
|
Usage:
|
||||||
|
./prunted_stateless_emformer_rnnt/export.py \
|
||||||
|
--exp-dir ./prunted_stateless_emformer_rnnt/exp \
|
||||||
|
--bpe-model data/lang_bpe_500/bpe.model \
|
||||||
|
--epoch 20 \
|
||||||
|
--avg 10
|
||||||
|
|
||||||
|
It will generate a file exp_dir/pretrained.pt
|
||||||
|
|
||||||
|
To use the generated file with `prunted_stateless_emformer_rnnt/decode.py`,
|
||||||
|
you can do:
|
||||||
|
|
||||||
|
cd /path/to/exp_dir
|
||||||
|
ln -s pretrained.pt epoch-9999.pt
|
||||||
|
|
||||||
|
cd /path/to/egs/librispeech/ASR
|
||||||
|
./prunted_stateless_emformer_rnnt/decode.py \
|
||||||
|
--exp-dir ./prunted_stateless_emformer_rnnt/exp \
|
||||||
|
--epoch 9999 \
|
||||||
|
--avg 1 \
|
||||||
|
--max-duration 600 \
|
||||||
|
--decoding-method greedy_search \
|
||||||
|
--bpe-model data/lang_bpe_500/bpe.model
|
||||||
|
"""
|
||||||
|
import argparse
import logging
from pathlib import Path

import sentencepiece as spm
import torch
from train import add_model_arguments, get_params, get_transducer_model

from icefall.checkpoint import (
    average_checkpoints,
    average_checkpoints_with_averaged_model,
    find_checkpoints,
    load_checkpoint,
)
from icefall.utils import str2bool


def get_parser():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )

    parser.add_argument(
        "--epoch",
        type=int,
        default=28,
        help="""It specifies the checkpoint to use for averaging.
        Note: Epoch counts from 1.
        You can specify --avg to use more checkpoints for model averaging.""",
    )

    parser.add_argument(
        "--iter",
        type=int,
        default=0,
        help="""If positive, --epoch is ignored and it
        will use the checkpoint exp_dir/checkpoint-iter.pt.
        You can specify --avg to use more checkpoints for model averaging.
        """,
    )

    parser.add_argument(
        "--avg",
        type=int,
        default=15,
        help="Number of checkpoints to average. Automatically select "
        "consecutive checkpoints before the checkpoint specified by "
        "'--epoch' and '--iter'",
    )

    parser.add_argument(
        "--use-averaged-model",
        type=str2bool,
        default=False,
        help="Whether to load averaged model. Currently it only supports "
        "using --epoch. If True, it would decode with the averaged model "
        "over the epoch range from `epoch-avg` (excluded) to `epoch`. "
        "Actually only the models with epoch number of `epoch-avg` and "
        "`epoch` are loaded for averaging. ",
    )
    parser.add_argument(
        "--exp-dir",
        type=str,
        default="pruned_stateless_emformer_rnnt2/exp",
        help="""It specifies the directory where all training related
        files, e.g., checkpoints, logs, etc., are saved
        """,
    )
    parser.add_argument(
        "--bpe-model",
        type=str,
        default="data/lang_bpe_500/bpe.model",
        help="Path to the BPE model",
    )

    parser.add_argument(
        "--jit",
        type=str2bool,
        default=False,
        help="""True to save a model after applying torch.jit.script.
        """,
    )

    parser.add_argument(
        "--context-size",
        type=int,
        default=2,
        help="The context size in the decoder. 1 means bigram; "
        "2 means tri-gram",
    )

    add_model_arguments(parser)

    return parser


def main():
    args = get_parser().parse_args()
    args.exp_dir = Path(args.exp_dir)

    params = get_params()
    params.update(vars(args))

    device = torch.device("cpu")
    if torch.cuda.is_available():
        device = torch.device("cuda", 0)

    logging.info(f"device: {device}")

    sp = spm.SentencePieceProcessor()
    sp.load(params.bpe_model)

    # <blk> and <unk> are defined in local/train_bpe_model.py
    params.blank_id = sp.piece_to_id("<blk>")
    params.unk_id = sp.piece_to_id("<unk>")
    params.vocab_size = sp.get_piece_size()

    logging.info(params)

    logging.info("About to create model")
    model = get_transducer_model(params)

    if not params.use_averaged_model:
        if params.iter > 0:
            filenames = find_checkpoints(
                params.exp_dir, iteration=-params.iter
            )[: params.avg]
            if len(filenames) == 0:
                raise ValueError(
                    f"No checkpoints found for"
                    f" --iter {params.iter}, --avg {params.avg}"
                )
            elif len(filenames) < params.avg:
                raise ValueError(
                    f"Not enough checkpoints ({len(filenames)}) found for"
                    f" --iter {params.iter}, --avg {params.avg}"
                )
            logging.info(f"averaging {filenames}")
            model.to(device)
            model.load_state_dict(average_checkpoints(filenames, device=device))
        elif params.avg == 1:
            load_checkpoint(f"{params.exp_dir}/epoch-{params.epoch}.pt", model)
        else:
            start = params.epoch - params.avg + 1
            filenames = []
            for i in range(start, params.epoch + 1):
                if i >= 1:
                    filenames.append(f"{params.exp_dir}/epoch-{i}.pt")
            logging.info(f"averaging {filenames}")
            model.to(device)
            model.load_state_dict(average_checkpoints(filenames, device=device))
    else:
        if params.iter > 0:
            filenames = find_checkpoints(
                params.exp_dir, iteration=-params.iter
            )[: params.avg + 1]
            if len(filenames) == 0:
                raise ValueError(
                    f"No checkpoints found for"
                    f" --iter {params.iter}, --avg {params.avg}"
                )
            elif len(filenames) < params.avg + 1:
                raise ValueError(
                    f"Not enough checkpoints ({len(filenames)}) found for"
                    f" --iter {params.iter}, --avg {params.avg}"
                )
            filename_start = filenames[-1]
            filename_end = filenames[0]
            logging.info(
                "Calculating the averaged model over iteration checkpoints"
                f" from {filename_start} (excluded) to {filename_end}"
            )
            model.to(device)
            model.load_state_dict(
                average_checkpoints_with_averaged_model(
                    filename_start=filename_start,
                    filename_end=filename_end,
                    device=device,
                )
            )
        else:
            assert params.avg > 0, params.avg
            start = params.epoch - params.avg
            assert start >= 1, start
            filename_start = f"{params.exp_dir}/epoch-{start}.pt"
            filename_end = f"{params.exp_dir}/epoch-{params.epoch}.pt"
            logging.info(
                f"Calculating the averaged model over epoch range from "
                f"{start} (excluded) to {params.epoch}"
            )
            model.to(device)
            model.load_state_dict(
                average_checkpoints_with_averaged_model(
                    filename_start=filename_start,
                    filename_end=filename_end,
                    device=device,
                )
            )

    model.eval()

    model.to("cpu")
    model.eval()
    for p in model.parameters():
        p.requires_grad_(False)

    if params.jit:
        # We won't use the forward() method of the model in C++, so just
        # ignore it here.
        # Otherwise, one of its arguments is a ragged tensor and is not
        # torch scriptable.
        model.__class__.forward = torch.jit.ignore(model.__class__.forward)
        logging.info("Using torch.jit.script")
        model = torch.jit.script(model)
        filename = params.exp_dir / "cpu_jit.pt"
        model.save(str(filename))
        logging.info(f"Saved to {filename}")
    else:
        logging.info("Not using torch.jit.script")
        # Save it using a format so that it can be loaded
        # by :func:`load_checkpoint`
        filename = params.exp_dir / "pretrained.pt"
        torch.save({"model": model.state_dict()}, str(filename))
        logging.info(f"Saved to {filename}")


if __name__ == "__main__":
    formatter = (
        "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s"
    )

    logging.basicConfig(format=formatter, level=logging.INFO)
    main()
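As a usage note, the two export formats are loaded differently. A minimal sketch (the paths and the extra `params` fields are assumptions; they must match the configuration the model was trained with, as in test_model.py further below):

import torch
from train import get_params, get_transducer_model

# TorchScript export: self-contained, no Python model code required.
scripted = torch.jit.load("pruned_stateless_emformer_rnnt2/exp/cpu_jit.pt")

# state_dict export: rebuild the model first, then load the weights.
params = get_params()
params.vocab_size = 500  # assumed; must match the BPE model used in training
params.blank_id = 0
params.unk_id = 2
model = get_transducer_model(params)
ckpt = torch.load(
    "pruned_stateless_emformer_rnnt2/exp/pretrained.pt", map_location="cpu"
)
model.load_state_dict(ckpt["model"])
model.eval()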
1 egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/joiner.py Symbolic link
@@ -0,0 +1 @@
../pruned_transducer_stateless/joiner.py
169 egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/model.py Normal file
@@ -0,0 +1,169 @@
# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang, Wei Kang)
#
# See ../../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import k2
import torch
import torch.nn as nn
from encoder_interface import EncoderInterface

from icefall.utils import add_sos


class Transducer(nn.Module):
    """It implements https://arxiv.org/pdf/1211.3711.pdf
    "Sequence Transduction with Recurrent Neural Networks"
    """

    def __init__(
        self,
        encoder: EncoderInterface,
        decoder: nn.Module,
        joiner: nn.Module,
    ):
        """
        Args:
          encoder:
            It is the transcription network in the paper. It accepts
            two inputs: `x` of shape (N, T, C) and `x_lens` of shape (N,).
            It returns two tensors: `logits` of shape (N, T, C) and
            `logit_lens` of shape (N,).
          decoder:
            It is the prediction network in the paper. Its input shape
            is (N, U) and its output shape is (N, U, C). It should contain
            one attribute: `blank_id`.
          joiner:
            It has two inputs with shapes: (N, T, C) and (N, U, C). Its
            output shape is (N, T, U, C). Note that its output contains
            unnormalized probs, i.e., not processed by log-softmax.
        """
        super().__init__()
        assert isinstance(encoder, EncoderInterface), type(encoder)
        assert hasattr(decoder, "blank_id")

        self.encoder = encoder
        self.decoder = decoder
        self.joiner = joiner

    def forward(
        self,
        x: torch.Tensor,
        x_lens: torch.Tensor,
        y: k2.RaggedTensor,
        prune_range: int = 5,
        am_scale: float = 0.0,
        lm_scale: float = 0.0,
    ) -> torch.Tensor:
        """
        Args:
          x:
            A 3-D tensor of shape (N, T, C).
          x_lens:
            A 1-D tensor of shape (N,). It contains the number of frames in `x`
            before padding.
          y:
            A ragged tensor with 2 axes [utt][label]. It contains labels of each
            utterance.
          prune_range:
            The prune range for the rnnt loss; it means how many symbols
            (context) we consider for each frame when computing the loss.
          am_scale:
            The scale to smooth the loss with am (output of encoder network)
            part
          lm_scale:
            The scale to smooth the loss with lm (output of predictor network)
            part
        Returns:
          Return the transducer loss.

        Note:
          Regarding am_scale and lm_scale, the loss function takes the form:

            lm_scale * lm_probs + am_scale * am_probs
            + (1 - lm_scale - am_scale) * combined_probs
        """
        assert x.ndim == 3, x.shape
        assert x_lens.ndim == 1, x_lens.shape
        assert y.num_axes == 2, y.num_axes

        assert x.size(0) == x_lens.size(0) == y.dim0

        encoder_out, x_lens = self.encoder(x, x_lens)
        assert torch.all(x_lens > 0)

        # Now for the decoder, i.e., the prediction network
        row_splits = y.shape.row_splits(1)
        y_lens = row_splits[1:] - row_splits[:-1]

        blank_id = self.decoder.blank_id
        sos_y = add_sos(y, sos_id=blank_id)

        # sos_y_padded: [B, S + 1], start with SOS.
        sos_y_padded = sos_y.pad(mode="constant", padding_value=blank_id)

        # decoder_out: [B, S + 1, C]
        decoder_out = self.decoder(sos_y_padded)

        # Note: y does not start with SOS
        # y_padded : [B, S]
        y_padded = y.pad(mode="constant", padding_value=0)

        y_padded = y_padded.to(torch.int64)
        boundary = torch.zeros(
            (x.size(0), 4), dtype=torch.int64, device=x.device
        )
        boundary[:, 2] = y_lens
        boundary[:, 3] = x_lens

        simple_loss, (px_grad, py_grad) = k2.rnnt_loss_smoothed(
            lm=decoder_out,
            am=encoder_out,
            symbols=y_padded,
            termination_symbol=blank_id,
            lm_only_scale=lm_scale,
            am_only_scale=am_scale,
            boundary=boundary,
            reduction="sum",
            return_grad=True,
        )

        # ranges : [B, T, prune_range]
        ranges = k2.get_rnnt_prune_ranges(
            px_grad=px_grad,
            py_grad=py_grad,
            boundary=boundary,
            s_range=prune_range,
        )

        # am_pruned : [B, T, prune_range, C]
        # lm_pruned : [B, T, prune_range, C]
        am_pruned, lm_pruned = k2.do_rnnt_pruning(
            am=encoder_out, lm=decoder_out, ranges=ranges
        )

        # logits : [B, T, prune_range, C]
        logits = self.joiner(am_pruned, lm_pruned)

        pruned_loss = k2.rnnt_loss_pruned(
            logits=logits,
            symbols=y_padded,
            ranges=ranges,
            termination_symbol=blank_id,
            boundary=boundary,
            reduction="sum",
        )

        return (simple_loss, pruned_loss)
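The two returned losses are combined by the training loop; a hedged sketch of the usual weighting (the scale value is an assumption, not necessarily this recipe's exact hyper-parameter):

import torch


def combine_transducer_losses(
    simple_loss: torch.Tensor,
    pruned_loss: torch.Tensor,
    simple_loss_scale: float = 0.5,  # assumed weight; recipes often anneal it
) -> torch.Tensor:
    # The simple (un-pruned) loss regularizes training and supplies the
    # px/py gradients that define the pruning ranges; the pruned loss is
    # the main objective.
    return simple_loss_scale * simple_loss + pruned_loss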
104 egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/noam.py Normal file
@@ -0,0 +1,104 @@
# Copyright 2021 University of Chinese Academy of Sciences (author: Han Zhu)
#
# See ../../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch


class Noam(object):
    """
    Implements Noam optimizer.

    Proposed in
    "Attention Is All You Need", https://arxiv.org/pdf/1706.03762.pdf

    Modified from
    https://github.com/espnet/espnet/blob/master/espnet/nets/pytorch_backend/transformer/optimizer.py  # noqa

    Args:
      params:
        iterable of parameters to optimize or dicts defining parameter groups
      model_size:
        attention dimension of the transformer model
      factor:
        learning rate factor
      warm_step:
        warmup steps
    """

    def __init__(
        self,
        params,
        model_size: int = 256,
        factor: float = 10.0,
        warm_step: int = 25000,
        weight_decay=0,
    ) -> None:
"""Construct an Noam object."""
        self.optimizer = torch.optim.Adam(
            params, lr=0, betas=(0.9, 0.98), eps=1e-9, weight_decay=weight_decay
        )
        self._step = 0
        self.warmup = warm_step
        self.factor = factor
        self.model_size = model_size
        self._rate = 0

    @property
    def param_groups(self):
        """Return param_groups."""
        return self.optimizer.param_groups

    def step(self):
        """Update parameters and rate."""
        self._step += 1
        rate = self.rate()
        for p in self.optimizer.param_groups:
            p["lr"] = rate
        self._rate = rate
        self.optimizer.step()

    def rate(self, step=None):
"""Implement `lrate` above."""
        if step is None:
            step = self._step
        return (
            self.factor
            * self.model_size ** (-0.5)
            * min(step ** (-0.5), step * self.warmup ** (-1.5))
        )

    def zero_grad(self):
        """Reset gradient."""
        self.optimizer.zero_grad()

    def state_dict(self):
        """Return state_dict."""
        return {
            "_step": self._step,
            "warmup": self.warmup,
            "factor": self.factor,
            "model_size": self.model_size,
            "_rate": self._rate,
            "optimizer": self.optimizer.state_dict(),
        }

    def load_state_dict(self, state_dict):
        """Load state_dict."""
        for key, value in state_dict.items():
            if key == "optimizer":
                self.optimizer.load_state_dict(state_dict["optimizer"])
            else:
                setattr(self, key, value)
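A minimal usage sketch of this wrapper (the model and loss are placeholders):

import torch
import torch.nn as nn

model = nn.Linear(80, 500)  # placeholder model
optimizer = Noam(model.parameters(), model_size=256, factor=10.0, warm_step=25000)

for _ in range(3):
    x = torch.randn(8, 80)
    loss = model(x).pow(2).mean()  # dummy loss
    optimizer.zero_grad()
    loss.backward()
    # step() sets lr = factor * model_size**-0.5
    #                 * min(step**-0.5, step * warm_step**-1.5)
    optimizer.step()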
1 egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/subsampling.py Symbolic link
@@ -0,0 +1 @@
../conformer_ctc/subsampling.py
142 egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/test_emformer.py Executable file
@@ -0,0 +1,142 @@
#!/usr/bin/env python3
# Copyright 2022 Xiaomi Corp. (authors: Fangjun Kuang)
#
# See ../../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
|
||||||
|
To run this file, do:
|
||||||
|
|
||||||
|
cd icefall/egs/librispeech/ASR
|
||||||
|
python ./pruned_stateless_emformer_rnnt/test_emformer.py
|
||||||
|
"""
import torch
from emformer import Emformer, stack_states, unstack_states


def test_emformer():
    N = 3
    T = 300
    C = 80

    output_dim = 500

    encoder = Emformer(
        num_features=C,
        output_dim=output_dim,
        d_model=512,
        nhead=8,
        dim_feedforward=2048,
        num_encoder_layers=20,
        segment_length=16,
        left_context_length=120,
        right_context_length=4,
        vgg_frontend=False,
    )

    x = torch.rand(N, T, C)
    x_lens = torch.randint(100, T, (N,))
    x_lens[0] = T

    y, y_lens = encoder(x, x_lens)

y_lens = (((x_lens - 1) >> 1) - 1) >> 1
|
||||||
|
assert x.size(0) == x.size(0)
|
||||||
|
assert y.size(1) == max(y_lens)
|
||||||
|
assert y.size(2) == output_dim
    num_param = sum([p.numel() for p in encoder.parameters()])
    print(f"Number of encoder parameters: {num_param}")


def test_emformer_streaming_forward():
    N = 3
    C = 80

    output_dim = 500

    encoder = Emformer(
        num_features=C,
        output_dim=output_dim,
        d_model=512,
        nhead=8,
        dim_feedforward=2048,
        num_encoder_layers=20,
        segment_length=16,
        left_context_length=120,
        right_context_length=4,
        vgg_frontend=False,
    )

    x = torch.rand(N, 23, C)
    x_lens = torch.full((N,), 23)
    y, y_lens, states = encoder.streaming_forward(x=x, x_lens=x_lens)

    state_list = unstack_states(states)
    states2 = stack_states(state_list)

    for ss, ss2 in zip(states, states2):
        for s, s2 in zip(ss, ss2):
            assert torch.allclose(s, s2), f"{s.sum()}, {s2.sum()}"


def test_emformer_init_state():
    num_encoder_layers = 20
    d_model = 512
    encoder = Emformer(
        num_features=80,
        output_dim=500,
        d_model=512,
        nhead=8,
        dim_feedforward=2048,
        num_encoder_layers=num_encoder_layers,
        segment_length=16,
        left_context_length=120,
        right_context_length=4,
        vgg_frontend=False,
    )
    init_state = encoder.get_init_state(device=torch.device("cpu"))
    assert len(init_state) == num_encoder_layers
    layer0_state = init_state[0]
    assert len(layer0_state) == 4

    assert layer0_state[0].shape == (
        0,  # max_memory_size
        1,  # batch_size
        d_model,  # input_dim
    )

    assert layer0_state[1].shape == (
        encoder.model.left_context_length,
        1,  # batch_size
        d_model,  # input_dim
    )
    assert layer0_state[2].shape == layer0_state[1].shape
    assert layer0_state[3].shape == (
        1,  # always 1
        1,  # batch_size
    )


@torch.no_grad()
def main():
    test_emformer()
    test_emformer_streaming_forward()
    test_emformer_init_state()


if __name__ == "__main__":
    torch.manual_seed(20220329)
    main()
59 egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/test_model.py Executable file
@@ -0,0 +1,59 @@
#!/usr/bin/env python3
# Copyright 2022 Xiaomi Corp. (authors: Fangjun Kuang)
#
# See ../../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


"""
|
||||||
|
To run this file, do:
|
||||||
|
|
||||||
|
cd icefall/egs/librispeech/ASR
|
||||||
|
python ./pruned_stateless_emformer_rnnt/test_model.py
|
||||||
|
"""
import torch
from train import get_params, get_transducer_model


def test_model():
    params = get_params()
    params.vocab_size = 500
    params.blank_id = 0
    params.context_size = 2
    params.unk_id = 2

    params.attention_dim = 512
    params.nhead = 8
    params.dim_feedforward = 2048
    params.num_encoder_layers = 18
    params.left_context_length = 128
    params.segment_length = 8
    params.right_context_length = 4
    params.memory_size = 0

    model = get_transducer_model(params)

    num_param = sum([p.numel() for p in model.parameters()])
    print(f"Number of model parameters: {num_param}")
    model.__class__.forward = torch.jit.ignore(model.__class__.forward)
    torch.jit.script(model)


def main():
    test_model()


if __name__ == "__main__":
    main()
1034 egs/librispeech/ASR/pruned_stateless_emformer_rnnt2/train.py Executable file
File diff suppressed because it is too large. Load Diff
@@ -32,13 +32,15 @@ class Joiner(nn.Module):
         """
         Args:
           encoder_out:
-            Output from the encoder. Its shape is (N, T, s_range, C).
+            Output from the encoder. Its shape is (N, T, s_range, C) during
+            training or (N, C) in case of streaming decoding.
           decoder_out:
-            Output from the decoder. Its shape is (N, T, s_range, C).
+            Output from the decoder. Its shape is (N, T, s_range, C) during
+            training or (N, C) in case of streaming decoding.
           Returns:
             Return a tensor of shape (N, T, s_range, C).
         """
-        assert encoder_out.ndim == decoder_out.ndim == 4
+        assert encoder_out.ndim == decoder_out.ndim
+        assert encoder_out.ndim in (2, 4)
         assert encoder_out.shape == decoder_out.shape
 
         logit = encoder_out + decoder_out
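The hunk above relaxes the joiner to accept both the 4-D layout used by pruned training and the 2-D per-frame layout used in streaming decoding. A hedged sketch of the resulting shape contract (the `tanh` + `output_linear` tail mirrors how the stateless recipes' joiners are typically structured; shown for illustration only):

import torch
import torch.nn as nn

class JoinerSketch(nn.Module):
    def __init__(self, joiner_dim: int, vocab_size: int):
        super().__init__()
        self.output_linear = nn.Linear(joiner_dim, vocab_size)

    def forward(
        self, encoder_out: torch.Tensor, decoder_out: torch.Tensor
    ) -> torch.Tensor:
        # (N, T, s_range, C) during training, (N, C) during streaming decoding
        assert encoder_out.ndim == decoder_out.ndim
        assert encoder_out.ndim in (2, 4)
        assert encoder_out.shape == decoder_out.shape
        logit = encoder_out + decoder_out
        return self.output_linear(torch.tanh(logit))

# Both layouts go through the same code path:
j = JoinerSketch(joiner_dim=512, vocab_size=500)
training_out = j(torch.rand(2, 10, 5, 512), torch.rand(2, 10, 5, 512))
streaming_out = j(torch.rand(2, 512), torch.rand(2, 512))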
@@ -456,7 +456,7 @@ def compute_loss(
     is_training: bool,
 ) -> Tuple[Tensor, MetricsTracker]:
     """
-    Compute CTC loss given the model and its inputs.
+    Compute RNN-T loss given the model and its inputs.
 
     Args:
       params:
@@ -807,28 +807,8 @@ def run(rank, world_size, args):
         # the threshold
         return 1.0 <= c.duration <= 20.0
 
-    num_in_total = len(train_cuts)
-
     train_cuts = train_cuts.filter(remove_short_and_long_utt)
 
-    try:
-        num_left = len(train_cuts)
-        num_removed = num_in_total - num_left
-        removed_percent = num_removed / num_in_total * 100
-
-        logging.info(
-            f"Before removing short and long utterances: {num_in_total}"
-        )
-        logging.info(f"After removing short and long utterances: {num_left}")
-        logging.info(
-            f"Removed {num_removed} utterances ({removed_percent:.5f}%)"
-        )
-    except TypeError as e:
-        # You can ignore this error as previous versions of Lhotse work fine
-        # for the above code. In recent versions of Lhotse, it uses
-        # lazy filter, producing cutsets that don't have the __len__ method
-        logging.info(str(e))
-
     if params.start_batch > 0 and checkpoints and "sampler" in checkpoints:
         # We only load the sampler's state dict when it loads a checkpoint
         # saved in the middle of an epoch
@@ -510,7 +510,7 @@ def compute_loss(
     warmup: float = 1.0,
 ) -> Tuple[Tensor, MetricsTracker]:
     """
-    Compute CTC loss given the model and its inputs.
+    Compute RNN-T loss given the model and its inputs.
 
     Args:
       params:
@@ -22,7 +22,6 @@ from typing import Optional
 
 from lhotse import CutSet, Fbank, FbankConfig
 from lhotse.dataset import (
-    BucketingSampler,
     CutMix,
     DynamicBucketingSampler,
     K2SpeechRecognitionDataset,
@@ -71,8 +70,7 @@ class AsrDataModule:
             "--num-buckets",
             type=int,
             default=30,
-            help="The number of buckets for the BucketingSampler "
-            "and DynamicBucketingSampler."
+            help="The number of buckets for the DynamicBucketingSampler. "
             "(you might want to increase it for larger datasets).",
         )
 
@@ -152,7 +150,6 @@ class AsrDataModule:
     def train_dataloaders(
         self,
         cuts_train: CutSet,
-        dynamic_bucketing: bool,
         on_the_fly_feats: bool,
         cuts_musan: Optional[CutSet] = None,
     ) -> DataLoader:
@@ -162,9 +159,6 @@ class AsrDataModule:
             Cuts for training.
           cuts_musan:
             If not None, it is the cuts for mixing.
-          dynamic_bucketing:
-            True to use DynamicBucketingSampler;
-            False to use BucketingSampler.
           on_the_fly_feats:
             True to use OnTheFlyFeatures;
             False to use PrecomputedFeatures.
@@ -230,25 +224,14 @@ class AsrDataModule:
             return_cuts=self.args.return_cuts,
         )
 
-        if dynamic_bucketing:
-            logging.info("Using DynamicBucketingSampler.")
-            train_sampler = DynamicBucketingSampler(
-                cuts_train,
-                max_duration=self.args.max_duration,
-                shuffle=self.args.shuffle,
-                num_buckets=self.args.num_buckets,
-                drop_last=True,
-            )
-        else:
-            logging.info("Using BucketingSampler.")
-            train_sampler = BucketingSampler(
-                cuts_train,
-                max_duration=self.args.max_duration,
-                shuffle=self.args.shuffle,
-                num_buckets=self.args.num_buckets,
-                bucket_method="equal_duration",
-                drop_last=True,
-            )
+        logging.info("Using DynamicBucketingSampler.")
+        train_sampler = DynamicBucketingSampler(
+            cuts_train,
+            max_duration=self.args.max_duration,
+            shuffle=self.args.shuffle,
+            num_buckets=self.args.num_buckets,
+            drop_last=True,
+        )
 
         logging.info("About to create train dataloader")
         train_dl = DataLoader(
@@ -277,10 +260,12 @@ class AsrDataModule:
             cut_transforms=transforms,
             return_cuts=self.args.return_cuts,
         )
-        valid_sampler = BucketingSampler(
+        valid_sampler = DynamicBucketingSampler(
             cuts_valid,
             max_duration=self.args.max_duration,
             shuffle=False,
+            num_buckets=self.args.num_buckets,
+            drop_last=False,
         )
         logging.info("About to create dev dataloader")
         valid_dl = DataLoader(
@@ -301,8 +286,12 @@ class AsrDataModule:
             else PrecomputedFeatures(),
             return_cuts=self.args.return_cuts,
         )
-        sampler = BucketingSampler(
-            cuts, max_duration=self.args.max_duration, shuffle=False
+        sampler = DynamicBucketingSampler(
+            cuts,
+            max_duration=self.args.max_duration,
+            shuffle=False,
+            num_buckets=self.args.num_buckets,
+            drop_last=True,
         )
         logging.debug("About to create test dataloader")
         test_dl = DataLoader(
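For context, `DynamicBucketingSampler` can also be driven directly outside the data module; a minimal sketch (the manifest path is an assumption):

from lhotse import load_manifest_lazy
from lhotse.dataset import DynamicBucketingSampler

cuts = load_manifest_lazy("data/fbank/gigaspeech_cuts_DEV.jsonl.gz")  # assumed path
sampler = DynamicBucketingSampler(
    cuts,
    max_duration=200.0,  # seconds of audio per batch
    shuffle=False,
    num_buckets=30,
    drop_last=False,
)
for batch_cuts in sampler:
    # each element is a CutSet whose total duration is close to max_duration
    ...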
@@ -22,7 +22,7 @@ import re
 from pathlib import Path
 
 import lhotse
-from lhotse import CutSet, load_manifest
+from lhotse import CutSet, load_manifest_lazy
 
 
 class GigaSpeech:
@@ -32,13 +32,13 @@ class GigaSpeech:
       manifest_dir:
         It is expected to contain the following files::
 
-          - XL_split_2000/cuts_XL.*.jsonl.gz
-          - cuts_L_raw.jsonl.gz
-          - cuts_M_raw.jsonl.gz
-          - cuts_S_raw.jsonl.gz
-          - cuts_XS_raw.jsonl.gz
-          - cuts_DEV_raw.jsonl.gz
-          - cuts_TEST_raw.jsonl.gz
+          - gigaspeech_XL_split_2000/gigaspeech_cuts_XL.*.jsonl.gz
+          - gigaspeech_cuts_L_raw.jsonl.gz
+          - gigaspeech_cuts_M_raw.jsonl.gz
+          - gigaspeech_cuts_S_raw.jsonl.gz
+          - gigaspeech_cuts_XS_raw.jsonl.gz
+          - gigaspeech_cuts_DEV_raw.jsonl.gz
+          - gigaspeech_cuts_TEST_raw.jsonl.gz
         """
         self.manifest_dir = Path(manifest_dir)
 
@@ -46,10 +46,12 @@ class GigaSpeech:
         logging.info("About to get train-XL cuts")
 
         filenames = list(
-            glob.glob(f"{self.manifest_dir}/XL_split_2000/cuts_XL.*.jsonl.gz")
+            glob.glob(
+                f"{self.manifest_dir}/gigaspeech_XL_split_2000/gigaspeech_cuts_XL.*.jsonl.gz"  # noqa
+            )
         )
 
-        pattern = re.compile(r"cuts_XL.([0-9]+).jsonl.gz")
+        pattern = re.compile(r"gigaspeech_cuts_XL.([0-9]+).jsonl.gz")
         idx_filenames = [
             (int(pattern.search(f).group(1)), f) for f in filenames
         ]
@@ -64,31 +66,31 @@ class GigaSpeech:
         )
 
     def train_L_cuts(self) -> CutSet:
-        f = self.manifest_dir / "cuts_L_raw.jsonl.gz"
+        f = self.manifest_dir / "gigaspeech_cuts_L_raw.jsonl.gz"
         logging.info(f"About to get train-L cuts from {f}")
         return CutSet.from_jsonl_lazy(f)
 
     def train_M_cuts(self) -> CutSet:
-        f = self.manifest_dir / "cuts_M_raw.jsonl.gz"
+        f = self.manifest_dir / "gigaspeech_cuts_M_raw.jsonl.gz"
         logging.info(f"About to get train-M cuts from {f}")
         return CutSet.from_jsonl_lazy(f)
 
     def train_S_cuts(self) -> CutSet:
-        f = self.manifest_dir / "cuts_S_raw.jsonl.gz"
+        f = self.manifest_dir / "gigaspeech_cuts_S_raw.jsonl.gz"
         logging.info(f"About to get train-S cuts from {f}")
        return CutSet.from_jsonl_lazy(f)
 
     def train_XS_cuts(self) -> CutSet:
-        f = self.manifest_dir / "cuts_XS_raw.jsonl.gz"
+        f = self.manifest_dir / "gigaspeech_cuts_XS_raw.jsonl.gz"
         logging.info(f"About to get train-XS cuts from {f}")
         return CutSet.from_jsonl_lazy(f)
 
     def test_cuts(self) -> CutSet:
-        f = self.manifest_dir / "cuts_TEST.jsonl.gz"
+        f = self.manifest_dir / "gigaspeech_cuts_TEST.jsonl.gz"
         logging.info(f"About to get TEST cuts from {f}")
-        return load_manifest(f)
+        return load_manifest_lazy(f)
 
     def dev_cuts(self) -> CutSet:
-        f = self.manifest_dir / "cuts_DEV.jsonl.gz"
+        f = self.manifest_dir / "gigaspeech_cuts_DEV.jsonl.gz"
         logging.info(f"About to get DEV cuts from {f}")
-        return load_manifest(f)
+        return load_manifest_lazy(f)
Some files were not shown because too many files have changed in this diff.