mirror of https://github.com/k2-fsa/icefall.git (synced 2025-12-11 06:55:27 +00:00)

Merge with master
This commit is contained in: commit afe3b183c4

.flake8 (2 changes)
@ -15,7 +15,7 @@ per-file-ignores =
     egs/librispeech/ASR/zipformer_mmi/*.py: E501, E203
     egs/librispeech/ASR/zipformer/*.py: E501, E203
     egs/librispeech/ASR/RESULTS.md: E999,
+    egs/ljspeech/TTS/vits/*.py: E501, E203

     # invalid escape sequence (caused by TeX formulas), W605
     icefall/utils.py: E501, W605
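As a quick sanity check of the new per-file-ignores entry (a hypothetical local invocation, not part of this commit), flake8 can be pointed at the affected paths:

# Should report no E501/E203 for the VITS recipe after this change:
flake8 egs/ljspeech/TTS/vits/
# E501/W605 remain suppressed for icefall/utils.py:
flake8 icefall/utils.py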
.github/scripts/aishell/ASR/run.sh (new executable file, vendored, 343 lines)
@ -0,0 +1,343 @@
#!/usr/bin/env bash

set -ex

log() {
  # This function is from espnet
  local fname=${BASH_SOURCE[1]##*/}
  echo -e "$(date '+%Y-%m-%d %H:%M:%S') (${fname}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*"
}

cd egs/aishell/ASR

function download_test_dev_manifests() {
  git lfs install

  fbank_url=https://huggingface.co/csukuangfj/aishell-test-dev-manifests
  log "Downloading pre-computed fbank from $fbank_url"

  git clone https://huggingface.co/csukuangfj/aishell-test-dev-manifests
  ln -s $PWD/aishell-test-dev-manifests/data .
}

function test_transducer_stateless3_2022_06_20() {
  repo_url=https://huggingface.co/csukuangfj/icefall-aishell-pruned-transducer-stateless3-2022-06-20
  log "Downloading pre-trained model from $repo_url"
  git clone $repo_url
  repo=$(basename $repo_url)

  log "Display test files"
  tree $repo/
  ls -lh $repo/test_wavs/*.wav

  pushd $repo/exp
  ln -s pretrained-epoch-29-avg-5-torch-1.10.0.pt pretrained.pt
  popd

  log "test greedy_search with pretrained.py"

  for sym in 1 2 3; do
    log "Greedy search with --max-sym-per-frame $sym"

    ./pruned_transducer_stateless3/pretrained.py \
      --method greedy_search \
      --max-sym-per-frame $sym \
      --checkpoint $repo/exp/pretrained.pt \
      --lang-dir $repo/data/lang_char \
      $repo/test_wavs/BAC009S0764W0121.wav \
      $repo/test_wavs/BAC009S0764W0122.wav \
      $repo/test_wavs/BAC009S0764W0123.wav
  done

  log "test beam search with pretrained.py"

  for method in modified_beam_search beam_search fast_beam_search; do
    log "$method"

    ./pruned_transducer_stateless3/pretrained.py \
      --method $method \
      --beam-size 4 \
      --checkpoint $repo/exp/pretrained.pt \
      --lang-dir $repo/data/lang_char \
      $repo/test_wavs/BAC009S0764W0121.wav \
      $repo/test_wavs/BAC009S0764W0122.wav \
      $repo/test_wavs/BAC009S0764W0123.wav
  done

  echo "GITHUB_EVENT_NAME: ${GITHUB_EVENT_NAME}"
  echo "GITHUB_EVENT_LABEL_NAME: ${GITHUB_EVENT_LABEL_NAME}"
  if [[ x"${GITHUB_EVENT_NAME}" == x"schedule" || x"${GITHUB_EVENT_LABEL_NAME}" == x"run-decode" ]]; then
    mkdir -p pruned_transducer_stateless3/exp
    ln -s $PWD/$repo/exp/pretrained.pt pruned_transducer_stateless3/exp/epoch-999.pt
    ln -s $PWD/$repo/data/lang_char data/

    ls -lh data
    ls -lh pruned_transducer_stateless3/exp

    log "Decoding test and dev"

    # use a small value for decoding with CPU
    max_duration=100

    for method in greedy_search fast_beam_search modified_beam_search; do
      log "Decoding with $method"

      ./pruned_transducer_stateless3/decode.py \
        --decoding-method $method \
        --epoch 999 \
        --avg 1 \
        --max-duration $max_duration \
        --exp-dir pruned_transducer_stateless3/exp
    done

    rm pruned_transducer_stateless3/exp/*.pt
  fi

  rm -rf $repo
}

function test_zipformer_large_2023_10_24() {
  log "CI testing large model"
  repo_url=https://huggingface.co/zrjin/icefall-asr-aishell-zipformer-large-2023-10-24/
  log "Downloading pre-trained model from $repo_url"
  git clone $repo_url
  repo=$(basename $repo_url)

  log "Display test files"
  tree $repo/
  ls -lh $repo/test_wavs/*.wav

  for method in modified_beam_search greedy_search fast_beam_search; do
    log "$method"

    ./zipformer/pretrained.py \
      --method $method \
      --context-size 1 \
      --checkpoint $repo/exp/pretrained.pt \
      --tokens $repo/data/lang_char/tokens.txt \
      --num-encoder-layers 2,2,4,5,4,2 \
      --feedforward-dim 512,768,1536,2048,1536,768 \
      --encoder-dim 192,256,512,768,512,256 \
      --encoder-unmasked-dim 192,192,256,320,256,192 \
      $repo/test_wavs/BAC009S0764W0121.wav \
      $repo/test_wavs/BAC009S0764W0122.wav \
      $repo/test_wavs/BAC009S0764W0123.wav
  done
  rm -rf $repo
}

function test_zipformer_2023_10_24() {
  repo_url=https://huggingface.co/zrjin/icefall-asr-aishell-zipformer-2023-10-24/
  log "Downloading pre-trained model from $repo_url"
  git clone $repo_url
  repo=$(basename $repo_url)

  log "Display test files"
  tree $repo/
  ls -lh $repo/test_wavs/*.wav

  for method in modified_beam_search greedy_search fast_beam_search; do
    log "$method"

    ./zipformer/pretrained.py \
      --method $method \
      --context-size 1 \
      --checkpoint $repo/exp/pretrained.pt \
      --tokens $repo/data/lang_char/tokens.txt \
      $repo/test_wavs/BAC009S0764W0121.wav \
      $repo/test_wavs/BAC009S0764W0122.wav \
      $repo/test_wavs/BAC009S0764W0123.wav
  done
  rm -rf $repo
}

function test_zipformer_small_2023_10_24() {
  log "CI testing small model"
  repo_url=https://huggingface.co/zrjin/icefall-asr-aishell-zipformer-small-2023-10-24/
  log "Downloading pre-trained model from $repo_url"
  git clone $repo_url
  repo=$(basename $repo_url)

  log "Display test files"
  tree $repo/
  ls -lh $repo/test_wavs/*.wav

  for method in modified_beam_search greedy_search fast_beam_search; do
    log "$method"

    ./zipformer/pretrained.py \
      --method $method \
      --context-size 1 \
      --checkpoint $repo/exp/pretrained.pt \
      --tokens $repo/data/lang_char/tokens.txt \
      --num-encoder-layers 2,2,2,2,2,2 \
      --feedforward-dim 512,768,768,768,768,768 \
      --encoder-dim 192,256,256,256,256,256 \
      --encoder-unmasked-dim 192,192,192,192,192,192 \
      $repo/test_wavs/BAC009S0764W0121.wav \
      $repo/test_wavs/BAC009S0764W0122.wav \
      $repo/test_wavs/BAC009S0764W0123.wav
  done
  rm -rf $repo
}

function test_transducer_stateless_modified_2022_03_01() {
  repo_url=https://huggingface.co/csukuangfj/icefall-aishell-transducer-stateless-modified-2022-03-01

  log "Downloading pre-trained model from $repo_url"
  git lfs install
  git clone $repo_url
  repo=$(basename $repo_url)

  log "Display test files"
  tree $repo/
  ls -lh $repo/test_wavs/*.wav

  for sym in 1 2 3; do
    log "Greedy search with --max-sym-per-frame $sym"

    ./transducer_stateless_modified/pretrained.py \
      --method greedy_search \
      --max-sym-per-frame $sym \
      --checkpoint $repo/exp/pretrained.pt \
      --lang-dir $repo/data/lang_char \
      $repo/test_wavs/BAC009S0764W0121.wav \
      $repo/test_wavs/BAC009S0764W0122.wav \
      $repo/test_wavs/BAC009S0764W0123.wav
  done

  for method in modified_beam_search beam_search; do
    log "$method"

    ./transducer_stateless_modified/pretrained.py \
      --method $method \
      --beam-size 4 \
      --checkpoint $repo/exp/pretrained.pt \
      --lang-dir $repo/data/lang_char \
      $repo/test_wavs/BAC009S0764W0121.wav \
      $repo/test_wavs/BAC009S0764W0122.wav \
      $repo/test_wavs/BAC009S0764W0123.wav
  done
  rm -rf $repo
}

function test_transducer_stateless_modified_2_2022_03_01() {
  repo_url=https://huggingface.co/csukuangfj/icefall-aishell-transducer-stateless-modified-2-2022-03-01

  log "Downloading pre-trained model from $repo_url"
  git lfs install
  git clone $repo_url
  repo=$(basename $repo_url)

  log "Display test files"
  tree $repo/
  ls -lh $repo/test_wavs/*.wav

  for sym in 1 2 3; do
    log "Greedy search with --max-sym-per-frame $sym"

    ./transducer_stateless_modified-2/pretrained.py \
      --method greedy_search \
      --max-sym-per-frame $sym \
      --checkpoint $repo/exp/pretrained.pt \
      --lang-dir $repo/data/lang_char \
      $repo/test_wavs/BAC009S0764W0121.wav \
      $repo/test_wavs/BAC009S0764W0122.wav \
      $repo/test_wavs/BAC009S0764W0123.wav
  done

  for method in modified_beam_search beam_search; do
    log "$method"

    ./transducer_stateless_modified-2/pretrained.py \
      --method $method \
      --beam-size 4 \
      --checkpoint $repo/exp/pretrained.pt \
      --lang-dir $repo/data/lang_char \
      $repo/test_wavs/BAC009S0764W0121.wav \
      $repo/test_wavs/BAC009S0764W0122.wav \
      $repo/test_wavs/BAC009S0764W0123.wav
  done
  rm -rf $repo
}

function test_conformer_ctc() {
  repo_url=https://huggingface.co/csukuangfj/icefall_asr_aishell_conformer_ctc
  log "Downloading pre-trained model from $repo_url"
  GIT_LFS_SKIP_SMUDGE=1 git clone $repo_url
  repo=$(basename $repo_url)
  pushd $repo

  git lfs pull --include "exp/pretrained.pt"
  git lfs pull --include "data/lang_char/H.fst"
  git lfs pull --include "data/lang_char/HL.fst"
  git lfs pull --include "data/lang_char/HLG.fst"

  popd

  log "Display test files"
  tree $repo/
  ls -lh $repo/test_wavs/*.wav

  log "CTC decoding"

  log "Exporting model with torchscript"

  pushd $repo/exp
  ln -s pretrained.pt epoch-99.pt
  popd

  ./conformer_ctc/export.py \
    --epoch 99 \
    --avg 1 \
    --exp-dir $repo/exp \
    --tokens $repo/data/lang_char/tokens.txt \
    --jit 1

  ls -lh $repo/exp

  ls -lh $repo/data/lang_char

  log "Decoding with H on CPU with OpenFst"

  ./conformer_ctc/jit_pretrained_decode_with_H.py \
    --nn-model $repo/exp/cpu_jit.pt \
    --H $repo/data/lang_char/H.fst \
    --tokens $repo/data/lang_char/tokens.txt \
    $repo/test_wavs/0.wav \
    $repo/test_wavs/1.wav \
    $repo/test_wavs/2.wav

  log "Decoding with HL on CPU with OpenFst"

  ./conformer_ctc/jit_pretrained_decode_with_HL.py \
    --nn-model $repo/exp/cpu_jit.pt \
    --HL $repo/data/lang_char/HL.fst \
    --words $repo/data/lang_char/words.txt \
    $repo/test_wavs/0.wav \
    $repo/test_wavs/1.wav \
    $repo/test_wavs/2.wav

  log "Decoding with HLG on CPU with OpenFst"

  ./conformer_ctc/jit_pretrained_decode_with_HLG.py \
    --nn-model $repo/exp/cpu_jit.pt \
    --HLG $repo/data/lang_char/HLG.fst \
    --words $repo/data/lang_char/words.txt \
    $repo/test_wavs/0.wav \
    $repo/test_wavs/1.wav \
    $repo/test_wavs/2.wav

  rm -rf $repo
}

download_test_dev_manifests
test_transducer_stateless3_2022_06_20
test_zipformer_large_2023_10_24
test_zipformer_2023_10_24
test_zipformer_small_2023_10_24
test_transducer_stateless_modified_2022_03_01
test_transducer_stateless_modified_2_2022_03_01
# test_conformer_ctc  # fails for torch 1.13.x and torch 2.0.x
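Note that the decode steps in this script run only when GITHUB_EVENT_NAME is "schedule" or the PR carries the run-decode label, and `set -ex` echoes every command for the CI log. A minimal sketch for reproducing that branch locally, assuming an icefall checkout with k2, kaldifeat, and lhotse installed (the exported values below are illustrative, not set by the workflow itself):

# Run from the icefall repository root; the script cd's into egs/aishell/ASR itself.
export PYTHONPATH=$PWD:$PYTHONPATH
export GITHUB_EVENT_NAME=schedule   # forces the CPU decoding branch
.github/scripts/aishell/ASR/run.sh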
.github/scripts/docker/Dockerfile (new file, vendored, 63 lines)
@ -0,0 +1,63 @@
ARG PYTHON_VERSION=3.8
FROM python:${PYTHON_VERSION}

ARG TORCHAUDIO_VERSION="0.13.0"
ARG TORCH_VERSION="1.13.0"
ARG K2_VERSION="1.24.4.dev20231220"
ARG KALDIFEAT_VERSION="1.25.3.dev20231221"

ARG _K2_VERSION="${K2_VERSION}+cpu.torch${TORCH_VERSION}"
ARG _KALDIFEAT_VERSION="${KALDIFEAT_VERSION}+cpu.torch${TORCH_VERSION}"

RUN apt-get update -y && \
    apt-get install -qq -y \
      ffmpeg \
      git \
      git-lfs \
      graphviz \
      less \
      tree \
      vim \
    && \
    apt-get clean && \
    rm -rf /var/cache/apt/archives /var/lib/apt/lists

LABEL authors="Fangjun Kuang <csukuangfj@gmail.com>"
LABEL k2_version=${_K2_VERSION}
LABEL kaldifeat_version=${_KALDIFEAT_VERSION}
LABEL github_repo="https://github.com/k2-fsa/icefall"

# Install dependencies
RUN pip install --no-cache-dir \
      torch==${TORCH_VERSION} torchaudio==${TORCHAUDIO_VERSION} -f https://download.pytorch.org/whl/cpu/torch_stable.html \
      k2==${_K2_VERSION} -f https://k2-fsa.github.io/k2/cpu.html \
      git+https://github.com/lhotse-speech/lhotse \
      kaldifeat==${_KALDIFEAT_VERSION} -f https://csukuangfj.github.io/kaldifeat/cpu.html \
      dill \
      graphviz \
      kaldi-decoder \
      kaldi_native_io \
      kaldialign \
      kaldifst \
      kaldilm \
      matplotlib \
      multi_quantization \
      numpy \
      onnx \
      onnxmltools \
      onnxruntime \
      pytest \
      "sentencepiece>=0.1.96" \
      six \
      tensorboard \
      typeguard

# RUN git clone https://github.com/k2-fsa/icefall /workspace/icefall && \
#     cd /workspace/icefall && \
#     pip install --no-cache-dir -r requirements.txt
#
# ENV PYTHONPATH /workspace/icefall:$PYTHONPATH
#
# WORKDIR /workspace/icefall
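A sketch of building this image for one cell of the CI matrix; the image tag is illustrative, and the version pairs are taken from generate_build_matrix.py below:

docker build \
  --build-arg PYTHON_VERSION=3.11 \
  --build-arg TORCH_VERSION=2.1.2 \
  --build-arg TORCHAUDIO_VERSION=2.1.2 \
  --build-arg K2_VERSION=1.24.4.dev20231220 \
  --build-arg KALDIFEAT_VERSION=1.25.3.dev20231221 \
  -t icefall-ci:torch2.1.2-python3.11 \
  -f .github/scripts/docker/Dockerfile .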
.github/scripts/docker/generate_build_matrix.py (new executable file, vendored, 79 lines)
@ -0,0 +1,79 @@
#!/usr/bin/env python3
# Copyright      2023  Xiaomi Corp.        (authors: Fangjun Kuang)

import json


def version_gt(a, b):
    # Compare "major.minor" numerically; comparing the raw strings would
    # get e.g. "3.9" vs "3.10" wrong.
    a_major, a_minor = map(int, a.split(".")[:2])
    b_major, b_minor = map(int, b.split(".")[:2])
    if a_major > b_major:
        return True

    if a_major == b_major and a_minor > b_minor:
        return True

    return False


def version_ge(a, b):
    a_major, a_minor = map(int, a.split(".")[:2])
    b_major, b_minor = map(int, b.split(".")[:2])
    if a_major > b_major:
        return True

    if a_major == b_major and a_minor >= b_minor:
        return True

    return False


def get_torchaudio_version(torch_version):
    if torch_version == "1.13.0":
        return "0.13.0"
    elif torch_version == "1.13.1":
        return "0.13.1"
    elif torch_version == "2.0.0":
        return "2.0.1"
    elif torch_version == "2.0.1":
        return "2.0.2"
    else:
        return torch_version


def get_matrix():
    k2_version = "1.24.4.dev20231220"
    kaldifeat_version = "1.25.3.dev20231221"
    version = "1.2"
    python_version = ["3.8", "3.9", "3.10", "3.11"]
    torch_version = ["1.13.0", "1.13.1", "2.0.0", "2.0.1", "2.1.0", "2.1.1", "2.1.2"]

    matrix = []
    for p in python_version:
        for t in torch_version:
            # torchaudio <= 1.13.x supports only python <= 3.10
            if version_gt(p, "3.10") and not version_gt(t, "2.0"):
                continue

            matrix.append(
                {
                    "k2-version": k2_version,
                    "kaldifeat-version": kaldifeat_version,
                    "version": version,
                    "python-version": p,
                    "torch-version": t,
                    "torchaudio-version": get_torchaudio_version(t),
                }
            )
    return matrix


def main():
    matrix = get_matrix()
    print(json.dumps({"include": matrix}))


if __name__ == "__main__":
    main()
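The script prints a JSON object of the form {"include": [...]}, which is the shape GitHub Actions expects for a matrix strategy. A sketch of how a workflow step might consume it (the job and output names are illustrative, not taken from this commit):

# In a setup job:
matrix=$(python3 .github/scripts/docker/generate_build_matrix.py)
echo "matrix=${matrix}" >> "$GITHUB_OUTPUT"
# A later job can then use:
#   strategy:
#     matrix: ${{ fromJson(needs.setup.outputs.matrix) }}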
.github/scripts/librispeech/ASR/run.sh (new executable file, vendored, 1603 lines)
File diff suppressed because it is too large
.github/scripts/multi-zh-hans.sh (new executable file, vendored, 158 lines)
@ -0,0 +1,158 @@
#!/usr/bin/env bash

set -ex

git config --global user.name "k2-fsa"
git config --global user.email "csukuangfj@gmail.com"
git config --global lfs.allowincompletepush true

log() {
  # This function is from espnet
  local fname=${BASH_SOURCE[1]##*/}
  echo -e "$(date '+%Y-%m-%d %H:%M:%S') (${fname}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*"
}

log "pwd: $PWD"

cd egs/multi_zh-hans/ASR

repo_url=https://huggingface.co/zrjin/icefall-asr-multi-zh-hans-zipformer-ctc-streaming-2023-11-05
log "Downloading pre-trained model from $repo_url"
GIT_LFS_SKIP_SMUDGE=1 git clone $repo_url
repo=$(basename $repo_url)

pushd $repo
cd exp/
git lfs pull --include pretrained.pt
rm -fv epoch-20.pt
rm -fv *.onnx
ln -s pretrained.pt epoch-20.pt
cd ../data/lang_bpe_2000
ls -lh
git lfs pull --include L.pt L_disambig.pt Linv.pt bpe.model
git lfs pull --include "*.model"
ls -lh
popd

log "----------------------------------------"
log "Export streaming ONNX CTC models "
log "----------------------------------------"
./zipformer/export-onnx-streaming-ctc.py \
  --exp-dir $repo/exp \
  --tokens $repo/data/lang_bpe_2000/tokens.txt \
  --causal 1 \
  --avg 1 \
  --epoch 20 \
  --use-averaged-model 0 \
  --chunk-size 16 \
  --left-context-frames 128 \
  --use-ctc 1

ls -lh $repo/exp/

log "------------------------------------------------------------"
log "Test exported streaming ONNX CTC models (greedy search) "
log "------------------------------------------------------------"

test_wavs=(
  DEV_T0000000000.wav
  DEV_T0000000001.wav
  DEV_T0000000002.wav
  TEST_MEETING_T0000000113.wav
  TEST_MEETING_T0000000219.wav
  TEST_MEETING_T0000000351.wav
)

for w in ${test_wavs[@]}; do
  ./zipformer/onnx_pretrained-streaming-ctc.py \
    --model-filename $repo/exp/ctc-epoch-20-avg-1-chunk-16-left-128.int8.onnx \
    --tokens $repo/data/lang_bpe_2000/tokens.txt \
    $repo/test_wavs/$w
done

log "Upload onnx CTC models to huggingface"
url=https://huggingface.co/k2-fsa/sherpa-onnx-streaming-zipformer-ctc-multi-zh-hans-2023-12-13
GIT_LFS_SKIP_SMUDGE=1 git clone $url
dst=$(basename $url)
cp -v $repo/exp/ctc*.onnx $dst
cp -v $repo/data/lang_bpe_2000/tokens.txt $dst
cp -v $repo/data/lang_bpe_2000/bpe.model $dst
mkdir -p $dst/test_wavs
cp -v $repo/test_wavs/*.wav $dst/test_wavs
cd $dst
git lfs track "*.onnx" "bpe.model"
ls -lh
file bpe.model
git status
git add .
git commit -m "upload model" && git push https://k2-fsa:${HF_TOKEN}@huggingface.co/k2-fsa/$dst main || true

log "Upload models to https://github.com/k2-fsa/sherpa-onnx"
rm -rf .git
rm -fv .gitattributes
cd ..
tar cjfv $dst.tar.bz2 $dst
ls -lh *.tar.bz2
mv -v $dst.tar.bz2 ../../../

log "----------------------------------------"
log "Export streaming ONNX transducer models "
log "----------------------------------------"

./zipformer/export-onnx-streaming.py \
  --exp-dir $repo/exp \
  --tokens $repo/data/lang_bpe_2000/tokens.txt \
  --causal 1 \
  --avg 1 \
  --epoch 20 \
  --use-averaged-model 0 \
  --chunk-size 16 \
  --left-context-frames 128 \
  --use-ctc 0

ls -lh $repo/exp

log "------------------------------------------------------------"
log "Test exported streaming ONNX transducer models (Python code)"
log "------------------------------------------------------------"

log "test fp32"
./zipformer/onnx_pretrained-streaming.py \
  --encoder-model-filename $repo/exp/encoder-epoch-20-avg-1-chunk-16-left-128.onnx \
  --decoder-model-filename $repo/exp/decoder-epoch-20-avg-1-chunk-16-left-128.onnx \
  --joiner-model-filename $repo/exp/joiner-epoch-20-avg-1-chunk-16-left-128.onnx \
  --tokens $repo/data/lang_bpe_2000/tokens.txt \
  $repo/test_wavs/DEV_T0000000000.wav

log "test int8"
./zipformer/onnx_pretrained-streaming.py \
  --encoder-model-filename $repo/exp/encoder-epoch-20-avg-1-chunk-16-left-128.int8.onnx \
  --decoder-model-filename $repo/exp/decoder-epoch-20-avg-1-chunk-16-left-128.onnx \
  --joiner-model-filename $repo/exp/joiner-epoch-20-avg-1-chunk-16-left-128.int8.onnx \
  --tokens $repo/data/lang_bpe_2000/tokens.txt \
  $repo/test_wavs/DEV_T0000000000.wav

log "Upload onnx transducer models to huggingface"

url=https://huggingface.co/k2-fsa/sherpa-onnx-streaming-zipformer-multi-zh-hans-2023-12-12
GIT_LFS_SKIP_SMUDGE=1 git clone $url
dst=$(basename $url)
cp -v $repo/exp/encoder*.onnx $dst
cp -v $repo/exp/decoder*.onnx $dst
cp -v $repo/exp/joiner*.onnx $dst
cp -v $repo/data/lang_bpe_2000/tokens.txt $dst
cp -v $repo/data/lang_bpe_2000/bpe.model $dst
mkdir -p $dst/test_wavs
cp -v $repo/test_wavs/*.wav $dst/test_wavs
cd $dst
git lfs track "*.onnx" bpe.model
git add .
git commit -m "upload model" && git push https://k2-fsa:${HF_TOKEN}@huggingface.co/k2-fsa/$dst main || true

log "Upload models to https://github.com/k2-fsa/sherpa-onnx"
rm -rf .git
rm -fv .gitattributes
cd ..
tar cjfv $dst.tar.bz2 $dst
ls -lh *.tar.bz2
mv -v $dst.tar.bz2 ../../../
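Note that the upload steps push with a Hugging Face write token taken from HF_TOKEN; the trailing `|| true` keeps CI going if the push fails. A sketch for a local run (the token value is a placeholder, and without a valid token only the push steps fail):

export HF_TOKEN=hf_xxxxxxxx   # placeholder; needed only for the git push steps
.github/scripts/multi-zh-hans.sh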
Deleted file:
@ -1,87 +0,0 @@
#!/usr/bin/env bash

set -e

log() {
  # This function is from espnet
  local fname=${BASH_SOURCE[1]##*/}
  echo -e "$(date '+%Y-%m-%d %H:%M:%S') (${fname}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*"
}

cd egs/aishell/ASR

git lfs install

fbank_url=https://huggingface.co/csukuangfj/aishell-test-dev-manifests
log "Downloading pre-computed fbank from $fbank_url"

git clone https://huggingface.co/csukuangfj/aishell-test-dev-manifests
ln -s $PWD/aishell-test-dev-manifests/data .

repo_url=https://huggingface.co/csukuangfj/icefall-aishell-pruned-transducer-stateless3-2022-06-20
log "Downloading pre-trained model from $repo_url"
git clone $repo_url
repo=$(basename $repo_url)

log "Display test files"
tree $repo/
ls -lh $repo/test_wavs/*.wav

pushd $repo/exp
ln -s pretrained-epoch-29-avg-5-torch-1.10.0.pt pretrained.pt
popd

for sym in 1 2 3; do
  log "Greedy search with --max-sym-per-frame $sym"

  ./pruned_transducer_stateless3/pretrained.py \
    --method greedy_search \
    --max-sym-per-frame $sym \
    --checkpoint $repo/exp/pretrained.pt \
    --lang-dir $repo/data/lang_char \
    $repo/test_wavs/BAC009S0764W0121.wav \
    $repo/test_wavs/BAC009S0764W0122.wav \
    $repo/test_wavs/BAC009S0764W0123.wav
done

for method in modified_beam_search beam_search fast_beam_search; do
  log "$method"

  ./pruned_transducer_stateless3/pretrained.py \
    --method $method \
    --beam-size 4 \
    --checkpoint $repo/exp/pretrained.pt \
    --lang-dir $repo/data/lang_char \
    $repo/test_wavs/BAC009S0764W0121.wav \
    $repo/test_wavs/BAC009S0764W0122.wav \
    $repo/test_wavs/BAC009S0764W0123.wav
done

echo "GITHUB_EVENT_NAME: ${GITHUB_EVENT_NAME}"
echo "GITHUB_EVENT_LABEL_NAME: ${GITHUB_EVENT_LABEL_NAME}"
if [[ x"${GITHUB_EVENT_NAME}" == x"schedule" || x"${GITHUB_EVENT_LABEL_NAME}" == x"run-decode" ]]; then
  mkdir -p pruned_transducer_stateless3/exp
  ln -s $PWD/$repo/exp/pretrained.pt pruned_transducer_stateless3/exp/epoch-999.pt
  ln -s $PWD/$repo/data/lang_char data/

  ls -lh data
  ls -lh pruned_transducer_stateless3/exp

  log "Decoding test and dev"

  # use a small value for decoding with CPU
  max_duration=100

  for method in greedy_search fast_beam_search modified_beam_search; do
    log "Decoding with $method"

    ./pruned_transducer_stateless3/decode.py \
      --decoding-method $method \
      --epoch 999 \
      --avg 1 \
      --max-duration $max_duration \
      --exp-dir pruned_transducer_stateless3/exp
  done

  rm pruned_transducer_stateless3/exp/*.pt
fi
.github/scripts/run-aishell-zipformer-2023-10-24.sh (vendored, deleted, 103 lines)
@ -1,103 +0,0 @@
#!/usr/bin/env bash

set -e

log() {
  # This function is from espnet
  local fname=${BASH_SOURCE[1]##*/}
  echo -e "$(date '+%Y-%m-%d %H:%M:%S') (${fname}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*"
}

cd egs/aishell/ASR

git lfs install

fbank_url=https://huggingface.co/csukuangfj/aishell-test-dev-manifests
log "Downloading pre-computed fbank from $fbank_url"

git clone https://huggingface.co/csukuangfj/aishell-test-dev-manifests
ln -s $PWD/aishell-test-dev-manifests/data .

log "======================="
log "CI testing large model"
repo_url=https://huggingface.co/zrjin/icefall-asr-aishell-zipformer-large-2023-10-24/
log "Downloading pre-trained model from $repo_url"
git clone $repo_url
repo=$(basename $repo_url)

log "Display test files"
tree $repo/
ls -lh $repo/test_wavs/*.wav

for method in modified_beam_search greedy_search fast_beam_search; do
  log "$method"

  ./zipformer/pretrained.py \
    --method $method \
    --context-size 1 \
    --checkpoint $repo/exp/pretrained.pt \
    --tokens $repo/data/lang_char/tokens.txt \
    --num-encoder-layers 2,2,4,5,4,2 \
    --feedforward-dim 512,768,1536,2048,1536,768 \
    --encoder-dim 192,256,512,768,512,256 \
    --encoder-unmasked-dim 192,192,256,320,256,192 \
    $repo/test_wavs/BAC009S0764W0121.wav \
    $repo/test_wavs/BAC009S0764W0122.wav \
    $repo/test_wavs/BAC009S0764W0123.wav
done

log "======================="
log "CI testing medium model"
repo_url=https://huggingface.co/zrjin/icefall-asr-aishell-zipformer-2023-10-24/
log "Downloading pre-trained model from $repo_url"
git clone $repo_url
repo=$(basename $repo_url)

log "Display test files"
tree $repo/
ls -lh $repo/test_wavs/*.wav

for method in modified_beam_search greedy_search fast_beam_search; do
  log "$method"

  ./zipformer/pretrained.py \
    --method $method \
    --context-size 1 \
    --checkpoint $repo/exp/pretrained.pt \
    --tokens $repo/data/lang_char/tokens.txt \
    $repo/test_wavs/BAC009S0764W0121.wav \
    $repo/test_wavs/BAC009S0764W0122.wav \
    $repo/test_wavs/BAC009S0764W0123.wav
done

log "======================="
log "CI testing small model"
repo_url=https://huggingface.co/zrjin/icefall-asr-aishell-zipformer-small-2023-10-24/
log "Downloading pre-trained model from $repo_url"
git clone $repo_url
repo=$(basename $repo_url)

log "Display test files"
tree $repo/
ls -lh $repo/test_wavs/*.wav

for method in modified_beam_search greedy_search fast_beam_search; do
  log "$method"

  ./zipformer/pretrained.py \
    --method $method \
    --context-size 1 \
    --checkpoint $repo/exp/pretrained.pt \
    --tokens $repo/data/lang_char/tokens.txt \
    --num-encoder-layers 2,2,2,2,2,2 \
    --feedforward-dim 512,768,768,768,768,768 \
    --encoder-dim 192,256,256,256,256,256 \
    --encoder-unmasked-dim 192,192,192,192,192,192 \
    $repo/test_wavs/BAC009S0764W0121.wav \
    $repo/test_wavs/BAC009S0764W0122.wav \
    $repo/test_wavs/BAC009S0764W0123.wav
done
@ -26,16 +26,80 @@ git lfs pull --include "data/lang_bpe_500/bpe.model"
 git lfs pull --include "data/lang_bpe_500/tokens.txt"
 git lfs pull --include "exp/jit_script.pt"
 git lfs pull --include "exp/pretrained.pt"
-ln -s pretrained.pt epoch-99.pt
-ls -lh *.pt
+rm epoch-30.pt
+ln -s pretrained.pt epoch-30.pt
+rm *.onnx
+ls -lh
 popd
+
+log "----------------------------------------"
+log "Export ONNX transducer models "
+log "----------------------------------------"
+
+./zipformer/export-onnx.py \
+  --tokens $repo/data/lang_bpe_500/tokens.txt \
+  --use-averaged-model 0 \
+  --epoch 30 \
+  --avg 1 \
+  --exp-dir $repo/exp
+
+ls -lh $repo/exp
+
+log "------------------------------------------------------------"
+log "Test exported ONNX transducer models (Python code) "
+log "------------------------------------------------------------"
+
+log "test fp32"
+./zipformer/onnx_pretrained.py \
+  --encoder-model-filename $repo/exp/encoder-epoch-30-avg-1.onnx \
+  --decoder-model-filename $repo/exp/decoder-epoch-30-avg-1.onnx \
+  --joiner-model-filename $repo/exp/joiner-epoch-30-avg-1.onnx \
+  --tokens $repo/data/lang_bpe_500/tokens.txt \
+  $repo/test_wavs/1089-134686-0001.wav \
+  $repo/test_wavs/1221-135766-0001.wav \
+  $repo/test_wavs/1221-135766-0002.wav
+
+log "test int8"
+./zipformer/onnx_pretrained.py \
+  --encoder-model-filename $repo/exp/encoder-epoch-30-avg-1.int8.onnx \
+  --decoder-model-filename $repo/exp/decoder-epoch-30-avg-1.onnx \
+  --joiner-model-filename $repo/exp/joiner-epoch-30-avg-1.int8.onnx \
+  --tokens $repo/data/lang_bpe_500/tokens.txt \
+  $repo/test_wavs/1089-134686-0001.wav \
+  $repo/test_wavs/1221-135766-0001.wav \
+  $repo/test_wavs/1221-135766-0002.wav
+
+log "Upload models to huggingface"
+git config --global user.name "k2-fsa"
+git config --global user.email "xxx@gmail.com"
+
+url=https://huggingface.co/k2-fsa/sherpa-onnx-zipformer-gigaspeech-2023-12-12
+GIT_LFS_SKIP_SMUDGE=1 git clone $url
+dst=$(basename $url)
+cp -v $repo/exp/*.onnx $dst
+cp -v $repo/data/lang_bpe_500/tokens.txt $dst
+cp -v $repo/data/lang_bpe_500/bpe.model $dst
+mkdir -p $dst/test_wavs
+cp -v $repo/test_wavs/*.wav $dst/test_wavs
+cd $dst
+git lfs track "*.onnx"
+git add .
+git commit -m "upload model" && git push https://k2-fsa:${HF_TOKEN}@huggingface.co/k2-fsa/$dst main || true
+
+log "Upload models to https://github.com/k2-fsa/sherpa-onnx"
+rm -rf .git
+rm -fv .gitattributes
+cd ..
+tar cjfv $dst.tar.bz2 $dst
+ls -lh
+mv -v $dst.tar.bz2 ../../../
+
 log "Export to torchscript model"
 ./zipformer/export.py \
   --exp-dir $repo/exp \
   --use-averaged-model false \
   --tokens $repo/data/lang_bpe_500/tokens.txt \
-  --epoch 99 \
+  --epoch 30 \
   --avg 1 \
   --jit 1

@ -67,7 +131,7 @@ echo "GITHUB_EVENT_NAME: ${GITHUB_EVENT_NAME}"
 echo "GITHUB_EVENT_LABEL_NAME: ${GITHUB_EVENT_LABEL_NAME}"
 if [[ x"${GITHUB_EVENT_NAME}" == x"schedule" || x"${GITHUB_EVENT_LABEL_NAME}" == x"run-decode" ]]; then
   mkdir -p zipformer/exp
-  ln -s $PWD/$repo/exp/pretrained.pt zipformer/exp/epoch-999.pt
+  ln -s $PWD/$repo/exp/pretrained.pt zipformer/exp/epoch-30.pt
   ln -s $PWD/$repo/data/lang_bpe_500 data/

   ls -lh data
@ -83,7 +147,7 @@ if [[ x"${GITHUB_EVENT_NAME}" == x"schedule" || x"${GITHUB_EVENT_LABEL_NAME}" ==

   ./zipformer/decode.py \
     --decoding-method $method \
-    --epoch 999 \
+    --epoch 30 \
     --avg 1 \
     --use-averaged-model 0 \
     --max-duration $max_duration \
Deleted file:
@ -1,122 +0,0 @@
#!/usr/bin/env bash

set -e

log() {
  # This function is from espnet
  local fname=${BASH_SOURCE[1]##*/}
  echo -e "$(date '+%Y-%m-%d %H:%M:%S') (${fname}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*"
}

cd egs/librispeech/ASR

repo_url=https://huggingface.co/Zengwei/icefall-asr-librispeech-conformer-ctc3-2022-11-27

log "Downloading pre-trained model from $repo_url"
GIT_LFS_SKIP_SMUDGE=1 git clone $repo_url
repo=$(basename $repo_url)

log "Display test files"
tree $repo/
ls -lh $repo/test_wavs/*.wav

pushd $repo/exp
git lfs pull --include "data/lang_bpe_500/HLG.pt"
git lfs pull --include "data/lang_bpe_500/L.pt"
git lfs pull --include "data/lang_bpe_500/LG.pt"
git lfs pull --include "data/lang_bpe_500/Linv.pt"
git lfs pull --include "data/lang_bpe_500/bpe.model"
git lfs pull --include "data/lm/G_4_gram.pt"
git lfs pull --include "exp/jit_trace.pt"
git lfs pull --include "exp/pretrained.pt"
ln -s pretrained.pt epoch-99.pt
ls -lh *.pt
popd

log "Decode with models exported by torch.jit.trace()"

for m in ctc-decoding 1best; do
  ./conformer_ctc3/jit_pretrained.py \
    --model-filename $repo/exp/jit_trace.pt \
    --words-file $repo/data/lang_bpe_500/words.txt \
    --HLG $repo/data/lang_bpe_500/HLG.pt \
    --bpe-model $repo/data/lang_bpe_500/bpe.model \
    --G $repo/data/lm/G_4_gram.pt \
    --method $m \
    --sample-rate 16000 \
    $repo/test_wavs/1089-134686-0001.wav \
    $repo/test_wavs/1221-135766-0001.wav \
    $repo/test_wavs/1221-135766-0002.wav
done

log "Export to torchscript model"

./conformer_ctc3/export.py \
  --exp-dir $repo/exp \
  --tokens $repo/data/lang_bpe_500/tokens.txt \
  --jit-trace 1 \
  --epoch 99 \
  --avg 1 \
  --use-averaged-model 0

ls -lh $repo/exp/*.pt

log "Decode with models exported by torch.jit.trace()"

for m in ctc-decoding 1best; do
  ./conformer_ctc3/jit_pretrained.py \
    --model-filename $repo/exp/jit_trace.pt \
    --words-file $repo/data/lang_bpe_500/words.txt \
    --HLG $repo/data/lang_bpe_500/HLG.pt \
    --bpe-model $repo/data/lang_bpe_500/bpe.model \
    --G $repo/data/lm/G_4_gram.pt \
    --method $m \
    --sample-rate 16000 \
    $repo/test_wavs/1089-134686-0001.wav \
    $repo/test_wavs/1221-135766-0001.wav \
    $repo/test_wavs/1221-135766-0002.wav
done

for m in ctc-decoding 1best; do
  ./conformer_ctc3/pretrained.py \
    --checkpoint $repo/exp/pretrained.pt \
    --words-file $repo/data/lang_bpe_500/words.txt \
    --HLG $repo/data/lang_bpe_500/HLG.pt \
    --tokens $repo/data/lang_bpe_500/tokens.txt \
    --G $repo/data/lm/G_4_gram.pt \
    --method $m \
    --sample-rate 16000 \
    $repo/test_wavs/1089-134686-0001.wav \
    $repo/test_wavs/1221-135766-0001.wav \
    $repo/test_wavs/1221-135766-0002.wav
done

echo "GITHUB_EVENT_NAME: ${GITHUB_EVENT_NAME}"
echo "GITHUB_EVENT_LABEL_NAME: ${GITHUB_EVENT_LABEL_NAME}"
if [[ x"${GITHUB_EVENT_NAME}" == x"schedule" || x"${GITHUB_EVENT_LABEL_NAME}" == x"run-decode" ]]; then
  mkdir -p conformer_ctc3/exp
  ln -s $PWD/$repo/exp/pretrained.pt conformer_ctc3/exp/epoch-999.pt
  ln -s $PWD/$repo/data/lang_bpe_500 data/

  ls -lh data
  ls -lh conformer_ctc3/exp

  log "Decoding test-clean and test-other"

  # use a small value for decoding with CPU
  max_duration=100

  for method in ctc-decoding 1best; do
    log "Decoding with $method"
    ./conformer_ctc3/decode.py \
      --epoch 999 \
      --avg 1 \
      --use-averaged-model 0 \
      --exp-dir conformer_ctc3/exp/ \
      --max-duration $max_duration \
      --decoding-method $method \
      --lm-dir data/lm
  done

  rm conformer_ctc3/exp/*.pt
fi
Deleted file:
@ -1,77 +0,0 @@
#!/usr/bin/env bash

set -e

log() {
  # This function is from espnet
  local fname=${BASH_SOURCE[1]##*/}
  echo -e "$(date '+%Y-%m-%d %H:%M:%S') (${fname}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*"
}

cd egs/librispeech/ASR

repo_url=https://huggingface.co/csukuangfj/icefall-asr-librispeech-pruned-transducer-stateless-2022-03-12

log "Downloading pre-trained model from $repo_url"
git lfs install
git clone $repo_url
repo=$(basename $repo_url)

log "Display test files"
tree $repo/
ls -lh $repo/test_wavs/*.wav

for sym in 1 2 3; do
  log "Greedy search with --max-sym-per-frame $sym"

  ./pruned_transducer_stateless/pretrained.py \
    --method greedy_search \
    --max-sym-per-frame $sym \
    --checkpoint $repo/exp/pretrained.pt \
    --tokens $repo/data/lang_bpe_500/tokens.txt \
    $repo/test_wavs/1089-134686-0001.wav \
    $repo/test_wavs/1221-135766-0001.wav \
    $repo/test_wavs/1221-135766-0002.wav
done

for method in fast_beam_search modified_beam_search beam_search; do
  log "$method"

  ./pruned_transducer_stateless/pretrained.py \
    --method $method \
    --beam-size 4 \
    --checkpoint $repo/exp/pretrained.pt \
    --tokens $repo/data/lang_bpe_500/tokens.txt \
    $repo/test_wavs/1089-134686-0001.wav \
    $repo/test_wavs/1221-135766-0001.wav \
    $repo/test_wavs/1221-135766-0002.wav
done

echo "GITHUB_EVENT_NAME: ${GITHUB_EVENT_NAME}"
echo "GITHUB_EVENT_LABEL_NAME: ${GITHUB_EVENT_LABEL_NAME}"
if [[ x"${GITHUB_EVENT_NAME}" == x"schedule" || x"${GITHUB_EVENT_LABEL_NAME}" == x"run-decode" ]]; then
  mkdir -p pruned_transducer_stateless/exp
  ln -s $PWD/$repo/exp/pretrained.pt pruned_transducer_stateless/exp/epoch-999.pt
  ln -s $PWD/$repo/data/lang_bpe_500 data/

  ls -lh data
  ls -lh pruned_transducer_stateless/exp

  log "Decoding test-clean and test-other"

  # use a small value for decoding with CPU
  max_duration=100

  for method in greedy_search fast_beam_search modified_beam_search; do
    log "Decoding with $method"

    ./pruned_transducer_stateless/decode.py \
      --decoding-method $method \
      --epoch 999 \
      --avg 1 \
      --max-duration $max_duration \
      --exp-dir pruned_transducer_stateless/exp
  done

  rm pruned_transducer_stateless/exp/*.pt
fi
Deleted file:
@ -1,86 +0,0 @@
#!/usr/bin/env bash

set -e

log() {
  # This function is from espnet
  local fname=${BASH_SOURCE[1]##*/}
  echo -e "$(date '+%Y-%m-%d %H:%M:%S') (${fname}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*"
}

cd egs/librispeech/ASR

repo_url=https://huggingface.co/csukuangfj/icefall-asr-librispeech-pruned-transducer-stateless2-2022-04-29

log "Downloading pre-trained model from $repo_url"
GIT_LFS_SKIP_SMUDGE=1 git clone $repo_url
repo=$(basename $repo_url)

pushd $repo
git lfs pull --include "data/lang_bpe_500/bpe.model"
git lfs pull --include "exp/pretrained-epoch-38-avg-10.pt"
popd

log "Display test files"
tree $repo/
ls -lh $repo/test_wavs/*.wav

pushd $repo/exp
ln -s pretrained-epoch-38-avg-10.pt pretrained.pt
popd

for sym in 1 2 3; do
  log "Greedy search with --max-sym-per-frame $sym"

  ./pruned_transducer_stateless2/pretrained.py \
    --method greedy_search \
    --max-sym-per-frame $sym \
    --checkpoint $repo/exp/pretrained.pt \
    --tokens $repo/data/lang_bpe_500/tokens.txt \
    $repo/test_wavs/1089-134686-0001.wav \
    $repo/test_wavs/1221-135766-0001.wav \
    $repo/test_wavs/1221-135766-0002.wav
done

for method in modified_beam_search beam_search fast_beam_search; do
  log "$method"

  ./pruned_transducer_stateless2/pretrained.py \
    --method $method \
    --beam-size 4 \
    --checkpoint $repo/exp/pretrained.pt \
    --tokens $repo/data/lang_bpe_500/tokens.txt \
    $repo/test_wavs/1089-134686-0001.wav \
    $repo/test_wavs/1221-135766-0001.wav \
    $repo/test_wavs/1221-135766-0002.wav
done

echo "GITHUB_EVENT_NAME: ${GITHUB_EVENT_NAME}"
echo "GITHUB_EVENT_LABEL_NAME: ${GITHUB_EVENT_LABEL_NAME}"
if [[ x"${GITHUB_EVENT_NAME}" == x"schedule" || x"${GITHUB_EVENT_LABEL_NAME}" == x"run-decode" ]]; then
  mkdir -p pruned_transducer_stateless2/exp
  ln -s $PWD/$repo/exp/pretrained.pt pruned_transducer_stateless2/exp/epoch-999.pt
  ln -s $PWD/$repo/data/lang_bpe_500 data/

  ls -lh data
  ls -lh pruned_transducer_stateless2/exp

  log "Decoding test-clean and test-other"

  # use a small value for decoding with CPU
  max_duration=100

  for method in greedy_search fast_beam_search modified_beam_search; do
    log "Decoding with $method"

    ./pruned_transducer_stateless2/decode.py \
      --decoding-method $method \
      --epoch 999 \
      --avg 1 \
      --max-duration $max_duration \
      --exp-dir pruned_transducer_stateless2/exp
  done

  rm pruned_transducer_stateless2/exp/*.pt
  rm -r data/lang_bpe_500
fi
Deleted file:
@ -1,85 +0,0 @@
#!/usr/bin/env bash

set -e

log() {
  # This function is from espnet
  local fname=${BASH_SOURCE[1]##*/}
  echo -e "$(date '+%Y-%m-%d %H:%M:%S') (${fname}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*"
}

cd egs/librispeech/ASR

repo_url=https://huggingface.co/csukuangfj/icefall-asr-librispeech-pruned-transducer-stateless3-2022-04-29

log "Downloading pre-trained model from $repo_url"
GIT_LFS_SKIP_SMUDGE=1 git clone $repo_url
repo=$(basename $repo_url)
pushd $repo
git lfs pull --include "data/lang_bpe_500/bpe.model"
git lfs pull --include "exp/pretrained-epoch-25-avg-6.pt"
popd

log "Display test files"
tree $repo/
ls -lh $repo/test_wavs/*.wav

pushd $repo/exp
ln -s pretrained-epoch-25-avg-6.pt pretrained.pt
popd

for sym in 1 2 3; do
  log "Greedy search with --max-sym-per-frame $sym"

  ./pruned_transducer_stateless3/pretrained.py \
    --method greedy_search \
    --max-sym-per-frame $sym \
    --checkpoint $repo/exp/pretrained.pt \
    --tokens $repo/data/lang_bpe_500/tokens.txt \
    $repo/test_wavs/1089-134686-0001.wav \
    $repo/test_wavs/1221-135766-0001.wav \
    $repo/test_wavs/1221-135766-0002.wav
done

for method in modified_beam_search beam_search fast_beam_search; do
  log "$method"

  ./pruned_transducer_stateless3/pretrained.py \
    --method $method \
    --beam-size 4 \
    --checkpoint $repo/exp/pretrained.pt \
    --tokens $repo/data/lang_bpe_500/tokens.txt \
    $repo/test_wavs/1089-134686-0001.wav \
    $repo/test_wavs/1221-135766-0001.wav \
    $repo/test_wavs/1221-135766-0002.wav
done

echo "GITHUB_EVENT_NAME: ${GITHUB_EVENT_NAME}"
echo "GITHUB_EVENT_LABEL_NAME: ${GITHUB_EVENT_LABEL_NAME}"
if [[ x"${GITHUB_EVENT_NAME}" == x"schedule" || x"${GITHUB_EVENT_LABEL_NAME}" == x"run-decode" ]]; then
  mkdir -p pruned_transducer_stateless3/exp
  ln -s $PWD/$repo/exp/pretrained.pt pruned_transducer_stateless3/exp/epoch-999.pt
  ln -s $PWD/$repo/data/lang_bpe_500 data/

  ls -lh data
  ls -lh pruned_transducer_stateless3/exp

  log "Decoding test-clean and test-other"

  # use a small value for decoding with CPU
  max_duration=100

  for method in greedy_search fast_beam_search modified_beam_search; do
    log "Decoding with $method"

    ./pruned_transducer_stateless3/decode.py \
      --decoding-method $method \
      --epoch 999 \
      --avg 1 \
      --max-duration $max_duration \
      --exp-dir pruned_transducer_stateless3/exp
  done

  rm pruned_transducer_stateless3/exp/*.pt
  rm -r data/lang_bpe_500
fi
Deleted file:
@ -1,123 +0,0 @@
#!/usr/bin/env bash

set -e

log() {
  # This function is from espnet
  local fname=${BASH_SOURCE[1]##*/}
  echo -e "$(date '+%Y-%m-%d %H:%M:%S') (${fname}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*"
}

cd egs/librispeech/ASR

repo_url=https://huggingface.co/csukuangfj/icefall-asr-librispeech-pruned-transducer-stateless3-2022-05-13

log "Downloading pre-trained model from $repo_url"
git lfs install
git clone $repo_url
repo=$(basename $repo_url)

log "Display test files"
tree $repo/
ls -lh $repo/test_wavs/*.wav

pushd $repo/exp
ln -s pretrained-iter-1224000-avg-14.pt pretrained.pt
ln -s pretrained-iter-1224000-avg-14.pt epoch-99.pt
popd

log "Export to torchscript model"
./pruned_transducer_stateless3/export.py \
  --exp-dir $repo/exp \
  --tokens $repo/data/lang_bpe_500/tokens.txt \
  --epoch 99 \
  --avg 1 \
  --jit 1

./pruned_transducer_stateless3/export.py \
  --exp-dir $repo/exp \
  --tokens $repo/data/lang_bpe_500/tokens.txt \
  --epoch 99 \
  --avg 1 \
  --jit-trace 1

ls -lh $repo/exp/*.pt

log "Decode with models exported by torch.jit.trace()"

./pruned_transducer_stateless3/jit_pretrained.py \
  --bpe-model $repo/data/lang_bpe_500/bpe.model \
  --encoder-model-filename $repo/exp/encoder_jit_trace.pt \
  --decoder-model-filename $repo/exp/decoder_jit_trace.pt \
  --joiner-model-filename $repo/exp/joiner_jit_trace.pt \
  $repo/test_wavs/1089-134686-0001.wav \
  $repo/test_wavs/1221-135766-0001.wav \
  $repo/test_wavs/1221-135766-0002.wav

log "Decode with models exported by torch.jit.script()"

./pruned_transducer_stateless3/jit_pretrained.py \
  --bpe-model $repo/data/lang_bpe_500/bpe.model \
  --encoder-model-filename $repo/exp/encoder_jit_script.pt \
  --decoder-model-filename $repo/exp/decoder_jit_script.pt \
  --joiner-model-filename $repo/exp/joiner_jit_script.pt \
  $repo/test_wavs/1089-134686-0001.wav \
  $repo/test_wavs/1221-135766-0001.wav \
  $repo/test_wavs/1221-135766-0002.wav

for sym in 1 2 3; do
  log "Greedy search with --max-sym-per-frame $sym"

  ./pruned_transducer_stateless3/pretrained.py \
    --method greedy_search \
    --max-sym-per-frame $sym \
    --checkpoint $repo/exp/pretrained.pt \
    --tokens $repo/data/lang_bpe_500/tokens.txt \
    $repo/test_wavs/1089-134686-0001.wav \
    $repo/test_wavs/1221-135766-0001.wav \
    $repo/test_wavs/1221-135766-0002.wav
done

for method in modified_beam_search beam_search fast_beam_search; do
  log "$method"

  ./pruned_transducer_stateless3/pretrained.py \
    --method $method \
    --beam-size 4 \
    --checkpoint $repo/exp/pretrained.pt \
    --tokens $repo/data/lang_bpe_500/tokens.txt \
    $repo/test_wavs/1089-134686-0001.wav \
    $repo/test_wavs/1221-135766-0001.wav \
    $repo/test_wavs/1221-135766-0002.wav
done

echo "GITHUB_EVENT_NAME: ${GITHUB_EVENT_NAME}"
echo "GITHUB_EVENT_LABEL_NAME: ${GITHUB_EVENT_LABEL_NAME}"
if [[ x"${GITHUB_EVENT_NAME}" == x"schedule" || x"${GITHUB_EVENT_LABEL_NAME}" == x"run-decode" ]]; then
  mkdir -p pruned_transducer_stateless3/exp
  ln -s $PWD/$repo/exp/pretrained.pt pruned_transducer_stateless3/exp/epoch-999.pt
  ln -s $PWD/$repo/data/lang_bpe_500 data/

  ls -lh data
  ls -lh pruned_transducer_stateless3/exp

  log "Decoding test-clean and test-other"

  # use a small value for decoding with CPU
  max_duration=100

  for method in greedy_search fast_beam_search modified_beam_search; do
    log "Decoding with $method"

    ./pruned_transducer_stateless3/decode.py \
      --decoding-method $method \
      --epoch 999 \
      --avg 1 \
      --max-duration $max_duration \
      --exp-dir pruned_transducer_stateless3/exp
  done

  rm pruned_transducer_stateless3/exp/*.pt
fi
@ -1,100 +0,0 @@
#!/usr/bin/env bash

set -e

log() {
  # This function is from espnet
  local fname=${BASH_SOURCE[1]##*/}
  echo -e "$(date '+%Y-%m-%d %H:%M:%S') (${fname}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*"
}

cd egs/librispeech/ASR

repo_url=https://huggingface.co/csukuangfj/icefall-asr-librispeech-pruned-transducer-stateless5-2022-05-13

log "Downloading pre-trained model from $repo_url"
git lfs install
git clone $repo_url
repo=$(basename $repo_url)

log "Display test files"
tree $repo/
ls -lh $repo/test_wavs/*.wav

pushd $repo/exp
ln -s pretrained-epoch-39-avg-7.pt pretrained.pt
popd

for sym in 1 2 3; do
  log "Greedy search with --max-sym-per-frame $sym"

  ./pruned_transducer_stateless5/pretrained.py \
    --method greedy_search \
    --max-sym-per-frame $sym \
    --checkpoint $repo/exp/pretrained.pt \
    --tokens $repo/data/lang_bpe_500/tokens.txt \
    --num-encoder-layers 18 \
    --dim-feedforward 2048 \
    --nhead 8 \
    --encoder-dim 512 \
    --decoder-dim 512 \
    --joiner-dim 512 \
    $repo/test_wavs/1089-134686-0001.wav \
    $repo/test_wavs/1221-135766-0001.wav \
    $repo/test_wavs/1221-135766-0002.wav
done
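
# Note: the --num-encoder-layers 18 ... --joiner-dim 512 flags restate the
# (non-default) configuration this stateless5 checkpoint was trained with;
# they have to match the checkpoint, or loading its state_dict would fail
# with shape mismatches.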

for method in modified_beam_search beam_search fast_beam_search; do
  log "$method"

  ./pruned_transducer_stateless5/pretrained.py \
    --method $method \
    --beam-size 4 \
    --checkpoint $repo/exp/pretrained.pt \
    --tokens $repo/data/lang_bpe_500/tokens.txt \
    $repo/test_wavs/1089-134686-0001.wav \
    $repo/test_wavs/1221-135766-0001.wav \
    $repo/test_wavs/1221-135766-0002.wav \
    --num-encoder-layers 18 \
    --dim-feedforward 2048 \
    --nhead 8 \
    --encoder-dim 512 \
    --decoder-dim 512 \
    --joiner-dim 512
done

echo "GITHUB_EVENT_NAME: ${GITHUB_EVENT_NAME}"
echo "GITHUB_EVENT_LABEL_NAME: ${GITHUB_EVENT_LABEL_NAME}"
if [[ x"${GITHUB_EVENT_NAME}" == x"schedule" || x"${GITHUB_EVENT_LABEL_NAME}" == x"run-decode" ]]; then
  mkdir -p pruned_transducer_stateless5/exp
  ln -s $PWD/$repo/exp/pretrained-epoch-39-avg-7.pt pruned_transducer_stateless5/exp/epoch-999.pt
  ln -s $PWD/$repo/data/lang_bpe_500 data/

  ls -lh data
  ls -lh pruned_transducer_stateless5/exp

  log "Decoding test-clean and test-other"

  # use a small value for decoding with CPU
  max_duration=100

  for method in greedy_search fast_beam_search modified_beam_search; do
    log "Decoding with $method"

    ./pruned_transducer_stateless5/decode.py \
      --decoding-method $method \
      --use-averaged-model 0 \
      --epoch 999 \
      --avg 1 \
      --max-duration $max_duration \
      --exp-dir pruned_transducer_stateless5/exp \
      --num-encoder-layers 18 \
      --dim-feedforward 2048 \
      --nhead 8 \
      --encoder-dim 512 \
      --decoder-dim 512 \
      --joiner-dim 512
  done

  rm pruned_transducer_stateless5/exp/*.pt
fi
@ -1,106 +0,0 @@
#!/usr/bin/env bash

set -e

log() {
  # This function is from espnet
  local fname=${BASH_SOURCE[1]##*/}
  echo -e "$(date '+%Y-%m-%d %H:%M:%S') (${fname}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*"
}

cd egs/librispeech/ASR

repo_url=https://huggingface.co/csukuangfj/icefall-asr-librispeech-pruned-transducer-stateless7-2022-11-11

log "Downloading pre-trained model from $repo_url"
git lfs install
GIT_LFS_SKIP_SMUDGE=1 git clone $repo_url
repo=$(basename $repo_url)

log "Display test files"
tree $repo/
ls -lh $repo/test_wavs/*.wav

pushd $repo/exp
git lfs pull --include "data/lang_bpe_500/bpe.model"
git lfs pull --include "exp/cpu_jit.pt"
git lfs pull --include "exp/pretrained.pt"
ln -s pretrained.pt epoch-99.pt
ls -lh *.pt
popd
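
# Note: GIT_LFS_SKIP_SMUDGE=1 clones only the LFS pointer files, and the
# selective "git lfs pull --include <pattern>" calls above then fetch just
# the blobs this test needs, keeping CI downloads small. The same pattern in
# a minimal sketch (repo_url stands for whatever model repo is being tested):
#
#   GIT_LFS_SKIP_SMUDGE=1 git clone $repo_url
#   git -C $(basename $repo_url) lfs pull --include "exp/pretrained.pt"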

log "Export to torchscript model"
./pruned_transducer_stateless7/export.py \
  --exp-dir $repo/exp \
  --use-averaged-model false \
  --tokens $repo/data/lang_bpe_500/tokens.txt \
  --epoch 99 \
  --avg 1 \
  --jit 1

ls -lh $repo/exp/*.pt

log "Decode with models exported by torch.jit.script()"

./pruned_transducer_stateless7/jit_pretrained.py \
  --bpe-model $repo/data/lang_bpe_500/bpe.model \
  --nn-model-filename $repo/exp/cpu_jit.pt \
  $repo/test_wavs/1089-134686-0001.wav \
  $repo/test_wavs/1221-135766-0001.wav \
  $repo/test_wavs/1221-135766-0002.wav

for sym in 1 2 3; do
  log "Greedy search with --max-sym-per-frame $sym"

  ./pruned_transducer_stateless7/pretrained.py \
    --method greedy_search \
    --max-sym-per-frame $sym \
    --checkpoint $repo/exp/pretrained.pt \
    --tokens $repo/data/lang_bpe_500/tokens.txt \
    $repo/test_wavs/1089-134686-0001.wav \
    $repo/test_wavs/1221-135766-0001.wav \
    $repo/test_wavs/1221-135766-0002.wav
done

for method in modified_beam_search beam_search fast_beam_search; do
  log "$method"

  ./pruned_transducer_stateless7/pretrained.py \
    --method $method \
    --beam-size 4 \
    --checkpoint $repo/exp/pretrained.pt \
    --tokens $repo/data/lang_bpe_500/tokens.txt \
    $repo/test_wavs/1089-134686-0001.wav \
    $repo/test_wavs/1221-135766-0001.wav \
    $repo/test_wavs/1221-135766-0002.wav
done

echo "GITHUB_EVENT_NAME: ${GITHUB_EVENT_NAME}"
echo "GITHUB_EVENT_LABEL_NAME: ${GITHUB_EVENT_LABEL_NAME}"
if [[ x"${GITHUB_EVENT_NAME}" == x"schedule" || x"${GITHUB_EVENT_LABEL_NAME}" == x"run-decode" ]]; then
  mkdir -p pruned_transducer_stateless7/exp
  ln -s $PWD/$repo/exp/pretrained.pt pruned_transducer_stateless7/exp/epoch-999.pt
  ln -s $PWD/$repo/data/lang_bpe_500 data/

  ls -lh data
  ls -lh pruned_transducer_stateless7/exp

  log "Decoding test-clean and test-other"

  # use a small value for decoding with CPU
  max_duration=100

  for method in greedy_search fast_beam_search modified_beam_search; do
    log "Decoding with $method"

    ./pruned_transducer_stateless7/decode.py \
      --decoding-method $method \
      --epoch 999 \
      --avg 1 \
      --use-averaged-model 0 \
      --max-duration $max_duration \
      --exp-dir pruned_transducer_stateless7/exp
  done

  rm pruned_transducer_stateless7/exp/*.pt
fi
@ -1,150 +0,0 @@
#!/usr/bin/env bash

set -e

log() {
  # This function is from espnet
  local fname=${BASH_SOURCE[1]##*/}
  echo -e "$(date '+%Y-%m-%d %H:%M:%S') (${fname}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*"
}

cd egs/librispeech/ASR

repo_url=https://huggingface.co/Zengwei/icefall-asr-librispeech-pruned-transducer-stateless7-ctc-2022-12-01

log "Downloading pre-trained model from $repo_url"
GIT_LFS_SKIP_SMUDGE=1 git clone $repo_url
repo=$(basename $repo_url)

log "Display test files"
tree $repo/
ls -lh $repo/test_wavs/*.wav

pushd $repo/exp
git lfs pull --include "data/lang_bpe_500/HLG.pt"
git lfs pull --include "data/lang_bpe_500/L.pt"
git lfs pull --include "data/lang_bpe_500/LG.pt"
git lfs pull --include "data/lang_bpe_500/Linv.pt"
git lfs pull --include "data/lang_bpe_500/bpe.model"
git lfs pull --include "data/lm/G_4_gram.pt"
git lfs pull --include "exp/cpu_jit.pt"
git lfs pull --include "exp/pretrained.pt"
ln -s pretrained.pt epoch-99.pt
ls -lh *.pt
popd

log "Export to torchscript model"
./pruned_transducer_stateless7_ctc/export.py \
  --exp-dir $repo/exp \
  --use-averaged-model false \
  --tokens $repo/data/lang_bpe_500/tokens.txt \
  --epoch 99 \
  --avg 1 \
  --jit 1

ls -lh $repo/exp/*.pt

log "Decode with models exported by torch.jit.script()"

./pruned_transducer_stateless7_ctc/jit_pretrained.py \
  --bpe-model $repo/data/lang_bpe_500/bpe.model \
  --nn-model-filename $repo/exp/cpu_jit.pt \
  $repo/test_wavs/1089-134686-0001.wav \
  $repo/test_wavs/1221-135766-0001.wav \
  $repo/test_wavs/1221-135766-0002.wav

for m in ctc-decoding 1best; do
  ./pruned_transducer_stateless7_ctc/jit_pretrained_ctc.py \
    --model-filename $repo/exp/cpu_jit.pt \
    --words-file $repo/data/lang_bpe_500/words.txt \
    --HLG $repo/data/lang_bpe_500/HLG.pt \
    --bpe-model $repo/data/lang_bpe_500/bpe.model \
    --G $repo/data/lm/G_4_gram.pt \
    --method $m \
    --sample-rate 16000 \
    $repo/test_wavs/1089-134686-0001.wav \
    $repo/test_wavs/1221-135766-0001.wav \
    $repo/test_wavs/1221-135766-0002.wav
done
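
# Note: "ctc-decoding" decodes the CTC head over BPE tokens alone, while
# "1best" composes the posteriors with the HLG graph, which is why it needs
# words.txt and HLG.pt; --G (the 4-gram LM) is presumably only consulted by
# the LM-rescoring methods and is passed here so one command line covers both.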

for sym in 1 2 3; do
  log "Greedy search with --max-sym-per-frame $sym"

  ./pruned_transducer_stateless7_ctc/pretrained.py \
    --method greedy_search \
    --max-sym-per-frame $sym \
    --checkpoint $repo/exp/pretrained.pt \
    --tokens $repo/data/lang_bpe_500/tokens.txt \
    $repo/test_wavs/1089-134686-0001.wav \
    $repo/test_wavs/1221-135766-0001.wav \
    $repo/test_wavs/1221-135766-0002.wav
done

for method in modified_beam_search beam_search fast_beam_search; do
  log "$method"

  ./pruned_transducer_stateless7_ctc/pretrained.py \
    --method $method \
    --beam-size 4 \
    --checkpoint $repo/exp/pretrained.pt \
    --tokens $repo/data/lang_bpe_500/tokens.txt \
    $repo/test_wavs/1089-134686-0001.wav \
    $repo/test_wavs/1221-135766-0001.wav \
    $repo/test_wavs/1221-135766-0002.wav
done

for m in ctc-decoding 1best; do
  ./pruned_transducer_stateless7_ctc/pretrained_ctc.py \
    --checkpoint $repo/exp/pretrained.pt \
    --words-file $repo/data/lang_bpe_500/words.txt \
    --HLG $repo/data/lang_bpe_500/HLG.pt \
    --bpe-model $repo/data/lang_bpe_500/bpe.model \
    --G $repo/data/lm/G_4_gram.pt \
    --method $m \
    --sample-rate 16000 \
    $repo/test_wavs/1089-134686-0001.wav \
    $repo/test_wavs/1221-135766-0001.wav \
    $repo/test_wavs/1221-135766-0002.wav
done

echo "GITHUB_EVENT_NAME: ${GITHUB_EVENT_NAME}"
echo "GITHUB_EVENT_LABEL_NAME: ${GITHUB_EVENT_LABEL_NAME}"
if [[ x"${GITHUB_EVENT_NAME}" == x"schedule" || x"${GITHUB_EVENT_LABEL_NAME}" == x"run-decode" ]]; then
  mkdir -p pruned_transducer_stateless7_ctc/exp
  ln -s $PWD/$repo/exp/pretrained.pt pruned_transducer_stateless7_ctc/exp/epoch-999.pt
  ln -s $PWD/$repo/data/lang_bpe_500 data/

  ls -lh data
  ls -lh pruned_transducer_stateless7_ctc/exp

  log "Decoding test-clean and test-other"

  # use a small value for decoding with CPU
  max_duration=100

  for method in greedy_search fast_beam_search modified_beam_search; do
    log "Decoding with $method"

    ./pruned_transducer_stateless7_ctc/decode.py \
      --decoding-method $method \
      --epoch 999 \
      --avg 1 \
      --use-averaged-model 0 \
      --max-duration $max_duration \
      --exp-dir pruned_transducer_stateless7_ctc/exp
  done

  for m in ctc-decoding 1best; do
    ./pruned_transducer_stateless7_ctc/ctc_decode.py \
      --epoch 999 \
      --avg 1 \
      --exp-dir ./pruned_transducer_stateless7_ctc/exp \
      --max-duration $max_duration \
      --use-averaged-model 0 \
      --decoding-method $m \
      --hlg-scale 0.6 \
      --lm-dir data/lm
  done

  rm pruned_transducer_stateless7_ctc/exp/*.pt
fi
@ -1,147 +0,0 @@
#!/usr/bin/env bash

set -e

log() {
  # This function is from espnet
  local fname=${BASH_SOURCE[1]##*/}
  echo -e "$(date '+%Y-%m-%d %H:%M:%S') (${fname}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*"
}

cd egs/librispeech/ASR

repo_url=https://huggingface.co/yfyeung/icefall-asr-librispeech-pruned_transducer_stateless7_ctc_bs-2023-01-29

log "Downloading pre-trained model from $repo_url"
GIT_LFS_SKIP_SMUDGE=1 git clone $repo_url
repo=$(basename $repo_url)

log "Display test files"
tree $repo/
ls -lh $repo/test_wavs/*.wav

pushd $repo/exp
git lfs pull --include "data/lang_bpe_500/L.pt"
git lfs pull --include "data/lang_bpe_500/LG.pt"
git lfs pull --include "data/lang_bpe_500/HLG.pt"
git lfs pull --include "data/lang_bpe_500/Linv.pt"
git lfs pull --include "data/lang_bpe_500/bpe.model"
git lfs pull --include "exp/cpu_jit.pt"
git lfs pull --include "exp/pretrained.pt"
ln -s pretrained.pt epoch-99.pt
ls -lh *.pt
popd

log "Export to torchscript model"
./pruned_transducer_stateless7_ctc_bs/export.py \
  --exp-dir $repo/exp \
  --use-averaged-model false \
  --tokens $repo/data/lang_bpe_500/tokens.txt \
  --epoch 99 \
  --avg 1 \
  --jit 1

ls -lh $repo/exp/*.pt

log "Decode with models exported by torch.jit.script()"

./pruned_transducer_stateless7_ctc_bs/jit_pretrained.py \
  --bpe-model $repo/data/lang_bpe_500/bpe.model \
  --nn-model-filename $repo/exp/cpu_jit.pt \
  $repo/test_wavs/1089-134686-0001.wav \
  $repo/test_wavs/1221-135766-0001.wav \
  $repo/test_wavs/1221-135766-0002.wav

for m in ctc-decoding 1best; do
  ./pruned_transducer_stateless7_ctc_bs/jit_pretrained_ctc.py \
    --model-filename $repo/exp/cpu_jit.pt \
    --words-file $repo/data/lang_bpe_500/words.txt \
    --HLG $repo/data/lang_bpe_500/HLG.pt \
    --bpe-model $repo/data/lang_bpe_500/bpe.model \
    --method $m \
    --sample-rate 16000 \
    $repo/test_wavs/1089-134686-0001.wav \
    $repo/test_wavs/1221-135766-0001.wav \
    $repo/test_wavs/1221-135766-0002.wav
done

for sym in 1 2 3; do
  log "Greedy search with --max-sym-per-frame $sym"

  ./pruned_transducer_stateless7_ctc_bs/pretrained.py \
    --method greedy_search \
    --max-sym-per-frame $sym \
    --checkpoint $repo/exp/pretrained.pt \
    --tokens $repo/data/lang_bpe_500/tokens.txt \
    $repo/test_wavs/1089-134686-0001.wav \
    $repo/test_wavs/1221-135766-0001.wav \
    $repo/test_wavs/1221-135766-0002.wav
done

for method in modified_beam_search beam_search fast_beam_search; do
  log "$method"

  ./pruned_transducer_stateless7_ctc_bs/pretrained.py \
    --method $method \
    --beam-size 4 \
    --checkpoint $repo/exp/pretrained.pt \
    --tokens $repo/data/lang_bpe_500/tokens.txt \
    $repo/test_wavs/1089-134686-0001.wav \
    $repo/test_wavs/1221-135766-0001.wav \
    $repo/test_wavs/1221-135766-0002.wav
done

for m in ctc-decoding 1best; do
  ./pruned_transducer_stateless7_ctc_bs/pretrained_ctc.py \
    --checkpoint $repo/exp/pretrained.pt \
    --words-file $repo/data/lang_bpe_500/words.txt \
    --HLG $repo/data/lang_bpe_500/HLG.pt \
    --bpe-model $repo/data/lang_bpe_500/bpe.model \
    --method $m \
    --sample-rate 16000 \
    $repo/test_wavs/1089-134686-0001.wav \
    $repo/test_wavs/1221-135766-0001.wav \
    $repo/test_wavs/1221-135766-0002.wav
done

echo "GITHUB_EVENT_NAME: ${GITHUB_EVENT_NAME}"
echo "GITHUB_EVENT_LABEL_NAME: ${GITHUB_EVENT_LABEL_NAME}"

if [[ x"${GITHUB_EVENT_NAME}" == x"schedule" || x"${GITHUB_EVENT_LABEL_NAME}" == x"run-decode" ]]; then
  mkdir -p pruned_transducer_stateless7_ctc_bs/exp
  ln -s $PWD/$repo/exp/pretrained.pt pruned_transducer_stateless7_ctc_bs/exp/epoch-999.pt
  ln -s $PWD/$repo/data/lang_bpe_500 data/

  ls -lh data
  ls -lh pruned_transducer_stateless7_ctc_bs/exp

  log "Decoding test-clean and test-other"

  # use a small value for decoding with CPU
  max_duration=100

  for method in greedy_search fast_beam_search modified_beam_search; do
    log "Decoding with $method"

    ./pruned_transducer_stateless7_ctc_bs/decode.py \
      --decoding-method $method \
      --epoch 999 \
      --avg 1 \
      --use-averaged-model 0 \
      --max-duration $max_duration \
      --exp-dir pruned_transducer_stateless7_ctc_bs/exp
  done

  for m in ctc-decoding 1best; do
    ./pruned_transducer_stateless7_ctc_bs/ctc_decode.py \
      --epoch 999 \
      --avg 1 \
      --exp-dir ./pruned_transducer_stateless7_ctc_bs/exp \
      --max-duration $max_duration \
      --use-averaged-model 0 \
      --decoding-method $m \
      --hlg-scale 0.6
  done
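
  # Note: --hlg-scale scales the HLG graph scores against the network's CTC
  # posteriors during decoding; 0.6 is simply the value these CI tests pin,
  # not necessarily the best setting for a given model.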

  rm pruned_transducer_stateless7_ctc_bs/exp/*.pt
fi
@ -1,148 +0,0 @@
#!/usr/bin/env bash

set -e

log() {
  # This function is from espnet
  local fname=${BASH_SOURCE[1]##*/}
  echo -e "$(date '+%Y-%m-%d %H:%M:%S') (${fname}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*"
}

cd egs/librispeech/ASR

repo_url=https://huggingface.co/Zengwei/icefall-asr-librispeech-pruned-transducer-stateless7-streaming-2022-12-29

log "Downloading pre-trained model from $repo_url"
git lfs install
GIT_LFS_SKIP_SMUDGE=1 git clone $repo_url
repo=$(basename $repo_url)

log "Display test files"
tree $repo/
ls -lh $repo/test_wavs/*.wav

pushd $repo
git lfs pull --include "data/lang_bpe_500/bpe.model"
git lfs pull --include "exp/cpu_jit.pt"
git lfs pull --include "exp/pretrained.pt"
git lfs pull --include "exp/encoder_jit_trace.pt"
git lfs pull --include "exp/decoder_jit_trace.pt"
git lfs pull --include "exp/joiner_jit_trace.pt"
cd exp
ln -s pretrained.pt epoch-99.pt
ls -lh *.pt
popd

log "Export to torchscript model"
./pruned_transducer_stateless7_streaming/export.py \
  --exp-dir $repo/exp \
  --use-averaged-model false \
  --tokens $repo/data/lang_bpe_500/tokens.txt \
  --decode-chunk-len 32 \
  --epoch 99 \
  --avg 1 \
  --jit 1

ls -lh $repo/exp/*.pt

log "Decode with models exported by torch.jit.script()"

./pruned_transducer_stateless7_streaming/jit_pretrained.py \
  --bpe-model $repo/data/lang_bpe_500/bpe.model \
  --nn-model-filename $repo/exp/cpu_jit.pt \
  --decode-chunk-len 32 \
  $repo/test_wavs/1089-134686-0001.wav \
  $repo/test_wavs/1221-135766-0001.wav \
  $repo/test_wavs/1221-135766-0002.wav

log "Export to torchscript model by torch.jit.trace()"
./pruned_transducer_stateless7_streaming/jit_trace_export.py \
  --exp-dir $repo/exp \
  --use-averaged-model false \
  --bpe-model $repo/data/lang_bpe_500/bpe.model \
  --decode-chunk-len 32 \
  --epoch 99 \
  --avg 1

log "Decode with models exported by torch.jit.trace()"

./pruned_transducer_stateless7_streaming/jit_trace_pretrained.py \
  --bpe-model $repo/data/lang_bpe_500/bpe.model \
  --encoder-model-filename $repo/exp/encoder_jit_trace.pt \
  --decoder-model-filename $repo/exp/decoder_jit_trace.pt \
  --joiner-model-filename $repo/exp/joiner_jit_trace.pt \
  --decode-chunk-len 32 \
  $repo/test_wavs/1089-134686-0001.wav
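
# Note: --decode-chunk-len 32 makes the streaming encoder consume audio in
# 32-frame chunks; passing the same value at export time and at decode time
# keeps the exported graphs and the runtime agreed on the chunking.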

for sym in 1 2 3; do
  log "Greedy search with --max-sym-per-frame $sym"

  ./pruned_transducer_stateless7_streaming/pretrained.py \
    --method greedy_search \
    --max-sym-per-frame $sym \
    --checkpoint $repo/exp/pretrained.pt \
    --tokens $repo/data/lang_bpe_500/tokens.txt \
    --decode-chunk-len 32 \
    $repo/test_wavs/1089-134686-0001.wav \
    $repo/test_wavs/1221-135766-0001.wav \
    $repo/test_wavs/1221-135766-0002.wav
done

for method in modified_beam_search beam_search fast_beam_search; do
  log "$method"

  ./pruned_transducer_stateless7_streaming/pretrained.py \
    --method $method \
    --beam-size 4 \
    --checkpoint $repo/exp/pretrained.pt \
    --tokens $repo/data/lang_bpe_500/tokens.txt \
    --decode-chunk-len 32 \
    $repo/test_wavs/1089-134686-0001.wav \
    $repo/test_wavs/1221-135766-0001.wav \
    $repo/test_wavs/1221-135766-0002.wav
done

echo "GITHUB_EVENT_NAME: ${GITHUB_EVENT_NAME}"
echo "GITHUB_EVENT_LABEL_NAME: ${GITHUB_EVENT_LABEL_NAME}"
if [[ x"${GITHUB_EVENT_NAME}" == x"schedule" || x"${GITHUB_EVENT_LABEL_NAME}" == x"run-decode" ]]; then
  mkdir -p pruned_transducer_stateless7_streaming/exp
  ln -s $PWD/$repo/exp/pretrained.pt pruned_transducer_stateless7_streaming/exp/epoch-999.pt
  ln -s $PWD/$repo/data/lang_bpe_500 data/

  ls -lh data
  ls -lh pruned_transducer_stateless7_streaming/exp

  log "Decoding test-clean and test-other"

  # use a small value for decoding with CPU
  max_duration=100
  num_decode_stream=200

  for method in greedy_search fast_beam_search modified_beam_search; do
    log "Decoding with $method"

    ./pruned_transducer_stateless7_streaming/decode.py \
      --decoding-method $method \
      --epoch 999 \
      --avg 1 \
      --use-averaged-model 0 \
      --max-duration $max_duration \
      --decode-chunk-len 32 \
      --exp-dir pruned_transducer_stateless7_streaming/exp
  done

  for method in greedy_search fast_beam_search modified_beam_search; do
    log "Decoding with $method"

    ./pruned_transducer_stateless7_streaming/streaming_decode.py \
      --decoding-method $method \
      --epoch 999 \
      --avg 1 \
      --use-averaged-model 0 \
      --decode-chunk-len 32 \
      --num-decode-streams $num_decode_stream \
      --exp-dir pruned_transducer_stateless7_streaming/exp
  done

  rm pruned_transducer_stateless7_streaming/exp/*.pt
fi
@ -1,115 +0,0 @@
#!/usr/bin/env bash

set -e

log() {
  # This function is from espnet
  local fname=${BASH_SOURCE[1]##*/}
  echo -e "$(date '+%Y-%m-%d %H:%M:%S') (${fname}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*"
}

cd egs/librispeech/ASR

repo_url=https://huggingface.co/csukuangfj/icefall-asr-librispeech-pruned-transducer-stateless8-2022-11-14

log "Downloading pre-trained model from $repo_url"
git lfs install
GIT_LFS_SKIP_SMUDGE=1 git clone $repo_url
repo=$(basename $repo_url)

log "Display test files"
tree $repo/
ls -lh $repo/test_wavs/*.wav

pushd $repo/exp
git lfs pull --include "data/lang_bpe_500/bpe.model"
git lfs pull --include "exp/cpu_jit.pt"
git lfs pull --include "exp/pretrained.pt"
ln -s pretrained.pt epoch-99.pt
ls -lh *.pt
popd

log "Decode with models exported by torch.jit.script()"

./pruned_transducer_stateless8/jit_pretrained.py \
  --bpe-model $repo/data/lang_bpe_500/bpe.model \
  --nn-model-filename $repo/exp/cpu_jit.pt \
  $repo/test_wavs/1089-134686-0001.wav \
  $repo/test_wavs/1221-135766-0001.wav \
  $repo/test_wavs/1221-135766-0002.wav
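
# Note: the decode above runs on the cpu_jit.pt shipped in the Hugging Face
# repo; the export below regenerates cpu_jit.pt from pretrained.pt and the
# identical decode is repeated, so both the released artifact and a fresh
# export get covered.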

log "Export to torchscript model"
./pruned_transducer_stateless8/export.py \
  --exp-dir $repo/exp \
  --tokens $repo/data/lang_bpe_500/tokens.txt \
  --use-averaged-model false \
  --epoch 99 \
  --avg 1 \
  --jit 1

ls -lh $repo/exp/*.pt

log "Decode with models exported by torch.jit.script()"

./pruned_transducer_stateless8/jit_pretrained.py \
  --bpe-model $repo/data/lang_bpe_500/bpe.model \
  --nn-model-filename $repo/exp/cpu_jit.pt \
  $repo/test_wavs/1089-134686-0001.wav \
  $repo/test_wavs/1221-135766-0001.wav \
  $repo/test_wavs/1221-135766-0002.wav

for sym in 1 2 3; do
  log "Greedy search with --max-sym-per-frame $sym"

  ./pruned_transducer_stateless8/pretrained.py \
    --method greedy_search \
    --max-sym-per-frame $sym \
    --checkpoint $repo/exp/pretrained.pt \
    --tokens $repo/data/lang_bpe_500/tokens.txt \
    $repo/test_wavs/1089-134686-0001.wav \
    $repo/test_wavs/1221-135766-0001.wav \
    $repo/test_wavs/1221-135766-0002.wav
done

for method in modified_beam_search beam_search fast_beam_search; do
  log "$method"

  ./pruned_transducer_stateless8/pretrained.py \
    --method $method \
    --beam-size 4 \
    --checkpoint $repo/exp/pretrained.pt \
    --tokens $repo/data/lang_bpe_500/tokens.txt \
    $repo/test_wavs/1089-134686-0001.wav \
    $repo/test_wavs/1221-135766-0001.wav \
    $repo/test_wavs/1221-135766-0002.wav
done

echo "GITHUB_EVENT_NAME: ${GITHUB_EVENT_NAME}"
echo "GITHUB_EVENT_LABEL_NAME: ${GITHUB_EVENT_LABEL_NAME}"
if [[ x"${GITHUB_EVENT_NAME}" == x"schedule" || x"${GITHUB_EVENT_LABEL_NAME}" == x"run-decode" ]]; then
  mkdir -p pruned_transducer_stateless8/exp
  ln -s $PWD/$repo/exp/pretrained.pt pruned_transducer_stateless8/exp/epoch-999.pt
  ln -s $PWD/$repo/data/lang_bpe_500 data/

  ls -lh data
  ls -lh pruned_transducer_stateless8/exp

  log "Decoding test-clean and test-other"

  # use a small value for decoding with CPU
  max_duration=100

  for method in greedy_search fast_beam_search modified_beam_search; do
    log "Decoding with $method"

    ./pruned_transducer_stateless8/decode.py \
      --decoding-method $method \
      --epoch 999 \
      --avg 1 \
      --use-averaged-model 0 \
      --max-duration $max_duration \
      --exp-dir pruned_transducer_stateless8/exp
  done

  rm pruned_transducer_stateless8/exp/*.pt
fi
@ -1,101 +0,0 @@
#!/usr/bin/env bash

set -e

log() {
  # This function is from espnet
  local fname=${BASH_SOURCE[1]##*/}
  echo -e "$(date '+%Y-%m-%d %H:%M:%S') (${fname}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*"
}

cd egs/librispeech/ASR

repo_url=https://huggingface.co/pkufool/icefall_librispeech_streaming_pruned_transducer_stateless2_20220625

log "Downloading pre-trained model from $repo_url"
git lfs install
git clone $repo_url
repo=$(basename $repo_url)

log "Display test files"
tree $repo/
ls -lh $repo/test_wavs/*.wav

pushd $repo/exp
ln -s pretrained-epoch-24-avg-10.pt pretrained.pt
popd

for sym in 1 2 3; do
  log "Greedy search with --max-sym-per-frame $sym"

  ./pruned_transducer_stateless2/pretrained.py \
    --method greedy_search \
    --max-sym-per-frame $sym \
    --checkpoint $repo/exp/pretrained.pt \
    --tokens $repo/data/lang_bpe_500/tokens.txt \
    --simulate-streaming 1 \
    --causal-convolution 1 \
    $repo/test_wavs/1089-134686-0001.wav \
    $repo/test_wavs/1221-135766-0001.wav \
    $repo/test_wavs/1221-135766-0002.wav
done

for method in modified_beam_search beam_search fast_beam_search; do
  log "$method"

  ./pruned_transducer_stateless2/pretrained.py \
    --method $method \
    --beam-size 4 \
    --checkpoint $repo/exp/pretrained.pt \
    --tokens $repo/data/lang_bpe_500/tokens.txt \
    --simulate-streaming 1 \
    --causal-convolution 1 \
    $repo/test_wavs/1089-134686-0001.wav \
    $repo/test_wavs/1221-135766-0001.wav \
    $repo/test_wavs/1221-135766-0002.wav
done

echo "GITHUB_EVENT_NAME: ${GITHUB_EVENT_NAME}"
echo "GITHUB_EVENT_LABEL_NAME: ${GITHUB_EVENT_LABEL_NAME}"
if [[ x"${GITHUB_EVENT_NAME}" == x"schedule" || x"${GITHUB_EVENT_LABEL_NAME}" == x"run-decode" ]]; then
  mkdir -p pruned_transducer_stateless2/exp
  ln -s $PWD/$repo/exp/pretrained-epoch-24-avg-10.pt pruned_transducer_stateless2/exp/epoch-999.pt
  ln -s $PWD/$repo/data/lang_bpe_500 data/

  ls -lh data
  ls -lh pruned_transducer_stateless2/exp

  log "Decoding test-clean and test-other"

  # use a small value for decoding with CPU
  max_duration=100

  for method in greedy_search fast_beam_search modified_beam_search; do
    log "Simulated streaming decoding with $method"

    ./pruned_transducer_stateless2/decode.py \
      --decoding-method $method \
      --epoch 999 \
      --avg 1 \
      --max-duration $max_duration \
      --exp-dir pruned_transducer_stateless2/exp \
      --simulate-streaming 1 \
      --causal-convolution 1
  done
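
  # Note: the loop above runs "simulated" streaming, i.e. decode.py sees the
  # whole utterance but applies causal masking (--simulate-streaming /
  # --causal-convolution); the loop below does true chunk-by-chunk streaming,
  # where --decode-chunk-size, --left-context and --right-context set the
  # per-chunk attention window.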

  for method in greedy_search fast_beam_search modified_beam_search; do
    log "Real streaming decoding with $method"

    ./pruned_transducer_stateless2/streaming_decode.py \
      --decoding-method $method \
      --epoch 999 \
      --avg 1 \
      --num-decode-streams 100 \
      --exp-dir pruned_transducer_stateless2/exp \
      --left-context 32 \
      --decode-chunk-size 8 \
      --right-context 0
  done

  rm pruned_transducer_stateless2/exp/*.pt
fi
@ -1,116 +0,0 @@
#!/usr/bin/env bash

set -e

log() {
  # This function is from espnet
  local fname=${BASH_SOURCE[1]##*/}
  echo -e "$(date '+%Y-%m-%d %H:%M:%S') (${fname}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*"
}

cd egs/librispeech/ASR

repo_url=https://huggingface.co/Zengwei/icefall-asr-librispeech-streaming-zipformer-2023-05-17

log "Downloading pre-trained model from $repo_url"
git lfs install
GIT_LFS_SKIP_SMUDGE=1 git clone $repo_url
repo=$(basename $repo_url)

log "Display test files"
tree $repo/
ls -lh $repo/test_wavs/*.wav

pushd $repo/exp
git lfs pull --include "data/lang_bpe_500/bpe.model"
git lfs pull --include "data/lang_bpe_500/tokens.txt"
git lfs pull --include "exp/jit_script_chunk_16_left_128.pt"
git lfs pull --include "exp/pretrained.pt"
ln -s pretrained.pt epoch-99.pt
ls -lh *.pt
popd

log "Export to torchscript model"
./zipformer/export.py \
  --exp-dir $repo/exp \
  --use-averaged-model false \
  --tokens $repo/data/lang_bpe_500/tokens.txt \
  --causal 1 \
  --chunk-size 16 \
  --left-context-frames 128 \
  --epoch 99 \
  --avg 1 \
  --jit 1

ls -lh $repo/exp/*.pt

log "Decode with models exported by torch.jit.script()"

./zipformer/jit_pretrained_streaming.py \
  --tokens $repo/data/lang_bpe_500/tokens.txt \
  --nn-model-filename $repo/exp/jit_script_chunk_16_left_128.pt \
  $repo/test_wavs/1089-134686-0001.wav
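
# Note: the name jit_script_chunk_16_left_128.pt records that --chunk-size 16
# and --left-context-frames 128 were fixed at export time;
# jit_pretrained_streaming.py then feeds the wav to the model chunk by chunk,
# emulating a live audio stream.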

for method in greedy_search modified_beam_search fast_beam_search; do
  log "$method"

  ./zipformer/pretrained.py \
    --causal 1 \
    --chunk-size 16 \
    --left-context-frames 128 \
    --method $method \
    --beam-size 4 \
    --checkpoint $repo/exp/pretrained.pt \
    --tokens $repo/data/lang_bpe_500/tokens.txt \
    $repo/test_wavs/1089-134686-0001.wav \
    $repo/test_wavs/1221-135766-0001.wav \
    $repo/test_wavs/1221-135766-0002.wav
done

echo "GITHUB_EVENT_NAME: ${GITHUB_EVENT_NAME}"
echo "GITHUB_EVENT_LABEL_NAME: ${GITHUB_EVENT_LABEL_NAME}"
if [[ x"${GITHUB_EVENT_NAME}" == x"schedule" || x"${GITHUB_EVENT_LABEL_NAME}" == x"run-decode" ]]; then
  mkdir -p zipformer/exp
  ln -s $PWD/$repo/exp/pretrained.pt zipformer/exp/epoch-999.pt
  ln -s $PWD/$repo/data/lang_bpe_500 data/

  ls -lh data
  ls -lh zipformer/exp

  log "Decoding test-clean and test-other"

  # use a small value for decoding with CPU
  max_duration=100

  for method in greedy_search fast_beam_search modified_beam_search; do
    log "Simulated streaming decoding with $method"

    ./zipformer/decode.py \
      --causal 1 \
      --chunk-size 16 \
      --left-context-frames 128 \
      --decoding-method $method \
      --epoch 999 \
      --avg 1 \
      --use-averaged-model 0 \
      --max-duration $max_duration \
      --exp-dir zipformer/exp
  done

  for method in greedy_search fast_beam_search modified_beam_search; do
    log "Chunk-wise streaming decoding with $method"

    ./zipformer/streaming_decode.py \
      --causal 1 \
      --chunk-size 16 \
      --left-context-frames 128 \
      --decoding-method $method \
      --epoch 999 \
      --avg 1 \
      --use-averaged-model 0 \
      --max-duration $max_duration \
      --exp-dir zipformer/exp
  done

  rm zipformer/exp/*.pt
fi
@ -1,77 +0,0 @@
#!/usr/bin/env bash

set -e

log() {
  # This function is from espnet
  local fname=${BASH_SOURCE[1]##*/}
  echo -e "$(date '+%Y-%m-%d %H:%M:%S') (${fname}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*"
}

cd egs/librispeech/ASR

repo_url=https://huggingface.co/csukuangfj/icefall-asr-librispeech-transducer-stateless2-torchaudio-2022-04-19

log "Downloading pre-trained model from $repo_url"
git lfs install
git clone $repo_url
repo=$(basename $repo_url)

log "Display test files"
tree $repo/
ls -lh $repo/test_wavs/*.wav

for sym in 1 2 3; do
  log "Greedy search with --max-sym-per-frame $sym"

  ./transducer_stateless2/pretrained.py \
    --method greedy_search \
    --max-sym-per-frame $sym \
    --checkpoint $repo/exp/pretrained.pt \
    --tokens $repo/data/lang_bpe_500/tokens.txt \
    $repo/test_wavs/1089-134686-0001.wav \
    $repo/test_wavs/1221-135766-0001.wav \
    $repo/test_wavs/1221-135766-0002.wav
done

for method in fast_beam_search modified_beam_search beam_search; do
  log "$method"

  ./transducer_stateless2/pretrained.py \
    --method $method \
    --beam-size 4 \
    --checkpoint $repo/exp/pretrained.pt \
    --tokens $repo/data/lang_bpe_500/tokens.txt \
    $repo/test_wavs/1089-134686-0001.wav \
    $repo/test_wavs/1221-135766-0001.wav \
    $repo/test_wavs/1221-135766-0002.wav
done

echo "GITHUB_EVENT_NAME: ${GITHUB_EVENT_NAME}"
echo "GITHUB_EVENT_LABEL_NAME: ${GITHUB_EVENT_LABEL_NAME}"
if [[ x"${GITHUB_EVENT_NAME}" == x"schedule" || x"${GITHUB_EVENT_LABEL_NAME}" == x"run-decode" ]]; then
  mkdir -p transducer_stateless2/exp
  ln -s $PWD/$repo/exp/pretrained.pt transducer_stateless2/exp/epoch-999.pt
  ln -s $PWD/$repo/data/lang_bpe_500 data/

  ls -lh data
  ls -lh transducer_stateless2/exp

  log "Decoding test-clean and test-other"

  # use a small value for decoding with CPU
  max_duration=100

  for method in greedy_search fast_beam_search modified_beam_search; do
    log "Decoding with $method"

    ./transducer_stateless2/decode.py \
      --decoding-method $method \
      --epoch 999 \
      --avg 1 \
      --max-duration $max_duration \
      --exp-dir transducer_stateless2/exp
  done

  rm transducer_stateless2/exp/*.pt
fi
@ -1,94 +0,0 @@
#!/usr/bin/env bash

set -e

log() {
  # This function is from espnet
  local fname=${BASH_SOURCE[1]##*/}
  echo -e "$(date '+%Y-%m-%d %H:%M:%S') (${fname}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*"
}

cd egs/librispeech/ASR

repo_url=https://huggingface.co/Zengwei/icefall-asr-librispeech-zipformer-2023-05-15

log "Downloading pre-trained model from $repo_url"
git lfs install
GIT_LFS_SKIP_SMUDGE=1 git clone $repo_url
repo=$(basename $repo_url)

log "Display test files"
tree $repo/
ls -lh $repo/test_wavs/*.wav

pushd $repo/exp
git lfs pull --include "data/lang_bpe_500/bpe.model"
git lfs pull --include "data/lang_bpe_500/tokens.txt"
git lfs pull --include "exp/jit_script.pt"
git lfs pull --include "exp/pretrained.pt"
ln -s pretrained.pt epoch-99.pt
ls -lh *.pt
popd

log "Export to torchscript model"
./zipformer/export.py \
  --exp-dir $repo/exp \
  --use-averaged-model false \
  --tokens $repo/data/lang_bpe_500/tokens.txt \
  --epoch 99 \
  --avg 1 \
  --jit 1

ls -lh $repo/exp/*.pt

log "Decode with models exported by torch.jit.script()"

./zipformer/jit_pretrained.py \
  --tokens $repo/data/lang_bpe_500/tokens.txt \
  --nn-model-filename $repo/exp/jit_script.pt \
  $repo/test_wavs/1089-134686-0001.wav \
  $repo/test_wavs/1221-135766-0001.wav \
  $repo/test_wavs/1221-135766-0002.wav

for method in greedy_search modified_beam_search fast_beam_search; do
  log "$method"

  ./zipformer/pretrained.py \
    --method $method \
    --beam-size 4 \
    --checkpoint $repo/exp/pretrained.pt \
    --tokens $repo/data/lang_bpe_500/tokens.txt \
    $repo/test_wavs/1089-134686-0001.wav \
    $repo/test_wavs/1221-135766-0001.wav \
    $repo/test_wavs/1221-135766-0002.wav
done

echo "GITHUB_EVENT_NAME: ${GITHUB_EVENT_NAME}"
echo "GITHUB_EVENT_LABEL_NAME: ${GITHUB_EVENT_LABEL_NAME}"
if [[ x"${GITHUB_EVENT_NAME}" == x"schedule" || x"${GITHUB_EVENT_LABEL_NAME}" == x"run-decode" ]]; then
  mkdir -p zipformer/exp
  ln -s $PWD/$repo/exp/pretrained.pt zipformer/exp/epoch-999.pt
  ln -s $PWD/$repo/data/lang_bpe_500 data/

  ls -lh data
  ls -lh zipformer/exp

  log "Decoding test-clean and test-other"

  # use a small value for decoding with CPU
  max_duration=100

  for method in greedy_search fast_beam_search modified_beam_search; do
    log "Decoding with $method"

    ./zipformer/decode.py \
      --decoding-method $method \
      --epoch 999 \
      --avg 1 \
      --use-averaged-model 0 \
      --max-duration $max_duration \
      --exp-dir zipformer/exp
  done

  rm zipformer/exp/*.pt
fi
@ -1,117 +0,0 @@
#!/usr/bin/env bash

set -e

log() {
  # This function is from espnet
  local fname=${BASH_SOURCE[1]##*/}
  echo -e "$(date '+%Y-%m-%d %H:%M:%S') (${fname}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*"
}

cd egs/librispeech/ASR

repo_url=https://huggingface.co/Zengwei/icefall-asr-librispeech-zipformer-transducer-ctc-2023-06-13

log "Downloading pre-trained model from $repo_url"
git lfs install
GIT_LFS_SKIP_SMUDGE=1 git clone $repo_url
repo=$(basename $repo_url)

log "Display test files"
tree $repo/
ls -lh $repo/test_wavs/*.wav

pushd $repo/exp
git lfs pull --include "data/lang_bpe_500/bpe.model"
git lfs pull --include "data/lang_bpe_500/tokens.txt"
git lfs pull --include "data/lang_bpe_500/HLG.pt"
git lfs pull --include "data/lang_bpe_500/L.pt"
git lfs pull --include "data/lang_bpe_500/LG.pt"
git lfs pull --include "data/lang_bpe_500/Linv.pt"
git lfs pull --include "data/lm/G_4_gram.pt"
git lfs pull --include "exp/jit_script.pt"
git lfs pull --include "exp/pretrained.pt"
ln -s pretrained.pt epoch-99.pt
ls -lh *.pt
popd

log "Export to torchscript model"
./zipformer/export.py \
  --exp-dir $repo/exp \
  --use-transducer 1 \
  --use-ctc 1 \
  --use-averaged-model false \
  --tokens $repo/data/lang_bpe_500/tokens.txt \
  --epoch 99 \
  --avg 1 \
  --jit 1

ls -lh $repo/exp/*.pt

log "Decode with models exported by torch.jit.script()"

for method in ctc-decoding 1best; do
  ./zipformer/jit_pretrained_ctc.py \
    --tokens $repo/data/lang_bpe_500/tokens.txt \
    --model-filename $repo/exp/jit_script.pt \
    --HLG $repo/data/lang_bpe_500/HLG.pt \
    --words-file $repo/data/lang_bpe_500/words.txt \
    --G $repo/data/lm/G_4_gram.pt \
    --method $method \
    --sample-rate 16000 \
    $repo/test_wavs/1089-134686-0001.wav \
    $repo/test_wavs/1221-135766-0001.wav \
    $repo/test_wavs/1221-135766-0002.wav
done

for method in ctc-decoding 1best; do
  log "$method"

  ./zipformer/pretrained_ctc.py \
    --use-transducer 1 \
    --use-ctc 1 \
    --method $method \
    --checkpoint $repo/exp/pretrained.pt \
    --tokens $repo/data/lang_bpe_500/tokens.txt \
    --HLG $repo/data/lang_bpe_500/HLG.pt \
    --G $repo/data/lm/G_4_gram.pt \
    --words-file $repo/data/lang_bpe_500/words.txt \
    --sample-rate 16000 \
    $repo/test_wavs/1089-134686-0001.wav \
    $repo/test_wavs/1221-135766-0001.wav \
    $repo/test_wavs/1221-135766-0002.wav
done
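
# Note: this checkpoint was trained with both a transducer head and an
# auxiliary CTC head; --use-transducer 1 --use-ctc 1 has to mirror that
# training configuration for the checkpoint to load, and the CTC head is what
# the ctc-decoding/1best methods above decode with.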

echo "GITHUB_EVENT_NAME: ${GITHUB_EVENT_NAME}"
echo "GITHUB_EVENT_LABEL_NAME: ${GITHUB_EVENT_LABEL_NAME}"
if [[ x"${GITHUB_EVENT_NAME}" == x"schedule" || x"${GITHUB_EVENT_LABEL_NAME}" == x"run-decode" ]]; then
  mkdir -p zipformer/exp
  ln -s $PWD/$repo/exp/pretrained.pt zipformer/exp/epoch-999.pt
  ln -s $PWD/$repo/data/lang_bpe_500 data/

  ls -lh data
  ls -lh zipformer/exp

  log "Decoding test-clean and test-other"

  # use a small value for decoding with CPU
  max_duration=100

  for method in ctc-decoding 1best; do
    log "Decoding with $method"

    ./zipformer/ctc_decode.py \
      --use-transducer 1 \
      --use-ctc 1 \
      --decoding-method $method \
      --nbest-scale 1.0 \
      --hlg-scale 0.6 \
      --epoch 999 \
      --avg 1 \
      --use-averaged-model 0 \
      --max-duration $max_duration \
      --exp-dir zipformer/exp
  done

  rm zipformer/exp/*.pt
fi
@ -1,102 +0,0 @@
#!/usr/bin/env bash

set -e

log() {
  # This function is from espnet
  local fname=${BASH_SOURCE[1]##*/}
  echo -e "$(date '+%Y-%m-%d %H:%M:%S') (${fname}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*"
}

cd egs/librispeech/ASR

repo_url=https://huggingface.co/Zengwei/icefall-asr-librispeech-zipformer-mmi-2022-12-08

log "Downloading pre-trained model from $repo_url"
GIT_LFS_SKIP_SMUDGE=1 git clone $repo_url
repo=$(basename $repo_url)

log "Display test files"
tree $repo/
ls -lh $repo/test_wavs/*.wav

pushd $repo/exp
git lfs pull --include "data/lang_bpe_500/3gram.pt"
git lfs pull --include "data/lang_bpe_500/4gram.pt"
git lfs pull --include "data/lang_bpe_500/L.pt"
git lfs pull --include "data/lang_bpe_500/LG.pt"
git lfs pull --include "data/lang_bpe_500/Linv.pt"
git lfs pull --include "data/lang_bpe_500/bpe.model"
git lfs pull --include "exp/cpu_jit.pt"
git lfs pull --include "exp/pretrained.pt"
ln -s pretrained.pt epoch-99.pt
ls -lh *.pt
popd

log "Export to torchscript model"
./zipformer_mmi/export.py \
  --exp-dir $repo/exp \
  --use-averaged-model false \
  --tokens $repo/data/lang_bpe_500/tokens.txt \
  --epoch 99 \
  --avg 1 \
  --jit 1

ls -lh $repo/exp/*.pt

log "Decode with models exported by torch.jit.script()"

./zipformer_mmi/jit_pretrained.py \
  --bpe-model $repo/data/lang_bpe_500/bpe.model \
  --nn-model-filename $repo/exp/cpu_jit.pt \
  --lang-dir $repo/data/lang_bpe_500 \
  $repo/test_wavs/1089-134686-0001.wav \
  $repo/test_wavs/1221-135766-0001.wav \
  $repo/test_wavs/1221-135766-0002.wav

for method in 1best nbest nbest-rescoring-LG nbest-rescoring-3-gram nbest-rescoring-4-gram; do
  log "$method"

  ./zipformer_mmi/pretrained.py \
    --method $method \
    --checkpoint $repo/exp/pretrained.pt \
    --lang-dir $repo/data/lang_bpe_500 \
    --tokens $repo/data/lang_bpe_500/tokens.txt \
    $repo/test_wavs/1089-134686-0001.wav \
    $repo/test_wavs/1221-135766-0001.wav \
    $repo/test_wavs/1221-135766-0002.wav
done
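
# Note: the nbest-rescoring-* methods presumably extract an n-best list from
# the decoding lattice and rescore it with the matching LM pulled via git-lfs
# above: LG.pt for nbest-rescoring-LG, and 3gram.pt/4gram.pt for the
# 3-gram/4-gram variants.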

echo "GITHUB_EVENT_NAME: ${GITHUB_EVENT_NAME}"
echo "GITHUB_EVENT_LABEL_NAME: ${GITHUB_EVENT_LABEL_NAME}"
if [[ x"${GITHUB_EVENT_NAME}" == x"schedule" || x"${GITHUB_EVENT_LABEL_NAME}" == x"run-decode" ]]; then
  mkdir -p zipformer_mmi/exp
  ln -s $PWD/$repo/exp/pretrained.pt zipformer_mmi/exp/epoch-999.pt
  ln -s $PWD/$repo/data/lang_bpe_500 data/

  ls -lh data
  ls -lh zipformer_mmi/exp

  log "Decoding test-clean and test-other"

  # use a small value for decoding with CPU
  max_duration=100

  for method in 1best nbest nbest-rescoring-LG nbest-rescoring-3-gram nbest-rescoring-4-gram; do
    log "Decoding with $method"

    ./zipformer_mmi/decode.py \
      --decoding-method $method \
      --epoch 999 \
      --avg 1 \
      --use-averaged-model 0 \
      --nbest-scale 1.2 \
      --hp-scale 1.0 \
      --max-duration $max_duration \
      --lang-dir $repo/data/lang_bpe_500 \
      --exp-dir zipformer_mmi/exp
  done

  rm zipformer_mmi/exp/*.pt
fi
@ -95,3 +95,41 @@ for method in modified_beam_search fast_beam_search; do
  $repo/test_wavs/DEV_T0000000001.wav \
  $repo/test_wavs/DEV_T0000000002.wav
done

rm -rf $repo

cd ../../../egs/multi_zh_en/ASR
log "==== Test icefall-asr-zipformer-multi-zh-en-2023-11-22 ===="
repo_url=https://huggingface.co/zrjin/icefall-asr-zipformer-multi-zh-en-2023-11-22/

log "Downloading pre-trained model from $repo_url"
git lfs install
git clone $repo_url
repo=$(basename $repo_url)

log "Display test files"
tree $repo/
ls -lh $repo/test_wavs/*.wav

./zipformer/pretrained.py \
  --checkpoint $repo/exp/pretrained.pt \
  --bpe-model $repo/data/lang_bbpe_2000/bbpe.model \
  --method greedy_search \
  $repo/test_wavs/_1634_210_2577_1_1525157964032_3712259_29.wav \
  $repo/test_wavs/_1634_210_2577_1_1525157964032_3712259_55.wav \
  $repo/test_wavs/_1634_210_2577_1_1525157964032_3712259_75.wav
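
# Note: lang_bbpe_2000 is presumably a byte-level BPE (BBPE) lexicon, which
# lets a single 2000-token model cover both Chinese characters and English
# text in this bilingual zh-en recipe.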

for method in modified_beam_search fast_beam_search; do
  log "$method"

  ./zipformer/pretrained.py \
    --method $method \
    --beam-size 4 \
    --checkpoint $repo/exp/pretrained.pt \
    --bpe-model $repo/data/lang_bbpe_2000/bbpe.model \
    $repo/test_wavs/_1634_210_2577_1_1525157964032_3712259_29.wav \
    $repo/test_wavs/_1634_210_2577_1_1525157964032_3712259_55.wav \
    $repo/test_wavs/_1634_210_2577_1_1525157964032_3712259_75.wav
done

rm -rf $repo
240
.github/scripts/run-pre-trained-ctc.sh
vendored
240
.github/scripts/run-pre-trained-ctc.sh
vendored
@ -1,240 +0,0 @@
#!/usr/bin/env bash

set -e

log() {
  # This function is from espnet
  local fname=${BASH_SOURCE[1]##*/}
  echo -e "$(date '+%Y-%m-%d %H:%M:%S') (${fname}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*"
}

pushd egs/librispeech/ASR

repo_url=https://huggingface.co/csukuangfj/sherpa-onnx-zipformer-ctc-en-2023-10-02
log "Downloading pre-trained model from $repo_url"
git lfs install
git clone $repo_url
repo=$(basename $repo_url)

log "Display test files"
tree $repo/
ls -lh $repo/test_wavs/*.wav

log "CTC greedy search"

./zipformer/onnx_pretrained_ctc.py \
  --nn-model $repo/model.onnx \
  --tokens $repo/tokens.txt \
  $repo/test_wavs/0.wav \
  $repo/test_wavs/1.wav \
  $repo/test_wavs/2.wav

log "CTC H decoding"

./zipformer/onnx_pretrained_ctc_H.py \
  --nn-model $repo/model.onnx \
  --tokens $repo/tokens.txt \
  --H $repo/H.fst \
  $repo/test_wavs/0.wav \
  $repo/test_wavs/1.wav \
  $repo/test_wavs/2.wav

log "CTC HL decoding"

./zipformer/onnx_pretrained_ctc_HL.py \
  --nn-model $repo/model.onnx \
  --words $repo/words.txt \
  --HL $repo/HL.fst \
  $repo/test_wavs/0.wav \
  $repo/test_wavs/1.wav \
  $repo/test_wavs/2.wav

log "CTC HLG decoding"

./zipformer/onnx_pretrained_ctc_HLG.py \
  --nn-model $repo/model.onnx \
  --words $repo/words.txt \
  --HLG $repo/HLG.fst \
  $repo/test_wavs/0.wav \
  $repo/test_wavs/1.wav \
  $repo/test_wavs/2.wav
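
# Note: the three FSTs add progressively more structure on top of the CTC
# outputs: H maps frame-level CTC label sequences to tokens, HL composes in
# the lexicon to yield words, and HLG further composes an n-gram grammar,
# which is why the H run takes tokens.txt while the HL/HLG runs take
# words.txt.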
|
|
||||||
|
|
||||||
rm -rf $repo
|
|
||||||
|
|
repo_url=https://huggingface.co/csukuangfj/icefall-asr-librispeech-conformer-ctc-jit-bpe-500-2021-11-09
log "Downloading pre-trained model from $repo_url"
GIT_LFS_SKIP_SMUDGE=1 git clone $repo_url
repo=$(basename $repo_url)
pushd $repo

git lfs pull --include "exp/pretrained.pt"
git lfs pull --include "data/lang_bpe_500/HLG.pt"
git lfs pull --include "data/lang_bpe_500/L.pt"
git lfs pull --include "data/lang_bpe_500/L_disambig.pt"
git lfs pull --include "data/lang_bpe_500/Linv.pt"
git lfs pull --include "data/lang_bpe_500/bpe.model"
git lfs pull --include "data/lang_bpe_500/lexicon.txt"
git lfs pull --include "data/lang_bpe_500/lexicon_disambig.txt"
git lfs pull --include "data/lang_bpe_500/tokens.txt"
git lfs pull --include "data/lang_bpe_500/words.txt"
git lfs pull --include "data/lm/G_3_gram.fst.txt"

popd

log "Display test files"
tree $repo/
ls -lh $repo/test_wavs/*.wav

log "CTC decoding"

./conformer_ctc/pretrained.py \
  --method ctc-decoding \
  --num-classes 500 \
  --checkpoint $repo/exp/pretrained.pt \
  --tokens $repo/data/lang_bpe_500/tokens.txt \
  $repo/test_wavs/1089-134686-0001.wav \
  $repo/test_wavs/1221-135766-0001.wav \
  $repo/test_wavs/1221-135766-0002.wav

log "HLG decoding"

./conformer_ctc/pretrained.py \
  --method 1best \
  --num-classes 500 \
  --checkpoint $repo/exp/pretrained.pt \
  --tokens $repo/data/lang_bpe_500/tokens.txt \
  --words-file $repo/data/lang_bpe_500/words.txt \
  --HLG $repo/data/lang_bpe_500/HLG.pt \
  $repo/test_wavs/1089-134686-0001.wav \
  $repo/test_wavs/1221-135766-0001.wav \
  $repo/test_wavs/1221-135766-0002.wav

log "CTC decoding on CPU with kaldi decoders using OpenFst"

log "Exporting model with torchscript"

pushd $repo/exp
ln -s pretrained.pt epoch-99.pt
popd

./conformer_ctc/export.py \
  --epoch 99 \
  --avg 1 \
  --exp-dir $repo/exp \
  --tokens $repo/data/lang_bpe_500/tokens.txt \
  --jit 1

ls -lh $repo/exp
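
# prepare_lang_fst.py writes H.fst and HL.fst into the lang dir; given the n-gram G it also produces the HLG.fst used below.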
log "Generating H.fst, HL.fst"
|
|
||||||
|
|
||||||
./local/prepare_lang_fst.py --lang-dir $repo/data/lang_bpe_500 --ngram-G $repo/data/lm/G_3_gram.fst.txt
|
|
||||||
|
|
||||||
ls -lh $repo/data/lang_bpe_500
|
|
||||||
|
|
||||||
log "Decoding with H on CPU with OpenFst"
|
|
||||||
|
|
||||||
./conformer_ctc/jit_pretrained_decode_with_H.py \
|
|
||||||
--nn-model $repo/exp/cpu_jit.pt \
|
|
||||||
--H $repo/data/lang_bpe_500/H.fst \
|
|
||||||
--tokens $repo/data/lang_bpe_500/tokens.txt \
|
|
||||||
$repo/test_wavs/1089-134686-0001.wav \
|
|
||||||
$repo/test_wavs/1221-135766-0001.wav \
|
|
||||||
$repo/test_wavs/1221-135766-0002.wav
|
|
||||||
|
|
||||||
log "Decoding with HL on CPU with OpenFst"
|
|
||||||
|
|
||||||
./conformer_ctc/jit_pretrained_decode_with_HL.py \
|
|
||||||
--nn-model $repo/exp/cpu_jit.pt \
|
|
||||||
--HL $repo/data/lang_bpe_500/HL.fst \
|
|
||||||
--words $repo/data/lang_bpe_500/words.txt \
|
|
||||||
$repo/test_wavs/1089-134686-0001.wav \
|
|
||||||
$repo/test_wavs/1221-135766-0001.wav \
|
|
||||||
$repo/test_wavs/1221-135766-0002.wav
|
|
||||||
|
|
||||||
log "Decoding with HLG on CPU with OpenFst"
|
|
||||||
|
|
||||||
./conformer_ctc/jit_pretrained_decode_with_HLG.py \
|
|
||||||
--nn-model $repo/exp/cpu_jit.pt \
|
|
||||||
--HLG $repo/data/lang_bpe_500/HLG.fst \
|
|
||||||
--words $repo/data/lang_bpe_500/words.txt \
|
|
||||||
$repo/test_wavs/1089-134686-0001.wav \
|
|
||||||
$repo/test_wavs/1221-135766-0001.wav \
|
|
||||||
$repo/test_wavs/1221-135766-0002.wav
|
|
||||||
|
|
||||||
rm -rf $repo
|
|
||||||
|
|
||||||
popd
|
|
||||||
|
|
||||||
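
# Finally, repeat the OpenFst decoding tests with the aishell conformer CTC model (Chinese, char-based lexicon).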
log "Test aishell"
|
|
||||||
|
|
||||||
pushd egs/aishell/ASR
|
|
||||||
|
|
||||||
repo_url=https://huggingface.co/csukuangfj/icefall_asr_aishell_conformer_ctc
|
|
||||||
log "Downloading pre-trained model from $repo_url"
|
|
||||||
GIT_LFS_SKIP_SMUDGE=1 git clone $repo_url
|
|
||||||
repo=$(basename $repo_url)
|
|
||||||
pushd $repo
|
|
||||||
|
|
||||||
git lfs pull --include "exp/pretrained.pt"
|
|
||||||
git lfs pull --include "data/lang_char/H.fst"
|
|
||||||
git lfs pull --include "data/lang_char/HL.fst"
|
|
||||||
git lfs pull --include "data/lang_char/HLG.fst"
|
|
||||||
|
|
||||||
popd
|
|
||||||
|
|
||||||
log "Display test files"
|
|
||||||
tree $repo/
|
|
||||||
ls -lh $repo/test_wavs/*.wav
|
|
||||||
|
|
||||||
log "CTC decoding"
|
|
||||||
|
|
||||||
log "Exporting model with torchscript"
|
|
||||||
|
|
||||||
pushd $repo/exp
|
|
||||||
ln -s pretrained.pt epoch-99.pt
|
|
||||||
popd
|
|
||||||
|
|
||||||
./conformer_ctc/export.py \
|
|
||||||
--epoch 99 \
|
|
||||||
--avg 1 \
|
|
||||||
--exp-dir $repo/exp \
|
|
||||||
--tokens $repo/data/lang_char/tokens.txt \
|
|
||||||
--jit 1
|
|
||||||
|
|
||||||
ls -lh $repo/exp
|
|
||||||
|
|
||||||
ls -lh $repo/data/lang_char
|
|
||||||
|
|
||||||
log "Decoding with H on CPU with OpenFst"
|
|
||||||
|
|
||||||
./conformer_ctc/jit_pretrained_decode_with_H.py \
|
|
||||||
--nn-model $repo/exp/cpu_jit.pt \
|
|
||||||
--H $repo/data/lang_char/H.fst \
|
|
||||||
--tokens $repo/data/lang_char/tokens.txt \
|
|
||||||
$repo/test_wavs/0.wav \
|
|
||||||
$repo/test_wavs/1.wav \
|
|
||||||
$repo/test_wavs/2.wav
|
|
||||||
|
|
||||||
log "Decoding with HL on CPU with OpenFst"
|
|
||||||
|
|
||||||
./conformer_ctc/jit_pretrained_decode_with_HL.py \
|
|
||||||
--nn-model $repo/exp/cpu_jit.pt \
|
|
||||||
--HL $repo/data/lang_char/HL.fst \
|
|
||||||
--words $repo/data/lang_char/words.txt \
|
|
||||||
$repo/test_wavs/0.wav \
|
|
||||||
$repo/test_wavs/1.wav \
|
|
||||||
$repo/test_wavs/2.wav
|
|
||||||
|
|
||||||
log "Decoding with HLG on CPU with OpenFst"
|
|
||||||
|
|
||||||
./conformer_ctc/jit_pretrained_decode_with_HLG.py \
|
|
||||||
--nn-model $repo/exp/cpu_jit.pt \
|
|
||||||
--HLG $repo/data/lang_char/HLG.fst \
|
|
||||||
--words $repo/data/lang_char/words.txt \
|
|
||||||
$repo/test_wavs/0.wav \
|
|
||||||
$repo/test_wavs/1.wav \
|
|
||||||
$repo/test_wavs/2.wav
|
|
||||||
|
|
||||||
rm -rf $repo
|
|
||||||
@@ -1,77 +0,0 @@
#!/usr/bin/env bash

set -e

log() {
  # This function is from espnet
  local fname=${BASH_SOURCE[1]##*/}
  echo -e "$(date '+%Y-%m-%d %H:%M:%S') (${fname}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*"
}

cd egs/librispeech/ASR

repo_url=https://huggingface.co/csukuangfj/icefall-asr-librispeech-100h-transducer-stateless-multi-datasets-bpe-500-2022-02-21

log "Downloading pre-trained model from $repo_url"
git lfs install
git clone $repo_url
repo=$(basename $repo_url)

log "Display test files"
tree $repo/
ls -lh $repo/test_wavs/*.wav

for sym in 1 2 3; do
  log "Greedy search with --max-sym-per-frame $sym"

  ./transducer_stateless_multi_datasets/pretrained.py \
    --method greedy_search \
    --max-sym-per-frame $sym \
    --checkpoint $repo/exp/pretrained.pt \
    --tokens $repo/data/lang_bpe_500/tokens.txt \
    $repo/test_wavs/1089-134686-0001.wav \
    $repo/test_wavs/1221-135766-0001.wav \
    $repo/test_wavs/1221-135766-0002.wav
done

for method in modified_beam_search beam_search fast_beam_search; do
  log "$method"

  ./transducer_stateless_multi_datasets/pretrained.py \
    --method $method \
    --beam-size 4 \
    --checkpoint $repo/exp/pretrained.pt \
    --tokens $repo/data/lang_bpe_500/tokens.txt \
    $repo/test_wavs/1089-134686-0001.wav \
    $repo/test_wavs/1221-135766-0001.wav \
    $repo/test_wavs/1221-135766-0002.wav
done
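
# The full test-clean/test-other decode below only runs for scheduled builds or PRs labeled run-decode.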
echo "GITHUB_EVENT_NAME: ${GITHUB_EVENT_NAME}"
echo "GITHUB_EVENT_LABEL_NAME: ${GITHUB_EVENT_LABEL_NAME}"
if [[ x"${GITHUB_EVENT_NAME}" == x"schedule" || x"${GITHUB_EVENT_LABEL_NAME}" == x"run-decode" ]]; then
  mkdir -p transducer_stateless_multi_datasets/exp
  ln -s $PWD/$repo/exp/pretrained.pt transducer_stateless_multi_datasets/exp/epoch-999.pt
  ln -s $PWD/$repo/data/lang_bpe_500 data/

  ls -lh data
  ls -lh transducer_stateless_multi_datasets/exp

  log "Decoding test-clean and test-other"

  # use a small value for decoding with CPU
  max_duration=100

  for method in greedy_search fast_beam_search modified_beam_search; do
    log "Decoding with $method"

    ./transducer_stateless_multi_datasets/decode.py \
      --decoding-method $method \
      --epoch 999 \
      --avg 1 \
      --max-duration $max_duration \
      --exp-dir transducer_stateless_multi_datasets/exp
  done

  rm transducer_stateless_multi_datasets/exp/*.pt
fi
@@ -1,77 +0,0 @@
#!/usr/bin/env bash

set -e

log() {
  # This function is from espnet
  local fname=${BASH_SOURCE[1]##*/}
  echo -e "$(date '+%Y-%m-%d %H:%M:%S') (${fname}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*"
}

cd egs/librispeech/ASR

repo_url=https://huggingface.co/csukuangfj/icefall-asr-librispeech-transducer-stateless-multi-datasets-bpe-500-2022-03-01

log "Downloading pre-trained model from $repo_url"
git lfs install
git clone $repo_url
repo=$(basename $repo_url)

log "Display test files"
tree $repo/
ls -lh $repo/test_wavs/*.wav

for sym in 1 2 3; do
  log "Greedy search with --max-sym-per-frame $sym"

  ./transducer_stateless_multi_datasets/pretrained.py \
    --method greedy_search \
    --max-sym-per-frame $sym \
    --checkpoint $repo/exp/pretrained.pt \
    --tokens $repo/data/lang_bpe_500/tokens.txt \
    $repo/test_wavs/1089-134686-0001.wav \
    $repo/test_wavs/1221-135766-0001.wav \
    $repo/test_wavs/1221-135766-0002.wav
done

for method in modified_beam_search beam_search fast_beam_search; do
  log "$method"

  ./transducer_stateless_multi_datasets/pretrained.py \
    --method $method \
    --beam-size 4 \
    --checkpoint $repo/exp/pretrained.pt \
    --tokens $repo/data/lang_bpe_500/tokens.txt \
    $repo/test_wavs/1089-134686-0001.wav \
    $repo/test_wavs/1221-135766-0001.wav \
    $repo/test_wavs/1221-135766-0002.wav
done

echo "GITHUB_EVENT_NAME: ${GITHUB_EVENT_NAME}"
echo "GITHUB_EVENT_LABEL_NAME: ${GITHUB_EVENT_LABEL_NAME}"
if [[ x"${GITHUB_EVENT_NAME}" == x"schedule" || x"${GITHUB_EVENT_LABEL_NAME}" == x"run-decode" ]]; then
  mkdir -p transducer_stateless_multi_datasets/exp
  ln -s $PWD/$repo/exp/pretrained.pt transducer_stateless_multi_datasets/exp/epoch-999.pt
  ln -s $PWD/$repo/data/lang_bpe_500 data/

  ls -lh data
  ls -lh transducer_stateless_multi_datasets/exp

  log "Decoding test-clean and test-other"

  # use a small value for decoding with CPU
  max_duration=100

  for method in greedy_search fast_beam_search modified_beam_search; do
    log "Decoding with $method"

    ./transducer_stateless_multi_datasets/decode.py \
      --decoding-method $method \
      --epoch 999 \
      --avg 1 \
      --max-duration $max_duration \
      --exp-dir transducer_stateless_multi_datasets/exp
  done

  rm transducer_stateless_multi_datasets/exp/*.pt
fi
@@ -1,48 +0,0 @@
#!/usr/bin/env bash

set -e

log() {
  # This function is from espnet
  local fname=${BASH_SOURCE[1]##*/}
  echo -e "$(date '+%Y-%m-%d %H:%M:%S') (${fname}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*"
}

cd egs/aishell/ASR

repo_url=https://huggingface.co/csukuangfj/icefall-aishell-transducer-stateless-modified-2-2022-03-01

log "Downloading pre-trained model from $repo_url"
git lfs install
git clone $repo_url
repo=$(basename $repo_url)

log "Display test files"
tree $repo/
ls -lh $repo/test_wavs/*.wav

for sym in 1 2 3; do
  log "Greedy search with --max-sym-per-frame $sym"

  ./transducer_stateless_modified-2/pretrained.py \
    --method greedy_search \
    --max-sym-per-frame $sym \
    --checkpoint $repo/exp/pretrained.pt \
    --lang-dir $repo/data/lang_char \
    $repo/test_wavs/BAC009S0764W0121.wav \
    $repo/test_wavs/BAC009S0764W0122.wav \
    $repo/test_wavs/BAC009S0764W0123.wav
done

for method in modified_beam_search beam_search; do
  log "$method"

  ./transducer_stateless_modified-2/pretrained.py \
    --method $method \
    --beam-size 4 \
    --checkpoint $repo/exp/pretrained.pt \
    --lang-dir $repo/data/lang_char \
    $repo/test_wavs/BAC009S0764W0121.wav \
    $repo/test_wavs/BAC009S0764W0122.wav \
    $repo/test_wavs/BAC009S0764W0123.wav
done
@@ -1,48 +0,0 @@
#!/usr/bin/env bash

set -e

log() {
  # This function is from espnet
  local fname=${BASH_SOURCE[1]##*/}
  echo -e "$(date '+%Y-%m-%d %H:%M:%S') (${fname}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*"
}

cd egs/aishell/ASR

repo_url=https://huggingface.co/csukuangfj/icefall-aishell-transducer-stateless-modified-2022-03-01

log "Downloading pre-trained model from $repo_url"
git lfs install
git clone $repo_url
repo=$(basename $repo_url)

log "Display test files"
tree $repo/
ls -lh $repo/test_wavs/*.wav

for sym in 1 2 3; do
  log "Greedy search with --max-sym-per-frame $sym"

  ./transducer_stateless_modified/pretrained.py \
    --method greedy_search \
    --max-sym-per-frame $sym \
    --checkpoint $repo/exp/pretrained.pt \
    --lang-dir $repo/data/lang_char \
    $repo/test_wavs/BAC009S0764W0121.wav \
    $repo/test_wavs/BAC009S0764W0122.wav \
    $repo/test_wavs/BAC009S0764W0123.wav
done

for method in modified_beam_search beam_search; do
  log "$method"

  ./transducer_stateless_modified/pretrained.py \
    --method $method \
    --beam-size 4 \
    --checkpoint $repo/exp/pretrained.pt \
    --lang-dir $repo/data/lang_char \
    $repo/test_wavs/BAC009S0764W0121.wav \
    $repo/test_wavs/BAC009S0764W0122.wav \
    $repo/test_wavs/BAC009S0764W0123.wav
done
@@ -1,77 +0,0 @@
#!/usr/bin/env bash

set -e

log() {
  # This function is from espnet
  local fname=${BASH_SOURCE[1]##*/}
  echo -e "$(date '+%Y-%m-%d %H:%M:%S') (${fname}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*"
}

cd egs/librispeech/ASR

repo_url=https://huggingface.co/csukuangfj/icefall-asr-librispeech-transducer-stateless-bpe-500-2022-02-07

log "Downloading pre-trained model from $repo_url"
git lfs install
git clone $repo_url
repo=$(basename $repo_url)

log "Display test files"
tree $repo/
ls -lh $repo/test_wavs/*.wav

for sym in 1 2 3; do
  log "Greedy search with --max-sym-per-frame $sym"

  ./transducer_stateless/pretrained.py \
    --method greedy_search \
    --max-sym-per-frame $sym \
    --checkpoint $repo/exp/pretrained.pt \
    --tokens $repo/data/lang_bpe_500/tokens.txt \
    $repo/test_wavs/1089-134686-0001.wav \
    $repo/test_wavs/1221-135766-0001.wav \
    $repo/test_wavs/1221-135766-0002.wav
done

for method in fast_beam_search modified_beam_search beam_search; do
  log "$method"

  ./transducer_stateless/pretrained.py \
    --method $method \
    --beam-size 4 \
    --checkpoint $repo/exp/pretrained.pt \
    --tokens $repo/data/lang_bpe_500/tokens.txt \
    $repo/test_wavs/1089-134686-0001.wav \
    $repo/test_wavs/1221-135766-0001.wav \
    $repo/test_wavs/1221-135766-0002.wav
done

echo "GITHUB_EVENT_NAME: ${GITHUB_EVENT_NAME}"
echo "GITHUB_EVENT_LABEL_NAME: ${GITHUB_EVENT_LABEL_NAME}"
if [[ x"${GITHUB_EVENT_NAME}" == x"schedule" || x"${GITHUB_EVENT_LABEL_NAME}" == x"run-decode" ]]; then
  mkdir -p transducer_stateless/exp
  ln -s $PWD/$repo/exp/pretrained.pt transducer_stateless/exp/epoch-999.pt
  ln -s $PWD/$repo/data/lang_bpe_500 data/

  ls -lh data
  ls -lh transducer_stateless/exp

  log "Decoding test-clean and test-other"

  # use a small value for decoding with CPU
  max_duration=100

  for method in greedy_search fast_beam_search modified_beam_search; do
    log "Decoding with $method"

    ./transducer_stateless/decode.py \
      --decoding-method $method \
      --epoch 999 \
      --avg 1 \
      --max-duration $max_duration \
      --exp-dir transducer_stateless/exp
  done

  rm transducer_stateless/exp/*.pt
fi
33
.github/scripts/run-pre-trained-transducer.sh
vendored
@@ -1,33 +0,0 @@
#!/usr/bin/env bash

set -e

log() {
  # This function is from espnet
  local fname=${BASH_SOURCE[1]##*/}
  echo -e "$(date '+%Y-%m-%d %H:%M:%S') (${fname}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*"
}

cd egs/librispeech/ASR

repo_url=https://huggingface.co/csukuangfj/icefall-asr-librispeech-transducer-bpe-500-2021-12-23

log "Downloading pre-trained model from $repo_url"
git lfs install
git clone $repo_url
repo=$(basename $repo_url)

log "Display test files"
tree $repo/
ls -lh $repo/test_wavs/*.wav

log "Beam search decoding"

./transducer/pretrained.py \
  --method beam_search \
  --beam-size 4 \
  --checkpoint $repo/exp/pretrained.pt \
  --tokens $repo/data/lang_bpe_500/tokens.txt \
  $repo/test_wavs/1089-134686-0001.wav \
  $repo/test_wavs/1221-135766-0001.wav \
  $repo/test_wavs/1221-135766-0002.wav
@@ -30,7 +30,7 @@ log "Test exporting to ONNX format"

 ./pruned_transducer_stateless2/export-onnx.py \
   --exp-dir $repo/exp \
-  --lang-dir $repo/data/lang_char \
+  --tokens $repo/data/lang_char/tokens.txt \
   --epoch 99 \
   --avg 1

@@ -38,14 +38,14 @@ log "Export to torchscript model"

 ./pruned_transducer_stateless2/export.py \
   --exp-dir $repo/exp \
-  --lang-dir $repo/data/lang_char \
+  --tokens $repo/data/lang_char/tokens.txt \
   --epoch 99 \
   --avg 1 \
   --jit 1

 ./pruned_transducer_stateless2/export.py \
   --exp-dir $repo/exp \
-  --lang-dir $repo/data/lang_char \
+  --tokens $repo/data/lang_char/tokens.txt \
   --epoch 99 \
   --avg 1 \
   --jit-trace 1
86
.github/scripts/yesno/ASR/run.sh
vendored
Executable file
@@ -0,0 +1,86 @@
#!/usr/bin/env bash

set -ex

log() {
  # This function is from espnet
  local fname=${BASH_SOURCE[1]##*/}
  echo -e "$(date '+%Y-%m-%d %H:%M:%S') (${fname}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*"
}

cd egs/yesno/ASR

log "data preparation"
./prepare.sh

log "training"
python3 ./tdnn/train.py

log "decoding"
python3 ./tdnn/decode.py
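
# The remaining steps export the trained model in several formats (pretrained.pt, torchscript, ONNX fp32/int8) and verify each one by decoding the same test waves.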
log "export to pretrained.pt"

python3 ./tdnn/export.py --epoch 14 --avg 2

python3 ./tdnn/pretrained.py \
  --checkpoint ./tdnn/exp/pretrained.pt \
  --HLG ./data/lang_phone/HLG.pt \
  --words-file ./data/lang_phone/words.txt \
  download/waves_yesno/0_0_0_1_0_0_0_1.wav \
  download/waves_yesno/0_0_1_0_0_0_1_0.wav

log "Test exporting to torchscript"
python3 ./tdnn/export.py --epoch 14 --avg 2 --jit 1

python3 ./tdnn/jit_pretrained.py \
  --nn-model ./tdnn/exp/cpu_jit.pt \
  --HLG ./data/lang_phone/HLG.pt \
  --words-file ./data/lang_phone/words.txt \
  download/waves_yesno/0_0_0_1_0_0_0_1.wav \
  download/waves_yesno/0_0_1_0_0_0_1_0.wav

log "Test exporting to onnx"
python3 ./tdnn/export_onnx.py --epoch 14 --avg 2

log "Test float32 model"
python3 ./tdnn/onnx_pretrained.py \
  --nn-model ./tdnn/exp/model-epoch-14-avg-2.onnx \
  --HLG ./data/lang_phone/HLG.pt \
  --words-file ./data/lang_phone/words.txt \
  download/waves_yesno/0_0_0_1_0_0_0_1.wav \
  download/waves_yesno/0_0_1_0_0_0_1_0.wav
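
# Repeat the check with the int8 quantized ONNX model from the export step above.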
log "Test int8 model"
python3 ./tdnn/onnx_pretrained.py \
  --nn-model ./tdnn/exp/model-epoch-14-avg-2.int8.onnx \
  --HLG ./data/lang_phone/HLG.pt \
  --words-file ./data/lang_phone/words.txt \
  download/waves_yesno/0_0_0_1_0_0_0_1.wav \
  download/waves_yesno/0_0_1_0_0_0_1_0.wav
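
# Besides HLG, also test FST decoding with the bare H and with HL using the torchscript model.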
log "Test decoding with H"
|
||||||
|
python3 ./tdnn/export.py --epoch 14 --avg 2 --jit 1
|
||||||
|
|
||||||
|
python3 ./tdnn/jit_pretrained_decode_with_H.py \
|
||||||
|
--nn-model ./tdnn/exp/cpu_jit.pt \
|
||||||
|
--H ./data/lang_phone/H.fst \
|
||||||
|
--tokens ./data/lang_phone/tokens.txt \
|
||||||
|
./download/waves_yesno/0_0_0_1_0_0_0_1.wav \
|
||||||
|
./download/waves_yesno/0_0_1_0_0_0_1_0.wav \
|
||||||
|
./download/waves_yesno/0_0_1_0_0_1_1_1.wav
|
||||||
|
|
||||||
|
log "Test decoding with HL"
|
||||||
|
python3 ./tdnn/export.py --epoch 14 --avg 2 --jit 1
|
||||||
|
|
||||||
|
python3 ./tdnn/jit_pretrained_decode_with_HL.py \
|
||||||
|
--nn-model ./tdnn/exp/cpu_jit.pt \
|
||||||
|
--HL ./data/lang_phone/HL.fst \
|
||||||
|
--words ./data/lang_phone/words.txt \
|
||||||
|
./download/waves_yesno/0_0_0_1_0_0_0_1.wav \
|
||||||
|
./download/waves_yesno/0_0_1_0_0_0_1_0.wav \
|
||||||
|
./download/waves_yesno/0_0_1_0_0_1_1_1.wav
|
||||||
|
|
||||||
|
log "Show generated files"
|
||||||
|
ls -lh tdnn/exp
|
||||||
|
ls -lh data/lang_phone
|
||||||
72
.github/workflows/aishell.yml
vendored
Normal file
@@ -0,0 +1,72 @@
name: aishell

on:
  push:
    branches:
      - master

  pull_request:
    branches:
      - master

  workflow_dispatch:

concurrency:
  group: aishell-${{ github.ref }}
  cancel-in-progress: true

jobs:
  generate_build_matrix:
    if: (github.repository_owner == 'csukuangfj' || github.repository_owner == 'k2-fsa') && (github.event.label.name == 'ready' || github.event_name == 'push' || github.event_name == 'aishell')

    # see https://github.com/pytorch/pytorch/pull/50633
    runs-on: ubuntu-latest
    outputs:
      matrix: ${{ steps.set-matrix.outputs.matrix }}
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Generating build matrix
        id: set-matrix
        run: |
          # outputting for debugging purposes
          python ./.github/scripts/docker/generate_build_matrix.py
          MATRIX=$(python ./.github/scripts/docker/generate_build_matrix.py)
          echo "::set-output name=matrix::${MATRIX}"
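
  # The aishell job below fans out over the matrix generated above; each entry
  # runs the recipe tests inside a pre-built CPU docker image.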
  aishell:
    needs: generate_build_matrix
    name: py${{ matrix.python-version }} torch${{ matrix.torch-version }} v${{ matrix.version }}
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        ${{ fromJson(needs.generate_build_matrix.outputs.matrix) }}

    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Free space
        shell: bash
        run: |
          df -h
          rm -rf /opt/hostedtoolcache
          df -h
          echo "pwd: $PWD"
          echo "github.workspace ${{ github.workspace }}"

      - name: Run aishell tests
        uses: addnab/docker-run-action@v3
        with:
          image: ghcr.io/${{ github.repository_owner }}/icefall:cpu-py${{ matrix.python-version }}-torch${{ matrix.torch-version }}-v${{ matrix.version }}
          options: |
            --volume ${{ github.workspace }}/:/icefall
          shell: bash
          run: |
            export PYTHONPATH=/icefall:$PYTHONPATH
            cd /icefall
            git config --global --add safe.directory /icefall

            .github/scripts/aishell/ASR/run.sh
81
.github/workflows/build-cpu-docker.yml
vendored
Normal file
@@ -0,0 +1,81 @@
name: build-cpu-docker
on:
  workflow_dispatch:

concurrency:
  group: build-cpu-docker-${{ github.ref }}
  cancel-in-progress: true

jobs:
  generate_build_matrix:
    if: github.repository_owner == 'csukuangfj' || github.repository_owner == 'k2-fsa'
    # see https://github.com/pytorch/pytorch/pull/50633
    runs-on: ubuntu-latest
    outputs:
      matrix: ${{ steps.set-matrix.outputs.matrix }}
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Generating build matrix
        id: set-matrix
        run: |
          # outputting for debugging purposes
          python ./.github/scripts/docker/generate_build_matrix.py
          MATRIX=$(python ./.github/scripts/docker/generate_build_matrix.py)
          echo "::set-output name=matrix::${MATRIX}"

  build-cpu-docker:
    needs: generate_build_matrix
    name: py${{ matrix.python-version }} torch${{ matrix.torch-version }} v${{ matrix.version }}
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        ${{ fromJson(needs.generate_build_matrix.outputs.matrix) }}

    steps:
      # refer to https://github.com/actions/checkout
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Free space
        shell: bash
        run: |
          df -h
          rm -rf /opt/hostedtoolcache
          df -h

      - name: 'Login to GitHub Container Registry'
        uses: docker/login-action@v2
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
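
      # Each matrix entry is built and pushed as a unique tag of the form
      # cpu-py<python>-torch<torch>-v<version>, which the recipe workflows pull.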
      - name: Build docker Image
        shell: bash
        run: |
          cd .github/scripts/docker
          torch_version=${{ matrix.torch-version }}
          torchaudio_version=${{ matrix.torchaudio-version }}

          echo "torch_version: $torch_version"
          echo "torchaudio_version: $torchaudio_version"

          version=${{ matrix.version }}

          tag=ghcr.io/${{ github.repository_owner }}/icefall:cpu-py${{ matrix.python-version }}-torch${{ matrix.torch-version }}-v$version
          echo "tag: $tag"

          docker build \
            -t $tag \
            --build-arg PYTHON_VERSION=${{ matrix.python-version }} \
            --build-arg TORCH_VERSION=$torch_version \
            --build-arg TORCHAUDIO_VERSION=$torchaudio_version \
            --build-arg K2_VERSION=${{ matrix.k2-version }} \
            --build-arg KALDIFEAT_VERSION=${{ matrix.kaldifeat-version }} \
            .

          docker image ls
          docker push $tag
71
.github/workflows/librispeech.yml
vendored
Normal file
@@ -0,0 +1,71 @@
name: librispeech
on:
  push:
    branches:
      - master

  pull_request:
    branches:
      - master

  workflow_dispatch:

concurrency:
  group: librispeech-${{ github.ref }}
  cancel-in-progress: true

jobs:
  generate_build_matrix:
    if: github.repository_owner == 'csukuangfj' || github.repository_owner == 'k2-fsa'
    # see https://github.com/pytorch/pytorch/pull/50633
    runs-on: ubuntu-latest
    outputs:
      matrix: ${{ steps.set-matrix.outputs.matrix }}
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Generating build matrix
        id: set-matrix
        run: |
          # outputting for debugging purposes
          python ./.github/scripts/docker/generate_build_matrix.py
          MATRIX=$(python ./.github/scripts/docker/generate_build_matrix.py)
          echo "::set-output name=matrix::${MATRIX}"

  librispeech:
    needs: generate_build_matrix
    name: py${{ matrix.python-version }} torch${{ matrix.torch-version }} v${{ matrix.version }}
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        ${{ fromJson(needs.generate_build_matrix.outputs.matrix) }}

    steps:
      # refer to https://github.com/actions/checkout
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Free space
        shell: bash
        run: |
          df -h
          rm -rf /opt/hostedtoolcache
          df -h
          echo "pwd: $PWD"
          echo "github.workspace ${{ github.workspace }}"

      - name: Test zipformer/train.py with LibriSpeech
        uses: addnab/docker-run-action@v3
        with:
          image: ghcr.io/${{ github.repository_owner }}/icefall:cpu-py${{ matrix.python-version }}-torch${{ matrix.torch-version }}-v${{ matrix.version }}
          options: |
            --volume ${{ github.workspace }}/:/icefall
          shell: bash
          run: |
            export PYTHONPATH=/icefall:$PYTHONPATH
            cd /icefall
            git config --global --add safe.directory /icefall

            .github/scripts/librispeech/ASR/run.sh
@@ -1,45 +1,30 @@
-# Copyright 2021 Fangjun Kuang (csukuangfj@gmail.com)
-
-# See ../../LICENSE for clarification regarding multiple authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-name: run-pre-trained-transducer
+name: run-multi-zh-hans

 on:
   push:
     branches:
       - master
-  pull_request:
-    types: [labeled]
+
+  workflow_dispatch:

 concurrency:
-  group: run_pre_trained_transducer-${{ github.ref }}
+  group: run-multi-zh-hans-${{ github.ref }}
   cancel-in-progress: true

+permissions:
+  contents: write
+
 jobs:
-  run_pre_trained_transducer:
-    if: github.event.label.name == 'ready' || github.event_name == 'push'
+  multi-zh-hans:
     runs-on: ${{ matrix.os }}
     strategy:
+      fail-fast: false
       matrix:
         os: [ubuntu-latest]
         python-version: [3.8]
-
-      fail-fast: false

     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v4
         with:
           fetch-depth: 0
@@ -68,13 +53,27 @@ jobs:
         if: steps.my-cache.outputs.cache-hit != 'true'
         shell: bash
         run: |
-          make -j2 _kaldifeat
+          .github/scripts/install-kaldifeat.sh

-      - name: Inference with pre-trained model
+      - name: export-model
         shell: bash
+        env:
+          HF_TOKEN: ${{ secrets.HF_TOKEN }}
         run: |
           sudo apt-get -qq install git-lfs tree
           export PYTHONPATH=$PWD:$PYTHONPATH
           export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH
           export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH
-          .github/scripts/run-pre-trained-transducer.sh
+
+          .github/scripts/multi-zh-hans.sh
+          ls -lh
+
+      - name: upload model to https://github.com/k2-fsa/sherpa-onnx
+        uses: svenstaro/upload-release-action@v2
+        with:
+          file_glob: true
+          file: ./*.tar.bz2
+          overwrite: true
+          repo_name: k2-fsa/sherpa-onnx
+          repo_token: ${{ secrets.UPLOAD_GH_SHERPA_ONNX_TOKEN }}
+          tag: asr-models
123
.github/workflows/run-aishell-2022-06-20.yml
vendored
@@ -1,123 +0,0 @@
# Copyright 2022 Fangjun Kuang (csukuangfj@gmail.com)

# See ../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name: run-aishell-2022-06-20
# pruned RNN-T + reworked model with random combiner
# https://huggingface.co/csukuangfj/icefall-aishell-pruned-transducer-stateless3-2022-06-20

on:
  push:
    branches:
      - master
  pull_request:
    types: [labeled]

  schedule:
    # minute (0-59)
    # hour (0-23)
    # day of the month (1-31)
    # month (1-12)
    # day of the week (0-6)
    # nightly build at 15:50 UTC time every day
    - cron: "50 15 * * *"

concurrency:
  group: run_aishell_2022_06_20-${{ github.ref }}
  cancel-in-progress: true

jobs:
  run_aishell_2022_06_20:
    if: github.event.label.name == 'ready' || github.event.label.name == 'run-decode' || github.event_name == 'push' || github.event_name == 'schedule'
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-latest]
        python-version: [3.8]

      fail-fast: false

    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: Setup Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
          cache: 'pip'
          cache-dependency-path: '**/requirements-ci.txt'

      - name: Install Python dependencies
        run: |
          grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install
          pip uninstall -y protobuf
          pip install --no-binary protobuf protobuf==3.20.*

      - name: Cache kaldifeat
        id: my-cache
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/kaldifeat
          key: cache-tmp-${{ matrix.python-version }}-2023-05-22

      - name: Install kaldifeat
        if: steps.my-cache.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/install-kaldifeat.sh

      - name: Inference with pre-trained model
        shell: bash
        env:
          GITHUB_EVENT_NAME: ${{ github.event_name }}
          GITHUB_EVENT_LABEL_NAME: ${{ github.event.label.name }}
        run: |
          sudo apt-get -qq install git-lfs tree
          export PYTHONPATH=$PWD:$PYTHONPATH
          export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH
          export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH

          .github/scripts/run-aishell-pruned-transducer-stateless3-2022-06-20.sh

      - name: Display decoding results for aishell pruned_transducer_stateless3
        if: github.event_name == 'schedule' || github.event.label.name == 'run-decode'
        shell: bash
        run: |
          cd egs/aishell/ASR/
          tree ./pruned_transducer_stateless3/exp

          cd pruned_transducer_stateless3
          echo "results for pruned_transducer_stateless3"
          echo "===greedy search==="
          find exp/greedy_search -name "log-*" -exec grep -n --color "best for test" {} + | sort -n -k2
          find exp/greedy_search -name "log-*" -exec grep -n --color "best for dev" {} + | sort -n -k2

          echo "===fast_beam_search==="
          find exp/fast_beam_search -name "log-*" -exec grep -n --color "best for test" {} + | sort -n -k2
          find exp/fast_beam_search -name "log-*" -exec grep -n --color "best for dev" {} + | sort -n -k2

          echo "===modified beam search==="
          find exp/modified_beam_search -name "log-*" -exec grep -n --color "best for test" {} + | sort -n -k2
          find exp/modified_beam_search -name "log-*" -exec grep -n --color "best for dev" {} + | sort -n -k2

      - name: Upload decoding results for aishell pruned_transducer_stateless3
        uses: actions/upload-artifact@v2
        if: github.event_name == 'schedule' || github.event.label.name == 'run-decode'
        with:
          name: aishell-torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-latest-cpu-pruned_transducer_stateless3-2022-06-20
          path: egs/aishell/ASR/pruned_transducer_stateless3/exp/
@@ -1,95 +0,0 @@
# Copyright 2023 Zengrui Jin (Xiaomi Corp.)

# See ../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name: run-aishell-zipformer-2023-10-24

on:
  push:
    branches:
      - master
  pull_request:
    types: [labeled]

  schedule:
    # minute (0-59)
    # hour (0-23)
    # day of the month (1-31)
    # month (1-12)
    # day of the week (0-6)
    # nightly build at 15:50 UTC time every day
    - cron: "50 15 * * *"

concurrency:
  group: run_aishell_zipformer_2023_10_24-${{ github.ref }}
  cancel-in-progress: true

jobs:
  run_aishell_zipformer_2023_10_24:
    if: github.event.label.name == 'ready' || github.event.label.name == 'zipformer' || github.event.label.name == 'run-decode' || github.event_name == 'push' || github.event_name == 'schedule'
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-latest]
        python-version: [3.8]

      fail-fast: false

    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: Setup Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
          cache: 'pip'
          cache-dependency-path: '**/requirements-ci.txt'

      - name: Install Python dependencies
        run: |
          grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install
          pip uninstall -y protobuf
          pip install --no-binary protobuf protobuf==3.20.*

      - name: Cache kaldifeat
        id: my-cache
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/kaldifeat
          key: cache-tmp-${{ matrix.python-version }}-2023-05-22

      - name: Install kaldifeat
        if: steps.my-cache.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/install-kaldifeat.sh

      - name: Inference with pre-trained model
        shell: bash
        env:
          GITHUB_EVENT_NAME: ${{ github.event_name }}
          GITHUB_EVENT_LABEL_NAME: ${{ github.event.label.name }}
        run: |
          sudo apt-get -qq install git-lfs tree
          export PYTHONPATH=$PWD:$PYTHONPATH
          export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH
          export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH

          .github/scripts/run-aishell-zipformer-2023-10-24.sh
@@ -21,6 +21,7 @@ on:
   push:
     branches:
       - master
+
   pull_request:
     types: [labeled]

@@ -33,6 +34,8 @@ on:
     # nightly build at 15:50 UTC time every day
     - cron: "50 15 * * *"

+  workflow_dispatch:
+
 concurrency:
   group: run_gigaspeech_2023_10_17_zipformer-${{ github.ref }}
   cancel-in-progress: true

@@ -85,6 +88,7 @@ jobs:
         env:
           GITHUB_EVENT_NAME: ${{ github.event_name }}
           GITHUB_EVENT_LABEL_NAME: ${{ github.event.label.name }}
+          HF_TOKEN: ${{ secrets.HF_TOKEN }}
         run: |
           mkdir -p egs/gigaspeech/ASR/data
           ln -sfv ~/tmp/fbank-libri egs/gigaspeech/ASR/data/fbank

@@ -97,6 +101,16 @@ jobs:

           .github/scripts/run-gigaspeech-zipformer-2023-10-17.sh

+      - name: upload model to https://github.com/k2-fsa/sherpa-onnx
+        uses: svenstaro/upload-release-action@v2
+        with:
+          file_glob: true
+          file: ./*.tar.bz2
+          overwrite: true
+          repo_name: k2-fsa/sherpa-onnx
+          repo_token: ${{ secrets.UPLOAD_GH_SHERPA_ONNX_TOKEN }}
+          tag: asr-models
+
       - name: Display decoding results for gigaspeech zipformer
         if: github.event_name == 'schedule' || github.event.label.name == 'run-decode'
         shell: bash
159
.github/workflows/run-librispeech-2022-03-12.yml
vendored
@@ -1,159 +0,0 @@
# Copyright 2021 Fangjun Kuang (csukuangfj@gmail.com)

# See ../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name: run-librispeech-2022-03-12
# stateless transducer + k2 pruned rnnt-loss

on:
  push:
    branches:
      - master
  pull_request:
    types: [labeled]

  schedule:
    # minute (0-59)
    # hour (0-23)
    # day of the month (1-31)
    # month (1-12)
    # day of the week (0-6)
    # nightly build at 15:50 UTC time every day
    - cron: "50 15 * * *"

concurrency:
  group: run_librispeech_2022_03_12-${{ github.ref }}
  cancel-in-progress: true

jobs:
  run_librispeech_2022_03_12:
    if: github.event.label.name == 'ready' || github.event.label.name == 'run-decode' || github.event_name == 'push' || github.event_name == 'schedule'
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-latest]
        python-version: [3.8]

      fail-fast: false

    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: Setup Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
          cache: 'pip'
          cache-dependency-path: '**/requirements-ci.txt'

      - name: Install Python dependencies
        run: |
          grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install
          pip uninstall -y protobuf
          pip install --no-binary protobuf protobuf==3.20.*

      - name: Cache kaldifeat
        id: my-cache
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/kaldifeat
          key: cache-tmp-${{ matrix.python-version }}-2023-05-22

      - name: Install kaldifeat
        if: steps.my-cache.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/install-kaldifeat.sh

      - name: Cache LibriSpeech test-clean and test-other datasets
        id: libri-test-clean-and-test-other-data
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/download
          key: cache-libri-test-clean-and-test-other

      - name: Download LibriSpeech test-clean and test-other
        if: steps.libri-test-clean-and-test-other-data.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/download-librispeech-test-clean-and-test-other-dataset.sh

      - name: Prepare manifests for LibriSpeech test-clean and test-other
        shell: bash
        run: |
          .github/scripts/prepare-librispeech-test-clean-and-test-other-manifests.sh

      - name: Cache LibriSpeech test-clean and test-other fbank features
        id: libri-test-clean-and-test-other-fbank
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/fbank-libri
          key: cache-libri-fbank-test-clean-and-test-other-v2

      - name: Compute fbank for LibriSpeech test-clean and test-other
        if: steps.libri-test-clean-and-test-other-fbank.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/compute-fbank-librispeech-test-clean-and-test-other.sh

      - name: Inference with pre-trained model
        shell: bash
        env:
          GITHUB_EVENT_NAME: ${{ github.event_name }}
          GITHUB_EVENT_LABEL_NAME: ${{ github.event.label.name }}
        run: |
          mkdir -p egs/librispeech/ASR/data
          ln -sfv ~/tmp/fbank-libri egs/librispeech/ASR/data/fbank
          ls -lh egs/librispeech/ASR/data/*

          sudo apt-get -qq install git-lfs tree
          export PYTHONPATH=$PWD:$PYTHONPATH
          export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH
          export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH

          .github/scripts/run-librispeech-pruned-transducer-stateless-2022-03-12.sh

      - name: Display decoding results for pruned_transducer_stateless
        if: github.event_name == 'schedule' || github.event.label.name == 'run-decode'
        shell: bash
        run: |
          cd egs/librispeech/ASR/
          tree ./pruned_transducer_stateless/exp

          cd pruned_transducer_stateless
          echo "results for pruned_transducer_stateless"
          echo "===greedy search==="
          find exp/greedy_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/greedy_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

          echo "===fast_beam_search==="
          find exp/fast_beam_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/fast_beam_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

          echo "===modified beam search==="
          find exp/modified_beam_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/modified_beam_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

      - name: Upload decoding results for pruned_transducer_stateless
        uses: actions/upload-artifact@v2
        if: github.event_name == 'schedule' || github.event.label.name == 'run-decode'
        with:
          name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-latest-cpu-pruned_transducer_stateless-2022-03-12
          path: egs/librispeech/ASR/pruned_transducer_stateless/exp/
185
.github/workflows/run-librispeech-2022-04-29.yml
vendored
@@ -1,185 +0,0 @@
# Copyright 2021 Fangjun Kuang (csukuangfj@gmail.com)

# See ../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name: run-librispeech-2022-04-29
# stateless pruned transducer (reworked model) + giga speech

on:
  push:
    branches:
      - master
  pull_request:
    types: [labeled]

  schedule:
    # minute (0-59)
    # hour (0-23)
    # day of the month (1-31)
    # month (1-12)
    # day of the week (0-6)
    # nightly build at 15:50 UTC time every day
    - cron: "50 15 * * *"

concurrency:
  group: run_librispeech_2022_04_29-${{ github.ref }}
  cancel-in-progress: true

jobs:
  run_librispeech_2022_04_29:
    if: github.event.label.name == 'ready' || github.event.label.name == 'run-decode' || github.event_name == 'push' || github.event_name == 'schedule'
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-latest]
        python-version: [3.8]

      fail-fast: false

    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: Setup Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
          cache: 'pip'
          cache-dependency-path: '**/requirements-ci.txt'

      - name: Install Python dependencies
        run: |
          grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install
          pip uninstall -y protobuf
          pip install --no-binary protobuf protobuf==3.20.*

      - name: Cache kaldifeat
        id: my-cache
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/kaldifeat
          key: cache-tmp-${{ matrix.python-version }}-2023-05-22

      - name: Install kaldifeat
        if: steps.my-cache.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/install-kaldifeat.sh

      - name: Cache LibriSpeech test-clean and test-other datasets
        id: libri-test-clean-and-test-other-data
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/download
          key: cache-libri-test-clean-and-test-other

      - name: Download LibriSpeech test-clean and test-other
        if: steps.libri-test-clean-and-test-other-data.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/download-librispeech-test-clean-and-test-other-dataset.sh

      - name: Prepare manifests for LibriSpeech test-clean and test-other
        shell: bash
        run: |
          .github/scripts/prepare-librispeech-test-clean-and-test-other-manifests.sh

      - name: Cache LibriSpeech test-clean and test-other fbank features
        id: libri-test-clean-and-test-other-fbank
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/fbank-libri
          key: cache-libri-fbank-test-clean-and-test-other-v2

      - name: Compute fbank for LibriSpeech test-clean and test-other
        if: steps.libri-test-clean-and-test-other-fbank.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/compute-fbank-librispeech-test-clean-and-test-other.sh

      - name: Inference with pre-trained model
        shell: bash
        env:
          GITHUB_EVENT_NAME: ${{ github.event_name }}
          GITHUB_EVENT_LABEL_NAME: ${{ github.event.label.name }}
        run: |
          mkdir -p egs/librispeech/ASR/data
          ln -sfv ~/tmp/fbank-libri egs/librispeech/ASR/data/fbank
          ls -lh egs/librispeech/ASR/data/*

          sudo apt-get -qq install git-lfs tree
          export PYTHONPATH=$PWD:$PYTHONPATH
          export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH
          export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH

          .github/scripts/run-librispeech-pruned-transducer-stateless2-2022-04-29.sh

          .github/scripts/run-librispeech-pruned-transducer-stateless3-2022-04-29.sh

      - name: Display decoding results for pruned_transducer_stateless2
        if: github.event_name == 'schedule' || github.event.label.name == 'run-decode'
        shell: bash
        run: |
          cd egs/librispeech/ASR
          tree pruned_transducer_stateless2/exp
          cd pruned_transducer_stateless2/exp
          echo "===greedy search==="
          find greedy_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find greedy_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

          echo "===fast_beam_search==="
          find fast_beam_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find fast_beam_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

          echo "===modified beam search==="
          find modified_beam_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find modified_beam_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

      - name: Display decoding results for pruned_transducer_stateless3
        if: github.event_name == 'schedule' || github.event.label.name == 'run-decode'
        shell: bash
        run: |
          cd egs/librispeech/ASR
          tree pruned_transducer_stateless3/exp
          cd pruned_transducer_stateless3/exp
          echo "===greedy search==="
          find greedy_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find greedy_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

          echo "===fast_beam_search==="
          find fast_beam_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find fast_beam_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

          echo "===modified beam search==="
          find modified_beam_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find modified_beam_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

      - name: Upload decoding results for pruned_transducer_stateless2
        uses: actions/upload-artifact@v2
        if: github.event_name == 'schedule' || github.event.label.name == 'run-decode'
        with:
          name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-latest-cpu-pruned_transducer_stateless2-2022-04-29
          path: egs/librispeech/ASR/pruned_transducer_stateless2/exp/

      - name: Upload decoding results for pruned_transducer_stateless3
        uses: actions/upload-artifact@v2
        if: github.event_name == 'schedule' || github.event.label.name == 'run-decode'
        with:
          name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-latest-cpu-pruned_transducer_stateless3-2022-04-29
          path: egs/librispeech/ASR/pruned_transducer_stateless3/exp/
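The "Cache kaldifeat" / "Install kaldifeat" pair above is the standard actions/cache pattern: the install step runs only when the cache step reports a miss, otherwise the restored ~/tmp/kaldifeat is used as-is. The same control flow, sketched in plain bash (CACHE_HIT is a stand-in for steps.my-cache.outputs.cache-hit, not a real workflow variable):

    # Bash analogue of: if: steps.my-cache.outputs.cache-hit != 'true'
    CACHE_HIT="${CACHE_HIT:-false}"   # assumed to be set by the cache step
    if [ "$CACHE_HIT" != "true" ]; then
      # Rebuild only on a cache miss; a hit skips this entirely.
      .github/scripts/install-kaldifeat.sh
    fi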
159
.github/workflows/run-librispeech-2022-05-13.yml
vendored
@ -1,159 +0,0 @@
# Copyright 2022 Fangjun Kuang (csukuangfj@gmail.com)

# See ../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name: run-librispeech-2022-05-13
# stateless transducer + k2 pruned rnnt-loss + deeper model

on:
  push:
    branches:
      - master
  pull_request:
    types: [labeled]

  schedule:
    # minute (0-59)
    # hour (0-23)
    # day of the month (1-31)
    # month (1-12)
    # day of the week (0-6)
    # nightly build at 15:50 UTC time every day
    - cron: "50 15 * * *"

concurrency:
  group: run_librispeech_2022_05_13-${{ github.ref }}
  cancel-in-progress: true

jobs:
  run_librispeech_2022_05_13:
    if: github.event.label.name == 'ready' || github.event.label.name == 'run-decode' || github.event_name == 'push' || github.event_name == 'schedule'
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-latest]
        python-version: [3.8]

      fail-fast: false

    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: Setup Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
          cache: 'pip'
          cache-dependency-path: '**/requirements-ci.txt'

      - name: Install Python dependencies
        run: |
          grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install
          pip uninstall -y protobuf
          pip install --no-binary protobuf protobuf==3.20.*

      - name: Cache kaldifeat
        id: my-cache
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/kaldifeat
          key: cache-tmp-${{ matrix.python-version }}-2023-05-22

      - name: Install kaldifeat
        if: steps.my-cache.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/install-kaldifeat.sh

      - name: Cache LibriSpeech test-clean and test-other datasets
        id: libri-test-clean-and-test-other-data
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/download
          key: cache-libri-test-clean-and-test-other

      - name: Download LibriSpeech test-clean and test-other
        if: steps.libri-test-clean-and-test-other-data.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/download-librispeech-test-clean-and-test-other-dataset.sh

      - name: Prepare manifests for LibriSpeech test-clean and test-other
        shell: bash
        run: |
          .github/scripts/prepare-librispeech-test-clean-and-test-other-manifests.sh

      - name: Cache LibriSpeech test-clean and test-other fbank features
        id: libri-test-clean-and-test-other-fbank
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/fbank-libri
          key: cache-libri-fbank-test-clean-and-test-other-v2

      - name: Compute fbank for LibriSpeech test-clean and test-other
        if: steps.libri-test-clean-and-test-other-fbank.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/compute-fbank-librispeech-test-clean-and-test-other.sh

      - name: Inference with pre-trained model
        shell: bash
        env:
          GITHUB_EVENT_NAME: ${{ github.event_name }}
          GITHUB_EVENT_LABEL_NAME: ${{ github.event.label.name }}
        run: |
          mkdir -p egs/librispeech/ASR/data
          ln -sfv ~/tmp/fbank-libri egs/librispeech/ASR/data/fbank
          ls -lh egs/librispeech/ASR/data/*

          sudo apt-get -qq install git-lfs tree
          export PYTHONPATH=$PWD:$PYTHONPATH
          export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH
          export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH

          .github/scripts/run-librispeech-pruned-transducer-stateless5-2022-05-13.sh

      - name: Display decoding results for librispeech pruned_transducer_stateless5
        if: github.event_name == 'schedule' || github.event.label.name == 'run-decode'
        shell: bash
        run: |
          cd egs/librispeech/ASR/
          tree ./pruned_transducer_stateless5/exp

          cd pruned_transducer_stateless5
          echo "results for pruned_transducer_stateless5"
          echo "===greedy search==="
          find exp/greedy_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/greedy_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

          echo "===fast_beam_search==="
          find exp/fast_beam_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/fast_beam_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

          echo "===modified beam search==="
          find exp/modified_beam_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/modified_beam_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

      - name: Upload decoding results for librispeech pruned_transducer_stateless5
        uses: actions/upload-artifact@v2
        if: github.event_name == 'schedule' || github.event.label.name == 'run-decode'
        with:
          name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-latest-cpu-pruned_transducer_stateless5-2022-05-13
          path: egs/librispeech/ASR/pruned_transducer_stateless5/exp/
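Every workflow in this batch uses the same five-field cron spec for its nightly run. Reading "50 15 * * *" field by field (minute, hour, day of month, month, day of week) gives minute 50 of hour 15 on every day, i.e. 15:50 UTC daily, matching the comments in the files:

    # cron: "50 15 * * *"
    #        |  |  | | |
    #        |  |  | | +-- day of week (0-6): any
    #        |  |  | +---- month (1-12): any
    #        |  |  +------ day of month (1-31): any
    #        |  +--------- hour (0-23): 15
    #        +------------ minute (0-59): 50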
@ -1,159 +0,0 @@
# Copyright 2022 Fangjun Kuang (csukuangfj@gmail.com)

# See ../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name: run-librispeech-2022-11-11-stateless7
# zipformer

on:
  push:
    branches:
      - master
  pull_request:
    types: [labeled]

  schedule:
    # minute (0-59)
    # hour (0-23)
    # day of the month (1-31)
    # month (1-12)
    # day of the week (0-6)
    # nightly build at 15:50 UTC time every day
    - cron: "50 15 * * *"

concurrency:
  group: run_librispeech_2022_11_11_zipformer-${{ github.ref }}
  cancel-in-progress: true

jobs:
  run_librispeech_2022_11_11_zipformer:
    if: github.event.label.name == 'ready' || github.event.label.name == 'run-decode' || github.event_name == 'push' || github.event_name == 'schedule'
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-latest]
        python-version: [3.8]

      fail-fast: false

    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: Setup Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
          cache: 'pip'
          cache-dependency-path: '**/requirements-ci.txt'

      - name: Install Python dependencies
        run: |
          grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install
          pip uninstall -y protobuf
          pip install --no-binary protobuf protobuf==3.20.*

      - name: Cache kaldifeat
        id: my-cache
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/kaldifeat
          key: cache-tmp-${{ matrix.python-version }}-2023-05-22

      - name: Install kaldifeat
        if: steps.my-cache.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/install-kaldifeat.sh

      - name: Cache LibriSpeech test-clean and test-other datasets
        id: libri-test-clean-and-test-other-data
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/download
          key: cache-libri-test-clean-and-test-other

      - name: Download LibriSpeech test-clean and test-other
        if: steps.libri-test-clean-and-test-other-data.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/download-librispeech-test-clean-and-test-other-dataset.sh

      - name: Prepare manifests for LibriSpeech test-clean and test-other
        shell: bash
        run: |
          .github/scripts/prepare-librispeech-test-clean-and-test-other-manifests.sh

      - name: Cache LibriSpeech test-clean and test-other fbank features
        id: libri-test-clean-and-test-other-fbank
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/fbank-libri
          key: cache-libri-fbank-test-clean-and-test-other-v2

      - name: Compute fbank for LibriSpeech test-clean and test-other
        if: steps.libri-test-clean-and-test-other-fbank.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/compute-fbank-librispeech-test-clean-and-test-other.sh

      - name: Inference with pre-trained model
        shell: bash
        env:
          GITHUB_EVENT_NAME: ${{ github.event_name }}
          GITHUB_EVENT_LABEL_NAME: ${{ github.event.label.name }}
        run: |
          mkdir -p egs/librispeech/ASR/data
          ln -sfv ~/tmp/fbank-libri egs/librispeech/ASR/data/fbank
          ls -lh egs/librispeech/ASR/data/*

          sudo apt-get -qq install git-lfs tree
          export PYTHONPATH=$PWD:$PYTHONPATH
          export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH
          export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH

          .github/scripts/run-librispeech-pruned-transducer-stateless7-2022-11-11.sh

      - name: Display decoding results for librispeech pruned_transducer_stateless7
        if: github.event_name == 'schedule' || github.event.label.name == 'run-decode'
        shell: bash
        run: |
          cd egs/librispeech/ASR/
          tree ./pruned_transducer_stateless7/exp

          cd pruned_transducer_stateless7
          echo "results for pruned_transducer_stateless7"
          echo "===greedy search==="
          find exp/greedy_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/greedy_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

          echo "===fast_beam_search==="
          find exp/fast_beam_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/fast_beam_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

          echo "===modified beam search==="
          find exp/modified_beam_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/modified_beam_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

      - name: Upload decoding results for librispeech pruned_transducer_stateless7
        uses: actions/upload-artifact@v2
        if: github.event_name == 'schedule' || github.event.label.name == 'run-decode'
        with:
          name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-latest-cpu-pruned_transducer_stateless7-2022-11-11
          path: egs/librispeech/ASR/pruned_transducer_stateless7/exp/
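The dependency step in each workflow filters comments out of requirements-ci.txt and hands each remaining line to its own pip invocation, rather than one batched "pip install -r". A sketch against a hypothetical requirements file (the file contents below are invented for illustration):

    # Hypothetical requirements-ci.txt:
    #   # build deps
    #   kaldifeat
    #   sentencepiece>=0.1.96
    #
    # grep -v '^#' drops the comment lines; xargs -n 1 -L 1 then runs
    # "pip install <requirement>" once per surviving input line.
    grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install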
@ -1,159 +0,0 @@
# Copyright 2022 Fangjun Kuang (csukuangfj@gmail.com)

# See ../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name: run-librispeech-2022-11-14-stateless8
# zipformer

on:
  push:
    branches:
      - master
  pull_request:
    types: [labeled]

  schedule:
    # minute (0-59)
    # hour (0-23)
    # day of the month (1-31)
    # month (1-12)
    # day of the week (0-6)
    # nightly build at 15:50 UTC time every day
    - cron: "50 15 * * *"

concurrency:
  group: run_librispeech_2022_11_14_zipformer_stateless8-${{ github.ref }}
  cancel-in-progress: true

jobs:
  run_librispeech_2022_11_14_zipformer_stateless8:
    if: github.event.label.name == 'ready' || github.event.label.name == 'run-decode' || github.event_name == 'push' || github.event_name == 'schedule'
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-latest]
        python-version: [3.8]

      fail-fast: false

    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: Setup Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
          cache: 'pip'
          cache-dependency-path: '**/requirements-ci.txt'

      - name: Install Python dependencies
        run: |
          grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install
          pip uninstall -y protobuf
          pip install --no-binary protobuf protobuf==3.20.*

      - name: Cache kaldifeat
        id: my-cache
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/kaldifeat
          key: cache-tmp-${{ matrix.python-version }}-2023-05-22

      - name: Install kaldifeat
        if: steps.my-cache.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/install-kaldifeat.sh

      - name: Cache LibriSpeech test-clean and test-other datasets
        id: libri-test-clean-and-test-other-data
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/download
          key: cache-libri-test-clean-and-test-other

      - name: Download LibriSpeech test-clean and test-other
        if: steps.libri-test-clean-and-test-other-data.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/download-librispeech-test-clean-and-test-other-dataset.sh

      - name: Prepare manifests for LibriSpeech test-clean and test-other
        shell: bash
        run: |
          .github/scripts/prepare-librispeech-test-clean-and-test-other-manifests.sh

      - name: Cache LibriSpeech test-clean and test-other fbank features
        id: libri-test-clean-and-test-other-fbank
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/fbank-libri
          key: cache-libri-fbank-test-clean-and-test-other-v2

      - name: Compute fbank for LibriSpeech test-clean and test-other
        if: steps.libri-test-clean-and-test-other-fbank.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/compute-fbank-librispeech-test-clean-and-test-other.sh

      - name: Inference with pre-trained model
        shell: bash
        env:
          GITHUB_EVENT_NAME: ${{ github.event_name }}
          GITHUB_EVENT_LABEL_NAME: ${{ github.event.label.name }}
        run: |
          mkdir -p egs/librispeech/ASR/data
          ln -sfv ~/tmp/fbank-libri egs/librispeech/ASR/data/fbank
          ls -lh egs/librispeech/ASR/data/*

          sudo apt-get -qq install git-lfs tree
          export PYTHONPATH=$PWD:$PYTHONPATH
          export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH
          export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH

          .github/scripts/run-librispeech-pruned-transducer-stateless8-2022-11-14.sh

      - name: Display decoding results for librispeech pruned_transducer_stateless8
        if: github.event_name == 'schedule' || github.event.label.name == 'run-decode'
        shell: bash
        run: |
          cd egs/librispeech/ASR/
          tree ./pruned_transducer_stateless8/exp

          cd pruned_transducer_stateless8
          echo "results for pruned_transducer_stateless8"
          echo "===greedy search==="
          find exp/greedy_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/greedy_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

          echo "===fast_beam_search==="
          find exp/fast_beam_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/fast_beam_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

          echo "===modified beam search==="
          find exp/modified_beam_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/modified_beam_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

      - name: Upload decoding results for librispeech pruned_transducer_stateless8
        uses: actions/upload-artifact@v2
        if: github.event_name == 'schedule' || github.event.label.name == 'run-decode'
        with:
          name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-latest-cpu-pruned_transducer_stateless8-2022-11-14
          path: egs/librispeech/ASR/pruned_transducer_stateless8/exp/
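One detail worth noting in the artifact names above: they interpolate ${{ matrix.torch }}, but each job's matrix defines only os and python-version, so that expression most likely renders as an empty string and the artifact comes out named torch--python-3.8-.... A bash analogue of the same silent empty expansion (the variable names here are illustrative, not from the workflow):

    # Analogue only: an unset shell variable expands to "", much like an
    # undefined matrix key in a ${{ ... }} expression.
    unset TORCH
    echo "torch-${TORCH}-python-3.8-ubuntu-latest-cpu-pruned_transducer_stateless8-2022-11-14"
    # -> torch--python-3.8-ubuntu-latest-cpu-pruned_transducer_stateless8-2022-11-14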
@ -1,163 +0,0 @@
# Copyright 2022 Fangjun Kuang (csukuangfj@gmail.com)

# See ../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name: run-librispeech-2022-12-01-stateless7-ctc
# zipformer

on:
  push:
    branches:
      - master
  pull_request:
    types: [labeled]

  schedule:
    # minute (0-59)
    # hour (0-23)
    # day of the month (1-31)
    # month (1-12)
    # day of the week (0-6)
    # nightly build at 15:50 UTC time every day
    - cron: "50 15 * * *"

jobs:
  run_librispeech_2022_11_11_zipformer:
    if: github.event.label.name == 'ready' || github.event.label.name == 'run-decode' || github.event_name == 'push' || github.event_name == 'schedule'
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-latest]
        python-version: [3.8]

      fail-fast: false

    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: Setup Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
          cache: 'pip'
          cache-dependency-path: '**/requirements-ci.txt'

      - name: Install Python dependencies
        run: |
          grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install
          pip uninstall -y protobuf
          pip install --no-binary protobuf protobuf==3.20.*

      - name: Cache kaldifeat
        id: my-cache
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/kaldifeat
          key: cache-tmp-${{ matrix.python-version }}-2023-05-22

      - name: Install kaldifeat
        if: steps.my-cache.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/install-kaldifeat.sh

      - name: Cache LibriSpeech test-clean and test-other datasets
        id: libri-test-clean-and-test-other-data
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/download
          key: cache-libri-test-clean-and-test-other

      - name: Download LibriSpeech test-clean and test-other
        if: steps.libri-test-clean-and-test-other-data.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/download-librispeech-test-clean-and-test-other-dataset.sh

      - name: Prepare manifests for LibriSpeech test-clean and test-other
        shell: bash
        run: |
          .github/scripts/prepare-librispeech-test-clean-and-test-other-manifests.sh

      - name: Cache LibriSpeech test-clean and test-other fbank features
        id: libri-test-clean-and-test-other-fbank
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/fbank-libri
          key: cache-libri-fbank-test-clean-and-test-other-v2

      - name: Compute fbank for LibriSpeech test-clean and test-other
        if: steps.libri-test-clean-and-test-other-fbank.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/compute-fbank-librispeech-test-clean-and-test-other.sh

      - name: Inference with pre-trained model
        shell: bash
        env:
          GITHUB_EVENT_NAME: ${{ github.event_name }}
          GITHUB_EVENT_LABEL_NAME: ${{ github.event.label.name }}
        run: |
          mkdir -p egs/librispeech/ASR/data
          ln -sfv ~/tmp/fbank-libri egs/librispeech/ASR/data/fbank
          ls -lh egs/librispeech/ASR/data/*

          sudo apt-get -qq install git-lfs tree
          export PYTHONPATH=$PWD:$PYTHONPATH
          export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH
          export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH

          .github/scripts/run-librispeech-pruned-transducer-stateless7-ctc-2022-12-01.sh

      - name: Display decoding results for librispeech pruned_transducer_stateless7_ctc
        if: github.event_name == 'schedule' || github.event.label.name == 'run-decode'
        shell: bash
        run: |
          cd egs/librispeech/ASR/
          tree ./pruned_transducer_stateless7_ctc/exp

          cd pruned_transducer_stateless7_ctc
          echo "results for pruned_transducer_stateless7_ctc"
          echo "===greedy search==="
          find exp/greedy_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/greedy_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

          echo "===fast_beam_search==="
          find exp/fast_beam_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/fast_beam_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

          echo "===modified beam search==="
          find exp/modified_beam_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/modified_beam_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

          echo "===ctc decoding==="
          find exp/ctc-decoding -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/ctc-decoding -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

          echo "===1best==="
          find exp/1best -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/1best -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

      - name: Upload decoding results for librispeech pruned_transducer_stateless7_ctc
        uses: actions/upload-artifact@v2
        if: github.event_name == 'schedule' || github.event.label.name == 'run-decode'
        with:
          name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-latest-cpu-pruned_transducer_stateless7-ctc-2022-12-01
          path: egs/librispeech/ASR/pruned_transducer_stateless7_ctc/exp/
@ -1,167 +0,0 @@
# Copyright 2022 Zengwei Yao

# See ../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name: run-librispeech-2022-12-08-zipformer-mmi
# zipformer

on:
  push:
    branches:
      - master
  pull_request:
    types: [labeled]

  schedule:
    # minute (0-59)
    # hour (0-23)
    # day of the month (1-31)
    # month (1-12)
    # day of the week (0-6)
    # nightly build at 15:50 UTC time every day
    - cron: "50 15 * * *"

concurrency:
  group: run_librispeech_2022_12_08_zipformer-${{ github.ref }}
  cancel-in-progress: true

jobs:
  run_librispeech_2022_12_08_zipformer:
    if: github.event.label.name == 'ready' || github.event.label.name == 'run-decode' || github.event_name == 'push' || github.event_name == 'schedule'
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-latest]
        python-version: [3.8]

      fail-fast: false

    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: Setup Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
          cache: 'pip'
          cache-dependency-path: '**/requirements-ci.txt'

      - name: Install Python dependencies
        run: |
          grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install
          pip uninstall -y protobuf
          pip install --no-binary protobuf protobuf==3.20.*

      - name: Cache kaldifeat
        id: my-cache
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/kaldifeat
          key: cache-tmp-${{ matrix.python-version }}-2023-05-22

      - name: Install kaldifeat
        if: steps.my-cache.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/install-kaldifeat.sh

      - name: Cache LibriSpeech test-clean and test-other datasets
        id: libri-test-clean-and-test-other-data
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/download
          key: cache-libri-test-clean-and-test-other

      - name: Download LibriSpeech test-clean and test-other
        if: steps.libri-test-clean-and-test-other-data.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/download-librispeech-test-clean-and-test-other-dataset.sh

      - name: Prepare manifests for LibriSpeech test-clean and test-other
        shell: bash
        run: |
          .github/scripts/prepare-librispeech-test-clean-and-test-other-manifests.sh

      - name: Cache LibriSpeech test-clean and test-other fbank features
        id: libri-test-clean-and-test-other-fbank
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/fbank-libri
          key: cache-libri-fbank-test-clean-and-test-other-v2

      - name: Compute fbank for LibriSpeech test-clean and test-other
        if: steps.libri-test-clean-and-test-other-fbank.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/compute-fbank-librispeech-test-clean-and-test-other.sh

      - name: Inference with pre-trained model
        shell: bash
        env:
          GITHUB_EVENT_NAME: ${{ github.event_name }}
          GITHUB_EVENT_LABEL_NAME: ${{ github.event.label.name }}
        run: |
          mkdir -p egs/librispeech/ASR/data
          ln -sfv ~/tmp/fbank-libri egs/librispeech/ASR/data/fbank
          ls -lh egs/librispeech/ASR/data/*

          sudo apt-get -qq install git-lfs tree
          export PYTHONPATH=$PWD:$PYTHONPATH
          export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH
          export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH

          .github/scripts/run-librispeech-zipformer-mmi-2022-12-08.sh

      - name: Display decoding results for librispeech zipformer-mmi
        if: github.event_name == 'schedule' || github.event.label.name == 'run-decode'
        shell: bash
        run: |
          cd egs/librispeech/ASR/
          tree ./zipformer-mmi/exp

          cd zipformer-mmi
          echo "results for zipformer-mmi"
          echo "===1best==="
          find exp/1best -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/1best -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

          echo "===nbest==="
          find exp/nbest -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/nbest -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

          echo "===nbest-rescoring-LG==="
          find exp/nbest-rescoring-LG -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/nbest-rescoring-LG -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

          echo "===nbest-rescoring-3-gram==="
          find exp/nbest-rescoring-3-gram -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/nbest-rescoring-3-gram -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

          echo "===nbest-rescoring-4-gram==="
          find exp/nbest-rescoring-4-gram -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/nbest-rescoring-4-gram -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

      - name: Upload decoding results for librispeech zipformer-mmi
        uses: actions/upload-artifact@v2
        if: github.event_name == 'schedule' || github.event.label.name == 'run-decode'
        with:
          name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-latest-cpu-zipformer_mmi-2022-12-08
          path: egs/librispeech/ASR/zipformer_mmi/exp/
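Each inference step extends PYTHONPATH three times before invoking a recipe script. Read back to front: the repository root puts the icefall package itself on the path, and the two kaldifeat entries cover its pure-Python wrappers and its compiled extension built under ~/tmp. A condensed sketch; the trailing import check is an illustration of how one might verify the setup, not a step from the workflow:

    export PYTHONPATH=$PWD:$PYTHONPATH                               # icefall repo root
    export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH  # Python wrappers
    export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH         # compiled extension
    python3 -c "import icefall, kaldifeat; print('imports OK')"     # sanity check (assumed)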
@ -1,172 +0,0 @@
# Copyright 2022 Fangjun Kuang (csukuangfj@gmail.com)

# See ../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name: run-librispeech-2022-12-29-stateless7-streaming
# zipformer

on:
  push:
    branches:
      - master
  pull_request:
    types: [labeled]

  schedule:
    # minute (0-59)
    # hour (0-23)
    # day of the month (1-31)
    # month (1-12)
    # day of the week (0-6)
    # nightly build at 15:50 UTC time every day
    - cron: "50 15 * * *"

concurrency:
  group: run_librispeech_2022_12_29_zipformer_streaming-${{ github.ref }}
  cancel-in-progress: true

jobs:
  run_librispeech_2022_12_29_zipformer_streaming:
    if: github.event.label.name == 'ready' || github.event.label.name == 'run-decode' || github.event.label.name == 'streaming-zipformer' || github.event_name == 'push' || github.event_name == 'schedule'
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-latest]
        python-version: [3.8]

      fail-fast: false

    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: Setup Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
          cache: 'pip'
          cache-dependency-path: '**/requirements-ci.txt'

      - name: Install Python dependencies
        run: |
          grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install
          pip uninstall -y protobuf
          pip install --no-binary protobuf protobuf==3.20.*

      - name: Cache kaldifeat
        id: my-cache
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/kaldifeat
          key: cache-tmp-${{ matrix.python-version }}-2023-05-22

      - name: Install kaldifeat
        if: steps.my-cache.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/install-kaldifeat.sh

      - name: Cache LibriSpeech test-clean and test-other datasets
        id: libri-test-clean-and-test-other-data
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/download
          key: cache-libri-test-clean-and-test-other

      - name: Download LibriSpeech test-clean and test-other
        if: steps.libri-test-clean-and-test-other-data.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/download-librispeech-test-clean-and-test-other-dataset.sh

      - name: Prepare manifests for LibriSpeech test-clean and test-other
        shell: bash
        run: |
          .github/scripts/prepare-librispeech-test-clean-and-test-other-manifests.sh

      - name: Cache LibriSpeech test-clean and test-other fbank features
        id: libri-test-clean-and-test-other-fbank
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/fbank-libri
          key: cache-libri-fbank-test-clean-and-test-other-v2

      - name: Compute fbank for LibriSpeech test-clean and test-other
        if: steps.libri-test-clean-and-test-other-fbank.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/compute-fbank-librispeech-test-clean-and-test-other.sh

      - name: Inference with pre-trained model
        shell: bash
        env:
          GITHUB_EVENT_NAME: ${{ github.event_name }}
          GITHUB_EVENT_LABEL_NAME: ${{ github.event.label.name }}
        run: |
          mkdir -p egs/librispeech/ASR/data
          ln -sfv ~/tmp/fbank-libri egs/librispeech/ASR/data/fbank
          ls -lh egs/librispeech/ASR/data/*

          sudo apt-get -qq install git-lfs tree
          export PYTHONPATH=$PWD:$PYTHONPATH
          export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH
          export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH

          .github/scripts/run-librispeech-pruned-transducer-stateless7-streaming-2022-12-29.sh

      - name: Display decoding results for librispeech pruned_transducer_stateless7_streaming
        if: github.event_name == 'schedule' || github.event.label.name == 'run-decode'
        shell: bash
        run: |
          cd egs/librispeech/ASR/
          tree ./pruned_transducer_stateless7_streaming/exp

          cd pruned_transducer_stateless7_streaming
          echo "results for pruned_transducer_stateless7_streaming"
          echo "===greedy search==="
          find exp/greedy_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/greedy_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

          echo "===fast_beam_search==="
          find exp/fast_beam_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/fast_beam_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

          echo "===modified beam search==="
          find exp/modified_beam_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/modified_beam_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

          echo "===streaming greedy search==="
          find exp/streaming/greedy_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/streaming/greedy_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

          echo "===streaming fast_beam_search==="
          find exp/streaming/fast_beam_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/streaming/fast_beam_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

          echo "===streaming modified beam search==="
          find exp/streaming/modified_beam_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/streaming/modified_beam_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

      - name: Upload decoding results for librispeech pruned_transducer_stateless7_streaming
        uses: actions/upload-artifact@v2
        if: github.event_name == 'schedule' || github.event.label.name == 'run-decode'
        with:
          name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-latest-cpu-pruned_transducer_stateless7-streaming-2022-12-29
          path: egs/librispeech/ASR/pruned_transducer_stateless7_streaming/exp/
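Every job materializes the cached fbank features the same way: a symlink from the cache location into the layout the recipe expects under data/. The flags on ln are what make the step safe to re-run; a short sketch:

    mkdir -p egs/librispeech/ASR/data
    # -s: create a symbolic link rather than copying the features;
    # -f: replace any stale link left over from a previous run;
    # -v: print what was linked, so it shows up in the CI log.
    ln -sfv ~/tmp/fbank-libri egs/librispeech/ASR/data/fbank
    ls -lh egs/librispeech/ASR/data/*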
@ -1,163 +0,0 @@
# Copyright 2022 Fangjun Kuang (csukuangfj@gmail.com)

# See ../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name: run-librispeech-2023-01-29-stateless7-ctc-bs
# zipformer

on:
  push:
    branches:
      - master
  pull_request:
    types: [labeled]

  schedule:
    # minute (0-59)
    # hour (0-23)
    # day of the month (1-31)
    # month (1-12)
    # day of the week (0-6)
    # nightly build at 15:50 UTC time every day
    - cron: "50 15 * * *"

jobs:
  run_librispeech_2023_01_29_zipformer_ctc_bs:
    if: github.event.label.name == 'run-decode' || github.event.label.name == 'blank-skip' || github.event_name == 'push' || github.event_name == 'schedule'
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-latest]
        python-version: [3.8]

      fail-fast: false

    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: Setup Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
          cache: 'pip'
          cache-dependency-path: '**/requirements-ci.txt'

      - name: Install Python dependencies
        run: |
          grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install
          pip uninstall -y protobuf
          pip install --no-binary protobuf protobuf==3.20.*

      - name: Cache kaldifeat
        id: my-cache
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/kaldifeat
          key: cache-tmp-${{ matrix.python-version }}-2023-05-22

      - name: Install kaldifeat
        if: steps.my-cache.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/install-kaldifeat.sh

      - name: Cache LibriSpeech test-clean and test-other datasets
        id: libri-test-clean-and-test-other-data
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/download
          key: cache-libri-test-clean-and-test-other

      - name: Download LibriSpeech test-clean and test-other
        if: steps.libri-test-clean-and-test-other-data.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/download-librispeech-test-clean-and-test-other-dataset.sh

      - name: Prepare manifests for LibriSpeech test-clean and test-other
        shell: bash
        run: |
          .github/scripts/prepare-librispeech-test-clean-and-test-other-manifests.sh

      - name: Cache LibriSpeech test-clean and test-other fbank features
        id: libri-test-clean-and-test-other-fbank
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/fbank-libri
          key: cache-libri-fbank-test-clean-and-test-other-v2

      - name: Compute fbank for LibriSpeech test-clean and test-other
        if: steps.libri-test-clean-and-test-other-fbank.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/compute-fbank-librispeech-test-clean-and-test-other.sh

      - name: Inference with pre-trained model
        shell: bash
        env:
          GITHUB_EVENT_NAME: ${{ github.event_name }}
          GITHUB_EVENT_LABEL_NAME: ${{ github.event.label.name }}
        run: |
          mkdir -p egs/librispeech/ASR/data
          ln -sfv ~/tmp/fbank-libri egs/librispeech/ASR/data/fbank
          ls -lh egs/librispeech/ASR/data/*

          sudo apt-get -qq install git-lfs tree
          export PYTHONPATH=$PWD:$PYTHONPATH
          export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH
          export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH

          .github/scripts/run-librispeech-pruned-transducer-stateless7-ctc-bs-2023-01-29.sh

      - name: Display decoding results for librispeech pruned_transducer_stateless7_ctc_bs
        if: github.event_name == 'schedule' || github.event.label.name == 'run-decode'
        shell: bash
        run: |
          cd egs/librispeech/ASR/
          tree ./pruned_transducer_stateless7_ctc_bs/exp

          cd pruned_transducer_stateless7_ctc_bs
          echo "results for pruned_transducer_stateless7_ctc_bs"
          echo "===greedy search==="
          find exp/greedy_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/greedy_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

          echo "===fast_beam_search==="
          find exp/fast_beam_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/fast_beam_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

          echo "===modified beam search==="
          find exp/modified_beam_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/modified_beam_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

          echo "===ctc decoding==="
          find exp/ctc-decoding -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/ctc-decoding -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

          echo "===1best==="
          find exp/1best -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/1best -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

      - name: Upload decoding results for librispeech pruned_transducer_stateless7_ctc_bs
        uses: actions/upload-artifact@v2
        if: github.event_name == 'schedule' || github.event.label.name == 'run-decode'
        with:
          name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-latest-cpu-pruned_transducer_stateless7-ctc-bs-2023-01-29
          path: egs/librispeech/ASR/pruned_transducer_stateless7_ctc_bs/exp/
@ -1,155 +0,0 @@
# Copyright 2022 Fangjun Kuang (csukuangfj@gmail.com)

# See ../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name: run-librispeech-conformer-ctc3-2022-11-28
# zipformer

on:
  push:
    branches:
      - master
  pull_request:
    types: [labeled]

  schedule:
    # minute (0-59)
    # hour (0-23)
    # day of the month (1-31)
    # month (1-12)
    # day of the week (0-6)
    # nightly build at 15:50 UTC time every day
    - cron: "50 15 * * *"

concurrency:
  group: run_librispeech_2022_11_28_conformer_ctc3-${{ github.ref }}
  cancel-in-progress: true

jobs:
  run_librispeech_2022_11_28_conformer_ctc3:
    if: github.event.label.name == 'ready' || github.event.label.name == 'run-decode' || github.event_name == 'push' || github.event_name == 'schedule'
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-latest]
        python-version: [3.8]

      fail-fast: false

    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: Setup Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
          cache: 'pip'
          cache-dependency-path: '**/requirements-ci.txt'

      - name: Install Python dependencies
        run: |
          grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install
          pip uninstall -y protobuf
          pip install --no-binary protobuf protobuf==3.20.*

      - name: Cache kaldifeat
        id: my-cache
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/kaldifeat
          key: cache-tmp-${{ matrix.python-version }}-2023-05-22

      - name: Install kaldifeat
        if: steps.my-cache.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/install-kaldifeat.sh

      - name: Cache LibriSpeech test-clean and test-other datasets
        id: libri-test-clean-and-test-other-data
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/download
          key: cache-libri-test-clean-and-test-other

      - name: Download LibriSpeech test-clean and test-other
        if: steps.libri-test-clean-and-test-other-data.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/download-librispeech-test-clean-and-test-other-dataset.sh

      - name: Prepare manifests for LibriSpeech test-clean and test-other
        shell: bash
        run: |
          .github/scripts/prepare-librispeech-test-clean-and-test-other-manifests.sh

      - name: Cache LibriSpeech test-clean and test-other fbank features
        id: libri-test-clean-and-test-other-fbank
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/fbank-libri
          key: cache-libri-fbank-test-clean-and-test-other-v2

      - name: Compute fbank for LibriSpeech test-clean and test-other
        if: steps.libri-test-clean-and-test-other-fbank.outputs.cache-hit != 'true'
        shell: bash
run: |
|
|
||||||
.github/scripts/compute-fbank-librispeech-test-clean-and-test-other.sh
|
|
||||||
|
|
||||||
- name: Inference with pre-trained model
|
|
||||||
shell: bash
|
|
||||||
env:
|
|
||||||
GITHUB_EVENT_NAME: ${{ github.event_name }}
|
|
||||||
GITHUB_EVENT_LABEL_NAME: ${{ github.event.label.name }}
|
|
||||||
run: |
|
|
||||||
mkdir -p egs/librispeech/ASR/data
|
|
||||||
ln -sfv ~/tmp/fbank-libri egs/librispeech/ASR/data/fbank
|
|
||||||
ls -lh egs/librispeech/ASR/data/*
|
|
||||||
|
|
||||||
sudo apt-get -qq install git-lfs tree
|
|
||||||
export PYTHONPATH=$PWD:$PYTHONPATH
|
|
||||||
export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH
|
|
||||||
export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH
|
|
||||||
|
|
||||||
.github/scripts/run-librispeech-conformer-ctc3-2022-11-28.sh
|
|
||||||
|
|
||||||
- name: Display decoding results for librispeech conformer_ctc3
|
|
||||||
if: github.event_name == 'schedule' || github.event.label.name == 'run-decode'
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
cd egs/librispeech/ASR/
|
|
||||||
tree ./conformer_ctc3/exp
|
|
||||||
|
|
||||||
cd conformer_ctc3
|
|
||||||
echo "results for conformer_ctc3"
|
|
||||||
echo "===ctc-decoding==="
|
|
||||||
find exp/ctc-decoding -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
|
|
||||||
find exp/ctc-decoding -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2
|
|
||||||
|
|
||||||
echo "===1best==="
|
|
||||||
find exp/1best -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
|
|
||||||
find exp/1best -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2
|
|
||||||
|
|
||||||
- name: Upload decoding results for librispeech conformer_ctc3
|
|
||||||
uses: actions/upload-artifact@v2
|
|
||||||
if: github.event_name == 'schedule' || github.event.label.name == 'run-decode'
|
|
||||||
with:
|
|
||||||
name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-latest-cpu-conformer_ctc3-2022-11-28
|
|
||||||
path: egs/librispeech/ASR/conformer_ctc3/exp/
|
|
||||||
@ -1,157 +0,0 @@
# Copyright 2021 Fangjun Kuang (csukuangfj@gmail.com)

# See ../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name: run-librispeech-pruned-transducer-stateless3-2022-05-13
# stateless pruned transducer (reworked model) + giga speech

on:
  push:
    branches:
      - master
  pull_request:
    types: [labeled]

  schedule:
    # minute (0-59)
    # hour (0-23)
    # day of the month (1-31)
    # month (1-12)
    # day of the week (0-6)
    # nightly build at 15:50 UTC time every day
    - cron: "50 15 * * *"

concurrency:
  group: run_librispeech_pruned_transducer_stateless3_2022_05_13-${{ github.ref }}
  cancel-in-progress: true

jobs:
  run_librispeech_pruned_transducer_stateless3_2022_05_13:
    if: github.event.label.name == 'ready' || github.event.label.name == 'run-decode' || github.event_name == 'push' || github.event_name == 'schedule'
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-latest]
        python-version: [3.8]

      fail-fast: false

    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: Setup Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
          cache: 'pip'
          cache-dependency-path: '**/requirements-ci.txt'

      - name: Install Python dependencies
        run: |
          grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install
          pip uninstall -y protobuf
          pip install --no-binary protobuf protobuf==3.20.*

      - name: Cache kaldifeat
        id: my-cache
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/kaldifeat
          key: cache-tmp-${{ matrix.python-version }}-2023-05-22

      - name: Install kaldifeat
        if: steps.my-cache.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/install-kaldifeat.sh

      - name: Cache LibriSpeech test-clean and test-other datasets
        id: libri-test-clean-and-test-other-data
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/download
          key: cache-libri-test-clean-and-test-other

      - name: Download LibriSpeech test-clean and test-other
        if: steps.libri-test-clean-and-test-other-data.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/download-librispeech-test-clean-and-test-other-dataset.sh

      - name: Prepare manifests for LibriSpeech test-clean and test-other
        shell: bash
        run: |
          .github/scripts/prepare-librispeech-test-clean-and-test-other-manifests.sh

      - name: Cache LibriSpeech test-clean and test-other fbank features
        id: libri-test-clean-and-test-other-fbank
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/fbank-libri
          key: cache-libri-fbank-test-clean-and-test-other-v2

      - name: Compute fbank for LibriSpeech test-clean and test-other
        if: steps.libri-test-clean-and-test-other-fbank.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/compute-fbank-librispeech-test-clean-and-test-other.sh

      - name: Inference with pre-trained model
        shell: bash
        env:
          GITHUB_EVENT_NAME: ${{ github.event_name }}
          GITHUB_EVENT_LABEL_NAME: ${{ github.event.label.name }}
        run: |
          mkdir -p egs/librispeech/ASR/data
          ln -sfv ~/tmp/fbank-libri egs/librispeech/ASR/data/fbank
          ls -lh egs/librispeech/ASR/data/*

          sudo apt-get -qq install git-lfs tree
          export PYTHONPATH=$PWD:$PYTHONPATH
          export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH
          export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH

          .github/scripts/run-librispeech-pruned-transducer-stateless3-2022-05-13.sh

      - name: Display decoding results for pruned_transducer_stateless3
        if: github.event_name == 'schedule' || github.event.label.name == 'run-decode'
        shell: bash
        run: |
          cd egs/librispeech/ASR
          tree pruned_transducer_stateless3/exp
          cd pruned_transducer_stateless3/exp
          echo "===greedy search==="
          find greedy_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find greedy_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

          echo "===fast_beam_search==="
          find fast_beam_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find fast_beam_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

          echo "===modified beam search==="
          find modified_beam_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find modified_beam_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

      - name: Upload decoding results for pruned_transducer_stateless3
        uses: actions/upload-artifact@v2
        if: github.event_name == 'schedule' || github.event.label.name == 'run-decode'
        with:
          name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-latest-cpu-pruned_transducer_stateless3-2022-04-29
          path: egs/librispeech/ASR/pruned_transducer_stateless3/exp/
@ -1,159 +0,0 @@
# Copyright 2021 Fangjun Kuang (csukuangfj@gmail.com)

# See ../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name: run-librispeech-streaming-2022-06-26
# streaming conformer stateless transducer2

on:
  push:
    branches:
      - master
  pull_request:
    types: [labeled]

  schedule:
    # minute (0-59)
    # hour (0-23)
    # day of the month (1-31)
    # month (1-12)
    # day of the week (0-6)
    # nightly build at 15:50 UTC time every day
    - cron: "50 15 * * *"

concurrency:
  group: run_librispeech_streaming_2022_06_26-${{ github.ref }}
  cancel-in-progress: true

jobs:
  run_librispeech_streaming_2022_06_26:
    if: github.event.label.name == 'ready' || github.event.label.name == 'run-decode' || github.event_name == 'push' || github.event_name == 'schedule'
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-latest]
        python-version: [3.8]

      fail-fast: false

    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: Setup Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
          cache: 'pip'
          cache-dependency-path: '**/requirements-ci.txt'

      - name: Install Python dependencies
        run: |
          grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install
          pip uninstall -y protobuf
          pip install --no-binary protobuf protobuf==3.20.*

      - name: Cache kaldifeat
        id: my-cache
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/kaldifeat
          key: cache-tmp-${{ matrix.python-version }}-2023-05-22

      - name: Install kaldifeat
        if: steps.my-cache.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/install-kaldifeat.sh

      - name: Cache LibriSpeech test-clean and test-other datasets
        id: libri-test-clean-and-test-other-data
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/download
          key: cache-libri-test-clean-and-test-other

      - name: Download LibriSpeech test-clean and test-other
        if: steps.libri-test-clean-and-test-other-data.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/download-librispeech-test-clean-and-test-other-dataset.sh

      - name: Prepare manifests for LibriSpeech test-clean and test-other
        shell: bash
        run: |
          .github/scripts/prepare-librispeech-test-clean-and-test-other-manifests.sh

      - name: Cache LibriSpeech test-clean and test-other fbank features
        id: libri-test-clean-and-test-other-fbank
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/fbank-libri
          key: cache-libri-fbank-test-clean-and-test-other-v2

      - name: Compute fbank for LibriSpeech test-clean and test-other
        if: steps.libri-test-clean-and-test-other-fbank.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/compute-fbank-librispeech-test-clean-and-test-other.sh

      - name: Inference with pre-trained model
        shell: bash
        env:
          GITHUB_EVENT_NAME: ${{ github.event_name }}
          GITHUB_EVENT_LABEL_NAME: ${{ github.event.label.name }}
        run: |
          mkdir -p egs/librispeech/ASR/data
          ln -sfv ~/tmp/fbank-libri egs/librispeech/ASR/data/fbank
          ls -lh egs/librispeech/ASR/data/*

          sudo apt-get -qq install git-lfs tree
          export PYTHONPATH=$PWD:$PYTHONPATH
          export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH
          export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH

          .github/scripts/run-librispeech-streaming-pruned-transducer-stateless2-2022-06-26.sh

      - name: Display decoding results
        if: github.event_name == 'schedule' || github.event.label.name == 'run-decode'
        shell: bash
        run: |
          cd egs/librispeech/ASR/
          tree ./pruned_transducer_stateless2/exp

          cd pruned_transducer_stateless2
          echo "results for pruned_transducer_stateless2"
          echo "===greedy search==="
          find exp/greedy_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/greedy_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

          echo "===fast_beam_search==="
          find exp/fast_beam_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/fast_beam_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

          echo "===modified_beam_search==="
          find exp/modified_beam_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/modified_beam_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

      - name: Upload decoding results for pruned_transducer_stateless2
        uses: actions/upload-artifact@v2
        if: github.event_name == 'schedule' || github.event.label.name == 'run-decode'
        with:
          name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-latest-cpu-pruned_transducer_stateless2-2022-06-26
          path: egs/librispeech/ASR/pruned_transducer_stateless2/exp/
@ -1,174 +0,0 @@
# Copyright 2022 Fangjun Kuang (csukuangfj@gmail.com)

# See ../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name: run-librispeech-streaming-zipformer-2023-05-18
# zipformer

on:
  push:
    branches:
      - master
  pull_request:
    types: [labeled]

  schedule:
    # minute (0-59)
    # hour (0-23)
    # day of the month (1-31)
    # month (1-12)
    # day of the week (0-6)
    # nightly build at 15:50 UTC time every day
    - cron: "50 15 * * *"

concurrency:
  group: run_librispeech_2023_05_18_streaming_zipformer-${{ github.ref }}
  cancel-in-progress: true

jobs:
  run_librispeech_2023_05_18_streaming_zipformer:
    if: github.event.label.name == 'zipformer' || github.event.label.name == 'ready' || github.event.label.name == 'run-decode' || github.event_name == 'push' || github.event_name == 'schedule'
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-latest]
        python-version: [3.8]

      fail-fast: false

    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: Setup Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
          cache: 'pip'
          cache-dependency-path: '**/requirements-ci.txt'

      - name: Install Python dependencies
        run: |
          grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install
          pip uninstall -y protobuf
          pip install --no-binary protobuf protobuf==3.20.*

      - name: Cache kaldifeat
        id: my-cache
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/kaldifeat
          key: cache-tmp-${{ matrix.python-version }}-2023-05-22

      - name: Install kaldifeat
        if: steps.my-cache.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/install-kaldifeat.sh

      - name: Cache LibriSpeech test-clean and test-other datasets
        id: libri-test-clean-and-test-other-data
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/download
          key: cache-libri-test-clean-and-test-other

      - name: Download LibriSpeech test-clean and test-other
        if: steps.libri-test-clean-and-test-other-data.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/download-librispeech-test-clean-and-test-other-dataset.sh

      - name: Prepare manifests for LibriSpeech test-clean and test-other
        shell: bash
        run: |
          .github/scripts/prepare-librispeech-test-clean-and-test-other-manifests.sh

      - name: Cache LibriSpeech test-clean and test-other fbank features
        id: libri-test-clean-and-test-other-fbank
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/fbank-libri
          key: cache-libri-fbank-test-clean-and-test-other-v2

      - name: Compute fbank for LibriSpeech test-clean and test-other
        if: steps.libri-test-clean-and-test-other-fbank.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/compute-fbank-librispeech-test-clean-and-test-other.sh

      - name: Inference with pre-trained model
        shell: bash
        env:
          GITHUB_EVENT_NAME: ${{ github.event_name }}
          GITHUB_EVENT_LABEL_NAME: ${{ github.event.label.name }}
        run: |
          mkdir -p egs/librispeech/ASR/data
          ln -sfv ~/tmp/fbank-libri egs/librispeech/ASR/data/fbank
          ls -lh egs/librispeech/ASR/data/*

          sudo apt-get -qq install git-lfs tree
          export PYTHONPATH=$PWD:$PYTHONPATH
          export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH
          export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH

          .github/scripts/run-librispeech-streaming-zipformer-2023-05-18.sh

      - name: Display decoding results for librispeech zipformer
        if: github.event_name == 'schedule' || github.event.label.name == 'run-decode'
        shell: bash
        run: |
          cd egs/librispeech/ASR/
          tree ./zipformer/exp

          cd zipformer

          echo "results for zipformer, simulated streaming decoding"
          echo "===greedy search==="
          find exp/greedy_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/greedy_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

          echo "===fast_beam_search==="
          find exp/fast_beam_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/fast_beam_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

          echo "===modified beam search==="
          find exp/modified_beam_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/modified_beam_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

          echo "results for zipformer, chunk-wise streaming decoding"
          echo "===greedy search==="
          find exp/streaming/greedy_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/streaming/greedy_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

          echo "===fast_beam_search==="
          find exp/streaming/fast_beam_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/streaming/fast_beam_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

          echo "===modified beam search==="
          find exp/streaming/modified_beam_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/streaming/modified_beam_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

      - name: Upload decoding results for librispeech zipformer
        uses: actions/upload-artifact@v2
        if: github.event_name == 'schedule' || github.event.label.name == 'run-decode'
        with:
          name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-latest-cpu-zipformer-2022-11-11
          path: egs/librispeech/ASR/zipformer/exp/
@ -1,159 +0,0 @@
# Copyright 2021 Fangjun Kuang (csukuangfj@gmail.com)

# See ../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name: run-librispeech-2022-04-19
# stateless transducer + torchaudio rnn-t loss

on:
  push:
    branches:
      - master
  pull_request:
    types: [labeled]

  schedule:
    # minute (0-59)
    # hour (0-23)
    # day of the month (1-31)
    # month (1-12)
    # day of the week (0-6)
    # nightly build at 15:50 UTC time every day
    - cron: "50 15 * * *"

concurrency:
  group: run_librispeech_2022_04_19-${{ github.ref }}
  cancel-in-progress: true

jobs:
  run_librispeech_2022_04_19:
    if: github.event.label.name == 'ready' || github.event.label.name == 'run-decode' || github.event_name == 'push' || github.event_name == 'schedule'
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-latest]
        python-version: [3.8]

      fail-fast: false

    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: Setup Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
          cache: 'pip'
          cache-dependency-path: '**/requirements-ci.txt'

      - name: Install Python dependencies
        run: |
          grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install
          pip uninstall -y protobuf
          pip install --no-binary protobuf protobuf==3.20.*

      - name: Cache kaldifeat
        id: my-cache
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/kaldifeat
          key: cache-tmp-${{ matrix.python-version }}-2023-05-22

      - name: Install kaldifeat
        if: steps.my-cache.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/install-kaldifeat.sh

      - name: Cache LibriSpeech test-clean and test-other datasets
        id: libri-test-clean-and-test-other-data
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/download
          key: cache-libri-test-clean-and-test-other

      - name: Download LibriSpeech test-clean and test-other
        if: steps.libri-test-clean-and-test-other-data.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/download-librispeech-test-clean-and-test-other-dataset.sh

      - name: Prepare manifests for LibriSpeech test-clean and test-other
        shell: bash
        run: |
          .github/scripts/prepare-librispeech-test-clean-and-test-other-manifests.sh

      - name: Cache LibriSpeech test-clean and test-other fbank features
        id: libri-test-clean-and-test-other-fbank
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/fbank-libri
          key: cache-libri-fbank-test-clean-and-test-other-v2

      - name: Compute fbank for LibriSpeech test-clean and test-other
        if: steps.libri-test-clean-and-test-other-fbank.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/compute-fbank-librispeech-test-clean-and-test-other.sh

      - name: Inference with pre-trained model
        shell: bash
        env:
          GITHUB_EVENT_NAME: ${{ github.event_name }}
          GITHUB_EVENT_LABEL_NAME: ${{ github.event.label.name }}
        run: |
          mkdir -p egs/librispeech/ASR/data
          ln -sfv ~/tmp/fbank-libri egs/librispeech/ASR/data/fbank
          ls -lh egs/librispeech/ASR/data/*

          sudo apt-get -qq install git-lfs tree
          export PYTHONPATH=$PWD:$PYTHONPATH
          export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH
          export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH

          .github/scripts/run-librispeech-transducer-stateless2-2022-04-19.sh

      - name: Display decoding results
        if: github.event_name == 'schedule' || github.event.label.name == 'run-decode'
        shell: bash
        run: |
          cd egs/librispeech/ASR/
          tree ./transducer_stateless2/exp

          cd transducer_stateless2
          echo "results for transducer_stateless2"
          echo "===greedy search==="
          find exp/greedy_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/greedy_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

          echo "===fast_beam_search==="
          find exp/fast_beam_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/fast_beam_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

          echo "===modified_beam_search==="
          find exp/modified_beam_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/modified_beam_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

      - name: Upload decoding results for transducer_stateless2
        uses: actions/upload-artifact@v2
        if: github.event_name == 'schedule' || github.event.label.name == 'run-decode'
        with:
          name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-latest-cpu-transducer_stateless2-2022-04-19
          path: egs/librispeech/ASR/transducer_stateless2/exp/
@ -1,159 +0,0 @@
# Copyright 2022 Fangjun Kuang (csukuangfj@gmail.com)

# See ../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name: run-librispeech-zipformer-2023-05-18
# zipformer

on:
  push:
    branches:
      - master
  pull_request:
    types: [labeled]

  schedule:
    # minute (0-59)
    # hour (0-23)
    # day of the month (1-31)
    # month (1-12)
    # day of the week (0-6)
    # nightly build at 15:50 UTC time every day
    - cron: "50 15 * * *"

concurrency:
  group: run_librispeech_2023_05_18_zipformer-${{ github.ref }}
  cancel-in-progress: true

jobs:
  run_librispeech_2023_05_18_zipformer:
    if: github.event.label.name == 'zipformer' || github.event.label.name == 'ready' || github.event.label.name == 'run-decode' || github.event_name == 'push' || github.event_name == 'schedule'
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-latest]
        python-version: [3.8]

      fail-fast: false

    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: Setup Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
          cache: 'pip'
          cache-dependency-path: '**/requirements-ci.txt'

      - name: Install Python dependencies
        run: |
          grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install
          pip uninstall -y protobuf
          pip install --no-binary protobuf protobuf==3.20.*

      - name: Cache kaldifeat
        id: my-cache
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/kaldifeat
          key: cache-tmp-${{ matrix.python-version }}-2023-05-22

      - name: Install kaldifeat
        if: steps.my-cache.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/install-kaldifeat.sh

      - name: Cache LibriSpeech test-clean and test-other datasets
        id: libri-test-clean-and-test-other-data
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/download
          key: cache-libri-test-clean-and-test-other

      - name: Download LibriSpeech test-clean and test-other
        if: steps.libri-test-clean-and-test-other-data.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/download-librispeech-test-clean-and-test-other-dataset.sh

      - name: Prepare manifests for LibriSpeech test-clean and test-other
        shell: bash
        run: |
          .github/scripts/prepare-librispeech-test-clean-and-test-other-manifests.sh

      - name: Cache LibriSpeech test-clean and test-other fbank features
        id: libri-test-clean-and-test-other-fbank
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/fbank-libri
          key: cache-libri-fbank-test-clean-and-test-other-v2

      - name: Compute fbank for LibriSpeech test-clean and test-other
        if: steps.libri-test-clean-and-test-other-fbank.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/compute-fbank-librispeech-test-clean-and-test-other.sh

      - name: Inference with pre-trained model
        shell: bash
        env:
          GITHUB_EVENT_NAME: ${{ github.event_name }}
          GITHUB_EVENT_LABEL_NAME: ${{ github.event.label.name }}
        run: |
          mkdir -p egs/librispeech/ASR/data
          ln -sfv ~/tmp/fbank-libri egs/librispeech/ASR/data/fbank
          ls -lh egs/librispeech/ASR/data/*

          sudo apt-get -qq install git-lfs tree
          export PYTHONPATH=$PWD:$PYTHONPATH
          export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH
          export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH

          .github/scripts/run-librispeech-zipformer-2023-05-18.sh

      - name: Display decoding results for librispeech zipformer
        if: github.event_name == 'schedule' || github.event.label.name == 'run-decode'
        shell: bash
        run: |
          cd egs/librispeech/ASR/
          tree ./zipformer/exp

          cd zipformer
          echo "results for zipformer"
          echo "===greedy search==="
          find exp/greedy_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/greedy_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

          echo "===fast_beam_search==="
          find exp/fast_beam_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/fast_beam_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

          echo "===modified beam search==="
          find exp/modified_beam_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/modified_beam_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

      - name: Upload decoding results for librispeech zipformer
        uses: actions/upload-artifact@v2
        if: github.event_name == 'schedule' || github.event.label.name == 'run-decode'
        with:
          name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-latest-cpu-zipformer-2022-11-11
          path: egs/librispeech/ASR/zipformer/exp/
@ -1,155 +0,0 @@
# Copyright 2022 Fangjun Kuang (csukuangfj@gmail.com)

# See ../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name: run-librispeech-zipformer-ctc-2023-06-14
# zipformer

on:
  push:
    branches:
      - master
  pull_request:
    types: [labeled]

  schedule:
    # minute (0-59)
    # hour (0-23)
    # day of the month (1-31)
    # month (1-12)
    # day of the week (0-6)
    # nightly build at 15:50 UTC time every day
    - cron: "50 15 * * *"

concurrency:
  group: run_librispeech_2023_06_14_zipformer-ctc-${{ github.ref }}
  cancel-in-progress: true

jobs:
  run_librispeech_2023_06_14_zipformer_ctc:
    if: github.event.label.name == 'zipformer' || github.event.label.name == 'ready' || github.event.label.name == 'run-decode' || github.event_name == 'push' || github.event_name == 'schedule'
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-latest]
        python-version: [3.8]

      fail-fast: false

    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: Setup Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
          cache: 'pip'
          cache-dependency-path: '**/requirements-ci.txt'

      - name: Install Python dependencies
        run: |
          grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install
          pip uninstall -y protobuf
          pip install --no-binary protobuf protobuf==3.20.*

      - name: Cache kaldifeat
        id: my-cache
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/kaldifeat
          key: cache-tmp-${{ matrix.python-version }}-2023-05-22

      - name: Install kaldifeat
        if: steps.my-cache.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/install-kaldifeat.sh

      - name: Cache LibriSpeech test-clean and test-other datasets
        id: libri-test-clean-and-test-other-data
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/download
          key: cache-libri-test-clean-and-test-other

      - name: Download LibriSpeech test-clean and test-other
        if: steps.libri-test-clean-and-test-other-data.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/download-librispeech-test-clean-and-test-other-dataset.sh

      - name: Prepare manifests for LibriSpeech test-clean and test-other
        shell: bash
        run: |
          .github/scripts/prepare-librispeech-test-clean-and-test-other-manifests.sh

      - name: Cache LibriSpeech test-clean and test-other fbank features
        id: libri-test-clean-and-test-other-fbank
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/fbank-libri
          key: cache-libri-fbank-test-clean-and-test-other-v2

      - name: Compute fbank for LibriSpeech test-clean and test-other
        if: steps.libri-test-clean-and-test-other-fbank.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/compute-fbank-librispeech-test-clean-and-test-other.sh

      - name: Inference with pre-trained model
        shell: bash
        env:
          GITHUB_EVENT_NAME: ${{ github.event_name }}
          GITHUB_EVENT_LABEL_NAME: ${{ github.event.label.name }}
        run: |
          mkdir -p egs/librispeech/ASR/data
          ln -sfv ~/tmp/fbank-libri egs/librispeech/ASR/data/fbank
          ls -lh egs/librispeech/ASR/data/*

          sudo apt-get -qq install git-lfs tree
          export PYTHONPATH=$PWD:$PYTHONPATH
          export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH
          export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH

          .github/scripts/run-librispeech-zipformer-ctc-2023-06-14.sh

      - name: Display decoding results for librispeech zipformer
        if: github.event_name == 'schedule' || github.event.label.name == 'run-decode'
        shell: bash
        run: |
          cd egs/librispeech/ASR/
          tree ./zipformer/exp

          cd zipformer
          echo "results for zipformer"
          echo "===ctc-decoding==="
          find exp/ctc-decoding -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/ctc-decoding -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

          echo "===1best==="
          find exp/1best -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/1best -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

      - name: Upload decoding results for librispeech zipformer
        uses: actions/upload-artifact@v2
        if: github.event_name == 'schedule' || github.event.label.name == 'run-decode'
        with:
          name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-latest-cpu-zipformer-2022-11-11
          path: egs/librispeech/ASR/zipformer/exp/
@ -14,7 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

-name: run-multi-zh_hans-zipformer
+name: run-multi-corpora-zipformer

on:
  push:
@ -24,12 +24,12 @@ on:
    types: [labeled]

concurrency:
-  group: run_multi-zh_hans_zipformer-${{ github.ref }}
+  group: run_multi-corpora_zipformer-${{ github.ref }}
  cancel-in-progress: true

jobs:
-  run_multi-zh_hans_zipformer:
+  run_multi-corpora_zipformer:
-    if: github.event.label.name == 'onnx' || github.event.label.name == 'ready' || github.event_name == 'push' || github.event.label.name == 'multi-zh_hans' || github.event.label.name == 'zipformer'
+    if: github.event.label.name == 'onnx' || github.event.label.name == 'ready' || github.event_name == 'push' || github.event.label.name == 'multi-zh_hans' || github.event.label.name == 'zipformer' || github.event.label.name == 'multi-corpora'
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
@ -81,4 +81,4 @@ jobs:
          export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH
          export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH

-          .github/scripts/run-multi-zh_hans-zipformer.sh
+          .github/scripts/run-multi-corpora-zipformer.sh
87
.github/workflows/run-pretrained-ctc.yml
vendored
@ -1,87 +0,0 @@
# Copyright 2021 Fangjun Kuang (csukuangfj@gmail.com)

# See ../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name: run-pre-trained-ctc

on:
  push:
    branches:
      - master
  pull_request:
    types: [labeled]

  workflow_dispatch:
    inputs:
      test-run:
        description: 'Test (y/n)?'
        required: true
        default: 'y'

concurrency:
  group: run_pre_trained_ctc-${{ github.ref }}
  cancel-in-progress: true

jobs:
  run_pre_trained_ctc:
    if: github.event.label.name == 'ready' || github.event_name == 'push' || github.event.inputs.test-run == 'y' || github.event.label.name == 'ctc'
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-latest]
        python-version: [3.8]

      fail-fast: false

    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: Setup Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
          cache: 'pip'
          cache-dependency-path: '**/requirements-ci.txt'

      - name: Install Python dependencies
        run: |
          grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install
          pip uninstall -y protobuf
          pip install --no-binary protobuf protobuf==3.20.*

      - name: Cache kaldifeat
        id: my-cache
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/kaldifeat
          key: cache-tmp-${{ matrix.python-version }}-2023-05-22

      - name: Install kaldifeat
        if: steps.my-cache.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/install-kaldifeat.sh

      - name: Inference with pre-trained model
        shell: bash
        run: |
          sudo apt-get -qq install git-lfs tree
          export PYTHONPATH=$PWD:$PYTHONPATH
          export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH
          export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH
          .github/scripts/run-pre-trained-ctc.sh
@ -1,158 +0,0 @@
|
|||||||
# Copyright 2021 Fangjun Kuang (csukuangfj@gmail.com)
|
|
||||||
|
|
||||||
# See ../../LICENSE for clarification regarding multiple authors
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
name: run-pre-trained-trandsucer-stateless-multi-datasets-librispeech-100h
|
|
||||||
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
branches:
|
|
||||||
- master
|
|
||||||
pull_request:
|
|
||||||
types: [labeled]
|
|
||||||
|
|
||||||
schedule:
|
|
||||||
# minute (0-59)
|
|
||||||
# hour (0-23)
|
|
||||||
# day of the month (1-31)
|
|
||||||
# month (1-12)
|
|
||||||
# day of the week (0-6)
|
|
||||||
# nightly build at 15:50 UTC time every day
|
|
||||||
- cron: "50 15 * * *"
|
|
||||||
|
|
||||||
concurrency:
  group: run_pre_trained_transducer_stateless_multi_datasets_librispeech_100h-${{ github.ref }}
  cancel-in-progress: true

jobs:
  run_pre_trained_transducer_stateless_multi_datasets_librispeech_100h:
    if: github.event.label.name == 'ready' || github.event.label.name == 'run-decode' || github.event_name == 'push' || github.event_name == 'schedule'
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-latest]
        python-version: [3.8]

      fail-fast: false

    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: Setup Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
          cache: 'pip'
          cache-dependency-path: '**/requirements-ci.txt'

      - name: Install Python dependencies
        run: |
          grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install
          pip uninstall -y protobuf
          pip install --no-binary protobuf protobuf==3.20.*

      - name: Cache kaldifeat
        id: my-cache
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/kaldifeat
          key: cache-tmp-${{ matrix.python-version }}-2023-05-22

      - name: Install kaldifeat
        if: steps.my-cache.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/install-kaldifeat.sh

      - name: Cache LibriSpeech test-clean and test-other datasets
        id: libri-test-clean-and-test-other-data
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/download
          key: cache-libri-test-clean-and-test-other

      - name: Download LibriSpeech test-clean and test-other
        if: steps.libri-test-clean-and-test-other-data.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/download-librispeech-test-clean-and-test-other-dataset.sh

      - name: Prepare manifests for LibriSpeech test-clean and test-other
        shell: bash
        run: |
          .github/scripts/prepare-librispeech-test-clean-and-test-other-manifests.sh

      - name: Cache LibriSpeech test-clean and test-other fbank features
        id: libri-test-clean-and-test-other-fbank
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/fbank-libri
          key: cache-libri-fbank-test-clean-and-test-other-v2

      - name: Compute fbank for LibriSpeech test-clean and test-other
        if: steps.libri-test-clean-and-test-other-fbank.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/compute-fbank-librispeech-test-clean-and-test-other.sh

      - name: Inference with pre-trained model
        shell: bash
        env:
          GITHUB_EVENT_NAME: ${{ github.event_name }}
          GITHUB_EVENT_LABEL_NAME: ${{ github.event.label.name }}
        run: |
          mkdir -p egs/librispeech/ASR/data
          ln -sfv ~/tmp/fbank-libri egs/librispeech/ASR/data/fbank
          ls -lh egs/librispeech/ASR/data/*

          sudo apt-get -qq install git-lfs tree
          export PYTHONPATH=$PWD:$PYTHONPATH
          export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH
          export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH

          .github/scripts/run-pre-trained-transducer-stateless-librispeech-100h.sh

      - name: Display decoding results for transducer_stateless_multi_datasets
        if: github.event_name == 'schedule' || github.event.label.name == 'run-decode'
        shell: bash
        run: |
          cd egs/librispeech/ASR/
          tree ./transducer_stateless_multi_datasets/exp

          cd transducer_stateless_multi_datasets
          echo "results for transducer_stateless_multi_datasets"
          echo "===greedy search==="
          find exp/greedy_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/greedy_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

          echo "===fast_beam_search==="
          find exp/fast_beam_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/fast_beam_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

          echo "===modified beam search==="
          find exp/modified_beam_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/modified_beam_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

      - name: Upload decoding results for transducer_stateless_multi_datasets
        uses: actions/upload-artifact@v2
        if: github.event_name == 'schedule' || github.event.label.name == 'run-decode'
        with:
          name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-latest-cpu-transducer_stateless_multi_datasets-100h-2022-02-21
          path: egs/librispeech/ASR/transducer_stateless_multi_datasets/exp/
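A note on the caching scheme above (an editorial aside, not part of the commit): the trailing date in the kaldifeat cache key acts as a manual version stamp; editing it invalidates the cached build and makes the install step run again. The same step with descriptive comments spelling this out:

    # a sketch of the step above with explanatory comments added
    - name: Cache kaldifeat
      id: my-cache
      uses: actions/cache@v2
      with:
        path: |
          ~/tmp/kaldifeat
        # the trailing date is a manual version stamp; changing it
        # forces a cache miss, so the "Install kaldifeat" step reruns
        key: cache-tmp-${{ matrix.python-version }}-2023-05-22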
@ -1,158 +0,0 @@
# Copyright 2021 Fangjun Kuang (csukuangfj@gmail.com)

# See ../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name: run-pre-trained-trandsucer-stateless-multi-datasets-librispeech-960h

on:
  push:
    branches:
      - master
  pull_request:
    types: [labeled]

  schedule:
    # minute (0-59)
    # hour (0-23)
    # day of the month (1-31)
    # month (1-12)
    # day of the week (0-6)
    # nightly build at 15:50 UTC time every day
    - cron: "50 15 * * *"

concurrency:
  group: run_pre_trained_transducer_stateless_multi_datasets_librispeech_960h-${{ github.ref }}
  cancel-in-progress: true

jobs:
  run_pre_trained_transducer_stateless_multi_datasets_librispeech_960h:
    if: github.event.label.name == 'ready' || github.event.label.name == 'run-decode' || github.event_name == 'push' || github.event_name == 'schedule'
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-latest]
        python-version: [3.8]

      fail-fast: false

    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: Setup Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
          cache: 'pip'
          cache-dependency-path: '**/requirements-ci.txt'

      - name: Install Python dependencies
        run: |
          grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install
          pip uninstall -y protobuf
          pip install --no-binary protobuf protobuf==3.20.*

      - name: Cache kaldifeat
        id: my-cache
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/kaldifeat
          key: cache-tmp-${{ matrix.python-version }}-2023-05-22

      - name: Install kaldifeat
        if: steps.my-cache.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/install-kaldifeat.sh

      - name: Cache LibriSpeech test-clean and test-other datasets
        id: libri-test-clean-and-test-other-data
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/download
          key: cache-libri-test-clean-and-test-other

      - name: Download LibriSpeech test-clean and test-other
        if: steps.libri-test-clean-and-test-other-data.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/download-librispeech-test-clean-and-test-other-dataset.sh

      - name: Prepare manifests for LibriSpeech test-clean and test-other
        shell: bash
        run: |
          .github/scripts/prepare-librispeech-test-clean-and-test-other-manifests.sh

      - name: Cache LibriSpeech test-clean and test-other fbank features
        id: libri-test-clean-and-test-other-fbank
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/fbank-libri
          key: cache-libri-fbank-test-clean-and-test-other-v2

      - name: Compute fbank for LibriSpeech test-clean and test-other
        if: steps.libri-test-clean-and-test-other-fbank.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/compute-fbank-librispeech-test-clean-and-test-other.sh

      - name: Inference with pre-trained model
        shell: bash
        env:
          GITHUB_EVENT_NAME: ${{ github.event_name }}
          GITHUB_EVENT_LABEL_NAME: ${{ github.event.label.name }}
        run: |
          mkdir -p egs/librispeech/ASR/data
          ln -sfv ~/tmp/fbank-libri egs/librispeech/ASR/data/fbank
          ls -lh egs/librispeech/ASR/data/*

          sudo apt-get -qq install git-lfs tree
          export PYTHONPATH=$PWD:$PYTHONPATH
          export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH
          export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH

          .github/scripts/run-pre-trained-transducer-stateless-librispeech-960h.sh

      - name: Display decoding results for transducer_stateless_multi_datasets
        if: github.event_name == 'schedule' || github.event.label.name == 'run-decode'
        shell: bash
        run: |
          cd egs/librispeech/ASR/
          tree ./transducer_stateless_multi_datasets/exp

          cd transducer_stateless_multi_datasets
          echo "results for transducer_stateless_multi_datasets"
          echo "===greedy search==="
          find exp/greedy_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/greedy_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

          echo "===fast_beam_search==="
          find exp/fast_beam_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/fast_beam_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

          echo "===modified beam search==="
          find exp/modified_beam_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/modified_beam_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

      - name: Upload decoding results for transducer_stateless_multi_datasets
        uses: actions/upload-artifact@v2
        if: github.event_name == 'schedule' || github.event.label.name == 'run-decode'
        with:
          name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-latest-cpu-transducer_stateless_multi_datasets-100h-2022-03-01
          path: egs/librispeech/ASR/transducer_stateless_multi_datasets/exp/
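For readers unfamiliar with the `concurrency` stanza repeated in these workflows (an aside, not part of the commit): grouping on the workflow name plus `${{ github.ref }}` gives each branch or pull request its own group, and `cancel-in-progress: true` cancels a still-running older run when a newer push arrives, instead of queuing behind it. The pattern in isolation, with a placeholder group name:

    concurrency:
      # one group per workflow and per ref (branch / PR);
      # "some-workflow-name" is a placeholder for the per-file name
      group: some-workflow-name-${{ github.ref }}
      # a newer run in the same group cancels the older one
      cancel-in-progress: true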
@ -1,80 +0,0 @@
# Copyright 2021 Fangjun Kuang (csukuangfj@gmail.com)

# See ../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name: run-pre-trained-trandsucer-stateless-modified-2-aishell

on:
  push:
    branches:
      - master
  pull_request:
    types: [labeled]

concurrency:
  group: run_pre_trained_transducer_stateless_modified_2_aishell-${{ github.ref }}
  cancel-in-progress: true

jobs:
  run_pre_trained_transducer_stateless_modified_2_aishell:
    if: github.event.label.name == 'ready' || github.event_name == 'push'
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-latest]
        python-version: [3.8]

      fail-fast: false

    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: Setup Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
          cache: 'pip'
          cache-dependency-path: '**/requirements-ci.txt'

      - name: Install Python dependencies
        run: |
          grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install
          pip uninstall -y protobuf
          pip install --no-binary protobuf protobuf==3.20.*

      - name: Cache kaldifeat
        id: my-cache
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/kaldifeat
          key: cache-tmp-${{ matrix.python-version }}-2023-05-22

      - name: Install kaldifeat
        if: steps.my-cache.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/install-kaldifeat.sh

      - name: Inference with pre-trained model
        shell: bash
        run: |
          sudo apt-get -qq install git-lfs tree
          export PYTHONPATH=$PWD:$PYTHONPATH
          export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH
          export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH
          .github/scripts/run-pre-trained-transducer-stateless-modified-2-aishell.sh
@ -1,80 +0,0 @@
# Copyright 2021 Fangjun Kuang (csukuangfj@gmail.com)

# See ../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name: run-pre-trained-trandsucer-stateless-modified-aishell

on:
  push:
    branches:
      - master
  pull_request:
    types: [labeled]

concurrency:
  group: run_pre_trained_transducer_stateless_modified_aishell-${{ github.ref }}
  cancel-in-progress: true

jobs:
  run_pre_trained_transducer_stateless_modified_aishell:
    if: github.event.label.name == 'ready' || github.event_name == 'push'
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-latest]
        python-version: [3.8]

      fail-fast: false

    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: Setup Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
          cache: 'pip'
          cache-dependency-path: '**/requirements-ci.txt'

      - name: Install Python dependencies
        run: |
          grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install
          pip uninstall -y protobuf
          pip install --no-binary protobuf protobuf==3.20.*

      - name: Cache kaldifeat
        id: my-cache
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/kaldifeat
          key: cache-tmp-${{ matrix.python-version }}-2023-05-22

      - name: Install kaldifeat
        if: steps.my-cache.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/install-kaldifeat.sh

      - name: Inference with pre-trained model
        shell: bash
        run: |
          sudo apt-get -qq install git-lfs tree
          export PYTHONPATH=$PWD:$PYTHONPATH
          export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH
          export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH
          .github/scripts/run-pre-trained-transducer-stateless-modified-aishell.sh
@ -1,158 +0,0 @@
# Copyright 2021 Fangjun Kuang (csukuangfj@gmail.com)

# See ../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name: run-pre-trained-transducer-stateless

on:
  push:
    branches:
      - master
  pull_request:
    types: [labeled]

  schedule:
    # minute (0-59)
    # hour (0-23)
    # day of the month (1-31)
    # month (1-12)
    # day of the week (0-6)
    # nightly build at 15:50 UTC time every day
    - cron: "50 15 * * *"

concurrency:
  group: run_pre_trained_transducer_stateless-${{ github.ref }}
  cancel-in-progress: true

jobs:
  run_pre_trained_transducer_stateless:
    if: github.event.label.name == 'ready' || github.event.label.name == 'run-decode' || github.event_name == 'push' || github.event_name == 'schedule'
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-latest]
        python-version: [3.8]

      fail-fast: false

    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: Setup Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
          cache: 'pip'
          cache-dependency-path: '**/requirements-ci.txt'

      - name: Install Python dependencies
        run: |
          grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install
          pip uninstall -y protobuf
          pip install --no-binary protobuf protobuf==3.20.*

      - name: Cache kaldifeat
        id: my-cache
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/kaldifeat
          key: cache-tmp-${{ matrix.python-version }}-2023-05-22

      - name: Install kaldifeat
        if: steps.my-cache.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/install-kaldifeat.sh

      - name: Cache LibriSpeech test-clean and test-other datasets
        id: libri-test-clean-and-test-other-data
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/download
          key: cache-libri-test-clean-and-test-other

      - name: Download LibriSpeech test-clean and test-other
        if: steps.libri-test-clean-and-test-other-data.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/download-librispeech-test-clean-and-test-other-dataset.sh

      - name: Prepare manifests for LibriSpeech test-clean and test-other
        shell: bash
        run: |
          .github/scripts/prepare-librispeech-test-clean-and-test-other-manifests.sh

      - name: Cache LibriSpeech test-clean and test-other fbank features
        id: libri-test-clean-and-test-other-fbank
        uses: actions/cache@v2
        with:
          path: |
            ~/tmp/fbank-libri
          key: cache-libri-fbank-test-clean-and-test-other-v2

      - name: Compute fbank for LibriSpeech test-clean and test-other
        if: steps.libri-test-clean-and-test-other-fbank.outputs.cache-hit != 'true'
        shell: bash
        run: |
          .github/scripts/compute-fbank-librispeech-test-clean-and-test-other.sh

      - name: Inference with pre-trained model
        shell: bash
        env:
          GITHUB_EVENT_NAME: ${{ github.event_name }}
          GITHUB_EVENT_LABEL_NAME: ${{ github.event.label.name }}
        run: |
          mkdir -p egs/librispeech/ASR/data
          ln -sfv ~/tmp/fbank-libri egs/librispeech/ASR/data/fbank
          ls -lh egs/librispeech/ASR/data/*

          sudo apt-get -qq install git-lfs tree
          export PYTHONPATH=$PWD:$PYTHONPATH
          export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH
          export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH

          .github/scripts/run-pre-trained-transducer-stateless.sh

      - name: Display decoding results for transducer_stateless
        if: github.event_name == 'schedule' || github.event.label.name == 'run-decode'
        shell: bash
        run: |
          cd egs/librispeech/ASR/
          tree ./transducer_stateless/exp

          cd transducer_stateless
          echo "results for transducer_stateless"
          echo "===greedy search==="
          find exp/greedy_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/greedy_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

          echo "===fast_beam_search==="
          find exp/fast_beam_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/fast_beam_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

          echo "===modified beam search==="
          find exp/modified_beam_search -name "log-*" -exec grep -n --color "best for test-clean" {} + | sort -n -k2
          find exp/modified_beam_search -name "log-*" -exec grep -n --color "best for test-other" {} + | sort -n -k2

      - name: Upload decoding results for transducer_stateless
        uses: actions/upload-artifact@v2
        if: github.event_name == 'schedule' || github.event.label.name == 'run-decode'
        with:
          name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-latest-cpu-transducer_stateless-2022-02-07
          path: egs/librispeech/ASR/transducer_stateless/exp/
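An aside on the `PYTHONPATH` exports used by the inference steps in the workflows above: they point Python at the kaldifeat checkout cached under `~/tmp/kaldifeat`, so a stale or broken cache only surfaces once the inference script runs. A cheaper up-front check could look roughly like this (a sketch, not part of this commit):

    - name: Check that cached kaldifeat is importable
      shell: bash
      run: |
        export PYTHONPATH=~/tmp/kaldifeat/kaldifeat/python:$PYTHONPATH
        export PYTHONPATH=~/tmp/kaldifeat/build/lib:$PYTHONPATH
        # fails fast if the cached build does not match this Python/torch
        python3 -c "import kaldifeat; print(kaldifeat.__file__)"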
.github/workflows/run-yesno-recipe.yml (vendored, 185 lines changed)
@ -1,185 +0,0 @@
# Copyright 2021 Fangjun Kuang (csukuangfj@gmail.com)

# See ../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name: run-yesno-recipe

on:
  push:
    branches:
      - master
  pull_request:
    branches:
      - master

concurrency:
  group: run-yesno-recipe-${{ github.ref }}
  cancel-in-progress: true

jobs:
  run-yesno-recipe:
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        # os: [ubuntu-latest, macos-10.15]
        # TODO: enable macOS for CPU testing
        os: [ubuntu-latest]
        python-version: [3.8]
      fail-fast: false

    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: Setup Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
          cache: 'pip'
          cache-dependency-path: '**/requirements-ci.txt'

      - name: Install libnsdfile and libsox
        if: startsWith(matrix.os, 'ubuntu')
        run: |
          sudo apt update
          sudo apt install -q -y libsndfile1-dev libsndfile1 ffmpeg
          sudo apt install -q -y --fix-missing sox libsox-dev libsox-fmt-all

      - name: Install Python dependencies
        run: |
          grep -v '^#' ./requirements-ci.txt | xargs -n 1 -L 1 pip install
          pip uninstall -y protobuf
          pip install --no-binary protobuf protobuf==3.20.*

          pip install --no-deps --force-reinstall k2==1.24.4.dev20231021+cpu.torch1.13.1 -f https://k2-fsa.github.io/k2/cpu.html
          pip install kaldifeat==1.25.1.dev20231022+cpu.torch1.13.1 -f https://csukuangfj.github.io/kaldifeat/cpu.html

      - name: Run yesno recipe
        shell: bash
        working-directory: ${{github.workspace}}
        run: |
          export PYTHONPATH=$PWD:$PYTHONPATH
          echo $PYTHONPATH

          cd egs/yesno/ASR
          ./prepare.sh
          python3 ./tdnn/train.py
          python3 ./tdnn/decode.py

      - name: Test exporting to pretrained.pt
        shell: bash
        working-directory: ${{github.workspace}}
        run: |
          export PYTHONPATH=$PWD:$PYTHONPATH
          echo $PYTHONPATH

          cd egs/yesno/ASR
          python3 ./tdnn/export.py --epoch 14 --avg 2

          python3 ./tdnn/pretrained.py \
            --checkpoint ./tdnn/exp/pretrained.pt \
            --HLG ./data/lang_phone/HLG.pt \
            --words-file ./data/lang_phone/words.txt \
            download/waves_yesno/0_0_0_1_0_0_0_1.wav \
            download/waves_yesno/0_0_1_0_0_0_1_0.wav

      - name: Test exporting to torchscript
        shell: bash
        working-directory: ${{github.workspace}}
        run: |
          export PYTHONPATH=$PWD:$PYTHONPATH
          echo $PYTHONPATH

          cd egs/yesno/ASR
          python3 ./tdnn/export.py --epoch 14 --avg 2 --jit 1

          python3 ./tdnn/jit_pretrained.py \
            --nn-model ./tdnn/exp/cpu_jit.pt \
            --HLG ./data/lang_phone/HLG.pt \
            --words-file ./data/lang_phone/words.txt \
            download/waves_yesno/0_0_0_1_0_0_0_1.wav \
            download/waves_yesno/0_0_1_0_0_0_1_0.wav

      - name: Test exporting to onnx
        shell: bash
        working-directory: ${{github.workspace}}
        run: |
          export PYTHONPATH=$PWD:$PYTHONPATH
          echo $PYTHONPATH

          cd egs/yesno/ASR
          python3 ./tdnn/export_onnx.py --epoch 14 --avg 2

          echo "Test float32 model"
          python3 ./tdnn/onnx_pretrained.py \
            --nn-model ./tdnn/exp/model-epoch-14-avg-2.onnx \
            --HLG ./data/lang_phone/HLG.pt \
            --words-file ./data/lang_phone/words.txt \
            download/waves_yesno/0_0_0_1_0_0_0_1.wav \
            download/waves_yesno/0_0_1_0_0_0_1_0.wav

          echo "Test int8 model"
          python3 ./tdnn/onnx_pretrained.py \
            --nn-model ./tdnn/exp/model-epoch-14-avg-2.int8.onnx \
            --HLG ./data/lang_phone/HLG.pt \
            --words-file ./data/lang_phone/words.txt \
            download/waves_yesno/0_0_0_1_0_0_0_1.wav \
            download/waves_yesno/0_0_1_0_0_0_1_0.wav

      - name: Test decoding with H
        shell: bash
        working-directory: ${{github.workspace}}
        run: |
          export PYTHONPATH=$PWD:$PYTHONPATH
          echo $PYTHONPATH

          cd egs/yesno/ASR
          python3 ./tdnn/export.py --epoch 14 --avg 2 --jit 1

          python3 ./tdnn/jit_pretrained_decode_with_H.py \
            --nn-model ./tdnn/exp/cpu_jit.pt \
            --H ./data/lang_phone/H.fst \
            --tokens ./data/lang_phone/tokens.txt \
            ./download/waves_yesno/0_0_0_1_0_0_0_1.wav \
            ./download/waves_yesno/0_0_1_0_0_0_1_0.wav \
            ./download/waves_yesno/0_0_1_0_0_1_1_1.wav

      - name: Test decoding with HL
        shell: bash
        working-directory: ${{github.workspace}}
        run: |
          export PYTHONPATH=$PWD:$PYTHONPATH
          echo $PYTHONPATH

          cd egs/yesno/ASR
          python3 ./tdnn/export.py --epoch 14 --avg 2 --jit 1

          python3 ./tdnn/jit_pretrained_decode_with_HL.py \
            --nn-model ./tdnn/exp/cpu_jit.pt \
            --HL ./data/lang_phone/HL.fst \
            --words ./data/lang_phone/words.txt \
            ./download/waves_yesno/0_0_0_1_0_0_0_1.wav \
            ./download/waves_yesno/0_0_1_0_0_0_1_0.wav \
            ./download/waves_yesno/0_0_1_0_0_1_1_1.wav

      - name: Show generated files
        shell: bash
        working-directory: ${{github.workspace}}
        run: |
          cd egs/yesno/ASR
          ls -lh tdnn/exp
          ls -lh data/lang_phone
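In the (now removed) run-yesno-recipe workflow above, `./tdnn/export.py --epoch 14 --avg 2 --jit 1` writes a TorchScript module to `tdnn/exp/cpu_jit.pt`, which the `jit_pretrained*.py` scripts then load. A standalone load check (a sketch, not part of the workflow) would be roughly:

    - name: Check the exported TorchScript model loads
      shell: bash
      working-directory: ${{github.workspace}}
      run: |
        cd egs/yesno/ASR
        # torch.jit.load fails loudly if the export produced a broken archive
        python3 -c "import torch; m = torch.jit.load('tdnn/exp/cpu_jit.pt'); print(type(m))"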
.github/workflows/test.yml (vendored, 158 lines changed)
@ -1,129 +1,109 @@
 # Copyright 2021 Fangjun Kuang (csukuangfj@gmail.com)

 # See ../../LICENSE for clarification regarding multiple authors
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 #
 #     http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.

 name: test

 on:
   push:
     branches:
       - master

   pull_request:
     branches:
       - master

+  workflow_dispatch:
+
 concurrency:
   group: test-${{ github.ref }}
   cancel-in-progress: true

 jobs:
+  generate_build_matrix:
+    if: github.repository_owner == 'csukuangfj' || github.repository_owner == 'k2-fsa'
+    # see https://github.com/pytorch/pytorch/pull/50633
+    runs-on: ubuntu-latest
+    outputs:
+      matrix: ${{ steps.set-matrix.outputs.matrix }}
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+      - name: Generating build matrix
+        id: set-matrix
+        run: |
+          # outputting for debugging purposes
+          python ./.github/scripts/docker/generate_build_matrix.py
+          MATRIX=$(python ./.github/scripts/docker/generate_build_matrix.py)
+          echo "::set-output name=matrix::${MATRIX}"
   test:
-    runs-on: ${{ matrix.os }}
+    needs: generate_build_matrix
+    name: py${{ matrix.python-version }} torch${{ matrix.torch-version }} v${{ matrix.version }}
+    runs-on: ubuntu-latest
     strategy:
-      matrix:
-        os: [ubuntu-latest]
-        python-version: ["3.8"]
-        torch: ["1.13.0"]
-        torchaudio: ["0.13.0"]
-        k2-version: ["1.24.3.dev20230719"]
-
       fail-fast: false
+      matrix:
+        ${{ fromJson(needs.generate_build_matrix.outputs.matrix) }}

     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v4
         with:
           fetch-depth: 0

-      - name: Setup Python ${{ matrix.python-version }}
-        uses: actions/setup-python@v1
-        with:
-          python-version: ${{ matrix.python-version }}
-
-      - name: Install libnsdfile and libsox
-        if: startsWith(matrix.os, 'ubuntu')
-        run: |
-          sudo apt update
-          sudo apt install -q -y libsndfile1-dev libsndfile1 ffmpeg
-          sudo apt install -q -y --fix-missing libsox-dev libsox-fmt-all
-
-      - name: Install Python dependencies
-        run: |
-          python3 -m pip install --upgrade pip pytest
-          # numpy 1.20.x does not support python 3.6
-          pip install numpy==1.19
-          pip install torch==${{ matrix.torch }}+cpu -f https://download.pytorch.org/whl/cpu/torch_stable.html
-          pip install torchaudio==${{ matrix.torchaudio }}+cpu -f https://download.pytorch.org/whl/cpu/torch_stable.html
-
-          pip install k2==${{ matrix.k2-version }}+cpu.torch${{ matrix.torch }} -f https://k2-fsa.github.io/k2/cpu.html
-          pip install git+https://github.com/lhotse-speech/lhotse
-          # icefall requirements
-          pip uninstall -y protobuf
-          pip install --no-binary protobuf protobuf==3.20.*
-
-          pip install kaldifst
-          pip install onnxruntime matplotlib
-          pip install -r requirements.txt
-
-      - name: Install graphviz
-        if: startsWith(matrix.os, 'ubuntu')
+      - name: Free space
         shell: bash
         run: |
-          python3 -m pip install -qq graphviz
-          sudo apt-get -qq install graphviz
+          df -h
+          rm -rf /opt/hostedtoolcache
+          df -h
+          echo "pwd: $PWD"
+          echo "github.workspace ${{ github.workspace }}"

       - name: Run tests
-        if: startsWith(matrix.os, 'ubuntu')
-        run: |
-          ls -lh
-          export PYTHONPATH=$PWD:$PWD/lhotse:$PYTHONPATH
-          echo $PYTHONPATH
-          pytest -v -s ./test
-          # runt tests for conformer ctc
-          cd egs/librispeech/ASR/conformer_ctc
-          pytest -v -s
-
-          cd ../pruned_transducer_stateless
-          pytest -v -s
-
-          cd ../pruned_transducer_stateless2
-          pytest -v -s
-
-          cd ../pruned_transducer_stateless3
-          pytest -v -s
-
-          cd ../pruned_transducer_stateless4
-          pytest -v -s
-
-          echo $PYTHONPATH
-          cd ../pruned_transducer_stateless7
-          pytest -v -s
-
-          cd ../transducer_stateless
-          pytest -v -s
-
-          # cd ../transducer
-          # pytest -v -s
-
-          cd ../transducer_stateless2
-          pytest -v -s
-
-          cd ../transducer_lstm
-          pytest -v -s
-
-          cd ../zipformer
-          pytest -v -s
+        uses: addnab/docker-run-action@v3
+        with:
+          image: ghcr.io/${{ github.repository_owner }}/icefall:cpu-py${{ matrix.python-version }}-torch${{ matrix.torch-version }}-v${{ matrix.version }}
+          options: |
+            --volume ${{ github.workspace }}/:/icefall
+          shell: bash
+          run: |
+            export PYTHONPATH=/icefall:$PYTHONPATH
+            cd /icefall
+            git config --global --add safe.directory /icefall

+            pytest -v -s ./test
+
+            # runt tests for conformer ctc
+            cd egs/librispeech/ASR/conformer_ctc
+            pytest -v -s
+
+            cd ../pruned_transducer_stateless
+            pytest -v -s
+
+            cd ../pruned_transducer_stateless2
+            pytest -v -s
+
+            cd ../pruned_transducer_stateless3
+            pytest -v -s
+
+            cd ../pruned_transducer_stateless4
+            pytest -v -s
+
+            echo $PYTHONPATH
+            cd ../pruned_transducer_stateless7
+            pytest -v -s
+
+            cd ../transducer_stateless
+            pytest -v -s
+
+            # cd ../transducer
+            # pytest -v -s
+
+            cd ../transducer_stateless2
+            pytest -v -s
+
+            cd ../transducer_lstm
+            pytest -v -s
+
+            cd ../zipformer
+            pytest -v -s

       - uses: actions/upload-artifact@v2
         with:
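One caveat about the new `generate_build_matrix` job above: the `::set-output` workflow command it uses was deprecated by GitHub in late 2022 in favor of appending to the file named by `$GITHUB_OUTPUT`. The equivalent step with the current mechanism would look roughly like this (a sketch, not part of this commit):

    - name: Generating build matrix
      id: set-matrix
      run: |
        MATRIX=$(python ./.github/scripts/docker/generate_build_matrix.py)
        # write the step output via $GITHUB_OUTPUT instead of the
        # deprecated ::set-output workflow command
        echo "matrix=${MATRIX}" >> "$GITHUB_OUTPUT"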
.github/workflows/yesno.yml (vendored, new file, 62 lines)
@ -0,0 +1,62 @@
name: yesno

on:
  push:
    branches:
      - master

  pull_request:
    branches:
      - master

  workflow_dispatch:

concurrency:
  group: yesno-${{ github.ref }}
  cancel-in-progress: true

jobs:
  generate_build_matrix:
    if: github.repository_owner == 'csukuangfj' || github.repository_owner == 'k2-fsa'
    # see https://github.com/pytorch/pytorch/pull/50633
    runs-on: ubuntu-latest
    outputs:
      matrix: ${{ steps.set-matrix.outputs.matrix }}
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Generating build matrix
        id: set-matrix
        run: |
          # outputting for debugging purposes
          python ./.github/scripts/docker/generate_build_matrix.py
          MATRIX=$(python ./.github/scripts/docker/generate_build_matrix.py)
          echo "::set-output name=matrix::${MATRIX}"
  yesno:
    needs: generate_build_matrix
    name: py${{ matrix.python-version }} torch${{ matrix.torch-version }} v${{ matrix.version }}
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        ${{ fromJson(needs.generate_build_matrix.outputs.matrix) }}

    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Run the yesno recipe
        uses: addnab/docker-run-action@v3
        with:
          image: ghcr.io/${{ github.repository_owner }}/icefall:cpu-py${{ matrix.python-version }}-torch${{ matrix.torch-version }}-v${{ matrix.version }}
          options: |
            --volume ${{ github.workspace }}/:/icefall
          shell: bash
          run: |
            export PYTHONPATH=/icefall:$PYTHONPATH
            cd /icefall
            git config --global --add safe.directory /icefall

            .github/scripts/yesno/ASR/run.sh
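Because the new yesno workflow above runs entirely inside a published CPU image, the same test can be reproduced outside of Actions with plain docker; a hedged sketch, with the image tag (`cpu-py3.8-torch1.13.1-v1.1` here) assumed rather than taken from this commit:

    # Sketch only: plain-docker equivalent of the docker-run-action step.
    # The image tag must match one actually emitted by generate_build_matrix.py.
    - name: Run yesno recipe with plain docker
      shell: bash
      run: |
        docker run --rm --volume "$PWD":/icefall \
          ghcr.io/k2-fsa/icefall:cpu-py3.8-torch1.13.1-v1.1 \
          bash -c 'export PYTHONPATH=/icefall:$PYTHONPATH && cd /icefall && git config --global --add safe.directory /icefall && .github/scripts/yesno/ASR/run.sh'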
README.md (442 lines changed)
@ -2,46 +2,86 @@
<img src="https://raw.githubusercontent.com/k2-fsa/icefall/master/docs/source/_static/logo.png" width=168>
|
<img src="https://raw.githubusercontent.com/k2-fsa/icefall/master/docs/source/_static/logo.png" width=168>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
## Introduction
|
# Introduction
|
||||||
|
|
||||||
icefall contains ASR recipes for various datasets
|
The icefall project contains speech-related recipes for various datasets
|
||||||
using <https://github.com/k2-fsa/k2>.
|
using [k2-fsa](https://github.com/k2-fsa/k2) and [lhotse](https://github.com/lhotse-speech/lhotse).
|
||||||
|
|
||||||
You can use <https://github.com/k2-fsa/sherpa> to deploy models
|
You can use [sherpa](https://github.com/k2-fsa/sherpa), [sherpa-ncnn](https://github.com/k2-fsa/sherpa-ncnn) or [sherpa-onnx](https://github.com/k2-fsa/sherpa-onnx) for deployment with models
|
||||||
trained with icefall.
|
in icefall; these frameworks also support models not included in icefall; please refer to respective documents for more details.
|
||||||
|
|
||||||
You can try pre-trained models from within your browser without the need
|
You can try pre-trained models from within your browser without the need
|
||||||
to download or install anything by visiting <https://huggingface.co/spaces/k2-fsa/automatic-speech-recognition>
|
to download or install anything by visiting this [huggingface space](https://huggingface.co/spaces/k2-fsa/automatic-speech-recognition).
|
||||||
See <https://k2-fsa.github.io/icefall/huggingface/spaces.html> for more details.
|
Please refer to [document](https://k2-fsa.github.io/icefall/huggingface/spaces.html) for more details.
|
||||||
|
|
||||||
## Installation
|
# Installation
|
||||||
|
|
||||||
Please refer to <https://icefall.readthedocs.io/en/latest/installation/index.html>
|
Please refer to [document](https://icefall.readthedocs.io/en/latest/installation/index.html)
|
||||||
for installation.
|
for installation.
|
||||||
|
|
||||||
## Recipes
|
# Recipes
|
||||||
|
|
||||||
Please refer to <https://icefall.readthedocs.io/en/latest/recipes/index.html>
|
Please refer to [document](https://icefall.readthedocs.io/en/latest/recipes/index.html)
|
||||||
for more information.
|
for more details.
|
||||||
|
|
||||||
We provide the following recipes:
|
## ASR: Automatic Speech Recognition
|
||||||
|
|
||||||
|
### Supported Datasets
|
||||||
- [yesno][yesno]
|
- [yesno][yesno]
|
||||||
- [LibriSpeech][librispeech]
|
|
||||||
- [GigaSpeech][gigaspeech]
|
- [Aidatatang_200zh][aidatatang_200zh]
|
||||||
- [AMI][ami]
|
|
||||||
- [Aishell][aishell]
|
- [Aishell][aishell]
|
||||||
- [Aishell2][aishell2]
|
- [Aishell2][aishell2]
|
||||||
- [Aishell4][aishell4]
|
- [Aishell4][aishell4]
|
||||||
|
- [Alimeeting][alimeeting]
|
||||||
|
- [AMI][ami]
|
||||||
|
- [CommonVoice][commonvoice]
|
||||||
|
- [Corpus of Spontaneous Japanese][csj]
|
||||||
|
- [GigaSpeech][gigaspeech]
|
||||||
|
- [LibriCSS][libricss]
|
||||||
|
- [LibriSpeech][librispeech]
|
||||||
|
- [Libriheavy][libriheavy]
|
||||||
|
- [Multi-Dialect Broadcast News Arabic Speech Recognition][mgb2]
|
||||||
|
- [PeopleSpeech][peoplespeech]
|
||||||
|
- [SPGISpeech][spgispeech]
|
||||||
|
- [Switchboard][swbd]
|
||||||
- [TIMIT][timit]
|
- [TIMIT][timit]
|
||||||
- [TED-LIUM3][tedlium3]
|
- [TED-LIUM3][tedlium3]
|
||||||
- [Aidatatang_200zh][aidatatang_200zh]
|
|
||||||
- [WenetSpeech][wenetspeech]
|
|
||||||
- [Alimeeting][alimeeting]
|
|
||||||
- [Switchboard][swbd]
|
|
||||||
- [TAL_CSASR][tal_csasr]
|
- [TAL_CSASR][tal_csasr]
|
||||||
|
- [Voxpopuli][voxpopuli]
|
||||||
|
- [XBMU-AMDO31][xbmu-amdo31]
|
||||||
|
- [WenetSpeech][wenetspeech]
|
||||||
|
|
||||||
### yesno
|
More datasets will be added in the future.
|
||||||
|
|
||||||
|
### Supported Models
|
||||||
|
|
||||||
|
The [LibriSpeech][librispeech] recipe supports the most comprehensive set of models, you are welcome to try them out.
|
||||||
|
|
||||||
|
#### CTC
|
||||||
|
- TDNN LSTM CTC
|
||||||
|
- Conformer CTC
|
||||||
|
- Zipformer CTC
|
||||||
|
|
||||||
|
#### MMI
|
||||||
|
- Conformer MMI
|
||||||
|
- Zipformer MMI
|
||||||
|
|
||||||
|
#### Transducer
|
||||||
|
- Conformer-based Encoder
|
||||||
|
- LSTM-based Encoder
|
||||||
|
- Zipformer-based Encoder
|
||||||
|
- LSTM-based Predictor
|
||||||
|
- [Stateless Predictor](https://research.google/pubs/rnn-transducer-with-stateless-prediction-network/)
|
||||||
|
|
||||||
|
#### Whisper
|
||||||
|
- [OpenAi Whisper](https://arxiv.org/abs/2212.04356) (We support fine-tuning on AiShell-1.)
|
||||||
|
|
||||||
|
If you are willing to contribute to icefall, please refer to [contributing](https://icefall.readthedocs.io/en/latest/contributing/index.html) for more details.
|
||||||
|
|
||||||
|
We would like to highlight the performance of some of the recipes here.
|
||||||
|
|
||||||
|
### [yesno][yesno]
|
||||||
|
|
||||||
This is the simplest ASR recipe in `icefall` and can be run on CPU.
|
This is the simplest ASR recipe in `icefall` and can be run on CPU.
|
||||||
Training takes less than 30 seconds and gives you the following WER:
|
Training takes less than 30 seconds and gives you the following WER:
|
||||||
@ -52,350 +92,264 @@ Training takes less than 30 seconds and gives you the following WER:
|
|||||||
We provide a Colab notebook for this recipe: [](https://colab.research.google.com/drive/1tIjjzaJc3IvGyKiMCDWO-TSnBgkcuN3B?usp=sharing)
|
We provide a Colab notebook for this recipe: [](https://colab.research.google.com/drive/1tIjjzaJc3IvGyKiMCDWO-TSnBgkcuN3B?usp=sharing)
|
||||||
|
|
||||||
|
|
||||||
### LibriSpeech
|
### [LibriSpeech][librispeech]
|
||||||
|
|
||||||
Please see <https://github.com/k2-fsa/icefall/blob/master/egs/librispeech/ASR/RESULTS.md>
|
Please see [RESULTS.md](https://github.com/k2-fsa/icefall/blob/master/egs/librispeech/ASR/RESULTS.md)
|
||||||
for the **latest** results.
|
for the **latest** results.
|
||||||
|
|
||||||
We provide 5 models for this recipe:
|
#### [Conformer CTC](https://github.com/k2-fsa/icefall/tree/master/egs/librispeech/ASR/conformer_ctc)
|
||||||
|
|
||||||
- [conformer CTC model][LibriSpeech_conformer_ctc]
|
|
||||||
- [TDNN LSTM CTC model][LibriSpeech_tdnn_lstm_ctc]
|
|
||||||
- [Transducer: Conformer encoder + LSTM decoder][LibriSpeech_transducer]
|
|
||||||
- [Transducer: Conformer encoder + Embedding decoder][LibriSpeech_transducer_stateless]
|
|
||||||
- [Transducer: Zipformer encoder + Embedding decoder][LibriSpeech_zipformer]
|
|
||||||
|
|
||||||
#### Conformer CTC Model
|
|
||||||
|
|
||||||
The best WER we currently have is:
|
|
||||||
|
|
||||||
| | test-clean | test-other |
|
| | test-clean | test-other |
|
||||||
|-----|------------|------------|
|
|-----|------------|------------|
|
||||||
| WER | 2.42 | 5.73 |
|
| WER | 2.42 | 5.73 |
|
||||||
|
|
||||||
|
|
||||||
We provide a Colab notebook to run a pre-trained conformer CTC model: [](https://colab.research.google.com/drive/1huyupXAcHsUrKaWfI83iMEJ6J0Nh0213?usp=sharing)
|
We provide a Colab notebook to test the pre-trained model: [](https://colab.research.google.com/drive/1huyupXAcHsUrKaWfI83iMEJ6J0Nh0213?usp=sharing)
|
||||||
|
|
||||||
#### TDNN LSTM CTC Model
|
#### [TDNN LSTM CTC](https://github.com/k2-fsa/icefall/tree/master/egs/librispeech/ASR/tdnn_lstm_ctc)
|
||||||
|
|
||||||
The WER for this model is:
|
|
||||||
|
|
||||||
| | test-clean | test-other |
|
| | test-clean | test-other |
|
||||||
|-----|------------|------------|
|
|-----|------------|------------|
|
||||||
| WER | 6.59 | 17.69 |
|
| WER | 6.59 | 17.69 |
|
||||||
|
|
||||||
We provide a Colab notebook to run a pre-trained TDNN LSTM CTC model: [](https://colab.research.google.com/drive/1-iSfQMp2So-We_Uu49N4AAcMInB72u9z?usp=sharing)
|
We provide a Colab notebook to test the pre-trained model: [](https://colab.research.google.com/drive/1-iSfQMp2So-We_Uu49N4AAcMInB72u9z?usp=sharing)
|
||||||
|
|
||||||
|
|
||||||
#### Transducer: Conformer encoder + LSTM decoder
|
#### [Transducer (Conformer Encoder + LSTM Predictor)](https://github.com/k2-fsa/icefall/tree/master/egs/librispeech/ASR/transducer)
|
||||||
|
|
||||||
Using Conformer as encoder and LSTM as decoder.
|
| | test-clean | test-other |
|
||||||
|
|---------------|------------|------------|
|
||||||
|
| greedy_search | 3.07 | 7.51 |
|
||||||
|
|
||||||
The best WER with greedy search is:
|
We provide a Colab notebook to test the pre-trained model: [](https://colab.research.google.com/drive/1_u6yK9jDkPwG_NLrZMN2XK7Aeq4suMO2?usp=sharing)
|
||||||
|
|
||||||
| | test-clean | test-other |
|
#### [Transducer (Conformer Encoder + Stateless Predictor)](https://github.com/k2-fsa/icefall/tree/master/egs/librispeech/ASR/transducer)
|
||||||
|-----|------------|------------|
|
|
||||||
| WER | 3.07 | 7.51 |
|
|
||||||
|
|
||||||
We provide a Colab notebook to run a pre-trained RNN-T conformer model: [](https://colab.research.google.com/drive/1_u6yK9jDkPwG_NLrZMN2XK7Aeq4suMO2?usp=sharing)
|
| | test-clean | test-other |
|
||||||
|
|---------------------------------------|------------|------------|
|
||||||
#### Transducer: Conformer encoder + Embedding decoder
|
| modified_beam_search (`beam_size=4`) | 2.56 | 6.27 |
|
||||||
|
|
||||||
Using Conformer as encoder. The decoder consists of 1 embedding layer
|
|
||||||
and 1 convolutional layer.
|
|
||||||
|
|
||||||
The best WER using modified beam search with beam size 4 is:
|
|
||||||
|
|
||||||
| | test-clean | test-other |
|
|
||||||
|-----|------------|------------|
|
|
||||||
| WER | 2.56 | 6.27 |
|
|
||||||
|
|
||||||
Note: No auxiliary losses are used in the training and no LMs are used
|
|
||||||
in the decoding.
|
|
||||||
|
|
||||||
We provide a Colab notebook to run a pre-trained transducer conformer + stateless decoder model: [](https://colab.research.google.com/drive/1CO1bXJ-2khDckZIW8zjOPHGSKLHpTDlp?usp=sharing)
|
|
||||||
|
|
||||||
|
|
||||||
#### k2 pruned RNN-T
|
We provide a Colab notebook to test the pre-trained model: [](https://colab.research.google.com/drive/1CO1bXJ-2khDckZIW8zjOPHGSKLHpTDlp?usp=sharing)
|
||||||
|
|
||||||
|
|
||||||
|
#### [Transducer (Zipformer Encoder + Stateless Predictor)](https://github.com/k2-fsa/icefall/tree/master/egs/librispeech/ASR/zipformer)
|
||||||
|
|
||||||
|
WER (modified_beam_search `beam_size=4` unless further stated)
|
||||||
|
|
||||||
|
1. LibriSpeech-960hr
|
||||||
|
|
||||||
| Encoder | Params | test-clean | test-other | epochs | devices |
|
| Encoder | Params | test-clean | test-other | epochs | devices |
|
||||||
|-----------------|--------|------------|------------|---------|------------|
|
|-----------------|--------|------------|------------|---------|------------|
|
||||||
| zipformer | 65.5M | 2.21 | 4.79 | 50 | 4 32G-V100 |
|
| Zipformer | 65.5M | 2.21 | 4.79 | 50 | 4 32G-V100 |
|
||||||
| zipformer-small | 23.2M | 2.42 | 5.73 | 50 | 2 32G-V100 |
|
| Zipformer-small | 23.2M | 2.42 | 5.73 | 50 | 2 32G-V100 |
|
||||||
| zipformer-large | 148.4M | 2.06 | 4.63 | 50 | 4 32G-V100 |
|
| Zipformer-large | 148.4M | 2.06 | 4.63 | 50 | 4 32G-V100 |
|
||||||
| zipformer-large | 148.4M | 2.00 | 4.38 | 174 | 8 80G-A100 |
|
| Zipformer-large | 148.4M | 2.00 | 4.38 | 174 | 8 80G-A100 |
|
||||||
|
|
||||||
Note: No auxiliary losses are used in the training and no LMs are used
|
2. LibriSpeech-960hr + GigaSpeech
|
||||||
in the decoding.
|
|
||||||
|
|
||||||
#### k2 pruned RNN-T + GigaSpeech
|
| Encoder | Params | test-clean | test-other |
|
||||||
|
|-----------------|--------|------------|------------|
|
||||||
| | test-clean | test-other |
|
| Zipformer | 65.5M | 1.78 | 4.08 |
|
||||||
|-----|------------|------------|
|
|
||||||
| WER | 1.78 | 4.08 |
|
|
||||||
|
|
||||||
Note: No auxiliary losses are used in the training and no LMs are used
|
|
||||||
in the decoding.
|
|
||||||
|
|
||||||
#### k2 pruned RNN-T + GigaSpeech + CommonVoice
|
|
||||||
|
|
||||||
| | test-clean | test-other |
|
|
||||||
|-----|------------|------------|
|
|
||||||
| WER | 1.90 | 3.98 |
|
|
||||||
|
|
||||||
Note: No auxiliary losses are used in the training and no LMs are used
|
|
||||||
in the decoding.
|
|
||||||
|
|
||||||
|
|
||||||
### GigaSpeech
|
3. LibriSpeech-960hr + GigaSpeech + CommonVoice
|
||||||
|
|
||||||
We provide three models for this recipe:
|
| Encoder | Params | test-clean | test-other |
|
||||||
|
|-----------------|--------|------------|------------|
|
||||||
|
| Zipformer | 65.5M | 1.90 | 3.98 |
|
||||||
|
|
||||||
- [Conformer CTC model][GigaSpeech_conformer_ctc]
|
|
||||||
- [Pruned stateless RNN-T: Conformer encoder + Embedding decoder + k2 pruned RNN-T loss][GigaSpeech_pruned_transducer_stateless2].
|
|
||||||
- [Transducer: Zipformer encoder + Embedding decoder][GigaSpeech_zipformer]
|
|
||||||
|
|
||||||
#### Conformer CTC
|
### [GigaSpeech][gigaspeech]
|
||||||
|
|
||||||
|
#### [Conformer CTC](https://github.com/k2-fsa/icefall/tree/master/egs/gigaspeech/ASR/conformer_ctc)
|
||||||
|
|
||||||
| | Dev | Test |
|
| | Dev | Test |
|
||||||
|-----|-------|-------|
|
|-----|-------|-------|
|
||||||
| WER | 10.47 | 10.58 |
|
| WER | 10.47 | 10.58 |
|
||||||
|
|
||||||
#### Pruned stateless RNN-T: Conformer encoder + Embedding decoder + k2 pruned RNN-T loss
|
#### [Transducer (pruned_transducer_stateless2)](https://github.com/k2-fsa/icefall/tree/master/egs/gigaspeech/ASR/pruned_transducer_stateless2)
|
||||||
|
|
||||||
|
Conformer Encoder + Stateless Predictor + k2 Pruned RNN-T Loss
|
||||||
|
|
||||||
| | Dev | Test |
|
| | Dev | Test |
|
||||||
|----------------------|-------|-------|
|
|----------------------|-------|-------|
|
||||||
| greedy search | 10.51 | 10.73 |
|
| greedy_search | 10.51 | 10.73 |
|
||||||
| fast beam search | 10.50 | 10.69 |
|
| fast_beam_search | 10.50 | 10.69 |
|
||||||
| modified beam search | 10.40 | 10.51 |
|
| modified_beam_search | 10.40 | 10.51 |
|
||||||
|
|
||||||
#### Transducer: Zipformer encoder + Embedding decoder
|
#### [Transducer (Zipformer Encoder + Stateless Predictor)](https://github.com/k2-fsa/icefall/tree/master/egs/gigaspeech/ASR/zipformer)
|
||||||
|
|
||||||
| | Dev | Test |
|
| | Dev | Test |
|
||||||
|----------------------|-------|-------|
|
|----------------------|-------|-------|
|
||||||
| greedy search | 10.31 | 10.50 |
|
| greedy_search | 10.31 | 10.50 |
|
||||||
| fast beam search | 10.26 | 10.48 |
|
| fast_beam_search | 10.26 | 10.48 |
|
||||||
| modified beam search | 10.25 | 10.38 |
|
| modified_beam_search | 10.25 | 10.38 |
|
||||||
|
|
||||||
|
|
||||||
### Aishell
|
### [Aishell][aishell]
|
||||||
|
|
||||||
We provide three models for this recipe: [conformer CTC model][Aishell_conformer_ctc],
|
#### [TDNN LSTM CTC](https://github.com/k2-fsa/icefall/tree/master/egs/aishell/ASR/tdnn_lstm_ctc)
|
||||||
[TDNN LSTM CTC model][Aishell_tdnn_lstm_ctc], and [Transducer Stateless Model][Aishell_pruned_transducer_stateless7],
|
|
||||||
|
|
||||||
#### Conformer CTC Model
|
|
||||||
|
|
||||||
The best CER we currently have is:
|
|
||||||
|
|
||||||
| | test |
|
|
||||||
|-----|------|
|
|
||||||
| CER | 4.26 |
|
|
||||||
|
|
||||||
#### TDNN LSTM CTC Model
|
|
||||||
|
|
||||||
The CER for this model is:
|
|
||||||
|
|
||||||
| | test |
|
| | test |
|
||||||
|-----|-------|
|
|-----|-------|
|
||||||
| CER | 10.16 |
|
| CER | 10.16 |
|
||||||
|
|
||||||
We provide a Colab notebook to run a pre-trained TDNN LSTM CTC model: [](https://colab.research.google.com/drive/1jbyzYq3ytm6j2nlEt-diQm-6QVWyDDEa?usp=sharing)
|
We provide a Colab notebook to test the pre-trained model: [](https://colab.research.google.com/drive/1jbyzYq3ytm6j2nlEt-diQm-6QVWyDDEa?usp=sharing)
|
||||||
|
|
||||||
#### Transducer Stateless Model
|
#### [Transducer (Conformer Encoder + Stateless Predictor)](https://github.com/k2-fsa/icefall/tree/master/egs/aishell/ASR/transducer_stateless)
|
||||||
|
|
||||||
The best CER we currently have is:
|
|
||||||
|
|
||||||
| | test |
|
| | test |
|
||||||
|-----|------|
|
|-----|------|
|
||||||
| CER | 4.38 |
|
| CER | 4.38 |
|
||||||
|
|
||||||
We provide a Colab notebook to run a pre-trained TransducerStateless model: [](https://colab.research.google.com/drive/14XaT2MhnBkK-3_RqqWq3K90Xlbin-GZC?usp=sharing)
|
We provide a Colab notebook to test the pre-trained model: [](https://colab.research.google.com/drive/14XaT2MhnBkK-3_RqqWq3K90Xlbin-GZC?usp=sharing)
|
||||||
|
|
||||||
|
#### [Transducer (Zipformer Encoder + Stateless Predictor)](https://github.com/k2-fsa/icefall/tree/master/egs/aishell/ASR/zipformer)
|
||||||
|
|
||||||
|
WER (modified_beam_search `beam_size=4`)
|
||||||
|
|
||||||
|
| Encoder | Params | dev | test | epochs |
|
||||||
|
|-----------------|--------|-----|------|---------|
|
||||||
|
| Zipformer | 73.4M | 4.13| 4.40 | 55 |
|
||||||
|
| Zipformer-small | 30.2M | 4.40| 4.67 | 55 |
|
||||||
|
| Zipformer-large | 157.3M | 4.03| 4.28 | 56 |
|
||||||
|
|
||||||
|
|
||||||
### Aishell2
|
### [Aishell4][aishell4]
|
||||||
|
|
||||||
We provide one model for this recipe: [Transducer Stateless Model][Aishell2_pruned_transducer_stateless5].
|
#### [Transducer (pruned_transducer_stateless5)](https://github.com/k2-fsa/icefall/tree/master/egs/aishell4/ASR/pruned_transducer_stateless5)
|
||||||
|
|
||||||
#### Transducer Stateless Model
|
|
||||||
|
|
||||||
The best WER we currently have is:
|
|
||||||
|
|
||||||
| | dev-ios | test-ios |
|
|
||||||
|-----|------------|------------|
|
|
||||||
| WER | 5.32 | 5.56 |
|
|
||||||
|
|
||||||
|
|
||||||
### Aishell4
|
|
||||||
|
|
||||||
We provide one model for this recipe: [Pruned stateless RNN-T: Conformer encoder + Embedding decoder + k2 pruned RNN-T loss][Aishell4_pruned_transducer_stateless5].
|
|
||||||
|
|
||||||
#### Pruned stateless RNN-T: Conformer encoder + Embedding decoder + k2 pruned RNN-T loss (trained with all subsets)
|
|
||||||
|
|
||||||
The best CER we currently have is:
|
|
||||||
|
|
||||||
|
1 Trained with all subsets:
|
||||||
| | test |
|
| | test |
|
||||||
|-----|------------|
|
|-----|------------|
|
||||||
| CER | 29.08 |
|
| CER | 29.08 |
|
||||||
|
|
||||||
|
We provide a Colab notebook to test the pre-trained model: [](https://colab.research.google.com/drive/1z3lkURVv9M7uTiIgf3Np9IntMHEknaks?usp=sharing)
|
||||||
We provide a Colab notebook to run a pre-trained Pruned Transducer Stateless model: [](https://colab.research.google.com/drive/1z3lkURVv9M7uTiIgf3Np9IntMHEknaks?usp=sharing)
|
|
||||||
|
|
||||||
|
|
||||||
### [TIMIT][timit]

#### [TDNN LSTM CTC](https://github.com/k2-fsa/icefall/tree/master/egs/timit/ASR/tdnn_lstm_ctc)

The best PER we currently have is:

|     | TEST   |
|-----|--------|
| PER | 19.71% |

We provide a Colab notebook to test the pre-trained model: <https://colab.research.google.com/drive/1Hs9DA4V96uapw_30uNp32OMJgkuR5VVd?usp=sharing>

#### [TDNN LiGRU CTC](https://github.com/k2-fsa/icefall/tree/master/egs/timit/ASR/tdnn_ligru_ctc)

The PER for this model is:

|     | TEST   |
|-----|--------|
| PER | 17.66% |

We provide a Colab notebook to test the pre-trained model: <https://colab.research.google.com/drive/1z3lkURVv9M7uTiIgf3Np9IntMHEknaks?usp=sharing>
### [TED-LIUM3][tedlium3]

#### [Transducer (Conformer Encoder + Stateless Predictor)](https://github.com/k2-fsa/icefall/tree/master/egs/tedlium3/ASR/transducer_stateless)

|                                      | dev  | test |
|--------------------------------------|------|------|
| modified_beam_search (`beam_size=4`) | 6.91 | 6.33 |

Note: No auxiliary losses are used in the training and no LMs are used in the decoding.

We provide a Colab notebook to test the pre-trained model: <https://colab.research.google.com/drive/1MmY5bBxwvKLNT4A2DJnwiqRXhdchUqPN?usp=sharing>

#### [Transducer (pruned_transducer_stateless)](https://github.com/k2-fsa/icefall/tree/master/egs/tedlium3/ASR/pruned_transducer_stateless)

|                                      | dev  | test |
|--------------------------------------|------|------|
| modified_beam_search (`beam_size=4`) | 6.77 | 6.14 |

We provide a Colab notebook to test the pre-trained model: <https://colab.research.google.com/drive/1je_1zGrOkGVVd4WLzgkXRHxl-I27yWtz?usp=sharing>

### [Aidatatang_200zh][aidatatang_200zh]

#### [Transducer (pruned_transducer_stateless2)](https://github.com/k2-fsa/icefall/tree/master/egs/aidatatang_200zh/ASR/pruned_transducer_stateless2)

|                      | Dev  | Test |
|----------------------|------|------|
| greedy_search        | 5.53 | 6.59 |
| fast_beam_search     | 5.30 | 6.34 |
| modified_beam_search | 5.27 | 6.33 |

We provide a Colab notebook to test the pre-trained model: <https://colab.research.google.com/drive/1wNSnSj3T5oOctbh5IGCa393gKOoQw2GH?usp=sharing>
### [WenetSpeech][wenetspeech]

#### [Transducer (pruned_transducer_stateless2)](https://github.com/k2-fsa/icefall/tree/master/egs/wenetspeech/ASR/pruned_transducer_stateless2)

Trained with the L subset (offline ASR):

|                      | Dev  | Test-Net | Test-Meeting |
|----------------------|------|----------|--------------|
| greedy_search        | 7.80 | 8.75     | 13.49        |
| fast_beam_search     | 7.94 | 8.74     | 13.80        |
| modified_beam_search | 7.76 | 8.71     | 13.41        |

We provide a Colab notebook to test the pre-trained model: <https://colab.research.google.com/drive/1EV4e1CHa1GZgEF-bZgizqI9RyFFehIiN?usp=sharing>

#### [Transducer **Streaming** (pruned_transducer_stateless5)](https://github.com/k2-fsa/icefall/tree/master/egs/wenetspeech/ASR/pruned_transducer_stateless5)

Trained with the L subset (streaming ASR):

|                      | Dev  | Test-Net | Test-Meeting |
|----------------------|------|----------|--------------|
| greedy_search        | 8.78 | 10.12    | 16.16        |
| fast_beam_search     | 9.01 | 10.47    | 16.28        |
| modified_beam_search | 8.53 | 9.95     | 15.81        |
### [Alimeeting][alimeeting]

#### [Transducer (pruned_transducer_stateless2)](https://github.com/k2-fsa/icefall/tree/master/egs/alimeeting/ASR/pruned_transducer_stateless2)

Trained with the far subset:

|                      | Eval  | Test-Net |
|----------------------|-------|----------|
| greedy_search        | 31.77 | 34.66    |
| fast_beam_search     | 31.39 | 33.02    |
| modified_beam_search | 30.38 | 34.25    |

We provide a Colab notebook to test the pre-trained model: <https://colab.research.google.com/drive/1tKr3f0mL17uO_ljdHGKtR7HOmthYHwJG?usp=sharing>
### [TAL_CSASR][tal_csasr]

#### [Transducer (pruned_transducer_stateless5)](https://github.com/k2-fsa/icefall/tree/master/egs/tal_csasr/ASR/pruned_transducer_stateless5)

The best results for Chinese CER (%) and English WER (%), respectively (zh: Chinese, en: English):

| decoding-method      | dev  | dev_zh | dev_en | test | test_zh | test_en |
|----------------------|------|--------|--------|------|---------|---------|
| greedy_search        | 7.30 | 6.48   | 19.19  | 7.39 | 6.66    | 19.13   |
| fast_beam_search     | 7.18 | 6.39   | 18.90  | 7.27 | 6.55    | 18.77   |
| modified_beam_search | 7.15 | 6.35   | 18.95  | 7.22 | 6.50    | 18.70   |

We provide a Colab notebook to test the pre-trained model: <https://colab.research.google.com/drive/1DmIx-NloI1CMU5GdZrlse7TRu4y3Dpf8?usp=sharing>
## TTS: Text-to-Speech

### Supported Datasets

- [LJSpeech][ljspeech]
- [VCTK][vctk]

### Supported Models

- [VITS](https://arxiv.org/abs/2106.06103)

# Deployment with C++

Once you have trained a model in icefall, you may want to deploy it with C++ without Python dependencies.

Please refer to the [document](https://icefall.readthedocs.io/en/latest/recipes/Non-streaming-ASR/librispeech/conformer_ctc.html#deployment-with-c)
for how to do this.

We also provide a Colab notebook, showing you how to run a torch scripted model in [k2][k2] with C++.
Please see: <https://colab.research.google.com/drive/1BIGLWzS36isskMXHKcqC9ysN6pspYXs_?usp=sharing>
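As a minimal sketch of what "torch scripted" means here (illustrative only; the tiny module below is a stand-in, not an icefall model, and `cpu_jit.pt` is just an example filename):

```python
import torch
import torch.nn as nn

# Stand-in for a trained acoustic model; in icefall you would load your
# real nn.Module from a checkpoint instead.
model = nn.Sequential(nn.Linear(80, 512), nn.ReLU(), nn.Linear(512, 500))
model.eval()

# TorchScript the model and serialize it. The resulting file can be loaded
# from C++ via torch::jit::load() with no Python runtime required.
torch.jit.script(model).save("cpu_jit.pt")
```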
[LibriSpeech_tdnn_lstm_ctc]: egs/librispeech/ASR/tdnn_lstm_ctc
[LibriSpeech_conformer_ctc]: egs/librispeech/ASR/conformer_ctc
[LibriSpeech_transducer]: egs/librispeech/ASR/transducer
[LibriSpeech_transducer_stateless]: egs/librispeech/ASR/transducer_stateless
[LibriSpeech_zipformer]: egs/librispeech/ASR/zipformer
[Aishell_tdnn_lstm_ctc]: egs/aishell/ASR/tdnn_lstm_ctc
[Aishell_conformer_ctc]: egs/aishell/ASR/conformer_ctc
[Aishell_pruned_transducer_stateless7]: egs/aishell/ASR/pruned_transducer_stateless7_bbpe
[Aishell2_pruned_transducer_stateless5]: egs/aishell2/ASR/pruned_transducer_stateless5
[Aishell4_pruned_transducer_stateless5]: egs/aishell4/ASR/pruned_transducer_stateless5
[TIMIT_tdnn_lstm_ctc]: egs/timit/ASR/tdnn_lstm_ctc
[TIMIT_tdnn_ligru_ctc]: egs/timit/ASR/tdnn_ligru_ctc
[TED-LIUM3_transducer_stateless]: egs/tedlium3/ASR/transducer_stateless
[TED-LIUM3_pruned_transducer_stateless]: egs/tedlium3/ASR/pruned_transducer_stateless
[GigaSpeech_conformer_ctc]: egs/gigaspeech/ASR/conformer_ctc
[GigaSpeech_pruned_transducer_stateless2]: egs/gigaspeech/ASR/pruned_transducer_stateless2
[GigaSpeech_zipformer]: egs/gigaspeech/ASR/zipformer
[Aidatatang_200zh_pruned_transducer_stateless2]: egs/aidatatang_200zh/ASR/pruned_transducer_stateless2
[WenetSpeech_pruned_transducer_stateless2]: egs/wenetspeech/ASR/pruned_transducer_stateless2
[WenetSpeech_pruned_transducer_stateless5]: egs/wenetspeech/ASR/pruned_transducer_stateless5
[Alimeeting_pruned_transducer_stateless2]: egs/alimeeting/ASR/pruned_transducer_stateless2
[TAL_CSASR_pruned_transducer_stateless5]: egs/tal_csasr/ASR/pruned_transducer_stateless5
[yesno]: egs/yesno/ASR
[librispeech]: egs/librispeech/ASR
[aishell]: egs/aishell/ASR
@ -411,3 +365,15 @@ Please see: [

Download a docker image (GPU)
==============================

Suppose that you select the tag ``torch1.13.0-cuda11.6``, you can use
the following command to download it:

@ -53,6 +74,16 @@ the following command to download it:

.. code-block:: bash

   sudo docker image pull k2fsa/icefall:torch1.13.0-cuda11.6

Download a docker image (CPU)
==============================

Suppose that you select the tag ``cpu-py3.8-torch2.1.2-v1.1``, you can use
the following command to download it:

.. code-block:: bash

   sudo docker pull ghcr.io/k2-fsa/icefall:cpu-py3.8-torch2.1.2-v1.1

Run a docker image with GPU
===========================

@ -65,7 +96,7 @@ Run a docker image with CPU

.. code-block:: bash

-   sudo docker run --rm -it k2fsa/icefall:torch1.13.0-cuda11.6 /bin/bash
+   sudo docker run --rm -it ghcr.io/k2-fsa/icefall:cpu-py3.8-torch2.1.2-v1.1 /bin/bash

Run yesno within a docker container
===================================

@ -74,8 +105,13 @@ After starting the container, the following interface is presented:

.. code-block:: bash

   # GPU-enabled docker
   root@60c947eac59c:/workspace/icefall#

   # CPU-only docker
   root@60c947eac59c:# mkdir /workspace; git clone https://github.com/k2-fsa/icefall
   root@60c947eac59c:# export PYTHONPATH=/workspace/icefall:$PYTHONPATH

It shows the current user is ``root`` and the current working directory
is ``/workspace/icefall``.
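A quick sanity check that ``PYTHONPATH`` is set up correctly inside the container (a hypothetical snippet, not part of the documented commands):

.. code-block:: python

   # If PYTHONPATH points at the icefall checkout, this import succeeds and
   # the printed path should live under /workspace/icefall.
   import icefall

   print(icefall.__file__)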
@ -107,7 +143,7 @@ to switch to the ``yesno`` recipe and run

.. hint::

-   If you are running without GPU, it may report the following error:
+   If you are running a GPU-enabled docker image on a machine without a GPU, it may report the following error:

   .. code-block:: bash
@ -66,13 +66,13 @@ to install dependencies of `icefall`_:

   pip install torch==2.0.0+cpu torchaudio==2.0.0+cpu -f https://download.pytorch.org/whl/torch_stable.html

-   # If you are using macOS or Windows, please use the following command to install torch and torchaudio
+   # If you are using macOS, please use the following command to install torch and torchaudio
   # pip install torch==2.0.0 torchaudio==2.0.0 -f https://download.pytorch.org/whl/torch_stable.html

   # Now install k2
   # Please refer to https://k2-fsa.github.io/k2/installation/from_wheels.html#linux-cpu-example

-   pip install k2==1.24.3.dev20230726+cpu.torch2.0.0 -f https://k2-fsa.github.io/k2/cpu.html
+   pip install k2==1.24.4.dev20231220+cpu.torch2.0.0 -f https://k2-fsa.github.io/k2/cpu.html

   # Install the latest version of lhotse
@ -85,7 +85,7 @@ We can also use it to decode files with the following command:

   # Please refer to https://csukuangfj.github.io/kaldifeat/installation/from_wheels.html
   # for how to install kaldifeat

-   pip install kaldifeat==1.25.0.dev20230726+cpu.torch2.0.0 -f https://csukuangfj.github.io/kaldifeat/cpu.html
+   pip install kaldifeat==1.25.3.dev20231221+cpu.torch2.0.0 -f https://csukuangfj.github.io/kaldifeat/cpu.html

   ./tdnn/pretrained.py \
     --checkpoint ./tdnn/exp/pretrained.pt \

@ -162,7 +162,7 @@ To use ``tdnn/exp/cpu_jit.pt`` with `icefall`_ to decode files, we can use:

   # Please refer to https://csukuangfj.github.io/kaldifeat/installation/from_wheels.html
   # for how to install kaldifeat

-   pip install kaldifeat==1.25.0.dev20230726+cpu.torch2.0.0 -f https://csukuangfj.github.io/kaldifeat/cpu.html
+   pip install kaldifeat==1.25.3.dev20231221+cpu.torch2.0.0 -f https://csukuangfj.github.io/kaldifeat/cpu.html

   ./tdnn/jit_pretrained.py \

@ -249,7 +249,7 @@ To use the generated ONNX model files for decoding with `onnxruntime`_, we can u

   # Please refer to https://csukuangfj.github.io/kaldifeat/installation/from_wheels.html
   # for how to install kaldifeat

-   pip install kaldifeat==1.25.0.dev20230726+cpu.torch2.0.0 -f https://csukuangfj.github.io/kaldifeat/cpu.html
+   pip install kaldifeat==1.25.3.dev20231221+cpu.torch2.0.0 -f https://csukuangfj.github.io/kaldifeat/cpu.html

   ./tdnn/onnx_pretrained.py \
     --nn-model ./tdnn/exp/model-epoch-14-avg-2.onnx \
@ -77,7 +77,7 @@ The next step is to train the RNNLM model. The training command is as follows:

   --use-fp16 0 \
   --tie-weights 1 \
   --embedding-dim 2048 \
-   --hidden_dim 2048 \
+   --hidden-dim 2048 \
   --num-layers 3 \
   --batch-size 300 \
   --lm-data rnn_lm/data/lang_bpe_500/sorted_lm_data.pt \

@ -93,12 +93,3 @@ The next step is to train the RNNLM model. The training command is as follows:

.. note::

   The training of RNNLM can take a long time (usually a couple of days).
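To see why ``--tie-weights 1`` goes hand in hand with matching ``--embedding-dim`` and ``--hidden-dim`` (both 2048 above), here is a hedged sketch of weight tying in general, not icefall's actual RNN-LM code:

.. code-block:: python

   import torch.nn as nn

   vocab_size, dim = 500, 2048  # illustrative sizes
   embed = nn.Embedding(vocab_size, dim)
   rnn = nn.LSTM(dim, dim, num_layers=3, batch_first=True)
   out_proj = nn.Linear(dim, vocab_size, bias=False)

   # Tying shares one (vocab_size, dim) matrix between the input embedding
   # and the output projection, which requires embedding dim == hidden dim.
   out_proj.weight = embed.weight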
docs/source/recipes/TTS/index.rst (new file, 8 lines)
@ -0,0 +1,8 @@

TTS
======

.. toctree::
   :maxdepth: 2

   ljspeech/vits
   vctk/vits
docs/source/recipes/TTS/ljspeech/vits.rst (new file, 123 lines)
@ -0,0 +1,123 @@

VITS
===============

This tutorial shows you how to train a VITS model
with the `LJSpeech <https://keithito.com/LJ-Speech-Dataset/>`_ dataset.

.. note::

   TTS related recipes require packages in ``requirements-tts.txt``.

.. note::

   The VITS paper: `Conditional Variational Autoencoder with Adversarial Learning for End-to-End Text-to-Speech <https://arxiv.org/pdf/2106.06103.pdf>`_

Data preparation
----------------

.. code-block:: bash

   $ cd egs/ljspeech/TTS
   $ ./prepare.sh

To run stage 1 to stage 5, use

.. code-block:: bash

   $ ./prepare.sh --stage 1 --stop_stage 5

Build Monotonic Alignment Search
--------------------------------

.. code-block:: bash

   $ ./prepare.sh --stage -1 --stop_stage -1

or

.. code-block:: bash

   $ cd vits/monotonic_align
   $ python setup.py build_ext --inplace
   $ cd ../../

Training
--------

.. code-block:: bash

   $ export CUDA_VISIBLE_DEVICES="0,1,2,3"
   $ ./vits/train.py \
       --world-size 4 \
       --num-epochs 1000 \
       --start-epoch 1 \
       --use-fp16 1 \
       --exp-dir vits/exp \
       --tokens data/tokens.txt \
       --max-duration 500

.. note::

   You can adjust the hyper-parameters to control the size of the VITS model and
   the training configurations. For more details, please run ``./vits/train.py --help``.

.. note::

   The training can take a long time (usually a couple of days).

Training logs, checkpoints and tensorboard logs are saved in ``vits/exp``.

Inference
---------

The inference part uses checkpoints saved by the training part, so you have to run the
training part first. It will save the ground-truth and generated wavs to the directory
``vits/exp/infer/epoch-*/wav``, e.g., ``vits/exp/infer/epoch-1000/wav``.

.. code-block:: bash

   $ export CUDA_VISIBLE_DEVICES="0"
   $ ./vits/infer.py \
       --epoch 1000 \
       --exp-dir vits/exp \
       --tokens data/tokens.txt \
       --max-duration 500

.. note::

   For more details, please run ``./vits/infer.py --help``.

Export models
-------------

Currently we only support ONNX model exporting. It will generate two files in the given ``exp-dir``:
``vits-epoch-*.onnx`` and ``vits-epoch-*.int8.onnx``.

.. code-block:: bash

   $ ./vits/export-onnx.py \
       --epoch 1000 \
       --exp-dir vits/exp \
       --tokens data/tokens.txt

You can test the exported ONNX model with:

.. code-block:: bash

   $ ./vits/test_onnx.py \
       --model-filename vits/exp/vits-epoch-1000.onnx \
       --tokens data/tokens.txt
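If you prefer to poke at the exported model directly, here is a hedged Python sketch (the input names ``tokens``/``tokens_lens`` and the single audio output are assumptions — inspect the graph or read ``vits/test_onnx.py`` for the real interface):

.. code-block:: python

   import numpy as np
   import onnxruntime as ort

   sess = ort.InferenceSession("vits/exp/vits-epoch-1000.onnx")

   # Always check the real input names/shapes first.
   for inp in sess.get_inputs():
       print(inp.name, inp.shape)

   # Illustrative token IDs; "tokens"/"tokens_lens" are assumed names.
   tokens = np.array([[10, 20, 30]], dtype=np.int64)
   tokens_lens = np.array([3], dtype=np.int64)
   outputs = sess.run(None, {"tokens": tokens, "tokens_lens": tokens_lens})
   print(outputs[0].shape)  # assuming the first output is the audio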
Download pretrained models
--------------------------

If you don't want to train from scratch, you can download the pretrained models
by visiting the following link:

- `<https://huggingface.co/Zengwei/icefall-tts-ljspeech-vits-2023-11-29>`_
docs/source/recipes/TTS/vctk/vits.rst (new file, 125 lines)
@ -0,0 +1,125 @@

VITS
===============

This tutorial shows you how to train a VITS model
with the `VCTK <https://datashare.ed.ac.uk/handle/10283/3443>`_ dataset.

.. note::

   TTS related recipes require packages in ``requirements-tts.txt``.

.. note::

   The VITS paper: `Conditional Variational Autoencoder with Adversarial Learning for End-to-End Text-to-Speech <https://arxiv.org/pdf/2106.06103.pdf>`_

Data preparation
----------------

.. code-block:: bash

   $ cd egs/vctk/TTS
   $ ./prepare.sh

To run stage 1 to stage 6, use

.. code-block:: bash

   $ ./prepare.sh --stage 1 --stop_stage 6

Build Monotonic Alignment Search
--------------------------------

To build the monotonic alignment search, use the following commands:

.. code-block:: bash

   $ ./prepare.sh --stage -1 --stop_stage -1

or

.. code-block:: bash

   $ cd vits/monotonic_align
   $ python setup.py build_ext --inplace
   $ cd ../../

Training
--------

.. code-block:: bash

   $ export CUDA_VISIBLE_DEVICES="0,1,2,3"
   $ ./vits/train.py \
       --world-size 4 \
       --num-epochs 1000 \
       --start-epoch 1 \
       --use-fp16 1 \
       --exp-dir vits/exp \
       --tokens data/tokens.txt \
       --max-duration 350

.. note::

   You can adjust the hyper-parameters to control the size of the VITS model and
   the training configurations. For more details, please run ``./vits/train.py --help``.

.. note::

   The training can take a long time (usually a couple of days).

Training logs, checkpoints and tensorboard logs are saved in ``vits/exp``.

Inference
---------

The inference part uses checkpoints saved by the training part, so you have to run the
training part first. It will save the ground-truth and generated wavs to the directory
``vits/exp/infer/epoch-*/wav``, e.g., ``vits/exp/infer/epoch-1000/wav``.

.. code-block:: bash

   $ export CUDA_VISIBLE_DEVICES="0"
   $ ./vits/infer.py \
       --epoch 1000 \
       --exp-dir vits/exp \
       --tokens data/tokens.txt \
       --max-duration 500

.. note::

   For more details, please run ``./vits/infer.py --help``.

Export models
-------------

Currently we only support ONNX model exporting. It will generate two files in the given ``exp-dir``:
``vits-epoch-*.onnx`` and ``vits-epoch-*.int8.onnx``.

.. code-block:: bash

   $ ./vits/export-onnx.py \
       --epoch 1000 \
       --exp-dir vits/exp \
       --tokens data/tokens.txt

You can test the exported ONNX model with:

.. code-block:: bash

   $ ./vits/test_onnx.py \
       --model-filename vits/exp/vits-epoch-1000.onnx \
       --tokens data/tokens.txt

Download pretrained models
--------------------------

If you don't want to train from scratch, you can download the pretrained models
by visiting the following link:

- `<https://huggingface.co/zrjin/icefall-tts-vctk-vits-2023-12-05>`_
@ -2,7 +2,7 @@ Recipes

=======

This page contains various recipes in ``icefall``.
-Currently, only speech recognition recipes are provided.
+Currently, we provide recipes for speech recognition, language modeling, and speech synthesis.

We may add recipes for other tasks as well in the future.

@ -16,3 +16,4 @@ We may add recipes for other tasks as well in the future.

   Non-streaming-ASR/index
   Streaming-ASR/index
   RNN-LM/index
+   TTS/index
@ -288,8 +288,9 @@ class Aidatatang_200zhAsrDataModule:

                max_duration=self.args.max_duration,
                shuffle=self.args.shuffle,
                num_buckets=self.args.num_buckets,
+               buffer_size=self.args.num_buckets * 2000,
+               shuffle_buffer_size=self.args.num_buckets * 5000,
                drop_last=True,
-               buffer_size=50000,
            )
        else:
            logging.info("Using SimpleCutSampler.")
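For context, a hedged sketch of the sampler this hunk configures (lhotse's `DynamicBucketingSampler`; the manifest path and `max_duration` below are illustrative):

```python
from lhotse import CutSet
from lhotse.dataset import DynamicBucketingSampler

# Hypothetical manifest path, for illustration only.
cuts = CutSet.from_file("data/fbank/aidatatang_cuts_train.jsonl.gz")
num_buckets = 30

sampler = DynamicBucketingSampler(
    cuts,
    max_duration=200.0,  # seconds of audio per batch
    shuffle=True,
    num_buckets=num_buckets,
    # Scaling the buffers with num_buckets (instead of a fixed 50000)
    # keeps memory use proportional to the bucket count.
    buffer_size=num_buckets * 2000,
    shuffle_buffer_size=num_buckets * 5000,
    drop_last=True,
)
```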
egs/aidatatang_200zh/ASR/pruned_transducer_stateless2/export.py (Normal file → Executable file, 25 lines changed)
@ -1,3 +1,4 @@

+#!/usr/bin/env python3
 # Copyright 2021 Xiaomi Corporation (Author: Fangjun Kuang)
 #
 # See ../../../../LICENSE for clarification regarding multiple authors

@ -20,7 +21,7 @@

 Usage:
 ./pruned_transducer_stateless2/export.py \
   --exp-dir ./pruned_transducer_stateless2/exp \
-  --lang-dir data/lang_char \
+  --tokens data/lang_char/tokens.txt \
   --epoch 29 \
   --avg 19

@ -45,12 +46,13 @@ import argparse

 import logging
 from pathlib import Path

+import k2
 import torch
+from scaling_converter import convert_scaled_to_non_scaled
 from train import get_params, get_transducer_model

 from icefall.checkpoint import average_checkpoints, load_checkpoint
-from icefall.lexicon import Lexicon
-from icefall.utils import str2bool
+from icefall.utils import num_tokens, str2bool

@ -85,10 +87,10 @@ def get_parser():

     parser.add_argument(
-        "--lang-dir",
+        "--tokens",
         type=str,
-        default="data/lang_char",
-        help="The lang dir",
+        default="data/lang_char/tokens.txt",
+        help="Path to the tokens.txt.",
     )

     parser.add_argument(

@ -122,10 +124,14 @@ def main():

     logging.info(f"device: {device}")

-    lexicon = Lexicon(params.lang_dir)
-    params.blank_id = 0
-    params.vocab_size = max(lexicon.tokens) + 1
+    # Load tokens.txt here
+    token_table = k2.SymbolTable.from_file(params.tokens)
+
+    # Load id of the <blk> token and the vocab size
+    # <blk> is defined in local/train_bpe_model.py
+    params.blank_id = token_table["<blk>"]
+    params.unk_id = token_table["<unk>"]
+    params.vocab_size = num_tokens(token_table) + 1  # +1 for <blk>

     logging.info(params)

@ -152,6 +158,7 @@ def main():

     model.eval()

     if params.jit:
+        convert_scaled_to_non_scaled(model, inplace=True)
         # We won't use the forward() method of the model in C++, so just ignore
         # it here.
         # Otherwise, one of its arguments is a ragged tensor and is not
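The same `--lang-dir` → `--tokens` refactor recurs in several export scripts below. A hedged, plain-Python sketch of the idea (mimicking what `k2.SymbolTable.from_file` plus `icefall.utils.num_tokens` do, not reproducing them exactly):

```python
from pathlib import Path

# tokens.txt holds one "<symbol> <id>" pair per line; path as in the diff.
token2id = {}
for line in Path("data/lang_char/tokens.txt").read_text().splitlines():
    sym, idx = line.split()
    token2id[sym] = int(idx)

blank_id = token2id["<blk>"]
# Disambiguation symbols such as #0, #1 are not real tokens, so they are
# excluded when deriving the vocabulary size.
real_tokens = [s for s in token2id if not s.startswith("#")]
vocab_size = len(real_tokens)  # roughly num_tokens(token_table) + 1
print(blank_id, vocab_size)
```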
egs/aidatatang_200zh/ASR/pruned_transducer_stateless2/lstmp.py (new symbolic link)
@ -0,0 +1 @@

../../../librispeech/ASR/lstm_transducer_stateless2/lstmp.py
@ -242,6 +242,7 @@ def main():

    opts.frame_opts.snip_edges = False
    opts.frame_opts.samp_freq = params.sample_rate
    opts.mel_opts.num_bins = params.feature_dim
+   opts.mel_opts.high_freq = -400

    fbank = kaldifeat.Fbank(opts)
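This `high_freq` change repeats in several `pretrained.py` hunks below; a short note on what it does (the explanation is the standard Kaldi convention, not stated in the diff itself):

```python
import kaldifeat

opts = kaldifeat.FbankOptions()
# In Kaldi/kaldifeat, a non-positive high_freq is an offset from the Nyquist
# frequency: with 16 kHz audio (Nyquist = 8000 Hz), high_freq = -400 places
# the top edge of the mel filterbank at 8000 - 400 = 7600 Hz.
opts.mel_opts.high_freq = -400
```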
(new symbolic link)
@ -0,0 +1 @@

../../../librispeech/ASR/pruned_transducer_stateless3/scaling_converter.py
@ -24,3 +24,10 @@ The following table lists the differences among them.

The decoder in `transducer_stateless` is modified from the paper
[Rnn-Transducer with Stateless Prediction Network](https://ieeexplore.ieee.org/document/9054419/).
We place an additional Conv1d layer right after the input embedding layer.

# Whisper

Recipe to fine-tune large pretrained models.

|           | Encoder     | Decoder     | Comment                              |
|-----------|-------------|-------------|--------------------------------------|
| `whisper` | Transformer | Transformer | supports fine-tuning using deepspeed |
@ -1,10 +1,120 @@

## Results

### Aishell training results (Fine-tuning Pretrained Models)

#### Whisper

[./whisper](./whisper)

##### Fine-tuning results on the Aishell test set for whisper medium, large-v2, and large-v3

|          | test (before fine-tuning) | test (after fine-tuning) | comment                                   |
|----------|---------------------------|--------------------------|-------------------------------------------|
| medium   | 7.23                      | 3.27                     | --epoch 10 --avg 4, ddp                   |
| large-v2 | 6.56                      | 2.47                     | --epoch 10 --avg 6, deepspeed zero stage1 |
| large-v3 | 6.06                      | 2.84                     | --epoch 5 --avg 3, deepspeed zero stage1  |

Command for training is:
```bash
pip install -r whisper/requirements.txt

./prepare.sh --stage 30 --stop_stage 30

# fine-tuning with deepspeed zero stage 1
torchrun --nproc-per-node 8 ./whisper/train.py \
  --max-duration 200 \
  --exp-dir whisper/exp_large_v2 \
  --model-name large-v2 \
  --deepspeed \
  --deepspeed_config ./whisper/ds_config_zero1.json

# fine-tuning with ddp
torchrun --nproc-per-node 8 ./whisper/train.py \
  --max-duration 200 \
  --exp-dir whisper/exp_medium \
  --base-lr 1e-5 \
  --model-name medium
```

Command for decoding using fine-tuned models:
```bash
git lfs install
git clone https://huggingface.co/yuekai/icefall_asr_aishell_whisper
ln -s icefall_asr_aishell_whisper/exp_large_v2/epoch-10-avg6.pt whisper/exp_large_v2/epoch-999.pt

python3 ./whisper/decode.py \
  --exp-dir whisper/exp_large_v2 \
  --model-name large-v2 \
  --epoch 999 --avg 1 \
  --beam-size 10 --max-duration 50
```

Command for decoding using pretrained models (before fine-tuning):
```bash
python3 ./whisper/decode.py \
  --exp-dir whisper/exp_large_v2 \
  --model-name large-v2 \
  --epoch -1 --avg 1 \
  --remove-whisper-encoder-input-length-restriction False \
  --beam-size 10 --max-duration 50
```

Fine-tuned models, training logs, decoding logs, tensorboard and decoding results
are available at
<https://huggingface.co/yuekai/icefall_asr_aishell_whisper>

### Aishell training result (Stateless Transducer)

#### Zipformer (Byte-level BPE)

[./zipformer](./zipformer/)

It's reworked Zipformer with Pruned RNNT loss, trained with Byte-level BPE, `vocab_size` set to 500.

##### normal-scaled model, number of model parameters: 65549011, i.e., 65.55 M

|                      | test | dev  | comment             |
|----------------------|------|------|---------------------|
| greedy search        | 4.54 | 4.31 | --epoch 40 --avg 10 |
| modified beam search | 4.37 | 4.11 | --epoch 40 --avg 10 |
| fast beam search     | 4.43 | 4.17 | --epoch 40 --avg 10 |

```bash
./prepare.sh

export CUDA_VISIBLE_DEVICES="0,1"

./zipformer/train_bbpe.py \
  --world-size 2 \
  --num-epochs 40 \
  --start-epoch 1 \
  --use-fp16 1 \
  --context-size 2 \
  --enable-musan 0 \
  --exp-dir zipformer/exp_bbpe \
  --max-duration 1000 \
  --base-lr 0.045 \
  --lr-batches 7500 \
  --lr-epochs 10 \
  --spec-aug-time-warp-factor 20
```

Command for decoding is:
```bash
for m in greedy_search modified_beam_search fast_beam_search ; do
  ./zipformer/decode_bbpe.py \
    --epoch 40 \
    --avg 10 \
    --exp-dir ./zipformer_bbpe/exp \
    --bpe-model data/lang_bbpe_500/bbpe.model \
    --context-size 2 \
    --decoding-method $m
done
```

Pretrained models, training logs, decoding logs, tensorboard and decoding results
are available at
<https://huggingface.co/zrjin/icefall-asr-aishell-zipformer-bbpe-2024-01-16>

#### Zipformer (Non-streaming)

-[./zipformer](./zipformer)
+[./zipformer](./zipformer/)

It's reworked Zipformer with Pruned RNNT loss.
**Caution**: It uses `--context-size=1`.

@ -260,7 +370,7 @@ done

Pretrained models, training logs, decoding logs, and decoding results
are available at
<https://huggingface.co/marcoyang/icefall-asr-aishell-zipformer-pruned-transducer-stateless7-2023-03-21>

-#### Pruned transducer stateless 7 (zipformer)
+#### Pruned transducer stateless 7 (Byte-level BPE)

See <https://github.com/k2-fsa/icefall/pull/986>

@ -703,7 +813,6 @@ python3 ./transducer_stateless/decode.py \

  --max-sym-per-frame 3
```

-### Aishell training results (Transducer-stateless)
#### 2022-02-18

(Pingfeng Luo) : The tensorboard log for training is available at <https://tensorboard.dev/experiment/k3QL6QMhRbCwCKYKM9po9w/>
And pretrained model is available at <https://huggingface.co/pfluo/icefall-aishell-transducer-stateless-char-2021-12-29>
@ -261,6 +261,7 @@ def main():

    opts.frame_opts.snip_edges = False
    opts.frame_opts.samp_freq = params.sample_rate
    opts.mel_opts.num_bins = params.feature_dim
+   opts.mel_opts.high_freq = -400

    fbank = kaldifeat.Fbank(opts)
@ -29,7 +29,14 @@ import os

from pathlib import Path

import torch
-from lhotse import CutSet, Fbank, FbankConfig, LilcomChunkyWriter
+from lhotse import (
+    CutSet,
+    Fbank,
+    FbankConfig,
+    LilcomChunkyWriter,
+    WhisperFbank,
+    WhisperFbankConfig,
+)
from lhotse.recipes.utils import read_manifests_if_cached

from icefall.utils import get_executor, str2bool

@ -42,9 +49,14 @@ torch.set_num_threads(1)

torch.set_num_interop_threads(1)

-def compute_fbank_aishell(num_mel_bins: int = 80, perturb_speed: bool = False):
+def compute_fbank_aishell(
+    num_mel_bins: int = 80,
+    perturb_speed: bool = False,
+    whisper_fbank: bool = False,
+    output_dir: str = "data/fbank",
+):
    src_dir = Path("data/manifests")
-    output_dir = Path("data/fbank")
+    output_dir = Path(output_dir)
    num_jobs = min(15, os.cpu_count())

    dataset_parts = (

@ -68,8 +80,12 @@ def compute_fbank_aishell(num_mel_bins: int = 80, perturb_speed: bool = False):

        list(manifests.keys()),
        dataset_parts,
    )
-    extractor = Fbank(FbankConfig(num_mel_bins=num_mel_bins))
+    if whisper_fbank:
+        extractor = WhisperFbank(
+            WhisperFbankConfig(num_filters=num_mel_bins, device="cuda")
+        )
+    else:
+        extractor = Fbank(FbankConfig(num_mel_bins=num_mel_bins))

    with get_executor() as ex:  # Initialize the executor only once.
        for partition, m in manifests.items():

@ -82,7 +98,7 @@ def compute_fbank_aishell(num_mel_bins: int = 80, perturb_speed: bool = False):

                supervisions=m["supervisions"],
            )
            if "train" in partition and perturb_speed:
-                logging.info(f"Doing speed perturb")
+                logging.info("Doing speed perturb")
                cut_set = (
                    cut_set + cut_set.perturb_speed(0.9) + cut_set.perturb_speed(1.1)
                )

@ -111,6 +127,18 @@ def get_args():

        default=False,
        help="Enable 0.9 and 1.1 speed perturbation for data augmentation. Default: False.",
    )
+    parser.add_argument(
+        "--whisper-fbank",
+        type=str2bool,
+        default=False,
+        help="Use WhisperFbank instead of Fbank. Default: False.",
+    )
+    parser.add_argument(
+        "--output-dir",
+        type=str,
+        default="data/fbank",
+        help="Output directory. Default: data/fbank.",
+    )
    return parser.parse_args()

@ -121,5 +149,8 @@ if __name__ == "__main__":

    args = get_args()
    compute_fbank_aishell(
-        num_mel_bins=args.num_mel_bins, perturb_speed=args.perturb_speed
+        num_mel_bins=args.num_mel_bins,
+        perturb_speed=args.perturb_speed,
+        whisper_fbank=args.whisper_fbank,
+        output_dir=args.output_dir,
    )
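A hedged sketch of the two extractor paths this introduces (assumes a lhotse version that ships `WhisperFbank`; the silence input is purely illustrative):

```python
import numpy as np
from lhotse import Fbank, FbankConfig, WhisperFbank, WhisperFbankConfig

num_mel_bins = 80  # whisper large-v3 would use 128

standard = Fbank(FbankConfig(num_mel_bins=num_mel_bins))
whisper = WhisperFbank(WhisperFbankConfig(num_filters=num_mel_bins, device="cpu"))

samples = np.zeros(16000, dtype=np.float32)  # one second of silence @ 16 kHz
print(standard.extract(samples, sampling_rate=16000).shape)  # (frames, 80)
print(whisper.extract(samples, sampling_rate=16000).shape)   # (frames, 80)
```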
@ -376,3 +376,16 @@ if [ $stage -le 12 ] && [ $stop_stage -ge 12 ]; then

    --vocab-size 4336 \
    --master-port 12345
fi

# whisper large-v3 uses 128 mel bins; the others use 80 mel bins
whisper_mel_bins=80
output_dir=data/fbank_whisper
if [ $stage -le 30 ] && [ $stop_stage -ge 30 ]; then
  log "Stage 30: Compute ${whisper_mel_bins} dim fbank for whisper model fine-tuning"
  if [ ! -f $output_dir/.aishell.whisper.done ]; then
    mkdir -p $output_dir
    ./local/compute_fbank_aishell.py --perturb-speed ${perturb_speed} --num-mel-bins ${whisper_mel_bins} --whisper-fbank true --output-dir $output_dir
    ./local/compute_fbank_musan.py --num-mel-bins ${whisper_mel_bins} --whisper-fbank true --output-dir $output_dir
    touch $output_dir/.aishell.whisper.done
  fi
fi
@ -47,12 +47,12 @@ import argparse

import logging
from pathlib import Path

+import k2
import torch
from train import add_model_arguments, get_params, get_transducer_model

from icefall.checkpoint import average_checkpoints, find_checkpoints, load_checkpoint
-from icefall.lexicon import Lexicon
-from icefall.utils import str2bool
+from icefall.utils import num_tokens, str2bool

@ -106,10 +106,10 @@ def get_parser():

    parser.add_argument(
-        "--lang-dir",
-        type=Path,
-        default=Path("data/lang_char"),
-        help="The lang dir",
+        "--tokens",
+        type=str,
+        default="data/lang_char/tokens.txt",
+        help="Path to the tokens.txt",
    )

    parser.add_argument(

@ -136,10 +136,9 @@ def main():

    logging.info(f"device: {device}")

-    lexicon = Lexicon(params.lang_dir)
-    params.blank_id = 0
-    params.vocab_size = max(lexicon.tokens) + 1
+    token_table = k2.SymbolTable.from_file(params.tokens)
+    params.blank_id = token_table["<blk>"]
+    params.vocab_size = num_tokens(token_table) + 1

    logging.info(params)
@ -240,6 +240,7 @@ def main():

    opts.frame_opts.snip_edges = False
    opts.frame_opts.samp_freq = params.sample_rate
    opts.mel_opts.num_bins = params.feature_dim
+   opts.mel_opts.high_freq = -400

    fbank = kaldifeat.Fbank(opts)
@ -47,6 +47,7 @@ import argparse

import logging
from pathlib import Path

+import k2
import torch
from scaling_converter import convert_scaled_to_non_scaled
from train import add_model_arguments, get_params, get_transducer_model

@ -57,8 +58,7 @@ from icefall.checkpoint import (

    find_checkpoints,
    load_checkpoint,
)
-from icefall.lexicon import Lexicon
-from icefall.utils import str2bool
+from icefall.utils import num_tokens, str2bool

@ -123,10 +123,10 @@ def get_parser():

    parser.add_argument(
-        "--lang-dir",
-        type=Path,
-        default=Path("data/lang_char"),
-        help="The lang dir",
+        "--tokens",
+        type=str,
+        default="data/lang_char/tokens.txt",
+        help="Path to the tokens.txt",
    )

    parser.add_argument(

@ -153,10 +153,9 @@ def main():

    logging.info(f"device: {device}")

-    lexicon = Lexicon(params.lang_dir)
-    params.blank_id = 0
-    params.vocab_size = max(lexicon.tokens) + 1
+    token_table = k2.SymbolTable.from_file(params.tokens)
+    params.blank_id = token_table["<blk>"]
+    params.vocab_size = num_tokens(token_table) + 1

    params.datatang_prob = 0

    logging.info(params)
@ -241,6 +241,7 @@ def main():

    opts.frame_opts.snip_edges = False
    opts.frame_opts.samp_freq = params.sample_rate
    opts.mel_opts.num_bins = params.feature_dim
+   opts.mel_opts.high_freq = -400

    fbank = kaldifeat.Fbank(opts)
@ -49,14 +49,14 @@ import logging

from pathlib import Path
from typing import Dict, Tuple

+import k2
import onnx
-import sentencepiece as spm
import torch
import torch.nn as nn
from decoder2 import Decoder
+from do_not_use_it_directly import add_model_arguments, get_params, get_transducer_model
from onnxruntime.quantization import QuantType, quantize_dynamic
from scaling_converter import convert_scaled_to_non_scaled
-from do_not_use_it_directly import add_model_arguments, get_params, get_transducer_model
from zipformer import Zipformer

from icefall.checkpoint import (

@ -65,8 +65,7 @@ from icefall.checkpoint import (

    find_checkpoints,
    load_checkpoint,
)
-from icefall.lexicon import Lexicon
-from icefall.utils import setup_logger, str2bool
+from icefall.utils import num_tokens, setup_logger, str2bool

@ -123,12 +122,10 @@ def get_parser():

    parser.add_argument(
-        "--lang-dir",
+        "--tokens",
        type=str,
-        help="""The lang dir
-        It contains language related input files such as
-        "lexicon.txt"
-        """,
+        default="data/lang_char/tokens.txt",
+        help="Path to the tokens.txt",
    )

    parser.add_argument(

@ -404,9 +401,9 @@ def main():

    logging.info(f"device: {device}")

-    lexicon = Lexicon(params.lang_dir)
-    params.blank_id = 0
-    params.vocab_size = max(lexicon.tokens) + 1
+    token_table = k2.SymbolTable.from_file(params.tokens)
+    params.blank_id = token_table["<blk>"]
+    params.vocab_size = num_tokens(token_table) + 1

    logging.info(params)
@ -230,6 +230,7 @@ def main():

    opts.frame_opts.snip_edges = False
    opts.frame_opts.samp_freq = 16000
    opts.mel_opts.num_bins = 80
+   opts.mel_opts.high_freq = -400

    fbank = kaldifeat.Fbank(opts)
Some files were not shown because too many files have changed in this diff.