Mirror of https://github.com/k2-fsa/icefall.git
minor fixes
parent cec73bd28b
commit aadb4507bf
@@ -144,17 +144,13 @@ Note: If you don't want to train a model from scratch, we have
 provided one for you. You can get it at

-  - non-streaming model:
-  https://huggingface.co/Zengwei/icefall-asr-librispeech-zipformer-2023-05-15
-
-  - streaming model:
-  https://huggingface.co/Zengwei/icefall-asr-librispeech-streaming-zipformer-2023-05-17
+  https://huggingface.co/zrjin/icefall-asr-multi-zh-hans-zipformer-2023-9-2/

 with the following commands:

     sudo apt-get install git-lfs
     git lfs install
-    git clone https://huggingface.co/Zengwei/icefall-asr-librispeech-zipformer-2023-05-15
-    git clone https://huggingface.co/Zengwei/icefall-asr-librispeech-streaming-zipformer-2023-05-17
+    git clone https://huggingface.co/zrjin/icefall-asr-multi-zh-hans-zipformer-2023-9-2/
     # You will find the pre-trained models in exp dir
 """
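Aside (not part of this commit): the same pre-trained model can also be fetched without git-lfs, for example with the huggingface_hub Python package. The sketch below is only a convenience under stated assumptions; the repo id comes from the diff above, while the package usage and the local directory name are not part of the recipe.

# Sketch: download the pre-trained model with huggingface_hub instead of
# `git lfs` + `git clone`. Assumes `pip install huggingface_hub`.
from huggingface_hub import snapshot_download

model_dir = snapshot_download(
    repo_id="zrjin/icefall-asr-multi-zh-hans-zipformer-2023-9-2",   # from the diff above
    local_dir="icefall-asr-multi-zh-hans-zipformer-2023-9-2",       # arbitrary local path
)
print(model_dir)  # the exp/ subdirectory holds the pre-trained checkpoints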
@@ -25,18 +25,18 @@ dataset, you should change the argument values according to your dataset.

 ./zipformer/export.py \
   --exp-dir ./zipformer/exp \
-  --tokens data/lang_bpe_500/tokens.txt \
-  --epoch 30 \
-  --avg 9
+  --tokens data/lang_bpe_2000/tokens.txt \
+  --epoch 23 \
+  --avg 1

 - For streaming model:

 ./zipformer/export.py \
   --exp-dir ./zipformer/exp \
   --causal 1 \
-  --tokens data/lang_bpe_500/tokens.txt \
-  --epoch 30 \
-  --avg 9
+  --tokens data/lang_bpe_2000/tokens.txt \
+  --epoch 23 \
+  --avg 1

 Usage of this script:

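As a quick sanity check after running the export command above, the exported file can be inspected with plain PyTorch. This is a minimal sketch, not part of the diff; the path under --exp-dir is taken from the usage examples below, and the top-level "model" key is an assumption the code falls back from gracefully.

# Sketch: inspect the checkpoint written by ./zipformer/export.py.
import torch

ckpt = torch.load("zipformer/exp/pretrained.pt", map_location="cpu")
print(list(ckpt.keys()))  # show the top-level layout of the saved dict

# Fall back to the raw dict in case the weights are stored flat.
state_dict = ckpt.get("model", ckpt)
for name, tensor in list(state_dict.items())[:5]:
    print(name, tuple(tensor.shape))  # a few parameter names and shapes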
@@ -45,7 +45,7 @@ Usage of this script:
 (1) greedy search
 ./zipformer/pretrained.py \
   --checkpoint ./zipformer/exp/pretrained.pt \
-  --tokens data/lang_bpe_500/tokens.txt \
+  --tokens data/lang_bpe_2000/tokens.txt \
   --method greedy_search \
   /path/to/foo.wav \
   /path/to/bar.wav
@@ -53,7 +53,7 @@ Usage of this script:
 (2) modified beam search
 ./zipformer/pretrained.py \
   --checkpoint ./zipformer/exp/pretrained.pt \
-  --tokens ./data/lang_bpe_500/tokens.txt \
+  --tokens ./data/lang_bpe_2000/tokens.txt \
   --method modified_beam_search \
   /path/to/foo.wav \
   /path/to/bar.wav
@@ -61,7 +61,7 @@ Usage of this script:
 (3) fast beam search
 ./zipformer/pretrained.py \
   --checkpoint ./zipformer/exp/pretrained.pt \
-  --tokens ./data/lang_bpe_500/tokens.txt \
+  --tokens ./data/lang_bpe_2000/tokens.txt \
   --method fast_beam_search \
   /path/to/foo.wav \
   /path/to/bar.wav
@@ -74,7 +74,7 @@ Usage of this script:
   --causal 1 \
   --chunk-size 16 \
   --left-context-frames 128 \
-  --tokens ./data/lang_bpe_500/tokens.txt \
+  --tokens ./data/lang_bpe_2000/tokens.txt \
   --method greedy_search \
   /path/to/foo.wav \
   /path/to/bar.wav
@@ -85,7 +85,7 @@ Usage of this script:
   --causal 1 \
   --chunk-size 16 \
   --left-context-frames 128 \
-  --tokens ./data/lang_bpe_500/tokens.txt \
+  --tokens ./data/lang_bpe_2000/tokens.txt \
   --method modified_beam_search \
   /path/to/foo.wav \
   /path/to/bar.wav
@@ -96,7 +96,7 @@ Usage of this script:
   --causal 1 \
   --chunk-size 16 \
   --left-context-frames 128 \
-  --tokens ./data/lang_bpe_500/tokens.txt \
+  --tokens ./data/lang_bpe_2000/tokens.txt \
   --method fast_beam_search \
   /path/to/foo.wav \
   /path/to/bar.wav
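Every pretrained.py hunk above changes only the --tokens path, pointing it at the lang_bpe_2000 directory used by this recipe instead of the LibriSpeech lang_bpe_500 one. For reference, an icefall tokens.txt is a plain symbol table with one "<token> <id>" pair per line; the sketch below shows how such a file can be read. It is illustrative only, with the path taken from the commands above and the read_tokens helper being hypothetical.

# Sketch: load a tokens.txt (one "<token> <id>" per line) into an
# id -> token map; this is the file that --tokens points pretrained.py at.
def read_tokens(filename: str) -> dict:
    id2token = {}
    with open(filename, encoding="utf-8") as f:
        for line in f:
            token, idx = line.split()
            id2token[int(idx)] = token
    return id2token

id2token = read_tokens("data/lang_bpe_2000/tokens.txt")  # path from the commands above
print(len(id2token))  # vocabulary size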