mirror of
https://github.com/k2-fsa/icefall.git
synced 2025-12-11 06:55:27 +00:00
Update results
This commit is contained in:
parent
239524d384
commit
f293d4ade3
@ -14,7 +14,8 @@ ln -sfv /path/to/GigaSpeech download/GigaSpeech
|
||||
|
||||
## Performance Record

|     | Dev   | Test  |
|-----|-------|-------|
| WER | 10.47 | 10.58 |
|--------------------------------|-------|-------|
| `conformer_ctc`                | 10.47 | 10.58 |
| `pruned_transducer_stateless2` | 10.52 | 10.62 |

See [RESULTS](/egs/gigaspeech/ASR/RESULTS.md) for details.
@@ -1,4 +1,70 @@
## Results

### GigaSpeech BPE training results (Pruned Transducer 2)

#### 2022-05-12

Results are:

|                      | Dev   | Test  |
|----------------------|-------|-------|
| greedy search        | 10.59 | 10.87 |
| fast beam search     | 10.56 | 10.80 |
| modified beam search | 10.52 | 10.62 |

To reproduce the above result, use the following commands for training:

```
cd egs/gigaspeech/ASR
./prepare.sh
export CUDA_VISIBLE_DEVICES="0,1,2,3,4,5,6,7"
./pruned_transducer_stateless2/train.py \
  --max-duration 120 \
  --num-workers 1 \
  --world-size 8 \
  --exp-dir pruned_transducer_stateless2/exp \
  --bpe-model data/lang_bpe_500/bpe.model \
  --use-fp16 True
```

and the following commands for decoding:

```
# greedy search
./pruned_transducer_stateless2/decode.py \
  --epoch 29 \
  --avg 11 \
  --decoding-method greedy_search \
  --exp-dir pruned_transducer_stateless2/exp \
  --bpe-model data/lang_bpe_500/bpe.model \
  --max-duration 20 \
  --num-workers 1

# fast beam search
./pruned_transducer_stateless2/decode.py \
  --epoch 29 \
  --avg 9 \
  --decoding-method fast_beam_search \
  --exp-dir pruned_transducer_stateless2/exp \
  --bpe-model data/lang_bpe_500/bpe.model \
  --max-duration 20 \
  --num-workers 1

# modified beam search
./pruned_transducer_stateless2/decode.py \
  --epoch 29 \
  --avg 8 \
  --decoding-method modified_beam_search \
  --exp-dir pruned_transducer_stateless2/exp \
  --bpe-model data/lang_bpe_500/bpe.model \
  --max-duration 20 \
  --num-workers 1
```

Pretrained model is available at
<https://huggingface.co/wgb14/icefall-asr-gigaspeech-pruned-transducer-stateless2>

The tensorboard log for training is available at
<https://tensorboard.dev/experiment/zmmM0MLASnG1N2RmJ4MZBw/>

### GigaSpeech BPE training results (Conformer-CTC)

@@ -98,14 +98,14 @@ def get_parser():
    parser.add_argument(
        "--epoch",
        type=int,
        default=28,
        default=29,
        help="It specifies the checkpoint to use for decoding."
        "Note: Epoch counts from 0.",
    )
    parser.add_argument(
        "--avg",
        type=int,
        default=15,
        default=8,
        help="Number of checkpoints to average. Automatically select "
        "consecutive checkpoints before the checkpoint specified by "
        "'--epoch'. ",
@@ -111,7 +111,7 @@ def get_parser():
    parser.add_argument(
        "--num-epochs",
        type=int,
        default=20,
        default=30,
        help="Number of epochs to train.",
    )
Loading…
x
Reference in New Issue
Block a user