Mirror of https://github.com/k2-fsa/icefall.git, synced 2025-09-08 16:44:20 +00:00
minor updates
This commit is contained in:
parent
0d10439a83
commit
ca075f2c62
@@ -34,7 +34,7 @@ dataset, you should change the argument values according to your dataset.
 ./zipformer/export.py \
   --exp-dir ./zipformer/exp \
   --tokens data/lang_bpe_2000/tokens.txt \
-  --epoch 23 \
+  --epoch 20 \
   --avg 1 \
   --jit 1

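For reference, a minimal sketch of loading the TorchScript model that the `--jit 1` export above produces. The filename `jit_script.pt` is an assumption based on common icefall export conventions and is not shown in this diff; check the actual name written to `--exp-dir`.

```python
# Minimal sketch: load the TorchScript model exported with --jit 1.
# "jit_script.pt" is an assumed filename; verify it in ./zipformer/exp.
import torch

model = torch.jit.load("./zipformer/exp/jit_script.pt", map_location="cpu")
model.eval()
```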
@@ -54,7 +54,7 @@ for how to use the exported models outside of icefall.
   --chunk-size 16 \
   --left-context-frames 128 \
   --tokens data/lang_bpe_2000/tokens.txt \
-  --epoch 23 \
+  --epoch 20 \
   --avg 1 \
   --jit 1

@@ -73,7 +73,7 @@ for how to use the exported models outside of icefall.
 ./zipformer/export.py \
   --exp-dir ./zipformer/exp \
   --tokens data/lang_bpe_2000/tokens.txt \
-  --epoch 23 \
+  --epoch 20 \
   --avg 1

 - For streaming model:
@@ -82,7 +82,7 @@ for how to use the exported models outside of icefall.
   --exp-dir ./zipformer/exp \
   --causal 1 \
   --tokens data/lang_bpe_2000/tokens.txt \
-  --epoch 23 \
+  --epoch 20 \
   --avg 1

 It will generate a file `pretrained.pt` in the given `exp_dir`. You can later
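As the context line above says, exporting without `--jit` writes `pretrained.pt` to the experiment directory. A minimal sketch of inspecting that checkpoint, assuming the usual icefall layout where the weights live under a `"model"` key:

```python
# Minimal sketch: open pretrained.pt and look at its state dict.
# The {"model": state_dict} layout is an assumption (common in icefall exports).
import torch

checkpoint = torch.load("./zipformer/exp/pretrained.pt", map_location="cpu")
state_dict = checkpoint.get("model", checkpoint)  # fall back to a bare state dict
print(f"checkpoint holds {len(state_dict)} parameter tensors")
# To reuse it, build the zipformer model as in the recipe and call
# model.load_state_dict(state_dict).
```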
@@ -203,7 +203,7 @@ def get_parser():
     parser.add_argument(
         "--epoch",
         type=int,
-        default=23,
+        default=20,
         help="""It specifies the checkpoint to use for decoding.
         Note: Epoch counts from 1.
         You can specify --avg to use more checkpoints for model averaging.""",
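With the default bumped to 20, decoding and export pick up the epoch-20 checkpoint unless `--epoch` is given explicitly. A minimal sketch of how `--epoch` and `--avg` map to checkpoint files, assuming the usual icefall naming scheme `exp_dir/epoch-N.pt`:

```python
# Minimal sketch: checkpoint files selected by --epoch/--avg, assuming the
# common icefall naming scheme exp_dir/epoch-N.pt.
from pathlib import Path
from typing import List

def checkpoint_paths(exp_dir: str, epoch: int = 20, avg: int = 1) -> List[Path]:
    # --avg 1 uses epoch-<epoch>.pt as is; larger values average the last
    # `avg` epoch checkpoints ending at epoch-<epoch>.pt.
    return [Path(exp_dir) / f"epoch-{e}.pt" for e in range(epoch - avg + 1, epoch + 1)]

print(checkpoint_paths("./zipformer/exp", epoch=20, avg=1))
# -> [PosixPath('zipformer/exp/epoch-20.pt')]
```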
@@ -283,17 +283,17 @@ class MultiDataset:
             self.fbank_dir / "kespeech" / "kespeech-asr_cuts_dev_phase2.jsonl.gz"
         )

-        # # WeNetSpeech
-        # logging.info("Loading WeNetSpeech set in lazy mode")
-        # wenetspeech_test_meeting_cuts = load_manifest_lazy(
-        #     self.fbank_dir / "wenetspeech" / "cuts_TEST_MEETING.jsonl.gz"
-        # )
-        # wenetspeech_test_net_cuts = load_manifest_lazy(
-        #     self.fbank_dir / "wenetspeech" / "cuts_TEST_NET.jsonl.gz"
-        # )
-        # wenetspeech_dev_cuts = load_manifest_lazy(
-        #     self.fbank_dir / "wenetspeech" / "cuts_DEV.jsonl.gz"
-        # )
+        # WeNetSpeech
+        logging.info("Loading WeNetSpeech set in lazy mode")
+        wenetspeech_test_meeting_cuts = load_manifest_lazy(
+            self.fbank_dir / "wenetspeech" / "cuts_TEST_MEETING.jsonl.gz"
+        )
+        wenetspeech_test_net_cuts = load_manifest_lazy(
+            self.fbank_dir / "wenetspeech" / "cuts_TEST_NET.jsonl.gz"
+        )
+        wenetspeech_dev_cuts = load_manifest_lazy(
+            self.fbank_dir / "wenetspeech" / "cuts_DEV.jsonl.gz"
+        )

         return {
             "aidatatang_test": aidatatang_test_cuts,
@@ -310,7 +310,7 @@ class MultiDataset:
             "kespeech-asr_test": kespeech_test_cuts,
             "kespeech-asr_dev_phase1": kespeech_dev_phase1_cuts,
             "kespeech-asr_dev_phase2": kespeech_dev_phase2_cuts,
-            # "wenetspeech-meeting_test": wenetspeech_test_meeting_cuts,
-            # "wenetspeech-net_test": wenetspeech_test_net_cuts,
-            # "wenetspeech_dev": wenetspeech_dev_cuts,
+            "wenetspeech-meeting_test": wenetspeech_test_meeting_cuts,
+            "wenetspeech-net_test": wenetspeech_test_net_cuts,
+            "wenetspeech_dev": wenetspeech_dev_cuts,
         }
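With the WeNetSpeech manifests re-enabled, the returned dict now carries three additional test sets. A minimal sketch of how a decode script might walk over them; the `test_cuts()` method name, the constructor signature, the module path, and `decode_dataset` are assumptions standing in for the recipe's actual decode loop.

```python
# Minimal sketch: iterate every test set exposed by MultiDataset, now including
# the three WeNetSpeech splits. Names marked "assumed" below are not from the diff.
import logging

from multi_dataset import MultiDataset  # assumed module path inside the recipe

multi_dataset = MultiDataset(fbank_dir="data/fbank")  # assumed constructor signature
test_sets = multi_dataset.test_cuts()  # assumed name of the method returning the dict

for name, cuts in test_sets.items():
    logging.info(f"Decoding test set: {name}")
    # results = decode_dataset(cuts=cuts, model=model, ...)  # recipe-specific placeholder
```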