icefall/egs/ptb/LM/prepare.sh

#!/usr/bin/env bash

# Fix the segmentation fault reported in https://github.com/k2-fsa/icefall/issues/674
export PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python

set -eou pipefail
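# -e: exit on error; -u: treat unset variables as errors;
# -o pipefail: a pipeline fails if any command in it fails.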

nj=15
stage=-1
stop_stage=100

dl_dir=$PWD/download
# The following files will be downloaded to $dl_dir
# - ptb.train.txt
# - ptb.valid.txt
# - ptb.test.txt
. shared/parse_options.sh || exit 1
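
# All of the variables defined above (nj, stage, stop_stage, dl_dir) can be
# overridden from the command line via parse_options.sh, e.g.:
#
#   ./prepare.sh --stage 0 --stop-stage 1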

# Vocab sizes for the sentence piece models.
# This script generates data/lang_bpe_xxx, data/lang_bpe_yyy
# if the array contains xxx, yyy.
vocab_sizes=(
  500
  # 1000
  # 2000
  # 5000
)
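
# For example, vocab_sizes=(500 1000) would create both data/lang_bpe_500
# and data/lang_bpe_1000, along with matching data/lm_training_bpe_*
# directories in the later stages.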
# All files generated by this script are saved in "data".
# You can safely remove "data" and rerun this script to regenerate it.
mkdir -p data
mkdir -p $dl_dir

log() {
  # This function is from espnet
  local fname=${BASH_SOURCE[1]##*/}

  echo -e "$(date '+%Y-%m-%d %H:%M:%S') (${fname}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*"
}
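
# A call to log prints a line such as (values illustrative):
#   2023-01-01 12:00:00 (prepare.sh:123:main) dl_dir: /path/to/download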
log "dl_dir: $dl_dir"

if [ $stage -le -1 ] && [ $stop_stage -ge -1 ]; then
  log "Stage -1: Download data"

  # Caution: The downloaded data has already been normalized for LM training.
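  #
  # $dl_dir/.complete is a sentinel marking a finished download;
  # remove it to force the files to be downloaded again.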
  if [ ! -f $dl_dir/.complete ]; then
    url=https://raw.githubusercontent.com/townie/PTB-dataset-from-Tomas-Mikolov-s-webpage/master/data

    wget --directory-prefix $dl_dir $url/ptb.train.txt
    wget --directory-prefix $dl_dir $url/ptb.valid.txt
    wget --directory-prefix $dl_dir $url/ptb.test.txt

    touch $dl_dir/.complete
  fi
fi

if [ $stage -le 0 ] && [ $stop_stage -ge 0 ]; then
  log "Stage 0: Train BPE model"

  # Caution: You have to use the same BPE model for training your acoustic model.
  for vocab_size in "${vocab_sizes[@]}"; do
    lang_dir=data/lang_bpe_${vocab_size}
    mkdir -p $lang_dir

    ./local/train_bpe_model.py \
      --lang-dir $lang_dir \
      --vocab-size $vocab_size \
      --transcript $dl_dir/ptb.train.txt
  done
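
  # Each $lang_dir now contains a bpe.model file; stage 1 below reads it
  # via the --bpe-model option.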
fi

if [ $stage -le 1 ] && [ $stop_stage -ge 1 ]; then
  log "Stage 1: Generate LM training data"

  # Note: ptb.train.txt has already been normalized.
  for vocab_size in "${vocab_sizes[@]}"; do
    lang_dir=data/lang_bpe_${vocab_size}
    out_dir=data/lm_training_bpe_${vocab_size}
    mkdir -p $out_dir

    ./local/prepare_lm_training_data.py \
      --bpe-model $lang_dir/bpe.model \
      --lm-data $dl_dir/ptb.train.txt \
      --lm-archive $out_dir/lm_data.pt

    ./local/prepare_lm_training_data.py \
      --bpe-model $lang_dir/bpe.model \
      --lm-data $dl_dir/ptb.valid.txt \
      --lm-archive $out_dir/lm_data-valid.pt

    ./local/prepare_lm_training_data.py \
      --bpe-model $lang_dir/bpe.model \
      --lm-data $dl_dir/ptb.test.txt \
      --lm-archive $out_dir/lm_data-test.pt
  done
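
  # $out_dir now holds three tokenized archives (lm_data.pt, lm_data-valid.pt,
  # lm_data-test.pt); stage 2 below sorts each of them.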
fi

if [ $stage -le 2 ] && [ $stop_stage -ge 2 ]; then
  log "Stage 2: Sort LM training data"

  # Sort the LM training data generated in stage 1
  # by sentence length in descending order
  # for ease of training.
  #
  # Sentence length equals the number of BPE tokens
  # in a sentence.
  for vocab_size in "${vocab_sizes[@]}"; do
    out_dir=data/lm_training_bpe_${vocab_size}
    mkdir -p $out_dir

    ./local/sort_lm_training_data.py \
      --in-lm-data $out_dir/lm_data.pt \
      --out-lm-data $out_dir/sorted_lm_data.pt \
      --out-statistics $out_dir/statistics.txt

    ./local/sort_lm_training_data.py \
      --in-lm-data $out_dir/lm_data-valid.pt \
      --out-lm-data $out_dir/sorted_lm_data-valid.pt \
      --out-statistics $out_dir/statistics-valid.txt

    ./local/sort_lm_training_data.py \
      --in-lm-data $out_dir/lm_data-test.pt \
      --out-lm-data $out_dir/sorted_lm_data-test.pt \
      --out-statistics $out_dir/statistics-test.txt
  done
fi
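
# The sorted archives (sorted_lm_data*.pt) and the statistics*.txt files are
# the final outputs of this script. The RNN LM training scripts are expected
# to consume the sorted archives from data/lm_training_bpe_*.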