#!/usr/bin/env bash

set -eou pipefail

nj=15
stage=-1
stop_stage=100

dl_dir=$PWD/download
# The following files will be downloaded to $dl_dir
# - ptb.train.txt
# - ptb.valid.txt
# - ptb.test.txt

. shared/parse_options.sh || exit 1

# Vocabulary sizes for the SentencePiece models.
# For each size xxx in the array, the script
# generates data/bpe_xxx.
vocab_sizes=(
  500
  1000
  2000
  5000
)

# All files generated by this script are saved in "data".
# You can safely remove "data" and rerun this script to regenerate it.
mkdir -p data
mkdir -p $dl_dir

log() {
  # This function is from espnet
  local fname=${BASH_SOURCE[1]##*/}

  echo -e "$(date '+%Y-%m-%d %H:%M:%S') (${fname}:${BASH_LINENO[0]}:${FUNCNAME[1]}) $*"
}

log "dl_dir: $dl_dir"

if [ $stage -le -1 ] && [ $stop_stage -ge -1 ]; then
  log "Stage -1: Download data"
  if [ ! -f $dl_dir/.complete ]; then
    url=https://raw.githubusercontent.com/townie/PTB-dataset-from-Tomas-Mikolov-s-webpage/master/data
    wget --no-verbose --directory-prefix $dl_dir $url/ptb.train.txt
    wget --no-verbose --directory-prefix $dl_dir $url/ptb.valid.txt
    wget --no-verbose --directory-prefix $dl_dir $url/ptb.test.txt
    touch $dl_dir/.complete
  fi
fi

if [ $stage -le 0 ] && [ $stop_stage -ge 0 ]; then
  log "Stage 0: Train BPE model"
  for vocab_size in ${vocab_sizes[@]}; do
    out_dir=data/bpe_${vocab_size}
    mkdir -p $out_dir

    ./local/train_bpe_model.py \
      --out-dir $out_dir \
      --vocab-size $vocab_size \
      --transcript $dl_dir/ptb.train.txt
  done
fi

if [ $stage -le 1 ] && [ $stop_stage -ge 1 ]; then
  log "Stage 1: Generate LM training data"
  # Note: ptb.train.txt has already been normalized.
  for vocab_size in ${vocab_sizes[@]}; do
    out_dir=data/bpe_${vocab_size}
    mkdir -p $out_dir

    ./local/prepare_lm_training_data.py \
      --bpe-model $out_dir/bpe.model \
      --lm-data $dl_dir/ptb.train.txt \
      --lm-archive $out_dir/lm_data.pt

    ./local/prepare_lm_training_data.py \
      --bpe-model $out_dir/bpe.model \
      --lm-data $dl_dir/ptb.valid.txt \
      --lm-archive $out_dir/lm_data-valid.pt

    ./local/prepare_lm_training_data.py \
      --bpe-model $out_dir/bpe.model \
      --lm-data $dl_dir/ptb.test.txt \
      --lm-archive $out_dir/lm_data-test.pt
  done
fi

if [ $stage -le 2 ] && [ $stop_stage -ge 2 ]; then
  log "Stage 2: Sort LM training data"
  # Sort the LM training data generated in stage 1
  # by sentence length in descending order
  # for ease of training.
  #
  # Sentence length equals the number of BPE tokens
  # in a sentence.
  for vocab_size in ${vocab_sizes[@]}; do
    out_dir=data/bpe_${vocab_size}
    mkdir -p $out_dir

    ./local/sort_lm_training_data.py \
      --in-lm-data $out_dir/lm_data.pt \
      --out-lm-data $out_dir/sorted_lm_data.pt \
      --out-statistics $out_dir/statistics.txt

    ./local/sort_lm_training_data.py \
      --in-lm-data $out_dir/lm_data-valid.pt \
      --out-lm-data $out_dir/sorted_lm_data-valid.pt \
      --out-statistics $out_dir/statistics-valid.txt

    ./local/sort_lm_training_data.py \
      --in-lm-data $out_dir/lm_data-test.pt \
      --out-lm-data $out_dir/sorted_lm_data-test.pt \
      --out-statistics $out_dir/statistics-test.txt
  done
fi
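
# Example invocations (a sketch, not part of the original recipe; it assumes
# this file is saved as prepare.sh and that shared/parse_options.sh follows
# the usual Kaldi-style convention of mapping a --some-option value argument
# onto the shell variable some_option defined above):
#
#   ./prepare.sh                            # run all stages, -1 through 2
#   ./prepare.sh --stage 0 --stop-stage 0   # only train the BPE models
#   ./prepare.sh --dl-dir /data/ptb         # download to a custom directory
#
# Because stage -1 creates $dl_dir/.complete after a successful download,
# rerunning the script skips the download step on subsequent runs.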