From 688fb902dd625981060b00788ed70c4c155d2b50 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=98=89=E6=B8=8A?=
Date: Mon, 15 May 2023 10:59:59 +0800
Subject: [PATCH] aishell/paraformerbert: train from raw wav via unified
 train.py; add --embed_path for BERT embeddings

---
 .../paraformerbert/local/extract_embeds.sh |   4 +-
 egs/aishell/paraformerbert/run.sh          | 103 +++++-------------
 funasr/bin/train.py                        |   6 +
 funasr/utils/prepare_data.py               |   5 +
 4 files changed, 39 insertions(+), 79 deletions(-)

diff --git a/egs/aishell/paraformerbert/local/extract_embeds.sh b/egs/aishell/paraformerbert/local/extract_embeds.sh
index 9cf59404a..453efb83a 100755
--- a/egs/aishell/paraformerbert/local/extract_embeds.sh
+++ b/egs/aishell/paraformerbert/local/extract_embeds.sh
@@ -5,8 +5,6 @@ stop_stage=3
 
 bert_model_root="../../huggingface_models"
 bert_model_name="bert-base-chinese"
-#bert_model_name="chinese-roberta-wwm-ext"
-#bert_model_name="mengzi-bert-base"
 raw_dataset_path="../DATA"
 
 model_path=${bert_model_root}/${bert_model_name}
@@ -16,7 +14,7 @@ nj=32
 
 for data_set in train dev test;do
   scp=$raw_dataset_path/dump/fbank/${data_set}/text
-  local_scp_dir_raw=$raw_dataset_path/embeds/$bert_model_name/${data_set}
+  local_scp_dir_raw=${raw_dataset_path}/${data_set}
   local_scp_dir=$local_scp_dir_raw/split$nj
   local_records_dir=$local_scp_dir_raw/ark
diff --git a/egs/aishell/paraformerbert/run.sh b/egs/aishell/paraformerbert/run.sh
index bcbe1eb60..f4ffaf78c 100755
--- a/egs/aishell/paraformerbert/run.sh
+++ b/egs/aishell/paraformerbert/run.sh
@@ -16,12 +16,11 @@ infer_cmd=utils/run.pl
 feats_dir="../DATA" #feature output dictionary, for large data
 exp_dir="."
 lang=zh
-dumpdir=dump/fbank
-feats_type=fbank
 token_type=char
-scp=feats.scp
-type=kaldi_ark
-stage=0
+type=sound
+scp=wav.scp
+speed_perturb="0.9 1.0 1.1"
+stage=3
 stop_stage=4
 skip_extract_embed=false
@@ -30,15 +29,14 @@ bert_model_name="bert-base-chinese"
 
 # feature configuration
 feats_dim=80
-sample_frequency=16000
-nj=32
-speed_perturb="0.9,1.0,1.1"
+nj=64
 
 # data
-data_aishell=
+raw_data=
+data_url=www.openslr.org/resources/33
 
 # exp tag
-tag=""
+tag="exp1"
 
 . utils/parse_options.sh || exit 1;
@@ -53,7 +51,7 @@ valid_set=dev
 test_sets="dev test"
 
 asr_config=conf/train_asr_paraformerbert_conformer_12e_6d_2048_256.yaml
-model_dir="baseline_$(basename "${asr_config}" .yaml)_${feats_type}_${lang}_${token_type}_${tag}"
+model_dir="baseline_$(basename "${asr_config}" .yaml)_${lang}_${token_type}_${tag}"
 
 inference_config=conf/decode_asr_transformer_noctc_1best.yaml
 inference_asr_model=valid.acc.ave_10best.pb
@@ -70,10 +68,17 @@ else
     _ngpu=0
 fi
 
+if [ ${stage} -le -1 ] && [ ${stop_stage} -ge -1 ]; then
+    echo "stage -1: Data Download"
+    local/download_and_untar.sh ${raw_data} ${data_url} data_aishell
+    local/download_and_untar.sh ${raw_data} ${data_url} resource_aishell
+fi
+
+
 if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
     echo "stage 0: Data preparation"
     # Data preparation
-    local/aishell_data_prep.sh ${data_aishell}/data_aishell/wav ${data_aishell}/data_aishell/transcript ${feats_dir}
+    local/aishell_data_prep.sh ${raw_data}/data_aishell/wav ${raw_data}/data_aishell/transcript ${feats_dir}
     for x in train dev test; do
         cp ${feats_dir}/data/${x}/text ${feats_dir}/data/${x}/text.org
         paste -d " " <(cut -f 1 -d" " ${feats_dir}/data/${x}/text.org) <(cut -f 2- -d" " ${feats_dir}/data/${x}/text.org | tr -d " ") \
@@ -83,46 +88,9 @@ if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
     done
 fi
 
-feat_train_dir=${feats_dir}/${dumpdir}/train; mkdir -p ${feat_train_dir}
-feat_dev_dir=${feats_dir}/${dumpdir}/dev; mkdir -p ${feat_dev_dir}
-feat_test_dir=${feats_dir}/${dumpdir}/test; mkdir -p ${feat_test_dir}
 if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
-    echo "stage 1: Feature Generation"
-    # compute fbank features
-    fbankdir=${feats_dir}/fbank
-    utils/compute_fbank.sh --cmd "$train_cmd" --nj $nj --feats_dim ${feats_dim} --sample_frequency ${sample_frequency} --speed_perturb ${speed_perturb} \
-        ${feats_dir}/data/train ${exp_dir}/exp/make_fbank/train ${fbankdir}/train
-    utils/fix_data_feat.sh ${fbankdir}/train
-    utils/compute_fbank.sh --cmd "$train_cmd" --nj $nj --feats_dim ${feats_dim} --sample_frequency ${sample_frequency} \
-        ${feats_dir}/data/dev ${exp_dir}/exp/make_fbank/dev ${fbankdir}/dev
-    utils/fix_data_feat.sh ${fbankdir}/dev
-    utils/compute_fbank.sh --cmd "$train_cmd" --nj $nj --feats_dim ${feats_dim} --sample_frequency ${sample_frequency} \
-        ${feats_dir}/data/test ${exp_dir}/exp/make_fbank/test ${fbankdir}/test
-    utils/fix_data_feat.sh ${fbankdir}/test
-
-    # compute global cmvn
-    utils/compute_cmvn.sh --cmd "$train_cmd" --nj $nj --feats_dim ${feats_dim} \
-        ${fbankdir}/train ${exp_dir}/exp/make_fbank/train
-
-    # apply cmvn
-    utils/apply_cmvn.sh --cmd "$train_cmd" --nj $nj \
-        ${fbankdir}/train ${fbankdir}/train/cmvn.json ${exp_dir}/exp/make_fbank/train ${feat_train_dir}
-    utils/apply_cmvn.sh --cmd "$train_cmd" --nj $nj \
-        ${fbankdir}/dev ${fbankdir}/train/cmvn.json ${exp_dir}/exp/make_fbank/dev ${feat_dev_dir}
-    utils/apply_cmvn.sh --cmd "$train_cmd" --nj $nj \
-        ${fbankdir}/test ${fbankdir}/train/cmvn.json ${exp_dir}/exp/make_fbank/test ${feat_test_dir}
-
-    cp ${fbankdir}/train/text ${fbankdir}/train/speech_shape ${fbankdir}/train/text_shape ${feat_train_dir}
-    cp ${fbankdir}/dev/text ${fbankdir}/dev/speech_shape ${fbankdir}/dev/text_shape ${feat_dev_dir}
-    cp ${fbankdir}/test/text ${fbankdir}/test/speech_shape ${fbankdir}/test/text_shape ${feat_test_dir}
-
-    utils/fix_data_feat.sh ${feat_train_dir}
-    utils/fix_data_feat.sh ${feat_dev_dir}
-    utils/fix_data_feat.sh ${feat_test_dir}
-
-    #generate ark list
-    utils/gen_ark_list.sh --cmd "$train_cmd" --nj $nj ${feat_train_dir} ${fbankdir}/train ${feat_train_dir}
"$train_cmd" --nj $nj ${feat_train_dir} ${fbankdir}/train ${feat_train_dir} - utils/gen_ark_list.sh --cmd "$train_cmd" --nj $nj ${feat_dev_dir} ${fbankdir}/dev ${feat_dev_dir} + echo "stage 1: Feature and CMVN Generation" + utils/compute_cmvn.sh --cmd "$train_cmd" --nj $nj --feats_dim ${feats_dim} ${feats_dir}/data/${train_set} fi token_list=${feats_dir}/data/${lang}_token_list/char/tokens.txt @@ -135,17 +103,9 @@ if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then echo "" > ${token_list} echo "" >> ${token_list} echo "" >> ${token_list} - utils/text2token.py -s 1 -n 1 --space "" ${feats_dir}/data/train/text | cut -f 2- -d" " | tr " " "\n" \ + utils/text2token.py -s 1 -n 1 --space "" ${feats_dir}/data/$train_set/text | cut -f 2- -d" " | tr " " "\n" \ | sort | uniq | grep -a -v -e '^\s*$' | awk '{print $0}' >> ${token_list} - num_token=$(cat ${token_list} | wc -l) echo "" >> ${token_list} - vocab_size=$(cat ${token_list} | wc -l) - awk -v v=,${vocab_size} '{print $0v}' ${feat_train_dir}/text_shape > ${feat_train_dir}/text_shape.char - awk -v v=,${vocab_size} '{print $0v}' ${feat_dev_dir}/text_shape > ${feat_dev_dir}/text_shape.char - mkdir -p ${feats_dir}/asr_stats_fbank_zh_char/train - mkdir -p ${feats_dir}/asr_stats_fbank_zh_char/dev - cp ${feat_train_dir}/speech_shape ${feat_train_dir}/text_shape ${feat_train_dir}/text_shape.char ${feats_dir}/asr_stats_fbank_zh_char/train - cp ${feat_dev_dir}/speech_shape ${feat_dev_dir}/text_shape ${feat_dev_dir}/text_shape.char ${feats_dir}/asr_stats_fbank_zh_char/dev fi # Training Stage @@ -172,31 +132,22 @@ if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then rank=$i local_rank=$i gpu_id=$(echo $CUDA_VISIBLE_DEVICES | cut -d',' -f$[$i+1]) - asr_train_paraformer.py \ + train.py \ + --task_name asr \ --gpu_id $gpu_id \ --use_preprocessor true \ --token_type char \ --token_list $token_list \ - --train_data_path_and_name_and_type ${feats_dir}/${dumpdir}/${train_set}/${scp},speech,${type} \ - --train_data_path_and_name_and_type ${feats_dir}/${dumpdir}/${train_set}/text,text,text \ - --train_data_path_and_name_and_type ${feats_dir}/embeds/${bert_model_name}/${train_set}/embeds.scp,embed,${type} \ - --train_shape_file ${feats_dir}/asr_stats_fbank_zh_char/${train_set}/speech_shape \ - --train_shape_file ${feats_dir}/asr_stats_fbank_zh_char/${train_set}/text_shape.char \ - --train_shape_file ${feats_dir}/embeds/${bert_model_name}/${train_set}/embeds.shape \ - --valid_data_path_and_name_and_type ${feats_dir}/${dumpdir}/${valid_set}/${scp},speech,${type} \ - --valid_data_path_and_name_and_type ${feats_dir}/${dumpdir}/${valid_set}/text,text,text \ - --valid_data_path_and_name_and_type ${feats_dir}/embeds/${bert_model_name}/${valid_set}/embeds.scp,embed,${type} \ - --valid_shape_file ${feats_dir}/asr_stats_fbank_zh_char/${valid_set}/speech_shape \ - --valid_shape_file ${feats_dir}/asr_stats_fbank_zh_char/${valid_set}/text_shape.char \ - --valid_shape_file ${feats_dir}/embeds/${bert_model_name}/${valid_set}/embeds.shape \ + --data_dir ${feats_dir}/data \ + --train_set ${train_set} \ + --valid_set ${valid_set} \ + --cmvn_file ${feats_dir}/data/${train_set}/cmvn/cmvn.mvn \ + --speed_perturb ${speed_perturb} \ --resume true \ --output_dir ${exp_dir}/exp/${model_dir} \ --config $asr_config \ - --allow_variable_data_keys true \ - --input_size $feats_dim \ --ngpu $gpu_num \ --num_worker_count $count \ - --multiprocessing_distributed true \ --dist_init_method $init_method \ --dist_world_size $world_size \ --dist_rank $rank \ diff --git a/funasr/bin/train.py 
index ba5df1db3..53e5bde28 100755
--- a/funasr/bin/train.py
+++ b/funasr/bin/train.py
@@ -347,6 +347,12 @@ def get_parser():
         default=True,
         help="Apply preprocessing to data or not",
     )
+    parser.add_argument(
+        "--embed_path",
+        type=str,
+        default=None,
+        help="Root directory of pre-extracted embeddings, for models that require them",
+    )
 
     # optimization related
     parser.add_argument(
diff --git a/funasr/utils/prepare_data.py b/funasr/utils/prepare_data.py
index 24382c7db..d11eece0d 100644
--- a/funasr/utils/prepare_data.py
+++ b/funasr/utils/prepare_data.py
@@ -181,6 +181,11 @@ def prepare_data(args, distributed_option):
             ["{}/{}/wav.scp".format(args.data_dir, args.valid_set), data_names[0], data_types[0]],
             ["{}/{}/text".format(args.data_dir, args.valid_set), data_names[1], data_types[1]]
         ]
+        if args.embed_path is not None:
+            args.train_data_path_and_name_and_type.append(
+                [os.path.join(args.embed_path, args.train_set, "embeds.scp"), "embed", "kaldi_ark"])
+            args.valid_data_path_and_name_and_type.append(
+                [os.path.join(args.embed_path, args.valid_set, "embeds.scp"), "embed", "kaldi_ark"])
     else:
        args.train_data_file = os.path.join(args.data_dir, args.train_set, "data.list")
        args.valid_data_file = os.path.join(args.data_dir, args.valid_set, "data.list")
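
Reviewer note (appended commentary, not part of the patch): the snippet below
is a minimal, self-contained sketch of how the --embed_path wiring in
prepare_data() is expected to behave. It is not FunASR's actual loader; the
paths and the Namespace stand-in are illustrative assumptions.

    import os
    from argparse import Namespace

    # Stand-in for the parsed training args; paths are hypothetical examples.
    args = Namespace(
        data_dir="../DATA/data",
        train_set="train",
        embed_path="../DATA",  # root under which extract_embeds.sh wrote embeds.scp
    )

    # Base sources, mirroring prepare_data(): one [path, name, type] triple
    # per input stream.
    args.train_data_path_and_name_and_type = [
        [os.path.join(args.data_dir, args.train_set, "wav.scp"), "speech", "sound"],
        [os.path.join(args.data_dir, args.train_set, "text"), "text", "text"],
    ]

    # With --embed_path set, a third triple is appended so the BERT embeddings
    # (Kaldi scp/ark pairs produced by extract_embeds.sh) are loaded alongside
    # speech and text.
    if args.embed_path is not None:
        args.train_data_path_and_name_and_type.append(
            [os.path.join(args.embed_path, args.train_set, "embeds.scp"),
             "embed", "kaldi_ark"])

    for path, name, dtype in args.train_data_path_and_name_and_type:
        print("{:>6} ({}): {}".format(name, dtype, path))

Appending whole [path, name, type] triples (rather than appending a path string
onto the first triple) keeps every entry a complete data source, which is why
the prepare_data() hunk above appends list triples.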