update repo

嘉渊 2023-05-15 18:26:27 +08:00
parent 9f74f26daa
commit 2db4a207d1
5 changed files with 50 additions and 111 deletions

View File

@@ -111,12 +111,12 @@ fi
 world_size=$gpu_num # run on one machine
 if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then
     echo "stage 3: Training"
-    # if ! "${skip_extract_embed}"; then
-    #     echo "extract embeddings..."
-    #     local/extract_embeds.sh \
-    #         --bert_model_name ${bert_model_name} \
-    #         --raw_dataset_path ${feats_dir}
-    # fi
+    if ! "${skip_extract_embed}"; then
+        echo "extract embeddings..."
+        local/extract_embeds.sh \
+            --bert_model_name ${bert_model_name} \
+            --raw_dataset_path ${feats_dir}
+    fi
     mkdir -p ${exp_dir}/exp/${model_dir}
     mkdir -p ${exp_dir}/exp/${model_dir}/log
     INIT_FILE=${exp_dir}/exp/${model_dir}/ddp_init
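A side note on the INIT_FILE line above: an exp-dir file named ddp_init is the usual marker for PyTorch's file:// rendezvous, where every rank hands the same shared path to init_process_group. A minimal sketch under that assumption (illustrative only, not the repo's training code; rank and world_size wiring is left to the caller):

import torch.distributed as dist

def init_ddp(init_file: str, rank: int, world_size: int) -> None:
    # All ranks must point at the same shared file (the INIT_FILE above).
    dist.init_process_group(
        backend="nccl",
        init_method=f"file://{init_file}",
        rank=rank,
        world_size=world_size,
    )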

View File

@@ -3,20 +3,17 @@
 stage=1
 stop_stage=3
 bert_model_root="../../huggingface_models"
 bert_model_name="bert-base-chinese"
-#bert_model_name="chinese-roberta-wwm-ext"
-#bert_model_name="mengzi-bert-base"
 raw_dataset_path="../DATA"
-model_path=${bert_model_root}/${bert_model_name}
+model_path=${bert_model_name}
 . utils/parse_options.sh || exit 1;
-nj=100
+nj=32
-for data_set in train dev_ios test_ios;do
-    scp=$raw_dataset_path/dump/fbank/${data_set}/text
-    local_scp_dir_raw=$raw_dataset_path/embeds/$bert_model_name/${data_set}
+for data_set in train dev test;do
+    scp=$raw_dataset_path/data/${data_set}/text
+    local_scp_dir_raw=${raw_dataset_path}/data/embeds/${data_set}
     local_scp_dir=$local_scp_dir_raw/split$nj
     local_records_dir=$local_scp_dir_raw/ark
@@ -31,7 +28,7 @@ for data_set in train dev_ios test_ios;do
     utils/split_scp.pl $scp ${split_scps}
-    for num in {0..24};do
+    for num in {0..7};do
         tmp=`expr $num \* 4`
         if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
@@ -41,23 +38,12 @@ for data_set in train dev_ios test_ios;do
             {
                 beg=0
                 gpu=`expr $beg + $idx`
-                echo $local_scp_dir_raw/log/log.${JOB}
-                python tools/extract_embeds.py $local_scp_dir/text.$JOB.txt ${local_records_dir}/embeds.${JOB}.ark ${local_records_dir}/embeds.${JOB}.scp ${local_records_dir}/embeds.${JOB}.shape ${gpu} ${model_path} &> $local_scp_dir_raw/log/log.${JOB}
+                echo ${local_scp_dir}/log.${JOB}
+                python utils/extract_embeds.py $local_scp_dir/data.$JOB.text ${local_records_dir}/embeds.${JOB}.ark ${local_records_dir}/embeds.${JOB}.scp ${local_records_dir}/embeds.${JOB}.shape ${gpu} ${model_path} &> ${local_scp_dir}/log.${JOB}
             } &
         done
         wait
     fi
-    if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then
-        for idx in {1..4}; do
-            JOB=`expr $tmp + $idx`
-            echo "upload jobid=$JOB"
-            {
-                hadoop fs -put -f ${local_records_dir}/embeds.${JOB}.ark ${odps_des_feature_dir}/embeds.${JOB}.ark
-            } &
-        done
-        wait
-    fi
 done
 if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then
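For orientation, the loop above fans the 32 text splits (nj=32) out as eight rounds of four background jobs, pinning each job to a GPU via gpu=`expr $beg + $idx`, and each job calls extract_embeds.py with six positional arguments: input text split, output ark, output scp, output shape file, GPU id, and model path. The sketch below is a guess at the script's core loop, inferred only from that invocation; the real utils/extract_embeds.py may differ (the shape-file format in particular is an assumption):

import sys
import torch
from kaldiio import WriteHelper
from transformers import AutoModel, AutoTokenizer

text_file, ark_file, scp_file, shape_file, gpu, model_path = sys.argv[1:7]
device = torch.device(f"cuda:{gpu}")
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModel.from_pretrained(model_path).to(device).eval()

with WriteHelper(f"ark,scp:{ark_file},{scp_file}") as writer, \
        open(shape_file, "w") as shapes, open(text_file) as texts:
    for line in texts:
        utt_id, text = line.strip().split(maxsplit=1)
        inputs = tokenizer(text, return_tensors="pt").to(device)
        with torch.no_grad():
            # last_hidden_state: (1, seq_len, hidden) -> (seq_len, hidden)
            embeds = model(**inputs).last_hidden_state[0].cpu().numpy()
        writer(utt_id, embeds)  # writes the ark and its scp index together
        shapes.write(f"{utt_id} {embeds.shape[0]},{embeds.shape[1]}\n")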

View File

@@ -17,7 +17,6 @@ if [ $# != 3 ]; then
 fi
 corpus=$1
-#dict_dir=$2
 tmp=$2
 dir=$3
@@ -35,14 +34,14 @@
 # validate utt-key list, IC0803W0380 is a bad utterance
 awk '{print $1}' $corpus/wav.scp | grep -v 'IC0803W0380' > $tmp/wav_utt.list
 awk '{print $1}' $corpus/trans.txt > $tmp/trans_utt.list
-tools/filter_scp.pl -f 1 $tmp/wav_utt.list $tmp/trans_utt.list > $tmp/utt.list
+utils/filter_scp.pl -f 1 $tmp/wav_utt.list $tmp/trans_utt.list > $tmp/utt.list
 # wav.scp
 awk -F'\t' -v path_prefix=$corpus '{printf("%s\t%s/%s\n",$1,path_prefix,$2)}' $corpus/wav.scp > $tmp/tmp_wav.scp
-tools/filter_scp.pl -f 1 $tmp/utt.list $tmp/tmp_wav.scp | sort -k 1 | uniq > $tmp/wav.scp
+utils/filter_scp.pl -f 1 $tmp/utt.list $tmp/tmp_wav.scp | sort -k 1 | uniq > $tmp/wav.scp
 # text
-tools/filter_scp.pl -f 1 $tmp/utt.list $corpus/trans.txt | sort -k 1 | uniq > $tmp/text
+utils/filter_scp.pl -f 1 $tmp/utt.list $corpus/trans.txt | sort -k 1 | uniq > $tmp/text
 # copy prepared resources from tmp_dir to target dir
 mkdir -p $dir
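Both the old tools/filter_scp.pl and its utils/ replacement are the standard Kaldi filter script: with -f 1 it keeps only the lines of the second file whose first field appears in the given id list. A minimal Python equivalent, for reference:

import sys

id_list_path, input_path = sys.argv[1], sys.argv[2]
with open(id_list_path) as f:
    keep = {line.split()[0] for line in f if line.strip()}
with open(input_path) as f:
    for line in f:
        fields = line.split()
        if fields and fields[0] in keep:
            sys.stdout.write(line)  # keep lines whose key is in the id list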

View File

@@ -8,36 +8,32 @@ gpu_num=8
 count=1
 gpu_inference=true  # Whether to perform gpu decoding, set false for cpu decoding
 # for gpu decoding, inference_nj=ngpu*njob; for cpu decoding, inference_nj=njob
-njob=5
-train_cmd=tools/run.pl
+njob=1
+train_cmd=utils/run.pl
+infer_cmd=utils/run.pl
 # general configuration
-feats_dir="../DATA" #feature output directory, for large data
+feats_dir="../DATA" #feature output directory
 exp_dir="."
 lang=zh
-dumpdir=dump/fbank
-feats_type=fbank
 token_type=char
-type=sound
-scp=wav.scp
-speed_perturb="0.9 1.0 1.1"
-dataset_type=large
+scp=feats.scp
+type=kaldi_ark
-stage=0
-stop_stage=5
+stage=3
+stop_stage=4
+skip_extract_embed=false
 bert_model_root="../../huggingface_models"
 bert_model_name="bert-base-chinese"
 # feature configuration
 feats_dim=80
 sample_frequency=16000
-nj=100
+speed_perturb="0.9,1.0,1.1"
+nj=64
 # data
-tr_dir=
-dev_tst_dir=
+tr_dir=/nfs/wangjiaming.wjm/asr_data/aishell2/AISHELL-2/iOS/data
+dev_tst_dir=/nfs/wangjiaming.wjm/asr_data/aishell2/AISHELL-DEV-TEST-SET
 # exp tag
 tag="exp1"
@@ -55,7 +51,7 @@ valid_set=dev_ios
 test_sets="dev_ios test_ios"
 asr_config=conf/train_asr_paraformerbert_conformer_20e_6d_1280_320.yaml
-model_dir="baseline_$(basename "${asr_config}" .yaml)_${feats_type}_${lang}_${token_type}_${tag}"
+model_dir="baseline_$(basename "${asr_config}" .yaml)_${lang}_${token_type}_${tag}"
 inference_config=conf/decode_asr_transformer_noctc_1best.yaml
 inference_asr_model=valid.acc.ave_10best.pb
@@ -75,86 +71,44 @@ fi
 if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
     echo "stage 0: Data preparation"
     # For training set
-    local/prepare_data.sh ${tr_dir} data/local/train data/train || exit 1;
+    local/prepare_data.sh ${tr_dir} ${feats_dir}/data/local/train ${feats_dir}/data/train || exit 1;
     # For dev and test set
-    for x in Android iOS Mic; do
-        local/prepare_data.sh ${dev_tst_dir}/${x}/dev data/local/dev_${x,,} data/dev_${x,,} || exit 1;
-        local/prepare_data.sh ${dev_tst_dir}/${x}/test data/local/test_${x,,} data/test_${x,,} || exit 1;
-    done
+    for x in iOS; do
+        local/prepare_data.sh ${dev_tst_dir}/${x}/dev ${feats_dir}/data/local/dev_${x,,} ${feats_dir}/data/dev_${x,,} || exit 1;
+        local/prepare_data.sh ${dev_tst_dir}/${x}/test ${feats_dir}/data/local/test_${x,,} ${feats_dir}/data/test_${x,,} || exit 1;
+    done
     # Normalize text to capital letters
-    for x in train dev_android dev_ios dev_mic test_android test_ios test_mic; do
-        mv data/${x}/text data/${x}/text.org
-        paste <(cut -f 1 data/${x}/text.org) <(cut -f 2 data/${x}/text.org | tr '[:lower:]' '[:upper:]') \
-            > data/${x}/text
-        tools/text2token.py -n 1 -s 1 data/${x}/text > data/${x}/text.org
-        mv data/${x}/text.org data/${x}/text
+    for x in train dev_ios test_ios; do
+        mv ${feats_dir}/data/${x}/text ${feats_dir}/data/${x}/text.org
+        paste -d " " <(cut -f 1 ${feats_dir}/data/${x}/text.org) <(cut -f 2- ${feats_dir}/data/${x}/text.org \
+            | tr 'A-Z' 'a-z' | tr -d " ") \
+            > ${feats_dir}/data/${x}/text
+        utils/text2token.py -n 1 -s 1 ${feats_dir}/data/${x}/text > ${feats_dir}/data/${x}/text.org
+        mv ${feats_dir}/data/${x}/text.org ${feats_dir}/data/${x}/text
     done
 fi
-feat_train_dir=${feats_dir}/${dumpdir}/${train_set}; mkdir -p ${feat_train_dir}
-feat_dev_dir=${feats_dir}/${dumpdir}/${valid_set}; mkdir -p ${feat_dev_dir}
 if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
-    echo "stage 1: Feature Generation"
-    # compute fbank features
-    fbankdir=${feats_dir}/fbank
-    steps/compute_fbank.sh --cmd "$train_cmd" --nj $nj --speed_perturb ${speed_perturb} \
-        data/train exp/make_fbank/train ${fbankdir}/train
-    tools/fix_data_feat.sh ${fbankdir}/train
-    for x in android ios mic; do
-        steps/compute_fbank.sh --cmd "$train_cmd" --nj $nj \
-            data/dev_${x} exp/make_fbank/dev_${x} ${fbankdir}/dev_${x}
-        tools/fix_data_feat.sh ${fbankdir}/dev_${x}
-        steps/compute_fbank.sh --cmd "$train_cmd" --nj $nj \
-            data/test_${x} exp/make_fbank/test_${x} ${fbankdir}/test_${x}
-        tools/fix_data_feat.sh ${fbankdir}/test_${x}
-    done
-    # compute global cmvn
-    steps/compute_cmvn.sh --cmd "$train_cmd" --nj $nj \
-        ${fbankdir}/train exp/make_fbank/train
-    # apply cmvn
-    steps/apply_cmvn.sh --cmd "$train_cmd" --nj $nj \
-        ${fbankdir}/${train_set} ${fbankdir}/train/cmvn.json exp/make_fbank/${train_set} ${feat_train_dir}
-    steps/apply_cmvn.sh --cmd "$train_cmd" --nj $nj \
-        ${fbankdir}/${valid_set} ${fbankdir}/train/cmvn.json exp/make_fbank/${valid_set} ${feat_dev_dir}
-    for x in android ios mic; do
-        steps/apply_cmvn.sh --cmd "$train_cmd" --nj $nj \
-            ${fbankdir}/test_${x} ${fbankdir}/train/cmvn.json exp/make_fbank/test_${x} ${feats_dir}/${dumpdir}/test_${x}
-    done
-    cp ${fbankdir}/${train_set}/text ${fbankdir}/${train_set}/speech_shape ${fbankdir}/${train_set}/text_shape ${feat_train_dir}
-    tools/fix_data_feat.sh ${feat_train_dir}
-    cp ${fbankdir}/${valid_set}/text ${fbankdir}/${valid_set}/speech_shape ${fbankdir}/${valid_set}/text_shape ${feat_dev_dir}
-    tools/fix_data_feat.sh ${feat_dev_dir}
-    for x in android ios mic; do
-        cp ${fbankdir}/test_${x}/text ${fbankdir}/test_${x}/speech_shape ${fbankdir}/test_${x}/text_shape ${feats_dir}/${dumpdir}/test_${x}
-        tools/fix_data_feat.sh ${feats_dir}/${dumpdir}/test_${x}
-    done
+    echo "stage 1: Feature and CMVN Generation"
+    utils/compute_cmvn.sh --cmd "$train_cmd" --nj $nj --feats_dim ${feats_dim} ${feats_dir}/data/${train_set}
 fi
 token_list=${feats_dir}/data/${lang}_token_list/char/tokens.txt
 echo "dictionary: ${token_list}"
 if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then
     echo "stage 2: Dictionary Preparation"
-    mkdir -p data/${lang}_token_list/char/
+    mkdir -p ${feats_dir}/data/${lang}_token_list/char/
     echo "make a dictionary"
     echo "<blank>" > ${token_list}
     echo "<s>" >> ${token_list}
     echo "</s>" >> ${token_list}
-    tools/text2token.py -s 1 -n 1 --space "" data/${train_set}/text | cut -f 2- -d" " | tr " " "\n" \
+    utils/text2token.py -s 1 -n 1 --space "" ${feats_dir}/data/${train_set}/text | cut -f 2- -d" " | tr " " "\n" \
         | sort | uniq | grep -a -v -e '^\s*$' | awk '{print $0}' >> ${token_list}
     num_token=$(cat ${token_list} | wc -l)
     echo "<unk>" >> ${token_list}
     vocab_size=$(cat ${token_list} | wc -l)
-    awk -v v=,${vocab_size} '{print $0v}' ${feat_train_dir}/text_shape > ${feat_train_dir}/text_shape.char
-    awk -v v=,${vocab_size} '{print $0v}' ${feat_dev_dir}/text_shape > ${feat_dev_dir}/text_shape.char
-    mkdir -p asr_stats_fbank_zh_char/${train_set}
-    mkdir -p asr_stats_fbank_zh_char/${valid_set}
-    cp ${feat_train_dir}/speech_shape ${feat_train_dir}/text_shape ${feat_train_dir}/text_shape.char asr_stats_fbank_zh_char/${train_set}
-    cp ${feat_dev_dir}/speech_shape ${feat_dev_dir}/text_shape ${feat_dev_dir}/text_shape.char asr_stats_fbank_zh_char/${valid_set}
-fi
+    mkdir -p ${feats_dir}/asr_stats_fbank_zh_char/${train_set}
+    mkdir -p ${feats_dir}/asr_stats_fbank_zh_char/${valid_set}
+fi
 # Training Stage
 world_size=$gpu_num # run on one machine
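One note on the stage 0 hunk above: the retained comment still says "Normalize text to capital letters", but the new paste/cut/tr pipeline does the opposite, lowercasing the transcript and deleting its internal spaces. A small Python rendering of the new behavior, assuming tab-separated "utt-id<TAB>transcript" input lines (the cut default delimiter):

def normalize_line(line: str) -> str:
    # cut -f 1 and cut -f 2- with the default tab delimiter
    utt_id, transcript = line.rstrip("\n").split("\t", 1)
    # tr 'A-Z' 'a-z' then tr -d " "; note str.lower() also folds
    # non-ASCII letters, which the ASCII-only shell tr does not
    transcript = transcript.lower().replace(" ", "")
    # paste -d " " rejoins key and text with a single space
    return f"{utt_id} {transcript}"

print(normalize_line("IC0001W0001\tHELLO 世界 OK"))  # IC0001W0001 hello世界ok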

View File

@@ -37,7 +37,7 @@ def tokenize(data,
         vad = -2
     if bpe_tokenizer is not None:
-        text = bpe_tokenizer.text2tokens("".join(text))
+        text = bpe_tokenizer.text2tokens(text)
     if seg_dict is not None:
         assert isinstance(seg_dict, dict)
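The hunk above drops the "".join(...) before text2tokens, presumably because text already arrives in the form the tokenizer expects. As a hedged aside, the toy below (a sentencepiece stand-in with a hypothetical model path, not FunASR's actual tokenizer API) shows why joining a token list into one string can change BPE output:

import sentencepiece as spm

sp = spm.SentencePieceProcessor(model_file="bpe.model")  # hypothetical model
tokens = ["nice", "weather"]
print(sp.encode("".join(tokens), out_type=str))   # tokenizes "niceweather"
print(sp.encode(" ".join(tokens), out_type=str))  # tokenizes "nice weather"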