Mirror of https://github.com/modelscope/FunASR, synced 2025-09-15 14:48:36 +08:00

Commit 6e32028a70 (parent cc7020e078): update data2vec finetune
This commit is contained in:
@@ -0,0 +1,6 @@
beam_size: 1
penalty: 0.0
maxlenratio: 0.0
minlenratio: 0.0
ctc_weight: 0.0
lm_weight: 0.15
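For context, these options feed the joint attention/CTC/LM beam search used at decode time: with beam_size: 1 the search is effectively greedy, and ctc_weight: 0.0 (the "noctc" setup) drops the CTC branch entirely. A minimal sketch of how such weights are typically combined per hypothesis, assuming per-token log-probabilities att_lp, ctc_lp and lm_lp (illustrative names, not part of this commit):

def combined_score(att_lp: float, ctc_lp: float, lm_lp: float,
                   ctc_weight: float = 0.0, lm_weight: float = 0.15) -> float:
    """Typical joint scoring for attention/CTC decoding with an external LM.

    With ctc_weight = 0.0, as in this decode config, only the attention
    decoder and the LM contribute to the hypothesis score.
    """
    return (1.0 - ctc_weight) * att_lp + ctc_weight * ctc_lp + lm_weight * lm_lp


# Example: pure attention + LM scoring for one candidate token.
score = combined_score(att_lp=-0.7, ctc_lp=-1.2, lm_lp=-0.9)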
@@ -0,0 +1,104 @@
# network architecture
# encoder related
encoder: data2vec_encoder
encoder_conf:
    extractor_mode: layer_norm
    encoder_layerdrop: 0.1
    dropout_input: 0.0
    dropout_features: 0.0
    feature_grad_mult: 0.0
    encoder_embed_dim: 768

    mask_prob: 0.65
    mask_length: 10

    loss_beta: 0
    loss_scale: null

    instance_norm_target_layer: true
    average_top_k_layers: 8

    pos_conv_depth: 5
    conv_pos: 95

    ema_decay: 0.999
    ema_end_decay: 0.9999
    ema_anneal_end_step: 30000
    ema_transformer_only: true
    ema_layers_only: true

    require_same_masks: true
    mask_dropout: 0


# decoder related
decoder: paraformer_decoder_san
decoder_conf:
    attention_heads: 12
    linear_units: 3072
    num_blocks: 6
    dropout_rate: 0.1
    positional_dropout_rate: 0.1
    self_attention_dropout_rate: 0.0
    src_attention_dropout_rate: 0.0

model: paraformer
model_conf:
    ctc_weight: 0.3
    lsm_weight: 0.1
    length_normalized_loss: false
    predictor_weight: 1.0
    sampling_ratio: 0.4

# minibatch related
batch_type: length
batch_bins: 25000
num_workers: 16

# optimization related
accum_grad: 1
grad_clip: 5
max_epoch: 50
val_scheduler_criterion:
    - valid
    - acc
best_model_criterion:
-   - valid
    - acc
    - max
keep_nbest_models: 10

optim: adam
optim_conf:
    lr: 0.00002
scheduler: warmuplr
scheduler_conf:
    warmup_steps: 30000

specaug: specaug
specaug_conf:
    apply_time_warp: true
    time_warp_window: 5
    time_warp_mode: bicubic
    apply_freq_mask: true
    freq_mask_width_range:
    - 0
    - 30
    num_freq_mask: 2
    apply_time_mask: true
    time_mask_width_range:
    - 0
    - 40
    num_time_mask: 2

predictor: cif_predictor
predictor_conf:
    idim: 768
    threshold: 1.0
    l_order: 1
    r_order: 1


log_interval: 50
unused_parameters: true
normalize: None
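The ema_* options above drive the data2vec teacher, an exponential moving average of the student encoder whose decay is annealed from ema_decay to ema_end_decay over the first ema_anneal_end_step updates. A minimal sketch of that schedule, assuming the usual linear annealing (the exact FunASR implementation is not part of this diff):

def ema_decay_at(step: int,
                 ema_decay: float = 0.999,
                 ema_end_decay: float = 0.9999,
                 ema_anneal_end_step: int = 30000) -> float:
    """Linearly anneal the teacher EMA decay, then hold it constant."""
    if step >= ema_anneal_end_step:
        return ema_end_decay
    frac = step / ema_anneal_end_step
    return ema_decay + (ema_end_decay - ema_decay) * frac


# e.g. halfway through annealing the decay is about 0.99945; from step
# 30000 onward it stays at 0.9999.
print(ema_decay_at(15000))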
egs/aishell/data2vec_paraformer_finetune/run.sh (new executable file, 252 lines)
@@ -0,0 +1,252 @@
#!/usr/bin/env bash

. ./path.sh || exit 1;

# machines configuration
CUDA_VISIBLE_DEVICES="0,1"
gpu_num=2
count=1
gpu_inference=true  # Whether to perform gpu decoding, set false for cpu decoding
# for gpu decoding, inference_nj=ngpu*njob; for cpu decoding, inference_nj=njob
njob=5
train_cmd=utils/run.pl
infer_cmd=utils/run.pl

# general configuration
feats_dir="../DATA"  # feature output directory, for large data
exp_dir="."
lang=zh
dumpdir=dump/fbank
feats_type=fbank
token_type=char
scp=feats.scp
type=kaldi_ark
stage=0
stop_stage=4

# feature configuration
feats_dim=80
sample_frequency=16000
nj=32
speed_perturb="0.9,1.0,1.1"

# data
data_aishell=

# exp tag
tag=""

model_name=damo/speech_data2vec_pretrain-zh-cn-aishell2-16k-pytorch
init_param="$HOME/.cache/modelscope/hub/$model_name/basemodel.pb"

. utils/parse_options.sh || exit 1;

# Set bash to 'debug' mode, it will exit on:
# -e 'error', -u 'undefined variable', -o ... 'error in pipeline', -x 'print commands',
set -e
set -u
set -o pipefail

train_set=train
valid_set=dev
test_sets="dev test"

asr_config=conf/train_asr_paraformer_transformer_12e_6d_3072_768.yaml
model_dir="baseline_$(basename "${asr_config}" .yaml)_${feats_type}_${lang}_${token_type}_${tag}"

inference_config=conf/decode_asr_transformer_noctc_1best.yaml
inference_asr_model=valid.acc.ave_10best.pth

# you can set gpu num for decoding here
gpuid_list=$CUDA_VISIBLE_DEVICES  # set gpus for decoding, the same as training stage by default
ngpu=$(echo $gpuid_list | awk -F "," '{print NF}')

if ${gpu_inference}; then
    inference_nj=$[${ngpu}*${njob}]
    _ngpu=1
else
    inference_nj=$njob
    _ngpu=0
fi

if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
    echo "stage 0: Data preparation"
    # Data preparation
    local/aishell_data_prep.sh ${data_aishell}/data_aishell/wav ${data_aishell}/data_aishell/transcript ${feats_dir}
    for x in train dev test; do
        cp ${feats_dir}/data/${x}/text ${feats_dir}/data/${x}/text.org
        paste -d " " <(cut -f 1 -d" " ${feats_dir}/data/${x}/text.org) <(cut -f 2- -d" " ${feats_dir}/data/${x}/text.org | tr -d " ") \
            > ${feats_dir}/data/${x}/text
        utils/text2token.py -n 1 -s 1 ${feats_dir}/data/${x}/text > ${feats_dir}/data/${x}/text.org
        mv ${feats_dir}/data/${x}/text.org ${feats_dir}/data/${x}/text
    done
fi

feat_train_dir=${feats_dir}/${dumpdir}/train; mkdir -p ${feat_train_dir}
feat_dev_dir=${feats_dir}/${dumpdir}/dev; mkdir -p ${feat_dev_dir}
feat_test_dir=${feats_dir}/${dumpdir}/test; mkdir -p ${feat_test_dir}
if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
    echo "stage 1: Feature Generation"
    # compute fbank features
    fbankdir=${feats_dir}/fbank
    utils/compute_fbank.sh --cmd "$train_cmd" --nj $nj --feats_dim ${feats_dim} --sample_frequency ${sample_frequency} --speed_perturb ${speed_perturb} \
        ${feats_dir}/data/train ${exp_dir}/exp/make_fbank/train ${fbankdir}/train
    utils/fix_data_feat.sh ${fbankdir}/train
    utils/compute_fbank.sh --cmd "$train_cmd" --nj $nj --feats_dim ${feats_dim} --sample_frequency ${sample_frequency} \
        ${feats_dir}/data/dev ${exp_dir}/exp/make_fbank/dev ${fbankdir}/dev
    utils/fix_data_feat.sh ${fbankdir}/dev
    utils/compute_fbank.sh --cmd "$train_cmd" --nj $nj --feats_dim ${feats_dim} --sample_frequency ${sample_frequency} \
        ${feats_dir}/data/test ${exp_dir}/exp/make_fbank/test ${fbankdir}/test
    utils/fix_data_feat.sh ${fbankdir}/test

    # compute global cmvn
    utils/compute_cmvn.sh --cmd "$train_cmd" --nj $nj --feats_dim ${feats_dim} \
        ${fbankdir}/train ${exp_dir}/exp/make_fbank/train

    # apply cmvn
    utils/apply_cmvn.sh --cmd "$train_cmd" --nj $nj \
        ${fbankdir}/train ${fbankdir}/train/cmvn.json ${exp_dir}/exp/make_fbank/train ${feat_train_dir}
    utils/apply_cmvn.sh --cmd "$train_cmd" --nj $nj \
        ${fbankdir}/dev ${fbankdir}/train/cmvn.json ${exp_dir}/exp/make_fbank/dev ${feat_dev_dir}
    utils/apply_cmvn.sh --cmd "$train_cmd" --nj $nj \
        ${fbankdir}/test ${fbankdir}/train/cmvn.json ${exp_dir}/exp/make_fbank/test ${feat_test_dir}

    cp ${fbankdir}/train/text ${fbankdir}/train/speech_shape ${fbankdir}/train/text_shape ${feat_train_dir}
    cp ${fbankdir}/dev/text ${fbankdir}/dev/speech_shape ${fbankdir}/dev/text_shape ${feat_dev_dir}
    cp ${fbankdir}/test/text ${fbankdir}/test/speech_shape ${fbankdir}/test/text_shape ${feat_test_dir}

    utils/fix_data_feat.sh ${feat_train_dir}
    utils/fix_data_feat.sh ${feat_dev_dir}
    utils/fix_data_feat.sh ${feat_test_dir}

    # generate ark list
    utils/gen_ark_list.sh --cmd "$train_cmd" --nj $nj ${feat_train_dir} ${fbankdir}/train ${feat_train_dir}
    utils/gen_ark_list.sh --cmd "$train_cmd" --nj $nj ${feat_dev_dir} ${fbankdir}/dev ${feat_dev_dir}
fi

token_list=${feats_dir}/data/${lang}_token_list/char/tokens.txt
echo "dictionary: ${token_list}"
if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then
    echo "stage 2: Dictionary Preparation"
    mkdir -p ${feats_dir}/data/${lang}_token_list/char/

    echo "make a dictionary"
    echo "<blank>" > ${token_list}
    echo "<s>" >> ${token_list}
    echo "</s>" >> ${token_list}
    utils/text2token.py -s 1 -n 1 --space "" ${feats_dir}/data/train/text | cut -f 2- -d" " | tr " " "\n" \
        | sort | uniq | grep -a -v -e '^\s*$' | awk '{print $0}' >> ${token_list}
    num_token=$(cat ${token_list} | wc -l)
    echo "<unk>" >> ${token_list}
    vocab_size=$(cat ${token_list} | wc -l)
    awk -v v=,${vocab_size} '{print $0v}' ${feat_train_dir}/text_shape > ${feat_train_dir}/text_shape.char
    awk -v v=,${vocab_size} '{print $0v}' ${feat_dev_dir}/text_shape > ${feat_dev_dir}/text_shape.char
    mkdir -p ${feats_dir}/asr_stats_fbank_zh_char/train
    mkdir -p ${feats_dir}/asr_stats_fbank_zh_char/dev
    cp ${feat_train_dir}/speech_shape ${feat_train_dir}/text_shape ${feat_train_dir}/text_shape.char ${feats_dir}/asr_stats_fbank_zh_char/train
    cp ${feat_dev_dir}/speech_shape ${feat_dev_dir}/text_shape ${feat_dev_dir}/text_shape.char ${feats_dir}/asr_stats_fbank_zh_char/dev
fi

# Training Stage
world_size=$gpu_num  # run on one machine
if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then
    echo "stage 3: Training"
    python utils/download_model.py --model_name ${model_name}  # download pretrained model from ModelScope
    mkdir -p ${exp_dir}/exp/${model_dir}
    mkdir -p ${exp_dir}/exp/${model_dir}/log
    INIT_FILE=${exp_dir}/exp/${model_dir}/ddp_init
    if [ -f $INIT_FILE ]; then
        rm -f $INIT_FILE
    fi
    init_method=file://$(readlink -f $INIT_FILE)
    echo "$0: init method is $init_method"
    for ((i = 0; i < $gpu_num; ++i)); do
        {
            rank=$i
            local_rank=$i
            gpu_id=$(echo $CUDA_VISIBLE_DEVICES | cut -d',' -f$[$i+1])
            asr_train_paraformer.py \
                --gpu_id $gpu_id \
                --use_preprocessor true \
                --token_type char \
                --token_list $token_list \
                --train_data_path_and_name_and_type ${feats_dir}/${dumpdir}/${train_set}/${scp},speech,${type} \
                --train_data_path_and_name_and_type ${feats_dir}/${dumpdir}/${train_set}/text,text,text \
                --train_shape_file ${feats_dir}/asr_stats_fbank_zh_char/${train_set}/speech_shape \
                --train_shape_file ${feats_dir}/asr_stats_fbank_zh_char/${train_set}/text_shape.char \
                --valid_data_path_and_name_and_type ${feats_dir}/${dumpdir}/${valid_set}/${scp},speech,${type} \
                --valid_data_path_and_name_and_type ${feats_dir}/${dumpdir}/${valid_set}/text,text,text \
                --valid_shape_file ${feats_dir}/asr_stats_fbank_zh_char/${valid_set}/speech_shape \
                --valid_shape_file ${feats_dir}/asr_stats_fbank_zh_char/${valid_set}/text_shape.char \
                --init_param ${init_param} \
                --resume true \
                --output_dir ${exp_dir}/exp/${model_dir} \
                --config $asr_config \
                --input_size $feats_dim \
                --ngpu $gpu_num \
                --num_worker_count $count \
                --multiprocessing_distributed true \
                --dist_init_method $init_method \
                --dist_world_size $world_size \
                --dist_rank $rank \
                --local_rank $local_rank 1> ${exp_dir}/exp/${model_dir}/log/train.log.$i 2>&1
        } &
    done
    wait
fi

# Testing Stage
if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then
    echo "stage 4: Inference"
    for dset in ${test_sets}; do
        asr_exp=${exp_dir}/exp/${model_dir}
        inference_tag="$(basename "${inference_config}" .yaml)"
        _dir="${asr_exp}/${inference_tag}/${inference_asr_model}/${dset}"
        _logdir="${_dir}/logdir"
        if [ -d ${_dir} ]; then
            echo "${_dir} already exists. If you want to decode again, please delete this dir first."
            exit 0
        fi
        mkdir -p "${_logdir}"
        _data="${feats_dir}/${dumpdir}/${dset}"
        key_file=${_data}/${scp}
        num_scp_file="$(<${key_file} wc -l)"
        _nj=$([ $inference_nj -le $num_scp_file ] && echo "$inference_nj" || echo "$num_scp_file")
        split_scps=
        for n in $(seq "${_nj}"); do
            split_scps+=" ${_logdir}/keys.${n}.scp"
        done
        # shellcheck disable=SC2086
        utils/split_scp.pl "${key_file}" ${split_scps}
        _opts=
        if [ -n "${inference_config}" ]; then
            _opts+="--config ${inference_config} "
        fi
        ${infer_cmd} --gpu "${_ngpu}" --max-jobs-run "${_nj}" JOB=1:"${_nj}" "${_logdir}"/asr_inference.JOB.log \
            python -m funasr.bin.asr_inference_launch \
                --batch_size 1 \
                --ngpu "${_ngpu}" \
                --njob ${njob} \
                --gpuid_list ${gpuid_list} \
                --data_path_and_name_and_type "${_data}/${scp},speech,${type}" \
                --key_file "${_logdir}"/keys.JOB.scp \
                --asr_train_config "${asr_exp}"/config.yaml \
                --asr_model_file "${asr_exp}"/"${inference_asr_model}" \
                --output_dir "${_logdir}"/output.JOB \
                --mode paraformer \
                ${_opts}

        for f in token token_int score text; do
            if [ -f "${_logdir}/output.1/1best_recog/${f}" ]; then
                for i in $(seq "${_nj}"); do
                    cat "${_logdir}/output.${i}/1best_recog/${f}"
                done | sort -k1 > "${_dir}/${f}"
            fi
        done
        python utils/proce_text.py ${_dir}/text ${_dir}/text.proc
        python utils/proce_text.py ${_data}/text ${_data}/text.proc
        python utils/compute_wer.py ${_data}/text.proc ${_dir}/text.proc ${_dir}/text.cer
        tail -n 3 ${_dir}/text.cer > ${_dir}/text.cer.txt
        cat ${_dir}/text.cer.txt
    done
fi
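Stage 0 of the recipe above removes the spaces inside each AISHELL transcript while keeping the utterance id (the paste/cut/tr pipeline). A minimal Python sketch of the same normalization, offered only as a readable equivalent of that shell pipeline (the helper names and the sample line are illustrative, not part of the recipe):

def normalize_line(line: str) -> str:
    """Keep the utterance id, remove all spaces from the transcript.

    Mirrors: paste -d " " <(cut -f 1 -d" " ...) <(cut -f 2- -d" " ... | tr -d " ")
    """
    utt_id, _, transcript = line.rstrip("\n").partition(" ")
    return f"{utt_id} {transcript.replace(' ', '')}"


def normalize_text_file(src: str, dst: str) -> None:
    # Illustrative helper; the recipe does this in place via text.org.
    with open(src, encoding="utf-8") as fin, open(dst, "w", encoding="utf-8") as fout:
        for line in fin:
            fout.write(normalize_line(line) + "\n")


# Example (placeholder utterance): "UTT001 你 好 世 界" -> "UTT001 你好世界"
print(normalize_line("UTT001 你 好 世 界"))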
egs/aishell/data2vec_paraformer_finetune/utils (new symbolic link, 1 line)
@@ -0,0 +1 @@
../../aishell/transformer/utils
egs/aishell/data2vec_transformer_finetune/local/aishell_data_prep.sh (new executable file, 66 lines)
@@ -0,0 +1,66 @@
#!/bin/bash

# Copyright 2017 Xingyu Na
# Apache 2.0

#. ./path.sh || exit 1;

if [ $# != 3 ]; then
    echo "Usage: $0 <audio-path> <text-path> <output-path>"
    echo " $0 /export/a05/xna/data/data_aishell/wav /export/a05/xna/data/data_aishell/transcript data"
    exit 1;
fi

aishell_audio_dir=$1
aishell_text=$2/aishell_transcript_v0.8.txt
output_dir=$3

train_dir=$output_dir/data/local/train
dev_dir=$output_dir/data/local/dev
test_dir=$output_dir/data/local/test
tmp_dir=$output_dir/data/local/tmp

mkdir -p $train_dir
mkdir -p $dev_dir
mkdir -p $test_dir
mkdir -p $tmp_dir

# data directory check
if [ ! -d $aishell_audio_dir ] || [ ! -f $aishell_text ]; then
    echo "Error: $0 requires two directory arguments"
    exit 1;
fi

# find wav audio files for train, dev and test resp.
find $aishell_audio_dir -iname "*.wav" > $tmp_dir/wav.flist
n=`cat $tmp_dir/wav.flist | wc -l`
[ $n -ne 141925 ] && \
    echo "Warning: expected 141925 data files, found $n"

grep -i "wav/train" $tmp_dir/wav.flist > $train_dir/wav.flist || exit 1;
grep -i "wav/dev" $tmp_dir/wav.flist > $dev_dir/wav.flist || exit 1;
grep -i "wav/test" $tmp_dir/wav.flist > $test_dir/wav.flist || exit 1;

rm -r $tmp_dir

# Transcriptions preparation
for dir in $train_dir $dev_dir $test_dir; do
    echo "Preparing $dir transcriptions"
    sed -e 's/\.wav//' $dir/wav.flist | awk -F '/' '{print $NF}' > $dir/utt.list
    paste -d' ' $dir/utt.list $dir/wav.flist > $dir/wav.scp_all
    utils/filter_scp.pl -f 1 $dir/utt.list $aishell_text > $dir/transcripts.txt
    awk '{print $1}' $dir/transcripts.txt > $dir/utt.list
    utils/filter_scp.pl -f 1 $dir/utt.list $dir/wav.scp_all | sort -u > $dir/wav.scp
    sort -u $dir/transcripts.txt > $dir/text
done

mkdir -p $output_dir/data/train $output_dir/data/dev $output_dir/data/test

for f in wav.scp text; do
    cp $train_dir/$f $output_dir/data/train/$f || exit 1;
    cp $dev_dir/$f $output_dir/data/dev/$f || exit 1;
    cp $test_dir/$f $output_dir/data/test/$f || exit 1;
done

echo "$0: AISHELL data preparation succeeded"
exit 0;
egs/aishell/data2vec_transformer_finetune/local/prepare_data.sh (new executable file, 53 lines)
@@ -0,0 +1,53 @@
#!/usr/bin/env bash
# Copyright 2018 AIShell-Foundation (Authors: Jiayu DU, Xingyu NA, Bengu WU, Hao ZHENG)
#           2018 Beijing Shell Shell Tech. Co. Ltd. (Author: Hui BU)
# Apache 2.0

# transform raw AISHELL-2 data to kaldi format

. ./path.sh || exit 1;

tmp=
dir=

if [ $# != 3 ]; then
    echo "Usage: $0 <corpus-data-dir> <tmp-dir> <output-dir>"
    echo " $0 /export/AISHELL-2/iOS/train data/local/train data/train"
    exit 1;
fi

corpus=$1
tmp=$2
dir=$3

echo "prepare_data.sh: Preparing data in $corpus"

mkdir -p $tmp
mkdir -p $dir

# corpus check
if [ ! -d $corpus ] || [ ! -f $corpus/wav.scp ] || [ ! -f $corpus/trans.txt ]; then
    echo "Error: $0 requires wav.scp and trans.txt under $corpus directory."
    exit 1;
fi

# validate utt-key list; IC0803W0380 is a bad utterance
awk '{print $1}' $corpus/wav.scp | grep -v 'IC0803W0380' > $tmp/wav_utt.list
awk '{print $1}' $corpus/trans.txt > $tmp/trans_utt.list
utils/filter_scp.pl -f 1 $tmp/wav_utt.list $tmp/trans_utt.list > $tmp/utt.list

# wav.scp
awk -F'\t' -v path_prefix=$corpus '{printf("%s\t%s/%s\n",$1,path_prefix,$2)}' $corpus/wav.scp > $tmp/tmp_wav.scp
utils/filter_scp.pl -f 1 $tmp/utt.list $tmp/tmp_wav.scp | sort -k 1 | uniq > $tmp/wav.scp

# text
utils/filter_scp.pl -f 1 $tmp/utt.list $corpus/trans.txt | sort -k 1 | uniq > $tmp/text

# copy prepared resources from tmp dir to target dir
mkdir -p $dir
for f in wav.scp text; do
    cp $tmp/$f $dir/$f || exit 1;
done

echo "local/prepare_data.sh succeeded"
exit 0;
egs/aishell/data2vec_transformer_finetune/path.sh (new executable file, 5 lines)
@@ -0,0 +1,5 @@
export FUNASR_DIR=$PWD/../../..

# NOTE(kan-bayashi): Use UTF-8 in Python to avoid UnicodeDecodeError when LC_ALL=C
export PYTHONIOENCODING=UTF-8
export PATH=$FUNASR_DIR/funasr/bin:$PATH
@@ -223,6 +223,31 @@ def inference_launch(**kwargs):
        logging.info("Unknown decoding mode: {}".format(mode))
        return None


def inference_launch_funasr(**kwargs):
    if 'mode' in kwargs:
        mode = kwargs['mode']
    else:
        logging.info("Unknown decoding mode.")
        return None
    if mode == "asr":
        from funasr.bin.asr_inference import inference
        return inference(**kwargs)
    elif mode == "uniasr":
        from funasr.bin.asr_inference_uniasr import inference
        return inference(**kwargs)
    elif mode == "paraformer":
        from funasr.bin.asr_inference_paraformer import inference
        return inference(**kwargs)
    elif mode == "paraformer_vad_punc":
        from funasr.bin.asr_inference_paraformer_vad_punc import inference
        return inference(**kwargs)
    elif mode == "vad":
        from funasr.bin.vad_inference import inference
        return inference(**kwargs)
    else:
        logging.info("Unknown decoding mode: {}".format(mode))
        return None


def main(cmd=None):
    print(get_commandline_args(), file=sys.stderr)

@@ -251,7 +276,7 @@ def main(cmd=None):
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = gpuid

-   inference_launch(**kwargs)
+   inference_launch_funasr(**kwargs)


if __name__ == "__main__":
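The added inference_launch_funasr mirrors inference_launch: it routes on mode and forwards all keyword arguments to the selected inference entry point. A minimal sketch of calling it the way stage 4 of run.sh does via the CLI; the keyword names follow the CLI flags shown above, while the paths and the exact value formats expected by the underlying paraformer inference function are assumptions, not part of this diff:

from funasr.bin.asr_inference_launch import inference_launch_funasr

# Decode options from stage 4 of run.sh, collected as keyword arguments.
# Values are placeholders; the formats the underlying
# asr_inference_paraformer.inference expects are defined in FunASR itself.
decode_kwargs = dict(
    mode="paraformer",
    batch_size=1,
    ngpu=1,
    njob=5,
    gpuid_list="0,1",
    data_path_and_name_and_type=["dump/fbank/test/feats.scp,speech,kaldi_ark"],
    key_file="exp/baseline/logdir/keys.1.scp",
    asr_train_config="exp/baseline/config.yaml",
    asr_model_file="exp/baseline/valid.acc.ave_10best.pth",
    output_dir="exp/baseline/logdir/output.1",
)
inference_launch_funasr(**decode_kwargs)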