diff --git a/egs_modelscope/asr/TEMPLATE/infer.py b/egs_modelscope/asr/TEMPLATE/infer.py
index 629f93a0e..5bc205cda 100644
--- a/egs_modelscope/asr/TEMPLATE/infer.py
+++ b/egs_modelscope/asr/TEMPLATE/infer.py
@@ -11,7 +11,7 @@ def modelscope_infer(args):
         model=args.model,
         output_dir=args.output_dir,
         batch_size=args.batch_size,
-        param_dict={"decoding_model": args.decoding_mode}
+        param_dict={"decoding_model": args.decoding_mode, "hotword": args.hotword_txt}
     )
     inference_pipeline(audio_in=args.audio_in)

@@ -21,6 +21,7 @@ if __name__ == "__main__":
     parser.add_argument('--audio_in', type=str, default="./data/test/wav.scp")
     parser.add_argument('--output_dir', type=str, default="./results/")
     parser.add_argument('--decoding_mode', type=str, default="normal")
+    parser.add_argument('--hotword_txt', type=str, default=None)
     parser.add_argument('--batch_size', type=int, default=64)
     parser.add_argument('--gpuid', type=str, default="0")
     args = parser.parse_args()
diff --git a/egs_modelscope/asr/paraformer/speech_paraformer-large-contextual_asr_nat-zh-cn-16k-common-vocab8404/demo.py b/egs_modelscope/asr/paraformer/speech_paraformer-large-contextual_asr_nat-zh-cn-16k-common-vocab8404/demo.py
new file mode 100644
index 000000000..bec6f052e
--- /dev/null
+++ b/egs_modelscope/asr/paraformer/speech_paraformer-large-contextual_asr_nat-zh-cn-16k-common-vocab8404/demo.py
@@ -0,0 +1,12 @@
+from modelscope.pipelines import pipeline
+from modelscope.utils.constant import Tasks
+
+param_dict = dict()
+param_dict['hotword'] = "https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/hotword.txt"
+inference_pipeline = pipeline(
+    task=Tasks.auto_speech_recognition,
+    model="damo/speech_paraformer-large-contextual_asr_nat-zh-cn-16k-common-vocab8404",
+    param_dict=param_dict)
+
+rec_result = inference_pipeline(audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_hotword.wav')
+print(rec_result)
diff --git a/egs_modelscope/asr/paraformer/speech_paraformer-large-contextual_asr_nat-zh-cn-16k-common-vocab8404/infer.py b/egs_modelscope/asr/paraformer/speech_paraformer-large-contextual_asr_nat-zh-cn-16k-common-vocab8404/infer.py
deleted file mode 100644
index 16a57e9af..000000000
--- a/egs_modelscope/asr/paraformer/speech_paraformer-large-contextual_asr_nat-zh-cn-16k-common-vocab8404/infer.py
+++ /dev/null
@@ -1,21 +0,0 @@
-from modelscope.pipelines import pipeline
-from modelscope.utils.constant import Tasks
-
-
-if __name__ == '__main__':
-    param_dict = dict()
-    param_dict['hotword'] = "https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/hotword.txt"
-
-    audio_in = "https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_hotword.wav"
-    output_dir = None
-    batch_size = 1
-
-    inference_pipeline = pipeline(
-        task=Tasks.auto_speech_recognition,
-        model="damo/speech_paraformer-large-contextual_asr_nat-zh-cn-16k-common-vocab8404",
-        output_dir=output_dir,
-        batch_size=batch_size,
-        param_dict=param_dict)
-
-    rec_result = inference_pipeline(audio_in=audio_in)
-    print(rec_result)
diff --git a/egs_modelscope/asr/paraformer/speech_paraformer-large-contextual_asr_nat-zh-cn-16k-common-vocab8404/infer.py b/egs_modelscope/asr/paraformer/speech_paraformer-large-contextual_asr_nat-zh-cn-16k-common-vocab8404/infer.py
new file mode 120000
index 000000000..128fc31c2
--- /dev/null
+++ b/egs_modelscope/asr/paraformer/speech_paraformer-large-contextual_asr_nat-zh-cn-16k-common-vocab8404/infer.py
@@ -0,0 +1 @@
+../../TEMPLATE/infer.py
\ No newline at end of file
diff --git a/egs_modelscope/asr/paraformer/speech_paraformer-large-contextual_asr_nat-zh-cn-16k-common-vocab8404/infer.sh b/egs_modelscope/asr/paraformer/speech_paraformer-large-contextual_asr_nat-zh-cn-16k-common-vocab8404/infer.sh
new file mode 100644
index 000000000..e60f6d973
--- /dev/null
+++ b/egs_modelscope/asr/paraformer/speech_paraformer-large-contextual_asr_nat-zh-cn-16k-common-vocab8404/infer.sh
@@ -0,0 +1,105 @@
+#!/usr/bin/env bash
+
+set -e
+set -u
+set -o pipefail
+
+stage=1
+stop_stage=2
+model="damo/speech_paraformer-large-contextual_asr_nat-zh-cn-16k-common-vocab8404"
+data_dir="./data/test"
+output_dir="./results"
+batch_size=64
+gpu_inference=true # whether to perform gpu decoding
+gpuid_list="0,1" # set gpus, e.g., gpuid_list="0,1"
+njob=64 # the number of jobs for CPU decoding, if gpu_inference=false, use CPU decoding, please set njob
+checkpoint_dir=
+checkpoint_name="valid.cer_ctc.ave.pb"
+hotword_txt=None
+
+. utils/parse_options.sh || exit 1;
+
+if ${gpu_inference} == "true"; then
+    nj=$(echo $gpuid_list | awk -F "," '{print NF}')
+else
+    nj=$njob
+    batch_size=1
+    gpuid_list=""
+    for JOB in $(seq ${nj}); do
+        gpuid_list=$gpuid_list"-1,"
+    done
+fi
+
+mkdir -p $output_dir/split
+split_scps=""
+for JOB in $(seq ${nj}); do
+    split_scps="$split_scps $output_dir/split/wav.$JOB.scp"
+done
+perl utils/split_scp.pl ${data_dir}/wav.scp ${split_scps}
+
+if [ -n "${checkpoint_dir}" ]; then
+    python utils/prepare_checkpoint.py ${model} ${checkpoint_dir} ${checkpoint_name}
+    model=${checkpoint_dir}/${model}
+fi
+
+if [ $stage -le 1 ] && [ $stop_stage -ge 1 ];then
+    echo "Decoding ..."
+    gpuid_list_array=(${gpuid_list//,/ })
+    for JOB in $(seq ${nj}); do
+        {
+        id=$((JOB-1))
+        gpuid=${gpuid_list_array[$id]}
+        mkdir -p ${output_dir}/output.$JOB
+        python infer.py \
+            --model ${model} \
+            --audio_in ${output_dir}/split/wav.$JOB.scp \
+            --output_dir ${output_dir}/output.$JOB \
+            --batch_size ${batch_size} \
+            --gpuid ${gpuid} \
+            --hotword_txt ${hotword_txt}
+        }&
+    done
+    wait
+
+    mkdir -p ${output_dir}/1best_recog
+    for f in token score text; do
+        if [ -f "${output_dir}/output.1/1best_recog/${f}" ]; then
+            for i in $(seq "${nj}"); do
+                cat "${output_dir}/output.${i}/1best_recog/${f}"
+            done | sort -k1 >"${output_dir}/1best_recog/${f}"
+        fi
+    done
+fi
+
+if [ $stage -le 2 ] && [ $stop_stage -ge 2 ];then
+    echo "Computing WER ..."
+    cp ${output_dir}/1best_recog/text ${output_dir}/1best_recog/text.proc
+    cp ${data_dir}/text ${output_dir}/1best_recog/text.ref
+    python utils/compute_wer.py ${output_dir}/1best_recog/text.ref ${output_dir}/1best_recog/text.proc ${output_dir}/1best_recog/text.cer
+    tail -n 3 ${output_dir}/1best_recog/text.cer
+fi
+
+if [ $stage -le 3 ] && [ $stop_stage -ge 3 ];then
+    echo "SpeechIO TIOBE textnorm"
+    echo "$0 --> Normalizing REF text ..."
+    ./utils/textnorm_zh.py \
+        --has_key --to_upper \
+        ${data_dir}/text \
+        ${output_dir}/1best_recog/ref.txt
+
+    echo "$0 --> Normalizing HYP text ..."
+    ./utils/textnorm_zh.py \
+        --has_key --to_upper \
+        ${output_dir}/1best_recog/text.proc \
+        ${output_dir}/1best_recog/rec.txt
+    grep -v $'\t$' ${output_dir}/1best_recog/rec.txt > ${output_dir}/1best_recog/rec_non_empty.txt
+
+    echo "$0 --> computing WER/CER and alignment ..."
+    ./utils/error_rate_zh \
+        --tokenizer char \
+        --ref ${output_dir}/1best_recog/ref.txt \
+        --hyp ${output_dir}/1best_recog/rec_non_empty.txt \
+        ${output_dir}/1best_recog/DETAILS.txt | tee ${output_dir}/1best_recog/RESULTS.txt
+    rm -rf ${output_dir}/1best_recog/rec.txt ${output_dir}/1best_recog/rec_non_empty.txt
+fi
+
diff --git a/egs_modelscope/asr/paraformer/speech_paraformer-large-contextual_asr_nat-zh-cn-16k-common-vocab8404/utils b/egs_modelscope/asr/paraformer/speech_paraformer-large-contextual_asr_nat-zh-cn-16k-common-vocab8404/utils
new file mode 120000
index 000000000..2ac163ff4
--- /dev/null
+++ b/egs_modelscope/asr/paraformer/speech_paraformer-large-contextual_asr_nat-zh-cn-16k-common-vocab8404/utils
@@ -0,0 +1 @@
+../../../../egs/aishell/transformer/utils
\ No newline at end of file
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-minnan-16k-common-vocab3825/infer.sh b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-minnan-16k-common-vocab3825/infer.sh
index 448717db6..b44be9f3c 100644
--- a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-minnan-16k-common-vocab3825/infer.sh
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-minnan-16k-common-vocab3825/infer.sh
@@ -9,12 +9,13 @@ stop_stage=2
 model="damo/speech_UniASR_asr_2pass-minnan-16k-common-vocab3825"
 data_dir="./data/test"
 output_dir="./results"
-batch_size=64
+batch_size=1
 gpu_inference=true # whether to perform gpu decoding
 gpuid_list="0,1" # set gpus, e.g., gpuid_list="0,1"
 njob=64 # the number of jobs for CPU decoding, if gpu_inference=false, use CPU decoding, please set njob
 checkpoint_dir=
 checkpoint_name="valid.cer_ctc.ave.pb"
+decoding_mode="normal"

 . utils/parse_options.sh || exit 1;

@@ -54,7 +55,8 @@ if [ $stage -le 1 ] && [ $stop_stage -ge 1 ];then
             --audio_in ${output_dir}/split/wav.$JOB.scp \
             --output_dir ${output_dir}/output.$JOB \
             --batch_size ${batch_size} \
-            --gpuid ${gpuid}
+            --gpuid ${gpuid} \
+            --decoding_mode ${decoding_mode}
         }&
     done
     wait
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-minnan-16k-common-vocab3825/utils b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-minnan-16k-common-vocab3825/utils
new file mode 120000
index 000000000..2ac163ff4
--- /dev/null
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-minnan-16k-common-vocab3825/utils
@@ -0,0 +1 @@
+../../../../egs/aishell/transformer/utils
\ No newline at end of file
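For reference, a minimal usage sketch of the hotword option added above, via egs_modelscope/asr/TEMPLATE/infer.py and the new recipe script. The local paths ./data/test/wav.scp and ./data/test/hotword.txt are illustrative placeholders (matching the script defaults), not files introduced by this patch; the hotword file can also be a URL, as in demo.py.

    # direct call: decode a wav.scp list with a hotword list passed through param_dict["hotword"]
    python infer.py \
        --model "damo/speech_paraformer-large-contextual_asr_nat-zh-cn-16k-common-vocab8404" \
        --audio_in ./data/test/wav.scp \
        --output_dir ./results/ \
        --hotword_txt ./data/test/hotword.txt

    # via the recipe script, which splits wav.scp across GPU jobs (stage 1) and scores the hypotheses (stage 2)
    bash infer.sh --hotword_txt ./data/test/hotword.txt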