Mirror of https://github.com/modelscope/FunASR, synced 2025-09-15 14:48:36 +08:00
update paraformer recipe
This commit is contained in:
parent: d3e785f6ab
commit: a0879e4663
@@ -16,7 +16,7 @@ def modelscope_infer(args):
 
 if __name__ == "__main__":
     parser = argparse.ArgumentParser()
-    parser.add_argument('--model', type=str, default="speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch")
+    parser.add_argument('--model', type=str, default="damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch")
     parser.add_argument('--audio_in', type=str, default="./data/test")
     parser.add_argument('--output_dir', type=str, default="./results/")
     parser.add_argument('--batch_size', type=int, default=64)
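The only substantive change here is the default model id, which now carries the `damo/` ModelScope namespace so the hub can resolve it. For context, a minimal sketch of the pipeline call these arguments feed (assuming `modelscope` is installed; paths are illustrative):

```python
# Minimal sketch: resolving the namespaced model id via the ModelScope pipeline.
# Assumes `modelscope` is installed; the model is fetched from the hub on first use.
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks

inference_pipeline = pipeline(
    task=Tasks.auto_speech_recognition,
    model="damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch",
    output_dir="./results/",
    batch_size=64,
)
inference_pipeline(audio_in="./data/test")  # audio_in path is illustrative
```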
@@ -64,8 +64,8 @@ fi
 if [ $stage -le 2 ] && [ $stop_stage -ge 2 ];then
     echo "Computing WER ..."
     python utils/proce_text.py ${output_dir}/1best_recog/text ${output_dir}/1best_recog/text.proc
-    python utils/proce_text.py ${data_dir}/text ${data_dir}/text.proc
-    python utils/compute_wer.py ${data_dir}/text.proc ${output_dir}/1best_recog/text.proc ${output_dir}/1best_recog/text.cer
+    python utils/proce_text.py ${data_dir}/text ${output_dir}/1best_recog/text.ref
+    python utils/compute_wer.py ${output_dir}/1best_recog/text.ref ${output_dir}/1best_recog/text.proc ${output_dir}/1best_recog/text.cer
     tail -n 3 ${output_dir}/1best_recog/text.cer
 fi
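The fix keeps the processed reference transcript inside the output directory (`text.ref`) rather than writing `text.proc` back into the data directory. For intuition about what `compute_wer.py` reports, here is a minimal character-level CER sketch; it is an illustration via plain edit distance, not the repo's actual `utils/compute_wer.py`, whose normalization may differ:

```python
# Illustrative character-level CER via Levenshtein edit distance.
def cer(ref: str, hyp: str) -> float:
    r, h = list(ref), list(hyp)
    d = [[0] * (len(h) + 1) for _ in range(len(r) + 1)]
    for i in range(len(r) + 1):
        d[i][0] = i
    for j in range(len(h) + 1):
        d[0][j] = j
    for i in range(1, len(r) + 1):
        for j in range(1, len(h) + 1):
            cost = 0 if r[i - 1] == h[j - 1] else 1
            d[i][j] = min(d[i - 1][j] + 1,         # deletion
                          d[i][j - 1] + 1,         # insertion
                          d[i - 1][j - 1] + cost)  # substitution
    return d[len(r)][len(h)] / max(len(r), 1)

print(cer("今天天气很好", "今天天汽很好"))  # 1 substitution over 6 chars ≈ 0.167
```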
@@ -75,7 +75,7 @@ if [ $stage -le 3 ] && [ $stop_stage -ge 3 ];then
     ./utils/textnorm_zh.py \
         --has_key --to_upper \
         ${data_dir}/text \
-        ${data_dir}/ref.txt
+        ${output_dir}/1best_recog/ref.txt
 
     echo "$0 --> Normalizing HYP text ..."
     ./utils/textnorm_zh.py \
@@ -87,7 +87,7 @@ if [ $stage -le 3 ] && [ $stop_stage -ge 3 ];then
     echo "$0 --> computing WER/CER and alignment ..."
     ./utils/error_rate_zh \
         --tokenizer char \
-        --ref ${data_dir}/ref.txt \
+        --ref ${output_dir}/1best_recog/ref.txt \
         --hyp ${output_dir}/1best_recog/rec_non_empty.txt \
         ${output_dir}/1best_recog/DETAILS.txt | tee ${output_dir}/1best_recog/RESULTS.txt
     rm -rf ${output_dir}/1best_recog/rec.txt ${output_dir}/1best_recog/rec_non_empty.txt
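Both stage-3 tools operate on key-prefixed text files (one `utt_id transcript` pair per line, in the Kaldi style suggested by `--has_key`), and the fix points `--ref` at the normalized reference now written under `${output_dir}/1best_recog`. A small sketch of how such ref/hyp files pair up by key (illustrative; not the repo's `error_rate_zh`):

```python
# Illustrative pairing of key-prefixed ref/hyp files (format: "utt_id transcript").
def read_keyed(path: str) -> dict:
    out = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            parts = line.strip().split(maxsplit=1)
            if parts:
                out[parts[0]] = parts[1] if len(parts) > 1 else ""
    return out

refs = read_keyed("ref.txt")            # file names are illustrative
hyps = read_keyed("rec_non_empty.txt")
pairs = [(k, refs[k], hyps[k]) for k in refs if k in hyps]
```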
@@ -6,8 +6,9 @@
 
 - Modify finetune training related parameters in `finetune.py`
     - <strong>output_dir:</strong> # result dir
-    - <strong>data_dir:</strong> # the dataset dir needs to include files: train/wav.scp, train/text; validation/wav.scp, validation/text.
-    - <strong>batch_bins:</strong> # batch size
+    - <strong>data_dir:</strong> # the dataset dir; it must include the files `train/wav.scp`, `train/text`, `validation/wav.scp`, and `validation/text`
+    - <strong>dataset_type:</strong> # for datasets larger than 1000 hours, set to `large`; otherwise set to `small`
+    - <strong>batch_bins:</strong> # batch size; when `dataset_type` is `small`, `batch_bins` counts feature frames, and when it is `large`, it counts duration in ms
     - <strong>max_epoch:</strong> # number of training epochs
     - <strong>lr:</strong> # learning rate
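Since `batch_bins` changes meaning with `dataset_type`, a hedged sketch of bin-based batch packing may help; this is my own illustration of the idea (frames vs. milliseconds as the cost), not FunASR's actual dataloader logic:

```python
# Illustrative bin-based batching: pack utterances until batch_bins is exhausted.
# "cost" is feature frames when dataset_type == "small", duration in ms when "large".
def make_batches(utts, batch_bins, dataset_type="small"):
    batches, cur, cur_bins = [], [], 0
    for utt_id, num_frames, duration_ms in utts:
        cost = num_frames if dataset_type == "small" else duration_ms
        if cur and cur_bins + cost > batch_bins:
            batches.append(cur)
            cur, cur_bins = [], 0
        cur.append(utt_id)
        cur_bins += cost
    if cur:
        batches.append(cur)
    return batches

utts = [("u1", 500, 5000), ("u2", 800, 8000), ("u3", 300, 3000)]
print(make_batches(utts, batch_bins=1500, dataset_type="small"))
# -> [['u1', 'u2'], ['u3']]: u1+u2 fill 1300 of 1500 frames, u3 overflows into a new batch
```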
@@ -20,11 +21,38 @@
 
 Or you can use the finetuned model for inference directly.
 
-- Setting parameters in `infer.py`
-    - <strong>data_dir:</strong> # the dataset dir
+- Set parameters in `infer.sh`
+    - <strong>model:</strong> # model name on ModelScope
+    - <strong>data_dir:</strong> # the dataset dir; it must include `test/wav.scp`, and if `test/text` also exists, the CER will be computed
     - <strong>output_dir:</strong> # result dir
     - <strong>batch_size:</strong> # batch size for inference
+    - <strong>gpu_inference:</strong> # whether to perform GPU decoding; set to false for CPU decoding
+    - <strong>gpuid_list:</strong> # the GPUs to use, e.g., gpuid_list="0,1"
+    - <strong>njob:</strong> # the number of jobs for CPU decoding; takes effect when gpu_inference=false
 
 - Then you can run the pipeline to infer with:
     ```shell
-    python infer.py
+    sh infer.sh
     ```
 
 - Results
 
 The decoding results can be found in `$output_dir/1best_recog/text.cer`, which includes the recognition result of each sample and the CER metric of the whole test set.
 
+### Inference using a local finetuned model
+
+- Modify inference related parameters in `infer_after_finetune.py`
+    - <strong>modelscope_model_name:</strong> # model name on ModelScope
+    - <strong>output_dir:</strong> # result dir
+    - <strong>data_dir:</strong> # the dataset dir; it must include `test/wav.scp`, and if `test/text` also exists, the CER will be computed
+    - <strong>decoding_model_name:</strong> # the checkpoint name for decoding, e.g., `valid.cer_ctc.ave.pb`
+    - <strong>batch_size:</strong> # batch size for inference
+
+- Then you can run the pipeline to infer with:
+    ```shell
+    python infer_after_finetune.py
+    ```
+
+- Results
+
+The decoding results can be found in `$output_dir/decoding_results/text.cer`, which includes the recognition result of each sample and the CER metric of the whole test set.
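For reference, the `test/wav.scp` these recipes consume appears to be the usual Kaldi-style two-column list; a hypothetical example, written from Python (utterance ids and paths are made up):

```python
import os

# Hypothetical contents of test/wav.scp: Kaldi-style "utt_id /path/to/audio" lines.
entries = {
    "BAC009S0764W0121": "/data/test/wav/BAC009S0764W0121.wav",  # ids are illustrative
    "BAC009S0764W0122": "/data/test/wav/BAC009S0764W0122.wav",
}
os.makedirs("data/test", exist_ok=True)
with open("data/test/wav.scp", "w", encoding="utf-8") as f:
    for utt_id, path in entries.items():
        f.write(f"{utt_id} {path}\n")
```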
@@ -1,101 +1,25 @@
 import os
-import shutil
-from multiprocessing import Pool
-
+import argparse
 from modelscope.pipelines import pipeline
 from modelscope.utils.constant import Tasks
 
-from funasr.utils.compute_wer import compute_wer
-
-
-def modelscope_infer_core(output_dir, split_dir, njob, idx, batch_size, ngpu, model):
-    output_dir_job = os.path.join(output_dir, "output.{}".format(idx))
-    if ngpu > 0:
-        use_gpu = 1
-        gpu_id = int(idx) - 1
-    else:
-        use_gpu = 0
-        gpu_id = -1
-    if "CUDA_VISIBLE_DEVICES" in os.environ.keys():
-        gpu_list = os.environ['CUDA_VISIBLE_DEVICES'].split(",")
-        os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_list[gpu_id])
-    else:
-        os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)
-    inference_pipline = pipeline(
+
+def modelscope_infer(args):
+    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpuid)
+    inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
-        model=model,
-        output_dir=output_dir_job,
-        batch_size=batch_size,
-        ngpu=use_gpu,
+        model=args.model,
+        output_dir=args.output_dir,
+        batch_size=args.batch_size,
     )
-    audio_in = os.path.join(split_dir, "wav.{}.scp".format(idx))
-    inference_pipline(audio_in=audio_in)
-
-
-def modelscope_infer(params):
-    # prepare for multi-GPU decoding
-    ngpu = params["ngpu"]
-    njob = params["njob"]
-    batch_size = params["batch_size"]
-    output_dir = params["output_dir"]
-    model = params["model"]
-    if os.path.exists(output_dir):
-        shutil.rmtree(output_dir)
-    os.mkdir(output_dir)
-    split_dir = os.path.join(output_dir, "split")
-    os.mkdir(split_dir)
-    if ngpu > 0:
-        nj = ngpu
-    elif ngpu == 0:
-        nj = njob
-    wav_scp_file = os.path.join(params["data_dir"], "wav.scp")
-    with open(wav_scp_file) as f:
-        lines = f.readlines()
-    num_lines = len(lines)
-    num_job_lines = num_lines // nj
-    start = 0
-    for i in range(nj):
-        end = start + num_job_lines
-        file = os.path.join(split_dir, "wav.{}.scp".format(str(i + 1)))
-        with open(file, "w") as f:
-            if i == nj - 1:
-                f.writelines(lines[start:])
-            else:
-                f.writelines(lines[start:end])
-        start = end
-
-    p = Pool(nj)
-    for i in range(nj):
-        p.apply_async(modelscope_infer_core,
-                      args=(output_dir, split_dir, njob, str(i + 1), batch_size, ngpu, model))
-    p.close()
-    p.join()
-
-    # combine decoding results
-    best_recog_path = os.path.join(output_dir, "1best_recog")
-    os.mkdir(best_recog_path)
-    files = ["text", "token", "score"]
-    for file in files:
-        with open(os.path.join(best_recog_path, file), "w") as f:
-            for i in range(nj):
-                job_file = os.path.join(output_dir, "output.{}/1best_recog".format(str(i + 1)), file)
-                with open(job_file) as f_job:
-                    lines = f_job.readlines()
-                f.writelines(lines)
-
-    # If text exists, compute CER
-    text_in = os.path.join(params["data_dir"], "text")
-    if os.path.exists(text_in):
-        text_proc_file = os.path.join(best_recog_path, "token")
-        compute_wer(text_in, text_proc_file, os.path.join(best_recog_path, "text.cer"))
+    inference_pipeline(audio_in=args.audio_in)
 
 
 if __name__ == "__main__":
-    params = {}
-    params["model"] = "damo/speech_paraformer_asr_nat-zh-cn-8k-common-vocab8358-tensorflow1"
-    params["data_dir"] = "./data/test"
-    params["output_dir"] = "./results"
-    params["ngpu"] = 1  # if ngpu > 0, will use gpu decoding
-    params["njob"] = 1  # if ngpu = 0, will use cpu decoding
-    params["batch_size"] = 64
-    modelscope_infer(params)
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--model', type=str, default="damo/speech_paraformer_asr_nat-zh-cn-8k-common-vocab8358-tensorflow1")
+    parser.add_argument('--audio_in', type=str, default="./data/test")
+    parser.add_argument('--output_dir', type=str, default="./results/")
+    parser.add_argument('--batch_size', type=int, default=64)
+    parser.add_argument('--gpuid', type=str, default="0")
+    args = parser.parse_args()
+    modelscope_infer(args)
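The rewrite strips the `multiprocessing` fan-out from `infer.py` and moves job splitting to the new `infer.sh` below, leaving a thin single-job wrapper. It can also be driven programmatically; a minimal sketch using an `argparse.Namespace` in place of CLI flags (assumes `infer.py` is importable from the working directory):

```python
# Driving the new single-job infer.py programmatically (illustrative).
from argparse import Namespace
from infer import modelscope_infer  # assumes infer.py is on the import path

args = Namespace(
    model="damo/speech_paraformer_asr_nat-zh-cn-8k-common-vocab8358-tensorflow1",
    audio_in="./data/test",
    output_dir="./results/",
    batch_size=64,
    gpuid="0",
)
modelscope_infer(args)
```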
@@ -0,0 +1,70 @@
+#!/usr/bin/env bash
+
+set -e
+set -u
+set -o pipefail
+
+stage=1
+stop_stage=2
+model="damo/speech_paraformer_asr_nat-zh-cn-8k-common-vocab8358-tensorflow1"
+data_dir="./data/test"
+output_dir="./results"
+batch_size=64
+gpu_inference=true    # whether to perform gpu decoding
+gpuid_list="0,1"      # set gpus, e.g., gpuid_list="0,1"
+njob=4                # the number of jobs for CPU decoding; takes effect when gpu_inference=false
+
+if ${gpu_inference}; then
+    nj=$(echo $gpuid_list | awk -F "," '{print NF}')
+else
+    nj=$njob
+    batch_size=1
+    gpuid_list=""
+    for JOB in $(seq ${nj}); do
+        gpuid_list=$gpuid_list"-1,"
+    done
+fi
+
+mkdir -p $output_dir/split
+split_scps=""
+for JOB in $(seq ${nj}); do
+    split_scps="$split_scps $output_dir/split/wav.$JOB.scp"
+done
+perl utils/split_scp.pl ${data_dir}/wav.scp ${split_scps}
+
+if [ $stage -le 1 ] && [ $stop_stage -ge 1 ];then
+    echo "Decoding ..."
+    gpuid_list_array=(${gpuid_list//,/ })
+    for JOB in $(seq ${nj}); do
+        {
+            id=$((JOB-1))
+            gpuid=${gpuid_list_array[$id]}
+            mkdir -p ${output_dir}/output.$JOB
+            python infer.py \
+                --model ${model} \
+                --audio_in ${output_dir}/split/wav.$JOB.scp \
+                --output_dir ${output_dir}/output.$JOB \
+                --batch_size ${batch_size} \
+                --gpuid ${gpuid}
+        }&
+    done
+    wait
+
+    mkdir -p ${output_dir}/1best_recog
+    for f in token score text; do
+        if [ -f "${output_dir}/output.1/1best_recog/${f}" ]; then
+            for i in $(seq "${nj}"); do
+                cat "${output_dir}/output.${i}/1best_recog/${f}"
+            done | sort -k1 >"${output_dir}/1best_recog/${f}"
+        fi
+    done
+fi
+
+if [ $stage -le 2 ] && [ $stop_stage -ge 2 ];then
+    echo "Computing WER ..."
+    python utils/proce_text.py ${output_dir}/1best_recog/text ${output_dir}/1best_recog/text.proc
+    python utils/proce_text.py ${data_dir}/text ${output_dir}/1best_recog/text.ref
+    python utils/compute_wer.py ${output_dir}/1best_recog/text.ref ${output_dir}/1best_recog/text.proc ${output_dir}/1best_recog/text.cer
+    tail -n 3 ${output_dir}/1best_recog/text.cer
+fi
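The script shards `wav.scp` with `utils/split_scp.pl` and, after decoding, concatenates and key-sorts each job's outputs. A rough Python equivalent of the contiguous split (mirroring the logic the removed `infer.py` code implemented; `split_scp.pl` itself may balance shards differently):

```python
# Rough equivalent of splitting wav.scp into nj contiguous shards.
import os

def split_scp(wav_scp: str, split_dir: str, nj: int) -> None:
    with open(wav_scp, encoding="utf-8") as f:
        lines = f.readlines()
    per_job = len(lines) // nj
    os.makedirs(split_dir, exist_ok=True)
    for i in range(nj):
        start = i * per_job
        end = None if i == nj - 1 else (i + 1) * per_job  # last shard takes the remainder
        with open(os.path.join(split_dir, f"wav.{i + 1}.scp"), "w", encoding="utf-8") as out:
            out.writelines(lines[start:end])

split_scp("data/test/wav.scp", "results/split", nj=2)
```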
@@ -0,0 +1 @@
+../../../../egs/aishell/transformer/utils