diff --git a/egs/aishell/paraformer/run.sh b/egs/aishell/paraformer/run.sh
index 06322ce77..c5ee1f289 100755
--- a/egs/aishell/paraformer/run.sh
+++ b/egs/aishell/paraformer/run.sh
@@ -8,7 +8,7 @@ gpu_num=2
 count=1
 gpu_inference=true # Whether to perform gpu decoding, set false for cpu decoding
 # for gpu decoding, inference_nj=ngpu*njob; for cpu decoding, inference_nj=njob
-njob=8
+njob=1
 
 train_cmd=utils/run.pl
 infer_cmd=utils/run.pl
@@ -219,7 +219,7 @@ if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then
         fi
         ${infer_cmd} --gpu "${_ngpu}" --max-jobs-run "${_nj}" JOB=1:"${_nj}" "${_logdir}"/asr_inference.JOB.log \
             python -m funasr.bin.asr_inference_launch \
-                --batch_size 1 \
+                --batch_size 100 \
                 --ngpu "${_ngpu}" \
                 --njob ${njob} \
                 --gpuid_list ${gpuid_list} \
diff --git a/egs/aishell/paraformerbert/run.sh b/egs/aishell/paraformerbert/run.sh
index 6f331ec43..a39ac7758 100755
--- a/egs/aishell/paraformerbert/run.sh
+++ b/egs/aishell/paraformerbert/run.sh
@@ -8,7 +8,7 @@ gpu_num=2
 count=1
 gpu_inference=true # Whether to perform gpu decoding, set false for cpu decoding
 # for gpu decoding, inference_nj=ngpu*njob; for cpu decoding, inference_nj=njob
-njob=8
+njob=1
 
 train_cmd=utils/run.pl
 infer_cmd=utils/run.pl
@@ -235,7 +235,7 @@ if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then
         fi
         ${infer_cmd} --gpu "${_ngpu}" --max-jobs-run "${_nj}" JOB=1:"${_nj}" "${_logdir}"/asr_inference.JOB.log \
             python -m funasr.bin.asr_inference_launch \
-                --batch_size 1 \
+                --batch_size 100 \
                 --ngpu "${_ngpu}" \
                 --njob ${njob} \
                 --gpuid_list ${gpuid_list} \