Mirror of https://github.com/modelscope/FunASR (synced 2025-09-15 14:48:36 +08:00)
Commit 532c56b3c1: update inference config
Parent: d653df71cb
@@ -7,7 +7,7 @@ CUDA_VISIBLE_DEVICES="0,1" # set gpus, e.g., CUDA_VISIBLE_DEVICES="0,1"
 gpu_num=2
 count=1
 gpu_inference=true # Whether to perform gpu decoding, set false for cpu decoding
-njob=4 # the number of jobs for each gpu
+njob=1 # the number of jobs for each gpu
 train_cmd=utils/run.pl
 infer_cmd=utils/run.pl
 
@@ -84,7 +84,7 @@ gpuid_list=$CUDA_VISIBLE_DEVICES # set gpus for decoding, the same as training
 ngpu=$(echo $gpuid_list | awk -F "," '{print NF}')
 
 if ${gpu_inference}; then
-    inference_nj=$[${ngpu}*${njob}]
+    inference_nj=$njob
     _ngpu=1
 else
     inference_nj=$njob
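
Note: the removed line computes inference_nj with $[ ... ], a deprecated bash
arithmetic form; $(( ... )) is the modern equivalent. A minimal sketch of the
old and new job counts, using the variable names from the script above:

    #!/usr/bin/env bash
    ngpu=2
    njob=4
    inference_nj_old=$(( ngpu * njob ))  # before: one decoding slot per GPU per job -> 8
    inference_nj_new=$njob               # after: njob jobs in total, independent of ngpu
    echo "${inference_nj_old} ${inference_nj_new}"
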
@@ -237,10 +237,10 @@ if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then
     fi
     ${infer_cmd} --gpu "${_ngpu}" --max-jobs-run "${_nj}" JOB=1:"${_nj}" "${_logdir}"/asr_inference.JOB.log \
         python -m funasr.bin.asr_inference_launch \
-            --batch_size 1 \
+            --batch_size 64 \
             --ngpu "${_ngpu}" \
             --njob ${njob} \
-            --gpuid_list ${gpuid_list} \
+            --gpuid_list ${gpuid_list:0:1} \
             --data_path_and_name_and_type "${_data}/${scp},speech,${type}" \
             --key_file "${_logdir}"/keys.JOB.scp \
             --asr_train_config "${asr_exp}"/config.yaml \
 
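
Note: ${gpuid_list:0:1} is bash substring expansion (offset 0, length 1), so the
launcher now receives only the first GPU id together with a larger batch (64
instead of 1); this assumes device ids are single digits. A small sketch, with
${gpuid_list%%,*} shown as a hypothetical alternative that also handles device
ids of two or more digits:

    gpuid_list="0,1"
    echo "${gpuid_list:0:1}"   # -> 0 (first character of the list)
    echo "${gpuid_list%%,*}"   # -> 0 (strip everything after the first comma)
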
@@ -10,9 +10,9 @@ exp_dir=
 model_name=speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch
 model_revision="v1.0.4" # please do not modify the model revision
 inference_nj=32
-gpuid_list="0,1" # set gpus, e.g., gpuid_list="0,1"
+gpuid_list="0" # set gpus, e.g., gpuid_list="0,1"
 ngpu=$(echo $gpuid_list | awk -F "," '{print NF}')
-njob=4 # the number of jobs for each gpu
+njob=1 # the number of jobs for each gpu
 gpu_inference=true # Whether to perform gpu decoding, set false for cpu decoding
 
 if ${gpu_inference}; then
 
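
Note: ngpu is derived from gpuid_list by letting awk split the string on commas
and print the number of fields (NF), so the new default gpuid_list="0" gives
ngpu=1. A standalone check:

    gpuid_list="0,1"
    ngpu=$(echo $gpuid_list | awk -F "," '{print NF}')
    echo $ngpu   # -> 2; with gpuid_list="0" this prints 1
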
@@ -7,7 +7,7 @@ CUDA_VISIBLE_DEVICES="0,1" # set gpus, e.g., CUDA_VISIBLE_DEVICES="0,1"
 gpu_num=2
 count=1
 gpu_inference=true # Whether to perform gpu decoding, set false for cpu decoding
-njob=4 # the number of jobs for each gpu
+njob=1 # the number of jobs for each gpu
 train_cmd=utils/run.pl
 infer_cmd=utils/run.pl
 
@@ -85,7 +85,7 @@ gpuid_list=$CUDA_VISIBLE_DEVICES # set gpus for decoding, the same as training
 ngpu=$(echo $gpuid_list | awk -F "," '{print NF}')
 
 if ${gpu_inference}; then
-    inference_nj=$[${ngpu}*${njob}]
+    inference_nj=$njob
     _ngpu=1
 else
     inference_nj=$njob
@@ -252,10 +252,10 @@ if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then
     fi
     ${infer_cmd} --gpu "${_ngpu}" --max-jobs-run "${_nj}" JOB=1:"${_nj}" "${_logdir}"/asr_inference.JOB.log \
         python -m funasr.bin.asr_inference_launch \
-            --batch_size 1 \
+            --batch_size 64 \
             --ngpu "${_ngpu}" \
             --njob ${njob} \
-            --gpuid_list ${gpuid_list} \
+            --gpuid_list ${gpuid_list:0:1} \
             --data_path_and_name_and_type "${_data}/${scp},speech,${type}" \
             --key_file "${_logdir}"/keys.JOB.scp \
             --asr_train_config "${asr_exp}"/config.yaml \
 
@@ -10,9 +10,9 @@ exp_dir=
 model_name=speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch
 model_revision="v1.0.4" # please do not modify the model revision
 inference_nj=32
-gpuid_list="0,1" # set gpus, e.g., gpuid_list="0,1"
+gpuid_list="0" # set gpus, e.g., gpuid_list="0,1"
 ngpu=$(echo $gpuid_list | awk -F "," '{print NF}')
-njob=4 # the number of jobs for each gpu
+njob=1 # the number of jobs for each gpu
 gpu_inference=true # Whether to perform gpu decoding, set false for cpu decoding
 
 if ${gpu_inference}; then
 
@@ -7,7 +7,7 @@ CUDA_VISIBLE_DEVICES="0,1" # set gpus, e.g., CUDA_VISIBLE_DEVICES="0,1"
 gpu_num=2
 count=1
 gpu_inference=true # Whether to perform gpu decoding, set false for cpu decoding
-njob=4 # the number of jobs for each gpu
+njob=1 # the number of jobs for each gpu
 train_cmd=utils/run.pl
 infer_cmd=utils/run.pl
 
@@ -84,7 +84,7 @@ gpuid_list=$CUDA_VISIBLE_DEVICES # set gpus for decoding, the same as training
 ngpu=$(echo $gpuid_list | awk -F "," '{print NF}')
 
 if ${gpu_inference}; then
-    inference_nj=$[${ngpu}*${njob}]
+    inference_nj=$njob
     _ngpu=1
 else
     inference_nj=$njob
@@ -244,10 +244,10 @@ if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then
     fi
     ${infer_cmd} --gpu "${_ngpu}" --max-jobs-run "${_nj}" JOB=1:"${_nj}" "${_logdir}"/asr_inference.JOB.log \
         python -m funasr.bin.asr_inference_launch \
-            --batch_size 1 \
+            --batch_size 64 \
             --ngpu "${_ngpu}" \
             --njob ${njob} \
-            --gpuid_list ${gpuid_list} \
+            --gpuid_list ${gpuid_list:0:1} \
             --data_path_and_name_and_type "${_data}/${scp},speech,${type}" \
             --key_file "${_logdir}"/keys.JOB.scp \
             --asr_train_config "${asr_exp}"/config.yaml \
 
@@ -8,9 +8,9 @@ model_name=speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch #
 model_revision="v1.0.4" # please do not modify the model revision
 data_dir= # wav list, ${data_dir}/wav.scp
 exp_dir="exp"
-gpuid_list="0,1"
+gpuid_list="0"
 ngpu=$(echo $gpuid_list | awk -F "," '{print NF}')
-njob=4
+njob=1
 gpu_inference=true
 decode_cmd=utils/run.pl
 
@@ -8,9 +8,9 @@ pretrained_model_name=speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404
 data_dir= # wav list, ${data_dir}/wav.scp
 finetune_model_name= # fine-tuning model name
 finetune_exp_dir= # fine-tuning model experiment result path
-gpuid_list="0,1"
+gpuid_list="0"
 ngpu=$(echo $gpuid_list | awk -F "," '{print NF}')
-njob=4
+njob=1
 gpu_inference=true
 decode_cmd=utils/run.pl
 
@@ -20,7 +20,6 @@ if ${gpu_inference}; then
     inference_nj=$[${ngpu}*${njob}]
     _ngpu=1
 else
-    inference_nj=${njob}
     inference_nj=${njob}
     _ngpu=0
 fi
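
Note: this hunk only removes a duplicated inference_nj=${njob} line in the CPU
branch. For reference, a sketch of the cleaned-up branch, with $(( ... )) shown
in place of the deprecated $[ ... ] form the script uses:

    if ${gpu_inference}; then
        inference_nj=$(( ngpu * njob ))  # GPU decoding: jobs-per-gpu times gpu count
        _ngpu=1                          # each job is given one GPU
    else
        inference_nj=${njob}             # CPU decoding: njob jobs in total
        _ngpu=0                          # no GPU per job
    fi
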
@@ -63,4 +62,4 @@ ${decode_cmd} --max-jobs-run "${inference_nj}" JOB=1:"${inference_nj}" "${_logdi
 
 for i in $(seq ${inference_nj}); do
     cat ${_logdir}/text.${i}
-done | sort -k1 >${_dir}/text
+done | sort -k1 >${_dir}/text
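
Note: utils/run.pl expands the JOB=1:N placeholder into N parallel processes,
each writing its own ${_logdir}/text.JOB shard; the loop above then concatenates
the shards and sorts them by the utterance key in column 1. A self-contained
sketch of that merge step (paths are hypothetical):

    #!/usr/bin/env bash
    inference_nj=4
    _logdir=exp/decode/logdir   # where each job wrote text.1 .. text.4
    _dir=exp/decode
    for i in $(seq ${inference_nj}); do
        cat "${_logdir}/text.${i}"
    done | sort -k1 > "${_dir}/text"
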
@@ -10,9 +10,9 @@ exp_dir=
 model_name=speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch
 model_revision="v1.0.4" # please do not modify the model revision
 inference_nj=32
-gpuid_list="0,1" # set gpus, e.g., gpuid_list="0,1"
+gpuid_list="0" # set gpus, e.g., gpuid_list="0,1"
 ngpu=$(echo $gpuid_list | awk -F "," '{print NF}')
-njob=4 # the number of jobs for each gpu
+njob=1 # the number of jobs for each gpu
 gpu_inference=true # Whether to perform gpu decoding, set false for cpu decoding
 
 if ${gpu_inference}; then
 
@@ -10,9 +10,9 @@ exp_dir=
 model_name=speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch
 model_revision="v1.0.4" # please do not modify the model revision
 inference_nj=32
-gpuid_list="0,1" # set gpus, e.g., gpuid_list="0,1"
+gpuid_list="0" # set gpus, e.g., gpuid_list="0,1"
 ngpu=$(echo $gpuid_list | awk -F "," '{print NF}')
-njob=4 # the number of jobs for each gpu
+njob=1 # the number of jobs for each gpu
 gpu_inference=true # Whether to perform gpu decoding, set false for cpu decoding
 
 if ${gpu_inference}; then