This commit is contained in:
游雁 2023-03-14 19:46:14 +08:00
parent 525f5d7756
commit 4bdce2285b
3 changed files with 123 additions and 1 deletion

View File

@@ -110,7 +110,8 @@ def inference_launch(mode, **kwargs):
     if mode == "offline":
         from funasr.bin.vad_inference import inference_modelscope
         return inference_modelscope(**kwargs)
-    elif mode == "online":
+    # elif mode == "online":
+    if "param_dict" in kwargs and kwargs["param_dict"]["online"]:
         from funasr.bin.vad_inference_online import inference_modelscope
         return inference_modelscope(**kwargs)
     else:
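With this change, the streaming (online) VAD pipeline is selected by param_dict["online"] rather than by the mode string alone. A minimal sketch of how the launcher might now be invoked; the import path and the omitted model kwargs are assumptions, not taken from this commit:

# Illustrative only: the module path is assumed, and the remaining kwargs that
# inference_modelscope needs (model name, output dir, ...) are omitted.
from funasr.bin.vad_inference_launch import inference_launch

run_vad = inference_launch(
    mode="online",                  # no longer the deciding factor by itself
    param_dict={"online": True},    # flag checked by the new "if" branch above
)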

View File

@@ -0,0 +1,47 @@
import time
import sys

import librosa

# Usage: python test_rtf.py <backend> <model_dir> <wav_scp>
backend = sys.argv[1]
model_dir = sys.argv[2]
wav_file = sys.argv[3]

# pick the runner that matches the exported model: libtorch by default,
# ONNX Runtime when requested on the command line
if backend == "onnxruntime":
    from rapid_paraformer import Paraformer
else:
    from torch_paraformer import Paraformer

model = Paraformer(model_dir, batch_size=1, device_id="-1")

wav_file_f = open(wav_file, 'r')
wav_files = wav_file_f.readlines()

# warm-up: decode the first utterance repeatedly before timing the full list
total = 0.0
num = 100
wav_path = wav_files[0].split("\t")[1].strip() if "\t" in wav_files[0] else wav_files[0].split(" ")[1].strip()
for i in range(num):
    beg_time = time.time()
    result = model(wav_path)
    end_time = time.time()
    duration = end_time - beg_time
    total += duration
    print(result)
    # 5.53 is the hard-coded duration in seconds of the warm-up utterance
    print("num: {}, time: {}, avg: {}, rtf: {}".format(i + 1, duration, total / (i + 1), (total / (i + 1)) / 5.53))

# inference time: decode every utterance in the scp and accumulate wall-clock time (ms)
beg_time = time.time()
for i, wav_path_i in enumerate(wav_files):
    wav_path = wav_path_i.split("\t")[1].strip() if "\t" in wav_path_i else wav_path_i.split(" ")[1].strip()
    result = model(wav_path)
end_time = time.time()
duration = (end_time - beg_time) * 1000
print("total_time_comput_ms: {}".format(int(duration)))

# total audio duration in ms (16 kHz -> 16 samples per millisecond)
duration_time = 0.0
for i, wav_path_i in enumerate(wav_files):
    wav_path = wav_path_i.split("\t")[1].strip() if "\t" in wav_path_i else wav_path_i.split(" ")[1].strip()
    waveform, _ = librosa.load(wav_path, sr=16000)
    duration_time += len(waveform) / 16.0
print("total_time_wav_ms: {}".format(int(duration_time)))
print("total_rtf: {:.5}".format(duration / duration_time))

View File

@@ -0,0 +1,74 @@
nj=64

# The ":<<! ... !" here-doc pairs below act as block comments to toggle config blocks;
# this first block is active because its ":<<!" is commented out.
#:<<!
backend=libtorch
model_dir="/nfs/zhifu.gzf/export/damo/amp_int8/libtorch"
tag=${backend}_fp32
!
:<<!
backend=libtorch
model_dir="/nfs/zhifu.gzf/export/damo/amp_int8/libtorch_fb20"
tag=${backend}_amp_fb20
!
:<<!
backend=onnxruntime
model_dir="/nfs/zhifu.gzf/export/damo/amp_int8/onnx"
tag=${backend}_fp32
!
:<<!
backend=onnxruntime
model_dir="/nfs/zhifu.gzf/export/damo/amp_int8/onnx_dynamic"
tag=${backend}_fp32
!
# the second assignment overrides the first; keep whichever scp you want to benchmark
scp=/nfs/haoneng.lhn/funasr_data/aishell-1/data/test/wav.scp
scp="/nfs/zhifu.gzf/data_debug/test/wav_1500.scp"
local_scp_dir=/nfs/zhifu.gzf/data_debug/test/${tag}/split$nj
rtf_tool=test_rtf.py
mkdir -p ${local_scp_dir}
echo ${local_scp_dir}
split_scps=""
for JOB in $(seq ${nj}); do
split_scps="$split_scps $local_scp_dir/wav.$JOB.scp"
done
perl egs/aishell/transformer/utils/split_scp.pl $scp ${split_scps}
for JOB in $(seq ${nj}); do
  {
    # pin each job to its own CPU core so the per-job timing is isolated
    core_id=`expr $JOB - 1`
    taskset -c ${core_id} python ${rtf_tool} ${backend} ${model_dir} ${local_scp_dir}/wav.$JOB.scp &> ${local_scp_dir}/log.$JOB.txt
  }&
done
wait
rm -rf ${local_scp_dir}/total_time_comput.txt
rm -rf ${local_scp_dir}/total_time_wav.txt
rm -rf ${local_scp_dir}/total_rtf.txt
for JOB in $(seq ${nj}); do
  {
    # collect the per-job totals printed by test_rtf.py
    cat ${local_scp_dir}/log.$JOB.txt | grep "total_time_comput" | awk -F ' ' '{print $2}' >> ${local_scp_dir}/total_time_comput.txt
    cat ${local_scp_dir}/log.$JOB.txt | grep "total_time_wav" | awk -F ' ' '{print $2}' >> ${local_scp_dir}/total_time_wav.txt
    cat ${local_scp_dir}/log.$JOB.txt | grep "total_rtf" | awk -F ' ' '{print $2}' >> ${local_scp_dir}/total_rtf.txt
  }
done
# jobs run in parallel, so wall-clock compute time is that of the slowest job; audio time is summed
total_time_comput=`cat ${local_scp_dir}/total_time_comput.txt | awk 'BEGIN {max = 0} {if ($1+0 > max+0) max = $1} END {print max}'`
total_time_wav=`cat ${local_scp_dir}/total_time_wav.txt | awk '{sum += $1} END {print sum}'`
rtf=`awk 'BEGIN{printf "%.5f\n",'$total_time_comput'/'$total_time_wav'}'`
speed=`awk 'BEGIN{printf "%.2f\n",1/'$rtf'}'`
echo "total_time_comput_ms: $total_time_comput"
echo "total_time_wav: $total_time_wav"
echo "total_rtf: $rtf, speech: $speed"