FunASR/examples/industrial_data_pretraining/fsmn_vad_streaming/demo.py

#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
# MIT License (https://opensource.org/licenses/MIT)
from funasr import AutoModel
wav_file = "/Users/zhifu/funasr_github/test_local/asr_example.wav"
chunk_size = 60000 # ms
model = AutoModel(model="/Users/zhifu/Downloads/modelscope_models/speech_fsmn_vad_zh-cn-16k-common-streaming", model_revision="v2.0.0")
res = model(input=wav_file,
            chunk_size=chunk_size,
            )
print(res)
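# res should hold the detected speech segments (start/end times in ms) for the whole file;
# the exact output structure may vary between FunASR versions.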
# Alternative: feed the audio chunk by chunk to exercise the streaming interface.
#
# import soundfile
# import os
#
# # wav_file = os.path.join(model.model_path, "example/vad_example.wav")
# speech, sample_rate = soundfile.read(wav_file)
#
# chunk_stride = int(chunk_size * sample_rate / 1000)  # chunk length in samples
#
# cache = {}  # carries the model's streaming state across calls
#
# total_chunk_num = int((len(speech) - 1) / chunk_stride + 1)
# for i in range(total_chunk_num):
#     speech_chunk = speech[i * chunk_stride:(i + 1) * chunk_stride]
#     is_final = i == total_chunk_num - 1  # flag the last chunk so buffered state is flushed
#     res = model(input=speech_chunk,
#                 cache=cache,
#                 is_final=is_final,
#                 chunk_size=chunk_size,
#                 )
#     print(res)
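#
# A minimal sketch (not part of the original demo) of wrapping the chunking logic above in a
# small helper, e.g. for buffers arriving from a microphone callback. stream_chunks is a
# hypothetical name, and the result handling assumes each call returns a list with one dict
# whose "value" field holds [start_ms, end_ms] segment pairs; check your FunASR version.
#
# def stream_chunks(samples, stride):
#     """Yield (chunk, is_final) pairs of fixed-size slices from a 1-D sample array."""
#     total = int((len(samples) - 1) / stride + 1)
#     for i in range(total):
#         yield samples[i * stride:(i + 1) * stride], i == total - 1
#
# cache = {}
# for chunk, is_final in stream_chunks(speech, chunk_stride):
#     res = model(input=chunk, cache=cache, is_final=is_final, chunk_size=chunk_size)
#     if res and res[0].get("value"):
#         print(res[0]["value"])  # detected segment boundaries, in ms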