mirror of
https://github.com/modelscope/FunASR
synced 2025-09-15 14:48:36 +08:00
* sensevoice finetune * bugfix * update with main (#1631: update seaco finetune, v1.0.24) * update with main (#1638: update seaco finetune, v1.0.24, update rwkv template) * whisper * update style --------- Co-authored-by: 维石 <shixian.shi@alibaba-inc.com>
32 lines
924 B
Python
from pathlib import Path

import soundfile
from funasr_onnx import Fsmn_vad_online

# ModelScope model id for the 16 kHz Chinese FSMN-VAD model; the example wav
# below lives in the local ModelScope cache.
model_dir = "damo/speech_fsmn_vad_zh-cn-16k-common-pytorch"
wav_path = "{}/.cache/modelscope/hub/damo/speech_fsmn_vad_zh-cn-16k-common-pytorch/example/vad_example.wav".format(
    Path.home()
)

model = Fsmn_vad_online(model_dir)

# Online (streaming) VAD: feed the audio in fixed-size chunks and carry the
# model state between calls via param_dict["in_cache"].
speech, sample_rate = soundfile.read(wav_path)
speech_length = speech.shape[0]

step = 1600  # chunk size in samples (100 ms at 16 kHz)
param_dict = {"in_cache": []}
for sample_offset in range(0, speech_length, step):
    if sample_offset + step >= speech_length - 1:
        # Last (possibly shorter) chunk: trim it and mark the stream as finished.
        step = speech_length - sample_offset
        is_final = True
    else:
        is_final = False
    param_dict["is_final"] = is_final
    segments_result = model(
        audio_in=speech[sample_offset : sample_offset + step], param_dict=param_dict
    )
    if segments_result:
        print(segments_result)
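The loop above loads the whole file into memory before chunking it. As a minimal streaming sketch, the same file can be read block by block with soundfile.blocks(); this reuses model and wav_path from the script, assumes the same Fsmn_vad_online call signature (audio_in=..., param_dict=...), and uses soundfile.info() only to know when the final block has been reached:

# Streaming variant (sketch): read 1600-sample blocks directly from disk
# instead of slicing a preloaded array.
total_frames = soundfile.info(wav_path).frames
param_dict = {"in_cache": []}  # fresh cache restarts the stream state
fed = 0
for block in soundfile.blocks(wav_path, blocksize=1600):
    fed += block.shape[0]
    param_dict["is_final"] = fed >= total_frames
    segments_result = model(audio_in=block, param_dict=param_dict)
    if segments_result:
        print(segments_result)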