#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
# MIT License (https://opensource.org/licenses/MIT)

from funasr import AutoModel

# Offline (non-streaming) VAD: pass a whole recording in one call.
wav_file = "https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/vad_example.wav"

model = AutoModel(model="iic/speech_fsmn_vad_zh-cn-16k-common-pytorch", model_revision="v2.0.4")

res = model.generate(input=wav_file)
print(res)
# Output: res[0]["value"] is a list of detected speech segments,
# [[beg1, end1], [beg2, end2], ..., [begN, endN]], with beg/end in ms.
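
# Optional follow-up, a minimal sketch rather than part of the original demo:
# the [beg, end] timestamps above can be used to cut the detected speech out of
# the waveform. This assumes the example wav bundled with the model download is
# the same recording as the remote URL used above.
import soundfile
import os

local_wav = os.path.join(model.model_path, "example/vad_example.wav")
audio, sr = soundfile.read(local_wav)
for beg_ms, end_ms in res[0]["value"]:
    beg = int(beg_ms * sr / 1000)  # ms -> samples
    end = int(end_ms * sr / 1000)
    soundfile.write(f"segment_{beg_ms}_{end_ms}.wav", audio[beg:end], sr)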

# Streaming VAD: feed the same recording to the model in fixed-size chunks.
import soundfile
import os

wav_file = os.path.join(model.model_path, "example/vad_example.wav")
speech, sample_rate = soundfile.read(wav_file)

chunk_size = 200  # chunk duration in ms
chunk_stride = int(chunk_size * sample_rate / 1000)  # ms -> samples; 200 ms at 16 kHz is 3200 samples

cache = {}  # carries the model's streaming state between calls

total_chunk_num = int((len(speech) - 1) / chunk_stride + 1)
for i in range(total_chunk_num):
    speech_chunk = speech[i * chunk_stride:(i + 1) * chunk_stride]
    is_final = i == total_chunk_num - 1  # mark the last chunk so the model flushes its state
    res = model.generate(
        input=speech_chunk,
        cache=cache,
        is_final=is_final,
        chunk_size=chunk_size,
        disable_pbar=True,
    )
    if len(res[0]["value"]):  # only print chunks that produced a VAD event
        print(res)

# Streaming output formats (beg/end in ms):
# 1. [[beg1, end1], [beg2, end2], ..., [begN, endN]] (e.g. [[beg, end]] or
#    [[beg1, end1], [beg2, end2]]): complete segments detected within this chunk
# 2. [[beg, -1]]: a segment start was detected; its end has not arrived yet
# 3. [[-1, end]]: the end of a previously opened segment was detected
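
# A sketch (an assumption about usage, not part of the original demo) of how the
# three streaming formats above can be stitched back into complete segments:
# complete pairs pass through, [beg, -1] opens a segment, [-1, end] closes it.
def merge_streaming_segments(chunk_values):
    """chunk_values: the res[0]["value"] lists collected across all chunks."""
    segments, open_beg = [], None
    for value in chunk_values:
        for beg, end in value:
            if beg != -1 and end != -1:  # case 1: complete segment
                segments.append([beg, end])
            elif end == -1:              # case 2: segment opened, end pending
                open_beg = beg
            else:                        # case 3: pending segment now closed
                segments.append([open_beg, end])
                open_beg = None
    return segments

# To use it, append res[0]["value"] to a list inside the loop above instead of
# printing, then call merge_streaming_segments on that list.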