#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
# MIT License (https://opensource.org/licenses/MIT)

from funasr import AutoModel

# Load the Paraformer-large offline ASR model. The commented-out arguments
# optionally add VAD, punctuation, and speaker models to the pipeline.
model = AutoModel(model="iic/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch",
                  model_revision="v2.0.4",
                  # vad_model="iic/speech_fsmn_vad_zh-cn-16k-common-pytorch",
                  # vad_model_revision="v2.0.4",
                  # punc_model="iic/punc_ct-transformer_zh-cn-common-vocab272727-pytorch",
                  # punc_model_revision="v2.0.4",
                  # spk_model="iic/speech_campplus_sv_zh-cn_16k-common",
                  # spk_model_revision="v2.0.2",
                  )

# Run recognition on a remote sample audio file (local paths are accepted as well).
res = model.generate(input="https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav")
print(res)
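
# generate() returns a list with one result per input; in current FunASR releases
# each entry is a dict whose "text" field holds the transcript (this schema is an
# assumption and may differ across versions), so the plain text can be read
# defensively like this:
if res and isinstance(res[0], dict) and "text" in res[0]:
    print(res[0]["text"])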


''' cannot be used currently
from funasr import AutoFrontend

frontend = AutoFrontend(model="iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch", model_revision="v2.0.4")

fbanks = frontend(input="https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav", batch_size=2)

for batch_idx, fbank_dict in enumerate(fbanks):
    res = model.generate(**fbank_dict)
    print(res)
'''
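

# A minimal sketch (assumption: the same AutoModel API as above) showing that
# generate() also accepts local audio files; "asr_example_local.wav" is a
# hypothetical placeholder path, not part of the original demo.
import os

local_wav = "asr_example_local.wav"
if os.path.exists(local_wav):
    res_local = model.generate(input=local_wav)
    print(res_local)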