#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# Copyright FunASR (https://github.com/alibaba-damo-academy/FunASR). All Rights Reserved.
# MIT License (https://opensource.org/licenses/MIT)
# To install requirements: pip3 install -U openai-whisper

from funasr import AutoModel

# Load Whisper-large-v3 through FunASR's AutoModel, paired with an FSMN VAD model
# that splits long audio into segments of at most 30 s (30000 ms) before recognition.
model = AutoModel(
    model="iic/Whisper-large-v3",
    model_revision="v2.0.5",
    vad_model="iic/speech_fsmn_vad_zh-cn-16k-common-pytorch",
    vad_kwargs={"max_single_segment_time": 30000},
)

# Transcribe a remote example clip. language=None lets Whisper auto-detect the
# spoken language; task="transcribe" keeps the output in the source language
# (as opposed to task="translate").
res = model.generate(
    language=None,
    task="transcribe",
    batch_size_s=0,
    input="https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav",
)
print(res)
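
# A minimal sketch of pulling the recognized text out of `res`. It assumes the
# usual FunASR output shape (a list of result dicts carrying a "text" field);
# exact keys may vary between FunASR versions, so treat this as illustrative.
for item in res:
    print(item.get("text", ""))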