output_dir = None

Author: 游雁
Date: 2023-05-22 17:44:54 +08:00
parent 3cde6fe8e0
commit d16e9d8248
10 changed files with 10 additions and 10 deletions


@@ -3,7 +3,7 @@ from modelscope.utils.constant import Tasks
 
 if __name__ == '__main__':
     audio_in = 'https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav'
-    output_dir = None
+    output_dir = "./results"
     inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_conformer_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch",


@@ -3,7 +3,7 @@ from modelscope.utils.constant import Tasks
 
 if __name__ == '__main__':
     audio_in = 'https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav'
-    output_dir = None
+    output_dir = "./results"
     inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_paraformer_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch",


@@ -3,7 +3,7 @@ from modelscope.utils.constant import Tasks
 
 if __name__ == '__main__':
     audio_in = 'https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav'
-    output_dir = None
+    output_dir = "./results"
     inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_paraformerbert_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch",


@@ -3,7 +3,7 @@ from modelscope.utils.constant import Tasks
 
 if __name__ == '__main__':
     audio_in = 'https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav'
-    output_dir = None
+    output_dir = "./results"
     inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_UniASR_asr_2pass-cn-dialect-16k-vocab8358-tensorflow1-online",


@@ -3,7 +3,7 @@ from modelscope.utils.constant import Tasks
 
 if __name__ == '__main__':
     audio_in = 'https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav'
-    output_dir = None
+    output_dir = "./results"
     inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_UniASR_asr_2pass-zh-cn-16k-common-vocab8358-tensorflow1-online",


@@ -3,7 +3,7 @@ from modelscope.utils.constant import Tasks
 
 if __name__ == '__main__':
     audio_in = 'https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav'
-    output_dir = None
+    output_dir = "./results"
    inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab8358-tensorflow1-online",


@@ -3,7 +3,7 @@ from modelscope.utils.constant import Tasks
 
 if __name__ == '__main__':
     audio_in = 'https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/vad_example.wav'
-    output_dir = None
+    output_dir = "./results"
     inference_pipeline = pipeline(
         task=Tasks.voice_activity_detection,
         model="damo/speech_fsmn_vad_zh-cn-16k-common-pytorch",


@@ -7,7 +7,7 @@ logger.setLevel(logging.CRITICAL)
 import soundfile
 
 if __name__ == '__main__':
-    output_dir = None
+    output_dir = "./results"
     inference_pipeline = pipeline(
         task=Tasks.voice_activity_detection,
         model="damo/speech_fsmn_vad_zh-cn-16k-common-pytorch",


@@ -3,7 +3,7 @@ from modelscope.utils.constant import Tasks
 
 if __name__ == '__main__':
     audio_in = 'https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/vad_example_8k.wav'
-    output_dir = None
+    output_dir = "./results"
     inference_pipeline = pipeline(
         task=Tasks.voice_activity_detection,
         model="damo/speech_fsmn_vad_zh-cn-8k-common",


@@ -7,7 +7,7 @@ logger.setLevel(logging.CRITICAL)
 import soundfile
 
 if __name__ == '__main__':
-    output_dir = None
+    output_dir = "./results"
     inference_pipeline = pipeline(
         task=Tasks.voice_activity_detection,
         model="damo/speech_fsmn_vad_zh-cn-8k-common",