diff --git a/egs_modelscope/asr/conformer/speech_conformer_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch/demo.py b/egs_modelscope/asr/conformer/speech_conformer_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch/demo.py
index 87bb65299..ddcae969a 100644
--- a/egs_modelscope/asr/conformer/speech_conformer_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch/demo.py
+++ b/egs_modelscope/asr/conformer/speech_conformer_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch/demo.py
@@ -3,7 +3,7 @@ from modelscope.utils.constant import Tasks
 
 if __name__ == '__main__':
     audio_in = 'https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav'
-    output_dir = None
+    output_dir = "./results"
     inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_conformer_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch",
diff --git a/egs_modelscope/asr/paraformer/speech_paraformer_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch/demo.py b/egs_modelscope/asr/paraformer/speech_paraformer_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch/demo.py
index 2863c1ada..5c8fceb62 100644
--- a/egs_modelscope/asr/paraformer/speech_paraformer_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch/demo.py
+++ b/egs_modelscope/asr/paraformer/speech_paraformer_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch/demo.py
@@ -3,7 +3,7 @@ from modelscope.utils.constant import Tasks
 
 if __name__ == '__main__':
     audio_in = 'https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav'
-    output_dir = None
+    output_dir = "./results"
     inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_paraformer_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch",
diff --git a/egs_modelscope/asr/paraformerbert/speech_paraformerbert_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch/infer.py b/egs_modelscope/asr/paraformerbert/speech_paraformerbert_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch/infer.py
index f4c4fc2fb..ba237dd7f 100644
--- a/egs_modelscope/asr/paraformerbert/speech_paraformerbert_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch/infer.py
+++ b/egs_modelscope/asr/paraformerbert/speech_paraformerbert_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch/infer.py
@@ -3,7 +3,7 @@ from modelscope.utils.constant import Tasks
 
 if __name__ == '__main__':
     audio_in = 'https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav'
-    output_dir = None
+    output_dir = "./results"
     inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_paraformerbert_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch",
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-cn-dialect-16k-vocab8358-tensorflow1-online/infer.py b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-cn-dialect-16k-vocab8358-tensorflow1-online/infer.py
index 936d6d7ba..9c4d6c7cc 100644
--- a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-cn-dialect-16k-vocab8358-tensorflow1-online/infer.py
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-cn-dialect-16k-vocab8358-tensorflow1-online/infer.py
@@ -3,7 +3,7 @@ from modelscope.utils.constant import Tasks
 
 if __name__ == '__main__':
     audio_in = 'https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav'
-    output_dir = None
+    output_dir = "./results"
     inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_UniASR_asr_2pass-cn-dialect-16k-vocab8358-tensorflow1-online",
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-16k-common-vocab8358-tensorflow1-online/infer.py b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-16k-common-vocab8358-tensorflow1-online/infer.py
index a3e2a002f..f01b1dd9b 100644
--- a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-16k-common-vocab8358-tensorflow1-online/infer.py
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-16k-common-vocab8358-tensorflow1-online/infer.py
@@ -3,7 +3,7 @@ from modelscope.utils.constant import Tasks
 
 if __name__ == '__main__':
     audio_in = 'https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav'
-    output_dir = None
+    output_dir = "./results"
     inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_UniASR_asr_2pass-zh-cn-16k-common-vocab8358-tensorflow1-online",
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab8358-tensorflow1-online/infer.py b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab8358-tensorflow1-online/infer.py
index 3ab16ea72..f1006481b 100644
--- a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab8358-tensorflow1-online/infer.py
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab8358-tensorflow1-online/infer.py
@@ -3,7 +3,7 @@ from modelscope.utils.constant import Tasks
 
 if __name__ == '__main__':
     audio_in = 'https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav'
-    output_dir = None
+    output_dir = "./results"
     inference_pipeline = pipeline(
         task=Tasks.auto_speech_recognition,
         model="damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab8358-tensorflow1-online",
diff --git a/egs_modelscope/vad/speech_fsmn_vad_zh-cn-16k-common/demo.py b/egs_modelscope/vad/speech_fsmn_vad_zh-cn-16k-common/demo.py
index eded5edca..9fa822839 100644
--- a/egs_modelscope/vad/speech_fsmn_vad_zh-cn-16k-common/demo.py
+++ b/egs_modelscope/vad/speech_fsmn_vad_zh-cn-16k-common/demo.py
@@ -3,7 +3,7 @@ from modelscope.utils.constant import Tasks
 
 if __name__ == '__main__':
     audio_in = 'https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/vad_example.wav'
-    output_dir = None
+    output_dir = "./results"
     inference_pipeline = pipeline(
         task=Tasks.voice_activity_detection,
         model="damo/speech_fsmn_vad_zh-cn-16k-common-pytorch",
diff --git a/egs_modelscope/vad/speech_fsmn_vad_zh-cn-16k-common/demo_online.py b/egs_modelscope/vad/speech_fsmn_vad_zh-cn-16k-common/demo_online.py
index 65693b5f1..16ebd5839 100644
--- a/egs_modelscope/vad/speech_fsmn_vad_zh-cn-16k-common/demo_online.py
+++ b/egs_modelscope/vad/speech_fsmn_vad_zh-cn-16k-common/demo_online.py
@@ -7,7 +7,7 @@ logger.setLevel(logging.CRITICAL)
 import soundfile
 
 if __name__ == '__main__':
-    output_dir = None
+    output_dir = "./results"
     inference_pipeline = pipeline(
         task=Tasks.voice_activity_detection,
         model="damo/speech_fsmn_vad_zh-cn-16k-common-pytorch",
diff --git a/egs_modelscope/vad/speech_fsmn_vad_zh-cn-8k-common/demo.py b/egs_modelscope/vad/speech_fsmn_vad_zh-cn-8k-common/demo.py
index 33be5059a..6bd491bc9 100644
--- a/egs_modelscope/vad/speech_fsmn_vad_zh-cn-8k-common/demo.py
+++ b/egs_modelscope/vad/speech_fsmn_vad_zh-cn-8k-common/demo.py
@@ -3,7 +3,7 @@ from modelscope.utils.constant import Tasks
 
 if __name__ == '__main__':
     audio_in = 'https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/vad_example_8k.wav'
-    output_dir = None
+    output_dir = "./results"
     inference_pipeline = pipeline(
         task=Tasks.voice_activity_detection,
         model="damo/speech_fsmn_vad_zh-cn-8k-common",
diff --git a/egs_modelscope/vad/speech_fsmn_vad_zh-cn-8k-common/demo_online.py b/egs_modelscope/vad/speech_fsmn_vad_zh-cn-8k-common/demo_online.py
index ec5c50281..d777089db 100644
--- a/egs_modelscope/vad/speech_fsmn_vad_zh-cn-8k-common/demo_online.py
+++ b/egs_modelscope/vad/speech_fsmn_vad_zh-cn-8k-common/demo_online.py
@@ -7,7 +7,7 @@ logger.setLevel(logging.CRITICAL)
 import soundfile
 
 if __name__ == '__main__':
-    output_dir = None
+    output_dir = "./results"
     inference_pipeline = pipeline(
         task=Tasks.voice_activity_detection,
         model="damo/speech_fsmn_vad_zh-cn-8k-common",
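
Every hunk above makes the same one-line change: the demo and inference scripts previously constructed their ModelScope pipeline with output_dir = None, so results were only returned in memory, whereas "./results" gives each run a directory to write its ASR/VAD output to. Below is a minimal sketch of the shared pattern; note that the hunks cut off after the model= argument, so the output_dir= kwarg and the invocation line (rec_result = inference_pipeline(audio_in=audio_in)) are assumptions taken from the shape of the full demo scripts, not shown in this patch.

```python
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks

if __name__ == '__main__':
    audio_in = 'https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav'
    output_dir = "./results"  # was None: results stayed in memory only

    # Passing output_dir to pipeline() is assumed from the full demo scripts
    # (it is the reason the variable exists); the hunks truncate before it.
    inference_pipeline = pipeline(
        task=Tasks.auto_speech_recognition,
        model="damo/speech_paraformer_asr_nat-zh-cn-16k-aishell1-vocab4234-pytorch",
        output_dir=output_dir,
    )

    # Assumed invocation, also outside the hunks: the pipeline returns a
    # result dict and, with output_dir set, persists the output under ./results.
    rec_result = inference_pipeline(audio_in=audio_in)
    print(rec_result)
```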