diff --git a/tests/test_asr_inference_pipeline.py b/tests/test_asr_inference_pipeline.py
index fb71e1dea..86e651d50 100644
--- a/tests/test_asr_inference_pipeline.py
+++ b/tests/test_asr_inference_pipeline.py
@@ -206,6 +206,24 @@ class TestUniasrInferencePipelines(unittest.TestCase):
             audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav')
         logger.info("asr inference result: {0}".format(rec_result))
 
+    def test_uniasr_2pass_de_common_offline(self):
+        inference_pipeline = pipeline(
+            task=Tasks.auto_speech_recognition,
+            model='damo/speech_UniASR_asr_2pass-de-16k-common-vocab3690-tensorflow1-offline')
+        rec_result = inference_pipeline(
+            audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_de.wav',
+            param_dict={"decoding_model": "offline"})
+        logger.info("asr inference result: {0}".format(rec_result))
+
+    def test_uniasr_2pass_de_common_online(self):
+        inference_pipeline = pipeline(
+            task=Tasks.auto_speech_recognition,
+            model='damo/speech_UniASR_asr_2pass-de-16k-common-vocab3690-tensorflow1-online')
+        rec_result = inference_pipeline(
+            audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_de.wav',
+            param_dict={"decoding_model": "normal"})
+        logger.info("asr inference result: {0}".format(rec_result))
+
 
 if __name__ == '__main__':
     unittest.main()
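
For reference, the added tests reduce to the following standalone sketch of the ModelScope ASR pipeline call (assumes the `modelscope` package is installed and that the model IDs and test audio URL above are reachable; the "offline"/"normal" values for `decoding_model` are taken directly from the diff and appear to select the 2-pass vs. streaming-style decoding path):

    # Minimal sketch of what the new tests exercise; not part of the diff itself.
    from modelscope.pipelines import pipeline
    from modelscope.utils.constant import Tasks

    inference_pipeline = pipeline(
        task=Tasks.auto_speech_recognition,
        model='damo/speech_UniASR_asr_2pass-de-16k-common-vocab3690-tensorflow1-offline')

    # param_dict chooses the decoding pass: "offline" for the offline model,
    # "normal" for the online (streaming) model, per the tests above.
    rec_result = inference_pipeline(
        audio_in='https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_de.wav',
        param_dict={"decoding_model": "offline"})
    print(rec_result)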