diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-ja-16k-common-vocab93-tensorflow1-offline/finetune.py b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-ja-16k-common-vocab93-tensorflow1-offline/finetune.py
index 4a5efdbe9..5485ff56e 100644
--- a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-ja-16k-common-vocab93-tensorflow1-offline/finetune.py
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-ja-16k-common-vocab93-tensorflow1-offline/finetune.py
@@ -30,6 +30,6 @@ if __name__ == '__main__':
     params["dataset_type"] = "small"
     params["max_epoch"] = 50
     params["lr"] = 0.00005
-    params["model"] = "damo/speech_UniASR_asr_2pass-ja-16k-common-vocab93-tensorflow1-online"
+    params["model"] = "damo/speech_UniASR_asr_2pass-ja-16k-common-vocab93-tensorflow1-offline"
     params["model_revision"] = None
     modelscope_finetune(params)
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-ja-16k-common-vocab93-tensorflow1-offline/infer.py b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-ja-16k-common-vocab93-tensorflow1-offline/infer.py
index a053957d3..1a174bbca 100644
--- a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-ja-16k-common-vocab93-tensorflow1-offline/infer.py
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-ja-16k-common-vocab93-tensorflow1-offline/infer.py
@@ -6,7 +6,7 @@ if __name__ == "__main__":
     output_dir = "./results"
     inference_pipline = pipeline(
         task=Tasks.auto_speech_recognition,
-        model="damo/speech_UniASR_asr_2pass-ja-16k-common-vocab93-tensorflow1-online",
+        model="damo/speech_UniASR_asr_2pass-ja-16k-common-vocab93-tensorflow1-offline",
         output_dir=output_dir,
     )
     rec_result = inference_pipline(audio_in=audio_in, param_dict={"decoding_model":"offline"})
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-pt-16k-common-vocab1617-tensorflow1-offline/finetune.py b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-pt-16k-common-vocab1617-tensorflow1-offline/finetune.py
index 60f3c8208..512b844c6 100644
--- a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-pt-16k-common-vocab1617-tensorflow1-offline/finetune.py
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-pt-16k-common-vocab1617-tensorflow1-offline/finetune.py
@@ -30,6 +30,6 @@ if __name__ == '__main__':
     params["dataset_type"] = "small"
     params["max_epoch"] = 50
     params["lr"] = 0.00005
-    params["model"] = "damo/speech_UniASR_asr_2pass-pt-16k-common-vocab1617-tensorflow1-online"
+    params["model"] = "damo/speech_UniASR_asr_2pass-pt-16k-common-vocab1617-tensorflow1-offline"
     params["model_revision"] = None
     modelscope_finetune(params)
diff --git a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-pt-16k-common-vocab1617-tensorflow1-offline/infer.py b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-pt-16k-common-vocab1617-tensorflow1-offline/infer.py
index 30a11ffd3..2dcb6638a 100644
--- a/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-pt-16k-common-vocab1617-tensorflow1-offline/infer.py
+++ b/egs_modelscope/asr/uniasr/speech_UniASR_asr_2pass-pt-16k-common-vocab1617-tensorflow1-offline/infer.py
@@ -6,7 +6,7 @@ if __name__ == "__main__":
     output_dir = "./results"
     inference_pipline = pipeline(
         task=Tasks.auto_speech_recognition,
-        model="damo/speech_UniASR_asr_2pass-pt-16k-common-vocab1617-tensorflow1-online",
+        model="damo/speech_UniASR_asr_2pass-pt-16k-common-vocab1617-tensorflow1-offline",
         output_dir=output_dir,
     )
     rec_result = inference_pipline(audio_in=audio_in, param_dict={"decoding_model":"offline"})
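
For context, the patch corrects copy-pasted `-online` model IDs to `-offline` in the offline recipe directories. Below is a minimal, self-contained sketch of the corrected inference call for the Japanese model, assembled from the `infer.py` shown above; the imports follow the standard ModelScope API, and the audio input is a placeholder, not a path from this repository.

```python
# Minimal sketch: offline inference with the corrected Japanese UniASR model ID.
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks

inference_pipeline = pipeline(
    task=Tasks.auto_speech_recognition,
    model="damo/speech_UniASR_asr_2pass-ja-16k-common-vocab93-tensorflow1-offline",
    output_dir="./results",
)

# "decoding_model": "offline" selects the second-pass (offline) decoder
# of the 2-pass UniASR model, matching the recipe's directory name.
rec_result = inference_pipeline(
    audio_in="example.wav",  # placeholder: path or URL to a 16 kHz wav file
    param_dict={"decoding_model": "offline"},
)
print(rec_result)
```

The Portuguese recipe is identical apart from the model ID (`damo/speech_UniASR_asr_2pass-pt-16k-common-vocab1617-tensorflow1-offline`).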