Merge pull request #122 from alibaba-damo-academy/dev_lhn

Dev lhn
This commit is contained in:
zhifu gao 2023-02-16 12:37:24 +08:00 committed by GitHub
commit b2acffedd4
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
7 changed files with 7 additions and 7 deletions

View File

@@ -1,6 +1,6 @@
# ModelScope Model
## How to finetune and infer using a pretrained Paraformer-large Model
## How to finetune and infer using a pretrained UniASR Model
### Finetune

View File

@@ -23,7 +23,7 @@ def modelscope_infer_core(output_dir, split_dir, njob, idx):
batch_size=1
)
audio_in = os.path.join(split_dir, "wav.{}.scp".format(idx))
inference_pipline(audio_in=audio_in)
inference_pipline(audio_in=audio_in, param_dict={"decoding_model": "normal"})
def modelscope_infer(params):

View File

@@ -34,7 +34,7 @@ def modelscope_infer_after_finetune(params):
batch_size=1
)
audio_in = os.path.join(params["data_dir"], "wav.scp")
inference_pipeline(audio_in=audio_in)
inference_pipeline(audio_in=audio_in, param_dict={"decoding_model": "normal"})
# computer CER if GT text is set
text_in = os.path.join(params["data_dir"], "text")

View File

@@ -23,7 +23,7 @@ def modelscope_infer_core(output_dir, split_dir, njob, idx):
batch_size=1
)
audio_in = os.path.join(split_dir, "wav.{}.scp".format(idx))
inference_pipline(audio_in=audio_in)
inference_pipline(audio_in=audio_in, param_dict={"decoding_model": "offline"})
def modelscope_infer(params):

View File

@@ -34,7 +34,7 @@ def modelscope_infer_after_finetune(params):
batch_size=1
)
audio_in = os.path.join(params["data_dir"], "wav.scp")
inference_pipeline(audio_in=audio_in)
inference_pipeline(audio_in=audio_in, param_dict={"decoding_model": "offline"})
# computer CER if GT text is set
text_in = os.path.join(params["data_dir"], "text")

View File

@@ -23,7 +23,7 @@ def modelscope_infer_core(output_dir, split_dir, njob, idx):
batch_size=1
)
audio_in = os.path.join(split_dir, "wav.{}.scp".format(idx))
inference_pipline(audio_in=audio_in)
inference_pipline(audio_in=audio_in, param_dict={"decoding_model": "normal"})
def modelscope_infer(params):

View File

@@ -34,7 +34,7 @@ def modelscope_infer_after_finetune(params):
batch_size=1
)
audio_in = os.path.join(params["data_dir"], "wav.scp")
inference_pipeline(audio_in=audio_in)
inference_pipeline(audio_in=audio_in, param_dict={"decoding_model": "normal"})
# computer CER if GT text is set
text_in = os.path.join(params["data_dir"], "text")