Mirror of https://github.com/modelscope/FunASR, synced 2025-09-15 14:48:36 +08:00

Commit c5f132a451 (parent 02a3eefb35): update 8k uniasr recipe
The commit applies the same set of changes to the online and the offline 8k UniASR recipes; the diffs below are shown once, for the online recipe, and the offline recipe receives the mirrored changes. In `README.md`, the heading now names UniASR instead of Paraformer-large, and the finetune parameter list gains `dataset_type` plus a precise definition of `batch_bins`:

````diff
README.md
@@ -1,13 +1,14 @@
 # ModelScope Model

-## How to finetune and infer using a pretrained Paraformer-large Model
+## How to finetune and infer using a pretrained UniASR Model

 ### Finetune

 - Modify the finetuning-related parameters in `finetune.py`
     - <strong>output_dir:</strong> # result dir
-    - <strong>data_dir:</strong> # the dataset dir needs to include files: train/wav.scp, train/text; validation/wav.scp, validation/text.
+    - <strong>data_dir:</strong> # the dataset dir needs to include the files `train/wav.scp`, `train/text`, `validation/wav.scp`, and `validation/text`
-    - <strong>batch_bins:</strong> # batch size
+    - <strong>dataset_type:</strong> # for a dataset larger than 1000 hours, set to `large`; otherwise set to `small`
+    - <strong>batch_bins:</strong> # batch size; if `dataset_type` is `small`, `batch_bins` is the number of feature frames, and if `dataset_type` is `large`, it is the total duration in ms
     - <strong>max_epoch:</strong> # number of training epochs
     - <strong>lr:</strong> # learning rate

````
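For reference, `wav.scp` and `text` follow the usual Kaldi-style "key value" convention, one utterance per line. A minimal sketch that writes such a layout; the utterance IDs, paths, and transcripts here are hypothetical:

```python
import os

# Hypothetical utterances: (utterance_id, wav_path, transcript).
splits = {
    "train": [("utt001", "/data/audio/utt001.wav", "今天天气怎么样")],
    "validation": [("utt101", "/data/audio/utt101.wav", "打开空调")],
}

data_dir = "./data"
for split, utts in splits.items():
    os.makedirs(os.path.join(data_dir, split), exist_ok=True)
    with open(os.path.join(data_dir, split, "wav.scp"), "w") as f_wav, \
         open(os.path.join(data_dir, split, "text"), "w") as f_text:
        for utt_id, wav_path, transcript in utts:
            f_wav.write("{} {}\n".format(utt_id, wav_path))     # <utt_id> <wav_path>
            f_text.write("{} {}\n".format(utt_id, transcript))  # <utt_id> <transcript>
```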
Further down in `README.md`, the inference instructions are rewritten for scp-based multi-GPU decoding, and a new section documents decoding with a locally finetuned model:

````diff
README.md
@@ -21,10 +22,32 @@
 Or you can use the finetuned model for inference directly.

 - Setting parameters in `infer.py`
-    - <strong>audio_in:</strong> # support wav, url, bytes, and parsed audio format.
-    - <strong>output_dir:</strong> # If the input format is wav.scp, it needs to be set.
+    - <strong>data_dir:</strong> # the dataset dir needs to include `test/wav.scp`; if `test/text` also exists, the CER will be computed
+    - <strong>output_dir:</strong> # result dir
+    - <strong>ngpu:</strong> # the number of GPUs for decoding
+    - <strong>njob:</strong> # the number of jobs for each GPU
 - Then you can run the pipeline to infer with:
 ```python
 python infer.py
 ```

+- Results
+
+The decoding results can be found in `$output_dir/1best_recog/text.cer`, which includes the recognition result of each sample and the CER metric of the whole test set.
+
+### Inference using a local finetuned model
+
+- Modify inference-related parameters in `infer_after_finetune.py`
+    - <strong>output_dir:</strong> # result dir
+    - <strong>data_dir:</strong> # the dataset dir needs to include `test/wav.scp`; if `test/text` also exists, the CER will be computed
+    - <strong>decoding_model_name:</strong> # the checkpoint name to decode with, e.g., `valid.cer_ctc.ave.pth`
+- Then you can run the pipeline to infer with:
+```python
+python infer_after_finetune.py
+```
+
+- Results
+
+The decoding results can be found in `$output_dir/decoding_results/text.cer`, which includes the recognition result of each sample and the CER metric of the whole test set.
````
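As an illustration of the `ngpu`/`njob` settings documented above, the whole multi-GPU run in `infer.py` (shown in full further below) boils down to one call; the parameter values here are hypothetical:

```python
params = {
    "data_dir": "./data/test",   # must contain wav.scp; add text to also get a CER
    "output_dir": "./results",
    "ngpu": 2,                   # GPUs used for decoding
    "njob": 4,                   # jobs per GPU -> 2 * 4 = 8 parallel decoding jobs
}
modelscope_infer(params)         # defined in infer.py
```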
`finetune.py` now imports `modelscope_args` at the top of the file, drops the `model_revision` argument from the trainer kwargs, and points the example at the online model:

````diff
finetune.py
@@ -1,7 +1,10 @@
 import os

 from modelscope.metainfo import Trainers
 from modelscope.trainers import build_trainer

 from funasr.datasets.ms_dataset import MsDataset
+from funasr.utils.modelscope_param import modelscope_args


 def modelscope_finetune(params):
@@ -11,7 +14,6 @@ def modelscope_finetune(params):
     ds_dict = MsDataset.load(params.data_path)
     kwargs = dict(
         model=params.model,
-        model_revision=params.model_revision,
         data_dir=ds_dict,
         dataset_type=params.dataset_type,
         work_dir=params.output_dir,
@@ -23,8 +25,7 @@ def modelscope_finetune(params):
 if __name__ == '__main__':
-    from funasr.utils.modelscope_param import modelscope_args
-    params = modelscope_args(model="damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-offline", data_path="./data")
+    params = modelscope_args(model="damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online", data_path="./data")
     params.output_dir = "./checkpoint"    # model save path
     params.data_path = "./example_data/"  # data path
     params.dataset_type = "small"         # use "small" for a small dataset; if the data exceeds 1000 hours, use "large"
````
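The training hyper-parameters listed in the README are fields on the returned `params` object. A minimal sketch of overriding them before launching finetuning; the values are illustrative only, and it is assumed here that `modelscope_args` exposes `batch_bins`, `max_epoch`, and `lr`, as the README's parameter list suggests:

```python
params = modelscope_args(
    model="damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online",
    data_path="./data",
)
params.output_dir = "./checkpoint"  # model save path
params.dataset_type = "small"       # "large" once the data exceeds 1000 hours
params.batch_bins = 2000            # feature frames, since dataset_type is "small"
params.max_epoch = 20               # number of training epochs
params.lr = 0.0002                  # learning rate
modelscope_finetune(params)
```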
`infer.py` is rewritten from a single-utterance demo into a multi-GPU, multi-job batch decoder: the test `wav.scp` is split into `ngpu * njob` pieces, each piece is decoded by a worker pinned to one GPU, the per-job outputs are merged, and the CER is computed when a reference `text` file exists:

````diff
infer.py
@@ -1,14 +1,88 @@
+import os
+import shutil
+from multiprocessing import Pool
+
 from modelscope.pipelines import pipeline
 from modelscope.utils.constant import Tasks

-if __name__ == '__main__':
-    audio_in = 'https://isv-data.oss-cn-hangzhou.aliyuncs.com/ics/MaaS/ASR/test_audio/asr_example_zh.wav'
-    output_dir = None
+from funasr.utils.compute_wer import compute_wer
+
+
+def modelscope_infer_core(output_dir, split_dir, njob, idx):
+    output_dir_job = os.path.join(output_dir, "output.{}".format(idx))
+    gpu_id = (int(idx) - 1) // njob
+    if "CUDA_VISIBLE_DEVICES" in os.environ.keys():
+        gpu_list = os.environ['CUDA_VISIBLE_DEVICES'].split(",")
+        os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_list[gpu_id])
+    else:
+        os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)
     inference_pipline = pipeline(
         task=Tasks.auto_speech_recognition,
-        model="damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-offline",
+        model="damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online",
-        output_dir=output_dir,
+        output_dir=output_dir_job,
+        batch_size=1
     )
-    rec_result = inference_pipline(audio_in=audio_in)
-    print(rec_result)
+    audio_in = os.path.join(split_dir, "wav.{}.scp".format(idx))
+    inference_pipline(audio_in=audio_in)
+
+
+def modelscope_infer(params):
+    # prepare for multi-GPU decoding
+    ngpu = params["ngpu"]
+    njob = params["njob"]
+    output_dir = params["output_dir"]
+    if os.path.exists(output_dir):
+        shutil.rmtree(output_dir)
+    os.mkdir(output_dir)
+    split_dir = os.path.join(output_dir, "split")
+    os.mkdir(split_dir)
+    nj = ngpu * njob
+    wav_scp_file = os.path.join(params["data_dir"], "wav.scp")
+    with open(wav_scp_file) as f:
+        lines = f.readlines()
+    num_lines = len(lines)
+    num_job_lines = num_lines // nj
+    start = 0
+    for i in range(nj):
+        end = start + num_job_lines
+        file = os.path.join(split_dir, "wav.{}.scp".format(str(i + 1)))
+        with open(file, "w") as f:
+            if i == nj - 1:
+                f.writelines(lines[start:])
+            else:
+                f.writelines(lines[start:end])
+        start = end
+
+    p = Pool(nj)
+    for i in range(nj):
+        p.apply_async(modelscope_infer_core,
+                      args=(output_dir, split_dir, njob, str(i + 1)))
+    p.close()
+    p.join()
+
+    # combine decoding results
+    best_recog_path = os.path.join(output_dir, "1best_recog")
+    os.mkdir(best_recog_path)
+    files = ["text", "token", "score"]
+    for file in files:
+        with open(os.path.join(best_recog_path, file), "w") as f:
+            for i in range(nj):
+                job_file = os.path.join(output_dir, "output.{}/1best_recog".format(str(i + 1)), file)
+                with open(job_file) as f_job:
+                    lines = f_job.readlines()
+                f.writelines(lines)
+
+    # If text exists, compute CER
+    text_in = os.path.join(params["data_dir"], "text")
+    if os.path.exists(text_in):
+        text_proc_file = os.path.join(best_recog_path, "token")
+        compute_wer(text_in, text_proc_file, os.path.join(best_recog_path, "text.cer"))
+
+
+if __name__ == "__main__":
+    params = {}
+    params["data_dir"] = "./data/test"
+    params["output_dir"] = "./results"
+    params["ngpu"] = 1
+    params["njob"] = 1
+    modelscope_infer(params)
````
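The job-to-GPU pinning in `modelscope_infer_core` relies on 1-based job indices: integer division groups `njob` consecutive jobs onto each GPU. A quick check of the arithmetic, with illustrative values:

```python
ngpu, njob = 2, 3                  # illustrative values
for idx in range(1, ngpu * njob + 1):
    gpu_id = (idx - 1) // njob     # same formula as in modelscope_infer_core
    print("job {} -> GPU {}".format(idx, gpu_id))
# jobs 1-3 -> GPU 0, jobs 4-6 -> GPU 1
```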
A new `infer_after_finetune.py` assembles a decodable model directory from the cached pretrained model plus a chosen finetuned checkpoint, then decodes the test set:

````diff
infer_after_finetune.py (new file)
@@ -0,0 +1,53 @@
+import json
+import os
+import shutil
+
+from modelscope.pipelines import pipeline
+from modelscope.utils.constant import Tasks
+
+from funasr.utils.compute_wer import compute_wer
+
+
+def modelscope_infer_after_finetune(params):
+    # prepare for decoding
+    pretrained_model_path = os.path.join(os.environ["HOME"], ".cache/modelscope/hub", params["modelscope_model_name"])
+    for file_name in params["required_files"]:
+        if file_name == "configuration.json":
+            with open(os.path.join(pretrained_model_path, file_name)) as f:
+                config_dict = json.load(f)
+                config_dict["model"]["am_model_name"] = params["decoding_model_name"]
+            with open(os.path.join(params["output_dir"], "configuration.json"), "w") as f:
+                json.dump(config_dict, f, indent=4, separators=(',', ': '))
+        else:
+            shutil.copy(os.path.join(pretrained_model_path, file_name),
+                        os.path.join(params["output_dir"], file_name))
+    decoding_path = os.path.join(params["output_dir"], "decode_results")
+    if os.path.exists(decoding_path):
+        shutil.rmtree(decoding_path)
+    os.mkdir(decoding_path)
+
+    # decoding
+    inference_pipeline = pipeline(
+        task=Tasks.auto_speech_recognition,
+        model=params["output_dir"],
+        output_dir=decoding_path,
+        batch_size=1
+    )
+    audio_in = os.path.join(params["data_dir"], "wav.scp")
+    inference_pipeline(audio_in=audio_in)
+
+    # compute CER if ground-truth text is provided
+    text_in = os.path.join(params["data_dir"], "text")
+    if os.path.exists(text_in):
+        text_proc_file = os.path.join(decoding_path, "1best_recog/token")
+        compute_wer(text_in, text_proc_file, os.path.join(decoding_path, "text.cer"))
+
+
+if __name__ == '__main__':
+    params = {}
+    params["modelscope_model_name"] = "damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online"
+    params["required_files"] = ["am.mvn", "decoding.yaml", "configuration.json"]
+    params["output_dir"] = "./checkpoint"
+    params["data_dir"] = "./data/test"
+    params["decoding_model_name"] = "20epoch.pth"
+    modelscope_infer_after_finetune(params)
````
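To decode with a different finetuned checkpoint, only `decoding_model_name` needs to change; the script patches `am_model_name` in `configuration.json` accordingly. A usage sketch, assuming the finetuned `.pth` files sit in `output_dir` and using the averaged checkpoint named in the README example:

```python
params = {
    "modelscope_model_name": "damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online",
    "required_files": ["am.mvn", "decoding.yaml", "configuration.json"],
    "output_dir": "./checkpoint",                    # finetune work dir holding the .pth checkpoints
    "data_dir": "./data/test",                       # contains wav.scp and, optionally, text
    "decoding_model_name": "valid.cer_ctc.ave.pth",  # averaged checkpoint from the README example
}
modelscope_infer_after_finetune(params)
```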
The offline recipe receives the identical set of changes (the same `README.md` edits, `finetune.py` cleanup, `infer.py` rewrite, and new `infer_after_finetune.py`); the only difference is that the online and offline model IDs are swapped. In the offline recipe's `finetune.py`:

````diff
finetune.py (offline recipe)
@@ -23,8 +25,7 @@ def modelscope_finetune(params):
 if __name__ == '__main__':
-    from funasr.utils.modelscope_param import modelscope_args
-    params = modelscope_args(model="damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online", data_path="./data")
+    params = modelscope_args(model="damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-offline", data_path="./data")
````

and in its `infer.py`:

````diff
-        model="damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-online",
+        model="damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-offline",
````

while the new `infer_after_finetune.py` sets `params["modelscope_model_name"]` to `"damo/speech_UniASR_asr_2pass-zh-cn-8k-common-vocab3445-pytorch-offline"`.