Merge pull request #281 from alibaba-damo-academy/dev_lzr

update paraformer_large inference recipe
This commit is contained in:
zhifu gao 2023-03-22 15:19:54 +08:00 committed by GitHub
commit e4bf138dd8
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
15 changed files with 129 additions and 584 deletions

View File

@@ -1,30 +0,0 @@
# ModelScope Model
## How to finetune and infer using a pretrained Paraformer-large Model
### Finetune
- Modify the finetuning parameters in `finetune.py`
- <strong>output_dir:</strong> # result dir
- <strong>data_dir:</strong> # the dataset dir must include the files train/wav.scp, train/text, validation/wav.scp, and validation/text (Kaldi-style list files; see the example below)
- <strong>batch_bins:</strong> # batch size
- <strong>max_epoch:</strong> # number of training epochs
- <strong>lr:</strong> # learning rate
- Then you can run the pipeline to finetune with:
```shell
python finetune.py
```
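For reference, `wav.scp` and `text` are Kaldi-style list files in which each line pairs an utterance ID with a wav path or a transcript. A minimal sketch with hypothetical IDs and paths:
```
# train/wav.scp — <utterance-id> <wav path>
utt001 /path/to/data/train/utt001.wav
utt002 /path/to/data/train/utt002.wav

# train/text — <utterance-id> <transcript>
utt001 今天天气怎么样
utt002 欢迎使用语音识别
```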
### Inference
After finetuning, you can use the finetuned model for inference directly.
- Set the parameters in `infer.py`
- <strong>data_dir:</strong> # the dataset dir
- <strong>output_dir:</strong> # result dir
- Then you can run the pipeline to infer with:
```shell
python infer.py
```

View File

@@ -1,23 +0,0 @@
# Paraformer-Large
- Model link: <https://www.modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-aishell1-vocab8404-pytorch/summary>
- Model size: 220M
# Environments
- date: `Fri Feb 10 13:34:24 CST 2023`
- python version: `3.7.12`
- FunASR version: `0.1.6`
- pytorch version: `pytorch 1.7.0`
- Git hash: ``
- Commit date: ``
# Benchmark Results
## AISHELL-1
- Decode config:
- Decode without CTC
- Decode without LM
| testset | base model CER(%) | finetune model CER(%) |
|:-------:|:-----------------:|:---------------------:|
| dev     | 1.75              | 1.62                  |
| test    | 1.95              | 1.78                  |

View File

@@ -1,36 +0,0 @@
import os
from modelscope.metainfo import Trainers
from modelscope.trainers import build_trainer
from funasr.datasets.ms_dataset import MsDataset
from funasr.utils.modelscope_param import modelscope_args
def modelscope_finetune(params):
if not os.path.exists(params.output_dir):
os.makedirs(params.output_dir, exist_ok=True)
# dataset split ["train", "validation"]
ds_dict = MsDataset.load(params.data_path)
kwargs = dict(
model=params.model,
data_dir=ds_dict,
dataset_type=params.dataset_type,
work_dir=params.output_dir,
batch_bins=params.batch_bins,
max_epoch=params.max_epoch,
lr=params.lr)
trainer = build_trainer(Trainers.speech_asr_trainer, default_args=kwargs)
trainer.train()
if __name__ == '__main__':
params = modelscope_args(model="damo/speech_paraformer-large_asr_nat-zh-cn-16k-aishell1-vocab8404-pytorch", data_path="./data")
    params.output_dir = "./checkpoint"               # model save path
    params.data_path = "./example_data/"             # data path
    params.dataset_type = "small"                    # use "small" for small datasets; if the data exceeds 1000 hours, use "large"
    params.batch_bins = 2000                         # batch size; the unit of batch_bins is fbank feature frames when dataset_type="small" and milliseconds when dataset_type="large"
    params.max_epoch = 50                            # maximum number of training epochs
    params.lr = 0.00005                              # learning rate
modelscope_finetune(params)

View File

@@ -1,101 +0,0 @@
import os
import shutil
from multiprocessing import Pool
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
from funasr.utils.compute_wer import compute_wer
def modelscope_infer_core(output_dir, split_dir, njob, idx, batch_size, ngpu, model):
output_dir_job = os.path.join(output_dir, "output.{}".format(idx))
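    # map the 1-based job index to a GPU id; CPU decoding keeps gpu_id = -1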
if ngpu > 0:
use_gpu = 1
gpu_id = int(idx) - 1
else:
use_gpu = 0
gpu_id = -1
if "CUDA_VISIBLE_DEVICES" in os.environ.keys():
gpu_list = os.environ['CUDA_VISIBLE_DEVICES'].split(",")
os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_list[gpu_id])
else:
os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)
    inference_pipeline = pipeline(
task=Tasks.auto_speech_recognition,
model=model,
output_dir=output_dir_job,
batch_size=batch_size,
ngpu=use_gpu,
)
audio_in = os.path.join(split_dir, "wav.{}.scp".format(idx))
    inference_pipeline(audio_in=audio_in)
def modelscope_infer(params):
# prepare for multi-GPU decoding
ngpu = params["ngpu"]
njob = params["njob"]
batch_size = params["batch_size"]
output_dir = params["output_dir"]
model = params["model"]
if os.path.exists(output_dir):
shutil.rmtree(output_dir)
os.mkdir(output_dir)
split_dir = os.path.join(output_dir, "split")
os.mkdir(split_dir)
    if ngpu > 0:
        nj = ngpu
    else:
        nj = njob
wav_scp_file = os.path.join(params["data_dir"], "wav.scp")
with open(wav_scp_file) as f:
lines = f.readlines()
num_lines = len(lines)
num_job_lines = num_lines // nj
start = 0
for i in range(nj):
end = start + num_job_lines
file = os.path.join(split_dir, "wav.{}.scp".format(str(i + 1)))
with open(file, "w") as f:
if i == nj - 1:
f.writelines(lines[start:])
else:
f.writelines(lines[start:end])
start = end
p = Pool(nj)
for i in range(nj):
p.apply_async(modelscope_infer_core,
args=(output_dir, split_dir, njob, str(i + 1), batch_size, ngpu, model))
p.close()
p.join()
# combine decoding results
best_recog_path = os.path.join(output_dir, "1best_recog")
os.mkdir(best_recog_path)
files = ["text", "token", "score"]
for file in files:
with open(os.path.join(best_recog_path, file), "w") as f:
for i in range(nj):
job_file = os.path.join(output_dir, "output.{}/1best_recog".format(str(i + 1)), file)
with open(job_file) as f_job:
lines = f_job.readlines()
f.writelines(lines)
# If text exists, compute CER
text_in = os.path.join(params["data_dir"], "text")
if os.path.exists(text_in):
text_proc_file = os.path.join(best_recog_path, "token")
compute_wer(text_in, text_proc_file, os.path.join(best_recog_path, "text.cer"))
if __name__ == "__main__":
params = {}
params["model"] = "damo/speech_paraformer-large_asr_nat-zh-cn-16k-aishell1-vocab8404-pytorch"
params["data_dir"] = "./data/test"
params["output_dir"] = "./results"
params["ngpu"] = 1 # if ngpu > 0, will use gpu decoding
params["njob"] = 1 # if ngpu = 0, will use cpu decoding
params["batch_size"] = 64
modelscope_infer(params)
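As a usage sketch (hypothetical, not part of the recipe): with `params["ngpu"] = 2` set above, the script splits `wav.scp` into two shards and decodes one shard per visible GPU:
```shell
# hypothetical 2-GPU run; set params["ngpu"] = 2 in infer.py first
CUDA_VISIBLE_DEVICES=0,1 python infer.py
```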

View File

@@ -1,48 +0,0 @@
import os
import shutil
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
from modelscope.hub.snapshot_download import snapshot_download
from funasr.utils.compute_wer import compute_wer
def modelscope_infer_after_finetune(params):
# prepare for decoding
    try:
        pretrained_model_path = snapshot_download(params["modelscope_model_name"], cache_dir=params["output_dir"])
    except Exception:
        raise RuntimeError("Please download the pretrained model from ModelScope first.")
shutil.copy(os.path.join(params["output_dir"], params["decoding_model_name"]), os.path.join(pretrained_model_path, "model.pb"))
decoding_path = os.path.join(params["output_dir"], "decode_results")
if os.path.exists(decoding_path):
shutil.rmtree(decoding_path)
os.mkdir(decoding_path)
# decoding
inference_pipeline = pipeline(
task=Tasks.auto_speech_recognition,
model=pretrained_model_path,
output_dir=decoding_path,
batch_size=params["batch_size"]
)
audio_in = os.path.join(params["data_dir"], "wav.scp")
inference_pipeline(audio_in=audio_in)
    # compute CER if ground-truth text is provided
text_in = os.path.join(params["data_dir"], "text")
if os.path.exists(text_in):
text_proc_file = os.path.join(decoding_path, "1best_recog/token")
compute_wer(text_in, text_proc_file, os.path.join(decoding_path, "text.cer"))
if __name__ == '__main__':
params = {}
params["modelscope_model_name"] = "damo/speech_paraformer-large_asr_nat-zh-cn-16k-aishell1-vocab8404-pytorch"
params["output_dir"] = "./checkpoint"
params["data_dir"] = "./data/test"
params["decoding_model_name"] = "valid.acc.ave_10best.pb"
params["batch_size"] = 64
modelscope_infer_after_finetune(params)
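For orientation, a sketch of the expected `output_dir` layout before decoding, assuming the defaults above (the ModelScope cache path is indicative and may differ across versions):
```
./checkpoint/
├── valid.acc.ave_10best.pb      # finetuned checkpoint produced by finetuning
└── damo/speech_paraformer-large_asr_nat-zh-cn-16k-aishell1-vocab8404-pytorch/
    └── model.pb                 # replaced by a copy of the checkpoint above
```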

View File

@@ -1,30 +0,0 @@
# ModelScope Model
## How to finetune and infer using a pretrained Paraformer-large Model
### Finetune
- Modify the finetuning parameters in `finetune.py`
- <strong>output_dir:</strong> # result dir
- <strong>data_dir:</strong> # the dataset dir must include the files train/wav.scp, train/text, validation/wav.scp, and validation/text (Kaldi-style list files)
- <strong>batch_bins:</strong> # batch size
- <strong>max_epoch:</strong> # number of training epochs
- <strong>lr:</strong> # learning rate
- Then you can run the pipeline to finetune with:
```shell
python finetune.py
```
### Inference
After finetuning, you can use the finetuned model for inference directly.
- Set the parameters in `infer.py`
- <strong>data_dir:</strong> # the dataset dir
- <strong>output_dir:</strong> # result dir
- Then you can run the pipeline to infer with:
```shell
python infer.py
```

View File

@@ -1,25 +0,0 @@
# Paraformer-Large
- Model link: <https://www.modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-aishell2-vocab8404-pytorch/summary>
- Model size: 220M
# Environments
- date: `Fri Feb 10 13:34:24 CST 2023`
- python version: `3.7.12`
- FunASR version: `0.1.6`
- pytorch version: `pytorch 1.7.0`
- Git hash: ``
- Commit date: ``
# Benchmark Results
## AISHELL-2
- Decode config:
- Decode without CTC
- Decode without LM
| testset      | base model CER(%) | finetune model CER(%) |
|:------------:|:-----------------:|:---------------------:|
| dev_ios      | 2.80              | 2.60                  |
| test_android | 3.13              | 2.84                  |
| test_ios     | 2.85              | 2.82                  |
| test_mic     | 3.06              | 2.88                  |

View File

@@ -1,36 +0,0 @@
import os
from modelscope.metainfo import Trainers
from modelscope.trainers import build_trainer
from funasr.datasets.ms_dataset import MsDataset
from funasr.utils.modelscope_param import modelscope_args
def modelscope_finetune(params):
if not os.path.exists(params.output_dir):
os.makedirs(params.output_dir, exist_ok=True)
# dataset split ["train", "validation"]
ds_dict = MsDataset.load(params.data_path)
kwargs = dict(
model=params.model,
data_dir=ds_dict,
dataset_type=params.dataset_type,
work_dir=params.output_dir,
batch_bins=params.batch_bins,
max_epoch=params.max_epoch,
lr=params.lr)
trainer = build_trainer(Trainers.speech_asr_trainer, default_args=kwargs)
trainer.train()
if __name__ == '__main__':
params = modelscope_args(model="damo/speech_paraformer-large_asr_nat-zh-cn-16k-aishell2-vocab8404-pytorch", data_path="./data")
    params.output_dir = "./checkpoint"               # model save path
    params.data_path = "./example_data/"             # data path
    params.dataset_type = "small"                    # use "small" for small datasets; if the data exceeds 1000 hours, use "large"
    params.batch_bins = 2000                         # batch size; the unit of batch_bins is fbank feature frames when dataset_type="small" and milliseconds when dataset_type="large"
    params.max_epoch = 50                            # maximum number of training epochs
    params.lr = 0.00005                              # learning rate
modelscope_finetune(params)

View File

@@ -1,101 +0,0 @@
import os
import shutil
from multiprocessing import Pool
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
from funasr.utils.compute_wer import compute_wer
def modelscope_infer_core(output_dir, split_dir, njob, idx, batch_size, ngpu, model):
output_dir_job = os.path.join(output_dir, "output.{}".format(idx))
if ngpu > 0:
use_gpu = 1
gpu_id = int(idx) - 1
else:
use_gpu = 0
gpu_id = -1
if "CUDA_VISIBLE_DEVICES" in os.environ.keys():
gpu_list = os.environ['CUDA_VISIBLE_DEVICES'].split(",")
os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_list[gpu_id])
else:
os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)
    inference_pipeline = pipeline(
task=Tasks.auto_speech_recognition,
model=model,
output_dir=output_dir_job,
batch_size=batch_size,
ngpu=use_gpu,
)
audio_in = os.path.join(split_dir, "wav.{}.scp".format(idx))
    inference_pipeline(audio_in=audio_in)
def modelscope_infer(params):
# prepare for multi-GPU decoding
ngpu = params["ngpu"]
njob = params["njob"]
batch_size = params["batch_size"]
output_dir = params["output_dir"]
model = params["model"]
if os.path.exists(output_dir):
shutil.rmtree(output_dir)
os.mkdir(output_dir)
split_dir = os.path.join(output_dir, "split")
os.mkdir(split_dir)
    if ngpu > 0:
        nj = ngpu
    else:
        nj = njob
wav_scp_file = os.path.join(params["data_dir"], "wav.scp")
with open(wav_scp_file) as f:
lines = f.readlines()
num_lines = len(lines)
num_job_lines = num_lines // nj
start = 0
for i in range(nj):
end = start + num_job_lines
file = os.path.join(split_dir, "wav.{}.scp".format(str(i + 1)))
with open(file, "w") as f:
if i == nj - 1:
f.writelines(lines[start:])
else:
f.writelines(lines[start:end])
start = end
p = Pool(nj)
for i in range(nj):
p.apply_async(modelscope_infer_core,
args=(output_dir, split_dir, njob, str(i + 1), batch_size, ngpu, model))
p.close()
p.join()
# combine decoding results
best_recog_path = os.path.join(output_dir, "1best_recog")
os.mkdir(best_recog_path)
files = ["text", "token", "score"]
for file in files:
with open(os.path.join(best_recog_path, file), "w") as f:
for i in range(nj):
job_file = os.path.join(output_dir, "output.{}/1best_recog".format(str(i + 1)), file)
with open(job_file) as f_job:
lines = f_job.readlines()
f.writelines(lines)
# If text exists, compute CER
text_in = os.path.join(params["data_dir"], "text")
if os.path.exists(text_in):
text_proc_file = os.path.join(best_recog_path, "token")
compute_wer(text_in, text_proc_file, os.path.join(best_recog_path, "text.cer"))
if __name__ == "__main__":
params = {}
params["model"] = "damo/speech_paraformer-large_asr_nat-zh-cn-16k-aishell2-vocab8404-pytorch"
params["data_dir"] = "./data/test"
params["output_dir"] = "./results"
params["ngpu"] = 1 # if ngpu > 0, will use gpu decoding
params["njob"] = 1 # if ngpu = 0, will use cpu decoding
params["batch_size"] = 64
modelscope_infer(params)

View File

@@ -1,48 +0,0 @@
import os
import shutil
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
from modelscope.hub.snapshot_download import snapshot_download
from funasr.utils.compute_wer import compute_wer
def modelscope_infer_after_finetune(params):
# prepare for decoding
    try:
        pretrained_model_path = snapshot_download(params["modelscope_model_name"], cache_dir=params["output_dir"])
    except Exception:
        raise RuntimeError("Please download the pretrained model from ModelScope first.")
shutil.copy(os.path.join(params["output_dir"], params["decoding_model_name"]), os.path.join(pretrained_model_path, "model.pb"))
decoding_path = os.path.join(params["output_dir"], "decode_results")
if os.path.exists(decoding_path):
shutil.rmtree(decoding_path)
os.mkdir(decoding_path)
# decoding
inference_pipeline = pipeline(
task=Tasks.auto_speech_recognition,
model=pretrained_model_path,
output_dir=decoding_path,
batch_size=params["batch_size"]
)
audio_in = os.path.join(params["data_dir"], "wav.scp")
inference_pipeline(audio_in=audio_in)
    # compute CER if ground-truth text is provided
text_in = os.path.join(params["data_dir"], "text")
if os.path.exists(text_in):
text_proc_file = os.path.join(decoding_path, "1best_recog/token")
compute_wer(text_in, text_proc_file, os.path.join(decoding_path, "text.cer"))
if __name__ == '__main__':
params = {}
params["modelscope_model_name"] = "damo/speech_paraformer-large_asr_nat-zh-cn-16k-aishell2-vocab8404-pytorch"
params["output_dir"] = "./checkpoint"
params["data_dir"] = "./data/test"
params["decoding_model_name"] = "valid.acc.ave_10best.pb"
params["batch_size"] = 64
modelscope_infer_after_finetune(params)

View File

@@ -21,23 +21,26 @@
After finetuning, you can use the finetuned model for inference directly.
- Set the parameters in `infer.py`
- Set the parameters in `infer.sh`
- <strong>model:</strong> # model name on ModelScope
- <strong>data_dir:</strong> # the dataset dir needs to include `test/wav.scp`; if `test/text` also exists, the CER will be computed
- <strong>output_dir:</strong> # result dir
- <strong>ngpu:</strong> # the number of GPUs for decoding, if `ngpu` > 0, use GPU decoding
- <strong>njob:</strong> # the number of jobs for CPU decoding, if `ngpu` = 0, use CPU decoding, please set `njob`
- <strong>batch_size:</strong> # batch size for inference
- <strong>gpu_inference:</strong> # whether to perform GPU decoding; set false for CPU decoding
- <strong>gpuid_list:</strong> # the GPUs to use, e.g., gpuid_list="0,1"
- <strong>njob:</strong> # the number of parallel jobs for CPU decoding; set it when `gpu_inference`=false (see the CPU example below)
- Then you can run the pipeline to infer with:
```shell
python infer.py
sh infer.sh
```
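For example, a CPU-only run is a small edit (a sketch; `infer.sh` takes no command-line arguments, so change the variables at the top of the script before running it):
```shell
# in infer.sh (hypothetical CPU setup):
gpu_inference=false   # decode on CPU
njob=8                # number of parallel CPU jobs
```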
- Results
The decoding results are written to `$output_dir/1best_recog/text.cer`, which contains the recognition result of each sample and the CER of the whole test set.
If you decode the SpeechIO test sets, you can apply text normalization with `stage`=3; `DETAILS.txt` and `RESULTS.txt` then record the results and the CER after text normalization.
### Inference using local finetuned model
- Modify the inference-related parameters in `infer_after_finetune.py`

View File

@@ -17,22 +17,22 @@
- Decode without CTC
- Decode without LM
| testset | CER(%)|
|:---------:|:-----:|
| dev | 1.75 |
| test | 1.95 |
| testset | Pretrain model CER(%) | [Finetune model](https://www.modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-aishell1-vocab8404-pytorch/summary) CER(%) |
|:---------:|:-------------:|:-------------:|
| dev | 1.75 |1.62 |
| test | 1.95 |1.78 |
## AISHELL-2
- Decode config:
- Decode without CTC
- Decode without LM
| testset | CER(%)|
|:------------:|:-----:|
| dev_ios | 2.80 |
| test_android | 3.13 |
| test_ios | 2.85 |
| test_mic | 3.06 |
| testset | Pretrain model CER(%) | [Finetune model](https://www.modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-aishell2-vocab8404-pytorch/summary) CER(%) |
|:------------:|:-------------:|:------------:|
| dev_ios | 2.80 |2.60 |
| test_android | 3.13 |2.84 |
| test_ios | 2.85 |2.82 |
| test_mic | 3.06 |2.88 |
## Wenetspeech
- Decode config:

View File

@@ -1,101 +1,25 @@
import os
import shutil
from multiprocessing import Pool
import argparse
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
from funasr.utils.compute_wer import compute_wer
def modelscope_infer_core(output_dir, split_dir, njob, idx, batch_size, ngpu, model):
output_dir_job = os.path.join(output_dir, "output.{}".format(idx))
if ngpu > 0:
use_gpu = 1
gpu_id = int(idx) - 1
else:
use_gpu = 0
gpu_id = -1
if "CUDA_VISIBLE_DEVICES" in os.environ.keys():
gpu_list = os.environ['CUDA_VISIBLE_DEVICES'].split(",")
os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_list[gpu_id])
else:
os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)
inference_pipline = pipeline(
def modelscope_infer(args):
os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpuid)
inference_pipeline = pipeline(
task=Tasks.auto_speech_recognition,
model=model,
output_dir=output_dir_job,
batch_size=batch_size,
ngpu=use_gpu,
model=args.model,
output_dir=args.output_dir,
batch_size=args.batch_size,
)
audio_in = os.path.join(split_dir, "wav.{}.scp".format(idx))
inference_pipline(audio_in=audio_in)
def modelscope_infer(params):
# prepare for multi-GPU decoding
ngpu = params["ngpu"]
njob = params["njob"]
batch_size = params["batch_size"]
output_dir = params["output_dir"]
model = params["model"]
if os.path.exists(output_dir):
shutil.rmtree(output_dir)
os.mkdir(output_dir)
split_dir = os.path.join(output_dir, "split")
os.mkdir(split_dir)
if ngpu > 0:
nj = ngpu
elif ngpu == 0:
nj = njob
wav_scp_file = os.path.join(params["data_dir"], "wav.scp")
with open(wav_scp_file) as f:
lines = f.readlines()
num_lines = len(lines)
num_job_lines = num_lines // nj
start = 0
for i in range(nj):
end = start + num_job_lines
file = os.path.join(split_dir, "wav.{}.scp".format(str(i + 1)))
with open(file, "w") as f:
if i == nj - 1:
f.writelines(lines[start:])
else:
f.writelines(lines[start:end])
start = end
p = Pool(nj)
for i in range(nj):
p.apply_async(modelscope_infer_core,
args=(output_dir, split_dir, njob, str(i + 1), batch_size, ngpu, model))
p.close()
p.join()
# combine decoding results
best_recog_path = os.path.join(output_dir, "1best_recog")
os.mkdir(best_recog_path)
files = ["text", "token", "score"]
for file in files:
with open(os.path.join(best_recog_path, file), "w") as f:
for i in range(nj):
job_file = os.path.join(output_dir, "output.{}/1best_recog".format(str(i + 1)), file)
with open(job_file) as f_job:
lines = f_job.readlines()
f.writelines(lines)
# If text exists, compute CER
text_in = os.path.join(params["data_dir"], "text")
if os.path.exists(text_in):
text_proc_file = os.path.join(best_recog_path, "token")
compute_wer(text_in, text_proc_file, os.path.join(best_recog_path, "text.cer"))
inference_pipeline(audio_in=args.audio_in)
if __name__ == "__main__":
params = {}
params["model"] = "damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
params["data_dir"] = "./data/test"
params["output_dir"] = "./results"
params["ngpu"] = 1 # if ngpu > 0, will use gpu decoding
params["njob"] = 1 # if ngpu = 0, will use cpu decoding
params["batch_size"] = 64
modelscope_infer(params)
parser = argparse.ArgumentParser()
    parser.add_argument('--model', type=str, default="damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch")
parser.add_argument('--audio_in', type=str, default="./data/test")
parser.add_argument('--output_dir', type=str, default="./results/")
parser.add_argument('--batch_size', type=int, default=64)
parser.add_argument('--gpuid', type=str, default="0")
args = parser.parse_args()
modelscope_infer(args)
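A single-job invocation sketch based on the argparse flags above (the paths and GPU id are placeholders; `infer.sh` below drives this script the same way):
```shell
python infer.py \
    --model "damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch" \
    --audio_in "./data/test/wav.scp" \
    --output_dir "./results/output.1" \
    --batch_size 64 \
    --gpuid 0
```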

View File

@@ -0,0 +1,95 @@
#!/usr/bin/env bash
set -e
set -u
set -o pipefail
stage=1
stop_stage=2
model="damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
data_dir="./data/test"
output_dir="./results"
batch_size=64
gpu_inference=true    # whether to perform GPU decoding; set false for CPU decoding
gpuid_list="0,1"      # the GPUs to use, e.g., gpuid_list="0,1"
njob=4                # the number of parallel jobs for CPU decoding; used when gpu_inference=false
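# nj below: one job per GPU when gpu_inference=true, otherwise njob CPU workers (each pinned to device -1)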
if ${gpu_inference}; then
nj=$(echo $gpuid_list | awk -F "," '{print NF}')
else
nj=$njob
batch_size=1
gpuid_list=""
for JOB in $(seq ${nj}); do
gpuid_list=$gpuid_list"-1,"
done
fi
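# split wav.scp into nj shards, one per decoding job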
mkdir -p $output_dir/split
split_scps=""
for JOB in $(seq ${nj}); do
split_scps="$split_scps $output_dir/split/wav.$JOB.scp"
done
perl utils/split_scp.pl ${data_dir}/wav.scp ${split_scps}
if [ $stage -le 1 ] && [ $stop_stage -ge 1 ];then
echo "Decoding ..."
gpuid_list_array=(${gpuid_list//,/ })
for JOB in $(seq ${nj}); do
{
id=$((JOB-1))
gpuid=${gpuid_list_array[$id]}
mkdir -p ${output_dir}/output.$JOB
python infer.py \
--model ${model} \
--audio_in ${output_dir}/split/wav.$JOB.scp \
--output_dir ${output_dir}/output.$JOB \
--batch_size ${batch_size} \
--gpuid ${gpuid}
}&
done
wait
mkdir -p ${output_dir}/1best_recog
for f in token score text; do
if [ -f "${output_dir}/output.1/1best_recog/${f}" ]; then
for i in $(seq "${nj}"); do
cat "${output_dir}/output.${i}/1best_recog/${f}"
done | sort -k1 >"${output_dir}/1best_recog/${f}"
fi
done
fi
if [ $stage -le 2 ] && [ $stop_stage -ge 2 ];then
echo "Computing WER ..."
python utils/proce_text.py ${output_dir}/1best_recog/text ${output_dir}/1best_recog/text.proc
python utils/proce_text.py ${data_dir}/text ${data_dir}/text.proc
python utils/compute_wer.py ${data_dir}/text.proc ${output_dir}/1best_recog/text.proc ${output_dir}/1best_recog/text.cer
tail -n 3 ${output_dir}/1best_recog/text.cer
fi
if [ $stage -le 3 ] && [ $stop_stage -ge 3 ];then
echo "SpeechIO TIOBE textnorm"
echo "$0 --> Normalizing REF text ..."
./utils/textnorm_zh.py \
--has_key --to_upper \
${data_dir}/text \
${data_dir}/ref.txt
echo "$0 --> Normalizing HYP text ..."
./utils/textnorm_zh.py \
--has_key --to_upper \
${output_dir}/1best_recog/text.proc \
${output_dir}/1best_recog/rec.txt
grep -v $'\t$' ${output_dir}/1best_recog/rec.txt > ${output_dir}/1best_recog/rec_non_empty.txt
echo "$0 --> computing WER/CER and alignment ..."
./utils/error_rate_zh \
--tokenizer char \
--ref ${data_dir}/ref.txt \
--hyp ${output_dir}/1best_recog/rec_non_empty.txt \
${output_dir}/1best_recog/DETAILS.txt | tee ${output_dir}/1best_recog/RESULTS.txt
rm -rf ${output_dir}/1best_recog/rec.txt ${output_dir}/1best_recog/rec_non_empty.txt
fi

View File

@@ -0,0 +1 @@
../../../../egs/aishell/transformer/utils