From e0401c12d8813036bbd7813383450a15b946d3c3 Mon Sep 17 00:00:00 2001
From: R1ckShi <2698127294@qq.com>
Date: Tue, 31 Jan 2023 15:30:12 +0800
Subject: [PATCH 1/2] egs for paraformer tiny
---
.../README.md | 30 +++++++
.../data/test/wav.scp | 3 +
.../infer.py | 88 +++++++++++++++++++
3 files changed, 121 insertions(+)
create mode 100644 egs_modelscope/asr/paraformer/speech_paraformer-tiny-commandword_asr_nat-zh-cn-16k-vocab544-pytorch/README.md
create mode 100644 egs_modelscope/asr/paraformer/speech_paraformer-tiny-commandword_asr_nat-zh-cn-16k-vocab544-pytorch/data/test/wav.scp
create mode 100644 egs_modelscope/asr/paraformer/speech_paraformer-tiny-commandword_asr_nat-zh-cn-16k-vocab544-pytorch/infer.py
diff --git a/egs_modelscope/asr/paraformer/speech_paraformer-tiny-commandword_asr_nat-zh-cn-16k-vocab544-pytorch/README.md b/egs_modelscope/asr/paraformer/speech_paraformer-tiny-commandword_asr_nat-zh-cn-16k-vocab544-pytorch/README.md
new file mode 100644
index 000000000..1587d3d5d
--- /dev/null
+++ b/egs_modelscope/asr/paraformer/speech_paraformer-tiny-commandword_asr_nat-zh-cn-16k-vocab544-pytorch/README.md
@@ -0,0 +1,30 @@
+# ModelScope Model
+
+## How to finetune and infer using a pretrained Paraformer-tiny command-word Model
+
+### Finetune
+
+- Modify the finetuning-related parameters in `finetune.py` (see the parameter sketch below)
+  - output_dir: # result dir
+  - data_dir: # the dataset dir; it needs to include the files train/wav.scp, train/text, validation/wav.scp and validation/text
+  - batch_bins: # batch size
+  - max_epoch: # number of training epochs
+  - lr: # learning rate
+
+- Then you can run the pipeline to finetune with:
+```shell
+python finetune.py
+```
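+
+As a rough illustration only (`finetune.py` itself is not part of this patch, and its exact structure may differ), the parameters listed above could be collected in a dict, mirroring the `params` pattern used by `infer.py`:
+```python
+# Hypothetical sketch: names follow the parameter list above; values are placeholders.
+params = {}
+params["output_dir"] = "./checkpoint"   # result dir
+params["data_dir"] = "./data"           # contains train/ and validation/ wav.scp + text
+params["batch_bins"] = 2000             # batch size
+params["max_epoch"] = 20                # number of training epochs
+params["lr"] = 0.0002                   # learning rate
+```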
+
+### Inference
+
+Alternatively, you can use the pretrained or finetuned model for inference directly.
+
+- Set the inference-related parameters in `infer.py`
+  - data_dir: # the dataset dir; it needs to include wav.scp, and also text if you want CER to be computed
+  - output_dir: # result dir
+  - ngpu: # number of GPUs to decode with
+  - njob: # number of decoding jobs per GPU
+
+- Then you can run the pipeline to infer with:
+```shell
+python infer.py
+```
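+
+For reference, below is a minimal single-process sketch of the same pipeline call that `infer.py` wraps (paths are placeholders):
+```python
+from modelscope.pipelines import pipeline
+from modelscope.utils.constant import Tasks
+
+inference_pipeline = pipeline(
+    task=Tasks.auto_speech_recognition,
+    model="damo/speech_paraformer-tiny-commandword_asr_nat-zh-cn-16k-vocab544-pytorch",
+    output_dir="./results",
+)
+# audio_in may point to a Kaldi-style wav.scp list, as in infer.py
+inference_pipeline(audio_in="./data/test/wav.scp")
+```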
diff --git a/egs_modelscope/asr/paraformer/speech_paraformer-tiny-commandword_asr_nat-zh-cn-16k-vocab544-pytorch/data/test/wav.scp b/egs_modelscope/asr/paraformer/speech_paraformer-tiny-commandword_asr_nat-zh-cn-16k-vocab544-pytorch/data/test/wav.scp
new file mode 100644
index 000000000..1e194c429
--- /dev/null
+++ b/egs_modelscope/asr/paraformer/speech_paraformer-tiny-commandword_asr_nat-zh-cn-16k-vocab544-pytorch/data/test/wav.scp
@@ -0,0 +1,3 @@
+001 /Users/shixian/Downloads/0001_ACRD0001.wav
+002 /Users/shixian/Downloads/0001_ACRD0002.wav
+013 /Users/shixian/Downloads/0001_ACRD0013.wav
diff --git a/egs_modelscope/asr/paraformer/speech_paraformer-tiny-commandword_asr_nat-zh-cn-16k-vocab544-pytorch/infer.py b/egs_modelscope/asr/paraformer/speech_paraformer-tiny-commandword_asr_nat-zh-cn-16k-vocab544-pytorch/infer.py
new file mode 100644
index 000000000..d1fbca22d
--- /dev/null
+++ b/egs_modelscope/asr/paraformer/speech_paraformer-tiny-commandword_asr_nat-zh-cn-16k-vocab544-pytorch/infer.py
@@ -0,0 +1,88 @@
+import os
+import shutil
+from multiprocessing import Pool
+
+from modelscope.pipelines import pipeline
+from modelscope.utils.constant import Tasks
+
+from funasr.utils.compute_wer import compute_wer
+
+
+def modelscope_infer_core(output_dir, split_dir, njob, idx):
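+    # Decode one split of wav.scp with the ModelScope ASR pipeline.
+    # The 1-based job index idx is mapped to a GPU id; njob jobs share each GPU.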
+ output_dir_job = os.path.join(output_dir, "output.{}".format(idx))
+ gpu_id = (int(idx) - 1) // njob
+ if "CUDA_VISIBLE_DEVICES" in os.environ.keys():
+ gpu_list = os.environ['CUDA_VISIBLE_DEVICES'].split(",")
+ os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_list[gpu_id])
+ else:
+ os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)
+    inference_pipeline = pipeline(
+        task=Tasks.auto_speech_recognition,
+        model="damo/speech_paraformer-tiny-commandword_asr_nat-zh-cn-16k-vocab544-pytorch",
+        output_dir=output_dir_job,
+        batch_size=64
+    )
+    audio_in = os.path.join(split_dir, "wav.{}.scp".format(idx))
+    inference_pipeline(audio_in=audio_in)
+
+
+def modelscope_infer(params):
+ # prepare for multi-GPU decoding
+ ngpu = params["ngpu"]
+ njob = params["njob"]
+ output_dir = params["output_dir"]
+ if os.path.exists(output_dir):
+ shutil.rmtree(output_dir)
+ os.mkdir(output_dir)
+ split_dir = os.path.join(output_dir, "split")
+ os.mkdir(split_dir)
+ nj = ngpu * njob
+ wav_scp_file = os.path.join(params["data_dir"], "wav.scp")
+ with open(wav_scp_file) as f:
+ lines = f.readlines()
+ num_lines = len(lines)
+ num_job_lines = num_lines // nj
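+    # split wav.scp into nj pieces of num_job_lines lines each; the last job takes any remainder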
+ start = 0
+ for i in range(nj):
+ end = start + num_job_lines
+ file = os.path.join(split_dir, "wav.{}.scp".format(str(i + 1)))
+ with open(file, "w") as f:
+ if i == nj - 1:
+ f.writelines(lines[start:])
+ else:
+ f.writelines(lines[start:end])
+ start = end
+
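+    # launch nj worker processes; each job sets its own CUDA_VISIBLE_DEVICES and decodes its split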
+ p = Pool(nj)
+ for i in range(nj):
+ p.apply_async(modelscope_infer_core,
+ args=(output_dir, split_dir, njob, str(i + 1)))
+ p.close()
+ p.join()
+
+ # combine decoding results
+ best_recog_path = os.path.join(output_dir, "1best_recog")
+ os.mkdir(best_recog_path)
+ files = ["text", "token", "score"]
+ for file in files:
+ with open(os.path.join(best_recog_path, file), "w") as f:
+ for i in range(nj):
+ job_file = os.path.join(output_dir, "output.{}/1best_recog".format(str(i + 1)), file)
+ with open(job_file) as f_job:
+ lines = f_job.readlines()
+ f.writelines(lines)
+
+ # If text exists, compute CER
+ text_in = os.path.join(params["data_dir"], "text")
+ if os.path.exists(text_in):
+ text_proc_file = os.path.join(best_recog_path, "token")
+ compute_wer(text_in, text_proc_file, os.path.join(best_recog_path, "text.cer"))
+
+
+if __name__ == "__main__":
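+    # default single-GPU, single-job settings; adjust data_dir/output_dir/ngpu/njob for your setup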
+ params = {}
+ params["data_dir"] = "./data/test"
+ params["output_dir"] = "./results"
+ params["ngpu"] = 1
+ params["njob"] = 1
+ modelscope_infer(params)
From 9f4bb0a0ee2e0c4e48fa06156ece67603b3098d7 Mon Sep 17 00:00:00 2001
From: R1ckShi <2698127294@qq.com>
Date: Tue, 31 Jan 2023 15:32:15 +0800
Subject: [PATCH 2/2] egs for paraformer-tiny
---
.../data/test/wav.scp | 3 ---
1 file changed, 3 deletions(-)
delete mode 100644 egs_modelscope/asr/paraformer/speech_paraformer-tiny-commandword_asr_nat-zh-cn-16k-vocab544-pytorch/data/test/wav.scp
diff --git a/egs_modelscope/asr/paraformer/speech_paraformer-tiny-commandword_asr_nat-zh-cn-16k-vocab544-pytorch/data/test/wav.scp b/egs_modelscope/asr/paraformer/speech_paraformer-tiny-commandword_asr_nat-zh-cn-16k-vocab544-pytorch/data/test/wav.scp
deleted file mode 100644
index 1e194c429..000000000
--- a/egs_modelscope/asr/paraformer/speech_paraformer-tiny-commandword_asr_nat-zh-cn-16k-vocab544-pytorch/data/test/wav.scp
+++ /dev/null
@@ -1,3 +0,0 @@
-001 /Users/shixian/Downloads/0001_ACRD0001.wav
-002 /Users/shixian/Downloads/0001_ACRD0002.wav
-013 /Users/shixian/Downloads/0001_ACRD0013.wav