mirror of https://github.com/modelscope/FunASR
synced 2025-09-15 14:48:36 +08:00
funasr1.0
This commit is contained in:
parent d339765fc2
commit ccb9488954
@@ -1 +0,0 @@
-../../funasr/runtime/docs/benchmark_libtorch.md
@@ -1 +0,0 @@
-../../funasr/runtime/docs/benchmark_onnx.md
@@ -1 +0,0 @@
-../../funasr/runtime/docs/benchmark_onnx_cpp.md
42 examples/industrial_data_pretraining/paraformer/README_zh.md Normal file
@@ -0,0 +1,42 @@
(简体中文|[English](./README.md))

# Speech Recognition

> **Note**:
> The pipeline supports inference and fine-tuning with all models in the [ModelScope model zoo](https://alibaba-damo-academy.github.io/FunASR/en/model_zoo/modelscope_models.html#pretrained-models-on-modelscope). Here we take a typical model as an example to demonstrate usage.

## Inference

### Quick Start

#### [Paraformer model](https://www.modelscope.cn/models/damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/summary)
```python
from funasr import AutoModel

model = AutoModel(model="/Users/zhifu/Downloads/modelscope_models/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch")

res = model(input="/Users/zhifu/Downloads/modelscope_models/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/example/asr_example.wav")
print(res)
```
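Judging from the Paraformer change later in this commit (`result_i = {"key": key[i], "text": text_postprocessed}`), `res` should be a list of result dicts, each carrying an utterance key and the post-processed text.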
### API Reference

#### AutoModel definition

- `model`: a model name from the [model zoo](https://alibaba-damo-academy.github.io/FunASR/en/model_zoo/modelscope_models.html#pretrained-models-on-modelscope), or a model path on local disk
- `device`: `cuda` (default), run inference on GPU. If set to `cpu`, run inference on CPU
- `ncpu`: `None` (default), number of threads used for CPU intra-op parallelism
- `output_dir`: `None` (default), if set, the output path for results
- `batch_size`: `1` (default), batch size during decoding

These options are combined in the sketch below.
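A minimal sketch (not part of this commit) of a constructor call using the options above; the model ID is the ModelScope name from the quick start, and the other values are illustrative:

```python
from funasr import AutoModel

# illustrative values for the documented constructor options
model = AutoModel(
    model="damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch",
    device="cuda",           # "cpu" to run inference on CPU
    ncpu=4,                  # threads for CPU intra-op parallelism
    output_dir="./results",  # results are written here when set
    batch_size=1,            # decoding batch size
)
```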
#### AutoModel inference

- `input`: the input to decode (see the sketch after this list); it can be:
  - a wav file path, e.g. asr_example.wav
  - a pcm file path, e.g. asr_example.pcm; in this case the audio sampling rate `fs` must be specified (default 16000)
  - an audio byte stream, e.g. byte data from a microphone
  - a wav.scp, a Kaldi-style wav list (`wav_id \t wav_path`), e.g.:
    ```text
    asr_example1 ./audios/asr_example1.wav
    asr_example2 ./audios/asr_example2.wav
    ```
    With `wav.scp` input, `output_dir` must be set to save the output results
  - audio samples, e.g. `audio, rate = soundfile.read("asr_example_zh.wav")`, of type numpy.ndarray. Batch input is supported as a list: `[audio_sample1, audio_sample2, ..., audio_sampleN]`
  - fbank input, batching supported; shape is [batch, frames, dim], type torch.Tensor, e.g. the batches produced by `AutoFrontend` (see below)
- `output_dir`: `None` (default), if set, the output path for results
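A minimal sketch (not part of this commit) of the main input variants, assuming a `model` object as in the quick start; file names are illustrative:

```python
import soundfile
from funasr import AutoModel

model = AutoModel(model="damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch")

# 1) a single wav file path
res = model(input="asr_example.wav")

# 2) raw samples as a numpy.ndarray, passing the sampling rate as fs
audio, rate = soundfile.read("asr_example_zh.wav")
res = model(input=audio, fs=rate)

# 3) a batch of sample arrays as a list
res = model(input=[audio, audio], batch_size=2)

# 4) a Kaldi-style wav.scp; output_dir must be set for this input
res = model(input="wav.scp", output_dir="./results")
print(res)
```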
@@ -8,4 +8,15 @@ from funasr import AutoModel
 model = AutoModel(model="/Users/zhifu/Downloads/modelscope_models/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch")

 res = model(input="/Users/zhifu/Downloads/modelscope_models/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch/example/asr_example.wav")
 print(res)
+
+from funasr import AutoFrontend
+
+frontend = AutoFrontend(model="/Users/zhifu/Downloads/modelscope_models/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch")
+
+fbanks = frontend(input="/Users/zhifu/funasr_github/test_local/wav.scp", batch_size=2)
+
+for batch_idx, fbank_dict in enumerate(fbanks):
+    res = model(**fbank_dict)
+    print(res)
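Each `fbank_dict` yielded by `AutoFrontend` corresponds to the batch dicts built in `AutoFrontend.__call__` below, i.e. `{"input": fbank tensor, "input_len": lengths, "key": utterance ids}`, so it can be unpacked straight into the model call.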
@@ -30,4 +30,4 @@ def import_submodules(package, recursive=True):

 import_submodules(__name__)

-from funasr.bin.inference import AutoModel
+from funasr.bin.inference import AutoModel, AutoFrontend
@@ -16,11 +16,12 @@ import time
 import random
 import string
 from funasr.register import tables
-from funasr.datasets.audio_datasets.load_audio_extract_fbank import load_audio
+from funasr.datasets.audio_datasets.load_audio_extract_fbank import load_audio, extract_fbank
 from funasr.utils.vad_utils import slice_padding_audio_samples
 from funasr.utils.timestamp_tools import time_stamp_sentence

-def build_iter_for_infer(data_in, input_len=None, data_type="sound"):
+def build_iter_for_infer(data_in, input_len=None, data_type="sound", key=None):
     """

     :param input:
@@ -63,7 +64,8 @@ def build_iter_for_infer(data_in, input_len=None, data_type="sound"):
     else:  # raw text; audio sample point, fbank; bytes
         if isinstance(data_in, bytes):  # audio bytes
             data_in = load_bytes(data_in)
-        key = "rand_key_" + ''.join(random.choice(chars) for _ in range(13))
+        if key is None:
+            key = "rand_key_" + ''.join(random.choice(chars) for _ in range(13))
         data_list = [data_in]
         key_list = [key]
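The new `key` argument threads a caller-supplied utterance id through to the results instead of the generated `rand_key_...` one; a hypothetical call:

```python
# hypothetical: audio_samples is a numpy.ndarray of samples;
# "utt_001" replaces the auto-generated random key in the results
res = model.generate(input=audio_samples, key="utt_001")
```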
@@ -121,11 +123,14 @@ class AutoModel:
         set_all_random_seed(kwargs.get("seed", 0))

         device = kwargs.get("device", "cuda")
-        if not torch.cuda.is_available() or kwargs.get("ngpu", 1):
+        if not torch.cuda.is_available() or kwargs.get("ngpu", 0):
             device = "cpu"
             kwargs["batch_size"] = 1
         kwargs["device"] = device

+        if kwargs.get("ncpu", None):
+            torch.set_num_threads(kwargs.get("ncpu"))
+
         # build tokenizer
         tokenizer = kwargs.get("tokenizer", None)
         if tokenizer is not None:
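With the added `ncpu` handling, a hypothetical constructor call like the one below now also pins torch's intra-op thread count:

```python
# hypothetical values: ncpu, when set, is forwarded to torch.set_num_threads
model = AutoModel(
    model="damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch",
    device="cpu",
    ncpu=4,
)
```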
@@ -169,17 +174,18 @@ class AutoModel:
         else:
             return self.generate_with_vad(input, input_len=input_len, **cfg)

-    def generate(self, input, input_len=None, model=None, kwargs=None, **cfg):
+    def generate(self, input, input_len=None, model=None, kwargs=None, key=None, **cfg):
         # import pdb; pdb.set_trace()
         kwargs = self.kwargs if kwargs is None else kwargs
         kwargs.update(cfg)
         model = self.model if model is None else model

         data_type = kwargs.get("data_type", "sound")
         batch_size = kwargs.get("batch_size", 1)
-        # if kwargs.get("device", "cpu") == "cpu":
-        #     batch_size = 1
+        if kwargs.get("device", "cpu") == "cpu":
+            batch_size = 1

-        key_list, data_list = build_iter_for_infer(input, input_len=input_len, data_type=data_type)
+        key_list, data_list = build_iter_for_infer(input, input_len=input_len, data_type=data_type, key=key)

         speed_stats = {}
         asr_result_list = []
@@ -193,7 +199,7 @@ class AutoModel:
             key_batch = key_list[beg_idx:end_idx]
             batch = {"data_in": data_batch, "key": key_batch}
             if (end_idx - beg_idx) == 1 and isinstance(data_batch[0], torch.Tensor):  # fbank
-                batch["data_batch"] = data_batch[0]
+                batch["data_in"] = data_batch[0]
                 batch["data_lengths"] = input_len

             time1 = time.perf_counter()
@@ -348,6 +354,74 @@ class AutoModel:
             f"time_speech_total_all_samples: {time_speech_total_all_samples: 0.3f}, "
             f"time_escape_total_all_samples: {time_escape_total_all_samples:0.3f}")
         return results_ret_list
+
+
+class AutoFrontend:
+    def __init__(self, **kwargs):
+        assert "model" in kwargs
+        if "model_conf" not in kwargs:
+            logging.info("download models from model hub: {}".format(kwargs.get("model_hub", "ms")))
+            kwargs = download_model(**kwargs)
+
+        # build frontend
+        frontend = kwargs.get("frontend", None)
+        if frontend is not None:
+            frontend_class = tables.frontend_classes.get(frontend.lower())
+            frontend = frontend_class(**kwargs["frontend_conf"])
+
+        self.frontend = frontend
+        self.kwargs = kwargs
+
+    def __call__(self, input, input_len=None, kwargs=None, **cfg):
+        kwargs = self.kwargs if kwargs is None else kwargs
+        kwargs.update(cfg)
+
+        key_list, data_list = build_iter_for_infer(input, input_len=input_len)
+        batch_size = kwargs.get("batch_size", 1)
+        device = kwargs.get("device", "cpu")
+        if device == "cpu":
+            batch_size = 1
+
+        meta_data = {}
+
+        result_list = []
+        num_samples = len(data_list)
+        pbar = tqdm(colour="blue", total=num_samples + 1, dynamic_ncols=True)
+
+        time0 = time.perf_counter()
+        for beg_idx in range(0, num_samples, batch_size):
+            end_idx = min(num_samples, beg_idx + batch_size)
+            data_batch = data_list[beg_idx:end_idx]
+            key_batch = key_list[beg_idx:end_idx]
+
+            # extract fbank feats
+            time1 = time.perf_counter()
+            audio_sample_list = load_audio(data_batch, fs=self.frontend.fs, audio_fs=kwargs.get("fs", 16000))
+            time2 = time.perf_counter()
+            meta_data["load_data"] = f"{time2 - time1:0.3f}"
+            speech, speech_lengths = extract_fbank(audio_sample_list, data_type=kwargs.get("data_type", "sound"),
+                                                   frontend=self.frontend)
+            time3 = time.perf_counter()
+            meta_data["extract_feat"] = f"{time3 - time2:0.3f}"
+            meta_data["batch_data_time"] = speech_lengths.sum().item() * self.frontend.frame_shift * self.frontend.lfr_n / 1000
+
+            speech.to(device=device), speech_lengths.to(device=device)
+            batch = {"input": speech, "input_len": speech_lengths, "key": key_batch}
+            result_list.append(batch)
+
+            pbar.update(1)
+            description = (
+                f"{meta_data}, "
+            )
+            pbar.set_description(description)
+
+        time_end = time.perf_counter()
+        pbar.set_description(f"time escaped total: {time_end - time0:0.3f}")
+
+        return result_list
+
+
 if __name__ == '__main__':
     main_hydra()
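Note the design choice here: `AutoFrontend.__call__` stops short of decoding and returns ready-to-run batch dicts (`input`, `input_len`, `key`), so feature extraction can be batched (e.g. `batch_size=2` in the README example) independently of model inference.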
@@ -495,6 +495,8 @@ class Paraformer(nn.Module):

         results = []
         b, n, d = decoder_out.size()
+        if isinstance(key[0], (list, tuple)):
+            key = key[0]
         for i in range(b):
             x = encoder_out[i, :encoder_out_lens[i], :]
             am_scores = decoder_out[i, :pre_token_length[i], :]
@@ -535,6 +537,7 @@ class Paraformer(nn.Module):
             text = tokenizer.tokens2text(token)

+            text_postprocessed, _ = postprocess_utils.sentence_postprocess(token)

             result_i = {"key": key[i], "text": text_postprocessed}
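The added `isinstance` check likely flattens the nested key list that arrives when an `AutoFrontend` batch dict, whose `"key"` entry is itself a list, is unpacked into the model call, so that `key[i]` indexes individual utterance ids.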