# Copyright (c) Alibaba, Inc. and its affiliates.
# Part of the implementation is borrowed from espnet/espnet.

import numpy as np
import torch


def load_cmvn(cmvn_file):
    """Load CMVN statistics from a Kaldi-nnet style text file.

    The file is scanned for an <AddShift> component (the shift vector,
    typically the negated means) and a <Rescale> component (the scale vector,
    typically the inverse standard deviations); the vector on the line
    following each tag is collected.
    """
    with open(cmvn_file, 'r', encoding='utf-8') as f:
        lines = f.readlines()
    means_list = []
    vars_list = []
    for i in range(len(lines)):
        line_item = lines[i].split()
        if line_item[0] == '<AddShift>':
            line_item = lines[i + 1].split()
            if line_item[0] == '<LearnRateCoef>':
                # the values sit between '<LearnRateCoef> 0 [' and the trailing ']'
                add_shift_line = line_item[3:(len(line_item) - 1)]
                means_list = list(add_shift_line)
                continue
        elif line_item[0] == '<Rescale>':
            line_item = lines[i + 1].split()
            if line_item[0] == '<LearnRateCoef>':
                rescale_line = line_item[3:(len(line_item) - 1)]
                vars_list = list(rescale_line)
                continue
    # np.float was removed from recent NumPy releases; cast to float64 explicitly
    means = np.array(means_list).astype(np.float64)
    vars = np.array(vars_list).astype(np.float64)
    cmvn = np.array([means, vars])
    cmvn = torch.as_tensor(cmvn)
    return cmvn


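# Illustration only: load_cmvn() expects a Kaldi-nnet style text file roughly
# shaped like the hypothetical 4-dimensional example below, where the vector
# after <AddShift> holds the shift values and the vector after <Rescale> holds
# the scale values:
#
#   <AddShift> 4 4
#   <LearnRateCoef> 0 [ -8.31 -8.16 -8.01 -7.87 ]
#   <Rescale> 4 4
#   <LearnRateCoef> 0 [ 0.16 0.15 0.15 0.14 ]

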
def apply_cmvn(inputs, cmvn_file):  # noqa
    """
    Apply CMVN (mean shift and variance rescale) to a (frames, dim) feature
    tensor, using statistics loaded from cmvn_file.
    """

    device = inputs.device
    dtype = inputs.dtype
    frame, dim = inputs.shape

    cmvn = load_cmvn(cmvn_file)
    # broadcast the per-dimension statistics over all frames
    means = np.tile(cmvn[0:1, :dim], (frame, 1))
    vars = np.tile(cmvn[1:2, :dim], (frame, 1))
    # note: inputs is modified in place by the augmented assignments below
    inputs += torch.from_numpy(means).type(dtype).to(device)
    inputs *= torch.from_numpy(vars).type(dtype).to(device)

    return inputs.type(torch.float32)


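# Illustration only: a typical call would be
#     feats = apply_cmvn(feats, 'am.mvn')
# where feats is a (frames, dim) torch tensor and 'am.mvn' is a hypothetical
# path to a CMVN statistics file with at least dim dimensions.

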
def apply_lfr(inputs, lfr_m, lfr_n):
    """
    Apply low frame rate (LFR): stack lfr_m consecutive frames and keep one
    stacked frame for every lfr_n input frames.
    """
    LFR_inputs = []
    T = inputs.shape[0]
    T_lfr = int(np.ceil(T / lfr_n))
    # pad the beginning by repeating the first frame so the window is centred
    left_padding = inputs[0].repeat((lfr_m - 1) // 2, 1)
    inputs = torch.vstack((left_padding, inputs))
    T = T + (lfr_m - 1) // 2
    for i in range(T_lfr):
        if lfr_m <= T - i * lfr_n:
            LFR_inputs.append((inputs[i * lfr_n:i * lfr_n + lfr_m]).view(1, -1))
        else:  # process last LFR frame: pad by repeating the final input frame
            num_padding = lfr_m - (T - i * lfr_n)
            frame = (inputs[i * lfr_n:]).view(-1)
            for _ in range(num_padding):
                frame = torch.hstack((frame, inputs[-1]))
            LFR_inputs.append(frame)
    LFR_outputs = torch.vstack(LFR_inputs)
    return LFR_outputs.type(torch.float32)
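

# Minimal shape sketch (illustration only; the sizes are assumptions, not part
# of the original file): stacking lfr_m=7 frames every lfr_n=6 frames turns
# 80-dim features into 7 * 80 = 560-dim low-frame-rate features.
if __name__ == '__main__':
    feats = torch.randn(100, 80)                    # (frames, fbank_dim)
    lfr_feats = apply_lfr(feats, lfr_m=7, lfr_n=6)
    print(lfr_feats.shape)                          # torch.Size([17, 560]); 17 == ceil(100 / 6)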