Mirror of https://github.com/modelscope/FunASR, synced 2025-09-15 14:48:36 +08:00

Commit faa8ad377a (parent 28ecb25249)
modify unit test for speech_diarization_sond-en-us-callhome-8k-n16k4-pytorch
egs/callhome/diarization/sond/sond.yaml (new file, 2738 lines; diff suppressed because it is too large)
egs/callhome/diarization/sond/sond_fbank.yaml (new file, 2738 lines; diff suppressed because it is too large)
egs/callhome/diarization/sond/unit_test.py (new file, 97 lines)
@@ -0,0 +1,97 @@
from funasr.bin.diar_inference_launch import inference_launch
import os


def test_fbank_cpu_infer():
    diar_config_path = "sond_fbank.yaml"
    diar_model_path = "sond.pth"
    output_dir = "./outputs"
    data_path_and_name_and_type = [
        ("data/unit_test/test_feats.scp", "speech", "kaldi_ark"),
        ("data/unit_test/test_profile.scp", "profile", "kaldi_ark"),
    ]
    pipeline = inference_launch(
        mode="sond",
        diar_train_config=diar_config_path,
        diar_model_file=diar_model_path,
        output_dir=output_dir,
        num_workers=0,
        log_level="INFO",
    )
    results = pipeline(data_path_and_name_and_type)
    print(results)


def test_fbank_gpu_infer():
    diar_config_path = "config_fbank.yaml"
    diar_model_path = "sond.pth"
    output_dir = "./outputs"
    data_path_and_name_and_type = [
        ("data/unit_test/test_feats.scp", "speech", "kaldi_ark"),
        ("data/unit_test/test_profile.scp", "profile", "kaldi_ark"),
    ]
    pipeline = inference_launch(
        mode="sond",
        diar_train_config=diar_config_path,
        diar_model_file=diar_model_path,
        output_dir=output_dir,
        ngpu=1,
        num_workers=1,
        log_level="WARNING",
    )
    results = pipeline(data_path_and_name_and_type)
    print(results)


def test_wav_gpu_infer():
    diar_config_path = "config.yaml"
    diar_model_path = "sond.pth"
    output_dir = "./outputs"
    data_path_and_name_and_type = [
        ("data/unit_test/test_wav.scp", "speech", "sound"),
        ("data/unit_test/test_profile.scp", "profile", "kaldi_ark"),
    ]
    pipeline = inference_launch(
        mode="sond",
        diar_train_config=diar_config_path,
        diar_model_file=diar_model_path,
        output_dir=output_dir,
        ngpu=1,
        num_workers=1,
        log_level="WARNING",
    )
    results = pipeline(data_path_and_name_and_type)
    print(results)


def test_without_profile_gpu_infer():
    diar_config_path = "config.yaml"
    diar_model_path = "sond.pth"
    output_dir = "./outputs"
    raw_inputs = [[
        "data/unit_test/raw_inputs/record.wav",
        "data/unit_test/raw_inputs/spk1.wav",
        "data/unit_test/raw_inputs/spk2.wav",
        "data/unit_test/raw_inputs/spk3.wav",
        "data/unit_test/raw_inputs/spk4.wav"
    ]]
    pipeline = inference_launch(
        mode="sond_demo",
        diar_train_config=diar_config_path,
        diar_model_file=diar_model_path,
        output_dir=output_dir,
        ngpu=1,
        num_workers=1,
        log_level="WARNING",
        param_dict={},
    )
    results = pipeline(raw_inputs=raw_inputs)
    print(results)


if __name__ == '__main__':
    os.environ["CUDA_VISIBLE_DEVICES"] = "7"
    test_fbank_cpu_infer()
    # test_fbank_gpu_infer()
    # test_wav_gpu_infer()
    # test_without_profile_gpu_infer()
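Note: the tests above expect the diarization config (e.g. sond_fbank.yaml), the sond.pth checkpoint and the data/unit_test/ inputs to already be present next to the script. Besides running it directly with python unit_test.py, a single case can be selected through pytest; the lines below are only a minimal runner sketch, assuming pytest is installed (the GPU index and test name are examples, not part of the commit):

import os
import pytest

# Pin the GPU used by the GPU cases, then run only the CPU fbank test.
os.environ["CUDA_VISIBLE_DEVICES"] = "7"
raise SystemExit(pytest.main(["-s", "unit_test.py", "-k", "test_fbank_cpu_infer"]))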
@@ -612,3 +612,230 @@ class ResNet34Diar(ResNet34):
                logging.warning("{} is missed from tf checkpoint".format(name))

        return var_dict_torch_update


class ResNet34SpL2RegDiar(ResNet34_SP_L2Reg):
    def __init__(
            self,
            input_size,
            embedding_node="resnet1_dense",
            use_head_conv=True,
            batchnorm_momentum=0.5,
            use_head_maxpool=False,
            num_nodes_pooling_layer=256,
            layers_in_block=(3, 4, 6, 3),
            filters_in_block=(32, 64, 128, 256),
            num_nodes_resnet1=256,
            num_nodes_last_layer=256,
            pooling_type="window_shift",
            pool_size=20,
            stride=1,
            tf2torch_tensor_name_prefix_torch="encoder",
            tf2torch_tensor_name_prefix_tf="seq2seq/speech_encoder"
    ):
        super(ResNet34SpL2RegDiar, self).__init__(
            input_size,
            use_head_conv=use_head_conv,
            batchnorm_momentum=batchnorm_momentum,
            use_head_maxpool=use_head_maxpool,
            num_nodes_pooling_layer=num_nodes_pooling_layer,
            layers_in_block=layers_in_block,
            filters_in_block=filters_in_block,
        )

        self.embedding_node = embedding_node
        self.num_nodes_resnet1 = num_nodes_resnet1
        self.num_nodes_last_layer = num_nodes_last_layer
        self.pooling_type = pooling_type
        self.pool_size = pool_size
        self.stride = stride
        self.tf2torch_tensor_name_prefix_torch = tf2torch_tensor_name_prefix_torch
        self.tf2torch_tensor_name_prefix_tf = tf2torch_tensor_name_prefix_tf

        self.resnet1_dense = torch.nn.Linear(num_nodes_pooling_layer * 2, num_nodes_resnet1)
        self.resnet1_bn = torch.nn.BatchNorm1d(num_nodes_resnet1, eps=1e-3, momentum=batchnorm_momentum)

        self.resnet2_dense = torch.nn.Linear(num_nodes_resnet1, num_nodes_last_layer)
        self.resnet2_bn = torch.nn.BatchNorm1d(num_nodes_last_layer, eps=1e-3, momentum=batchnorm_momentum)

    def output_size(self) -> int:
        if self.embedding_node.startswith("resnet1"):
            return self.num_nodes_resnet1
        elif self.embedding_node.startswith("resnet2"):
            return self.num_nodes_last_layer

        return self.num_nodes_pooling_layer

    def forward(
            self,
            xs_pad: torch.Tensor,
            ilens: torch.Tensor,
            prev_states: torch.Tensor = None,
    ) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:

        endpoints = OrderedDict()
        res_out, ilens = super().forward(xs_pad, ilens)
        endpoints["resnet0_bn"] = res_out
        if self.pooling_type == "frame_gsp":
            features = statistic_pooling(res_out, ilens, (2, ))
        else:
            features, ilens = windowed_statistic_pooling(res_out, ilens, (2, ), self.pool_size, self.stride)
            features = features.transpose(1, 2)
        endpoints["pooling"] = features

        features = self.resnet1_dense(features)
        endpoints["resnet1_dense"] = features
        features = F.relu(features)
        endpoints["resnet1_relu"] = features
        features = self.resnet1_bn(features.transpose(1, 2)).transpose(1, 2)
        endpoints["resnet1_bn"] = features

        features = self.resnet2_dense(features)
        endpoints["resnet2_dense"] = features
        features = F.relu(features)
        endpoints["resnet2_relu"] = features
        features = self.resnet2_bn(features.transpose(1, 2)).transpose(1, 2)
        endpoints["resnet2_bn"] = features

        return endpoints[self.embedding_node], ilens, None

    def gen_tf2torch_map_dict(self):
        tensor_name_prefix_torch = self.tf2torch_tensor_name_prefix_torch
        tensor_name_prefix_tf = self.tf2torch_tensor_name_prefix_tf
        train_steps = 720000
        map_dict_local = {
            # torch: conv1d.weight in "out_channel in_channel kernel_size"
            # tf   : conv1d.weight in "kernel_size in_channel out_channel"
            # torch: linear.weight in "out_channel in_channel"
            # tf   : dense.weight in "in_channel out_channel"
            "{}.pre_conv.weight".format(tensor_name_prefix_torch):
                {"name": "{}/pre_conv/kernel".format(tensor_name_prefix_tf),
                 "squeeze": None,
                 "transpose": (3, 2, 0, 1),
                 },
            "{}.pre_conv_bn.bias".format(tensor_name_prefix_torch):
                {"name": "{}/pre_conv_bn/beta".format(tensor_name_prefix_tf),
                 "squeeze": None,
                 "transpose": None,
                 },
            "{}.pre_conv_bn.weight".format(tensor_name_prefix_torch):
                {"name": "{}/pre_conv_bn/gamma".format(tensor_name_prefix_tf),
                 "squeeze": None,
                 "transpose": None,
                 },
            "{}.pre_conv_bn.running_mean".format(tensor_name_prefix_torch):
                {"name": "{}/pre_conv_bn/moving_mean".format(tensor_name_prefix_tf),
                 "squeeze": None,
                 "transpose": None,
                 },
            "{}.pre_conv_bn.running_var".format(tensor_name_prefix_torch):
                {"name": "{}/pre_conv_bn/moving_variance".format(tensor_name_prefix_tf),
                 "squeeze": None,
                 "transpose": None,
                 },
            "{}.pre_conv_bn.num_batches_tracked".format(tensor_name_prefix_torch): train_steps
        }
        for layer_idx in range(3):
            map_dict_local.update({
                "{}.resnet{}_dense.weight".format(tensor_name_prefix_torch, layer_idx):
                    {"name": "{}/resnet{}_dense/kernel".format(tensor_name_prefix_tf, layer_idx),
                     "squeeze": None,
                     "transpose": (3, 2, 0, 1) if layer_idx == 0 else (1, 0),
                     },
                "{}.resnet{}_dense.bias".format(tensor_name_prefix_torch, layer_idx):
                    {"name": "{}/resnet{}_dense/bias".format(tensor_name_prefix_tf, layer_idx),
                     "squeeze": None,
                     "transpose": None,
                     },
                "{}.resnet{}_bn.weight".format(tensor_name_prefix_torch, layer_idx):
                    {"name": "{}/resnet{}_bn/gamma".format(tensor_name_prefix_tf, layer_idx),
                     "squeeze": None,
                     "transpose": None,
                     },
                "{}.resnet{}_bn.bias".format(tensor_name_prefix_torch, layer_idx):
                    {"name": "{}/resnet{}_bn/beta".format(tensor_name_prefix_tf, layer_idx),
                     "squeeze": None,
                     "transpose": None,
                     },
                "{}.resnet{}_bn.running_mean".format(tensor_name_prefix_torch, layer_idx):
                    {"name": "{}/resnet{}_bn/moving_mean".format(tensor_name_prefix_tf, layer_idx),
                     "squeeze": None,
                     "transpose": None,
                     },
                "{}.resnet{}_bn.running_var".format(tensor_name_prefix_torch, layer_idx):
                    {"name": "{}/resnet{}_bn/moving_variance".format(tensor_name_prefix_tf, layer_idx),
                     "squeeze": None,
                     "transpose": None,
                     },
                "{}.resnet{}_bn.num_batches_tracked".format(tensor_name_prefix_torch, layer_idx): train_steps
            })

        for block_idx in range(len(self.layers_in_block)):
            for layer_idx in range(self.layers_in_block[block_idx]):
                for i in ["1", "2", "_sc"]:
                    map_dict_local.update({
                        "{}.block_{}.layer_{}.conv{}.weight".format(tensor_name_prefix_torch, block_idx, layer_idx, i):
                            {"name": "{}/block_{}/layer_{}/conv{}/kernel".format(tensor_name_prefix_tf, block_idx, layer_idx, i),
                             "squeeze": None,
                             "transpose": (3, 2, 0, 1),
                             },
                        "{}.block_{}.layer_{}.bn{}.weight".format(tensor_name_prefix_torch, block_idx, layer_idx, i):
                            {"name": "{}/block_{}/layer_{}/bn{}/gamma".format(tensor_name_prefix_tf, block_idx, layer_idx, i),
                             "squeeze": None,
                             "transpose": None,
                             },
                        "{}.block_{}.layer_{}.bn{}.bias".format(tensor_name_prefix_torch, block_idx, layer_idx, i):
                            {"name": "{}/block_{}/layer_{}/bn{}/beta".format(tensor_name_prefix_tf, block_idx, layer_idx, i),
                             "squeeze": None,
                             "transpose": None,
                             },
                        "{}.block_{}.layer_{}.bn{}.running_mean".format(tensor_name_prefix_torch, block_idx, layer_idx, i):
                            {"name": "{}/block_{}/layer_{}/bn{}/moving_mean".format(tensor_name_prefix_tf, block_idx, layer_idx, i),
                             "squeeze": None,
                             "transpose": None,
                             },
                        "{}.block_{}.layer_{}.bn{}.running_var".format(tensor_name_prefix_torch, block_idx, layer_idx, i):
                            {"name": "{}/block_{}/layer_{}/bn{}/moving_variance".format(tensor_name_prefix_tf, block_idx, layer_idx, i),
                             "squeeze": None,
                             "transpose": None,
                             },
                        "{}.block_{}.layer_{}.bn{}.num_batches_tracked".format(tensor_name_prefix_torch, block_idx, layer_idx, i): train_steps,
                    })

        return map_dict_local

    def convert_tf2torch(self,
                         var_dict_tf,
                         var_dict_torch,
                         ):

        map_dict = self.gen_tf2torch_map_dict()

        var_dict_torch_update = dict()
        for name in sorted(var_dict_torch.keys(), reverse=False):
            if name.startswith(self.tf2torch_tensor_name_prefix_torch):
                if name in map_dict:
                    if "num_batches_tracked" not in name:
                        name_tf = map_dict[name]["name"]
                        data_tf = var_dict_tf[name_tf]
                        if map_dict[name]["squeeze"] is not None:
                            data_tf = np.squeeze(data_tf, axis=map_dict[name]["squeeze"])
                        if map_dict[name]["transpose"] is not None:
                            data_tf = np.transpose(data_tf, map_dict[name]["transpose"])
                        data_tf = torch.from_numpy(data_tf).type(torch.float32).to("cpu")
                        assert var_dict_torch[name].size() == data_tf.size(), \
                            "{}, {}, {} != {}".format(name, name_tf,
                                                      var_dict_torch[name].size(), data_tf.size())
                        var_dict_torch_update[name] = data_tf
                        logging.info("torch tensor: {}, {}, loading from tf tensor: {}, {}".format(
                            name, data_tf.size(), name_tf, var_dict_tf[name_tf].shape
                        ))
                    else:
                        var_dict_torch_update[name] = torch.Tensor(map_dict[name]).type(torch.int64).to("cpu")
                        logging.info("torch tensor: {}, manually assigning to: {}".format(
                            name, map_dict[name]
                        ))
                else:
                    logging.warning("{} is missed from tf checkpoint".format(name))

        return var_dict_torch_update
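A note on the pooling step in ResNet34SpL2RegDiar.forward: statistic pooling in x-vector style speaker encoders usually concatenates a mean and a standard deviation over the pooled frames, which would explain why resnet1_dense expects num_nodes_pooling_layer * 2 input features. The snippet below is only an illustrative stand-in for that idea, not the actual funasr windowed_statistic_pooling (which also consumes and returns the sequence lengths ilens):

import torch

def windowed_mean_std_pooling(x: torch.Tensor, pool_size: int = 20, stride: int = 1) -> torch.Tensor:
    # x: (batch, channels, time); padding/ilens handling is omitted here.
    windows = x.unfold(dimension=2, size=pool_size, step=stride)  # (B, C, num_windows, pool_size)
    mean = windows.mean(dim=-1)
    std = windows.std(dim=-1)
    return torch.cat([mean, std], dim=1)                          # (B, 2 * C, num_windows)

res_out = torch.randn(2, 256, 400)           # e.g. 256-channel ResNet output over 400 frames
pooled = windowed_mean_std_pooling(res_out)
print(pooled.shape)                          # torch.Size([2, 512, 381])
# After transpose(1, 2) this matches the Linear(num_nodes_pooling_layer * 2, ...) input above.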
@@ -23,7 +23,7 @@ from funasr.layers.global_mvn import GlobalMVN
from funasr.layers.utterance_mvn import UtteranceMVN
from funasr.layers.label_aggregation import LabelAggregate
from funasr.models.ctc import CTC
-from funasr.models.encoder.resnet34_encoder import ResNet34Diar
+from funasr.models.encoder.resnet34_encoder import ResNet34Diar, ResNet34SpL2RegDiar
from funasr.models.encoder.ecapa_tdnn_encoder import ECAPA_TDNN
from funasr.models.encoder.opennmt_encoders.conv_encoder import ConvEncoder
from funasr.models.encoder.opennmt_encoders.fsmn_encoder import FsmnEncoder
@@ -122,6 +122,7 @@ encoder_choices = ClassChoices(
        fsmn=FsmnEncoder,
        conv=ConvEncoder,
        resnet34=ResNet34Diar,
+        resnet34_sp_l2reg=ResNet34SpL2RegDiar,
        sanm_chunk_opt=SANMEncoderChunkOpt,
        data2vec_encoder=Data2VecEncoder,
        ecapa_tdnn=ECAPA_TDNN,
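With this registration, a diarization training config can select the new encoder by name, and the string resnet34_sp_l2reg is resolved to ResNet34SpL2RegDiar when the model is built. The snippet below is a rough illustration of that lookup (a simplified stand-in, not the actual funasr ClassChoices machinery; encoder_conf stands for whatever keyword options the config provides):

from funasr.models.encoder.resnet34_encoder import ResNet34Diar, ResNet34SpL2RegDiar

# Simplified name-to-class registry mirroring the ClassChoices entries above.
encoder_classes = {
    "resnet34": ResNet34Diar,
    "resnet34_sp_l2reg": ResNet34SpL2RegDiar,
}

def build_encoder(name: str, input_size: int, encoder_conf: dict):
    encoder_class = encoder_classes[name]            # e.g. name = "resnet34_sp_l2reg"
    return encoder_class(input_size, **encoder_conf)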
@@ -160,6 +161,7 @@ ci_scorer_choices = ClassChoices(
    classes=dict(
        dot=DotScorer,
        cosine=CosScorer,
+        conv=ConvEncoder,
    ),
    type_check=torch.nn.Module,
    default=None,