This commit is contained in:
游雁 2024-06-14 13:59:49 +08:00
parent 67329a74a5
commit 59bc02b089
4 changed files with 38 additions and 8 deletions

View File

@@ -69,7 +69,7 @@ dataset_conf:
   batch_size_scale_ratio_max: 2
   num_workers: 4
   audio_adaptor_downsample_rate: ${audio_adaptor_conf.downsample_rate}
-  audio_encoder_downsample_rate: 2
+  audio_encoder_downsample_rate: 4
   data_split_num: 512
   batch_size_sample_max: 15
   retry: 20
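Note: the config now declares an encoder downsample rate of 4 instead of 2, matching the two stride-2 convolutions accounted for in the dataset length math below. A rough sketch (illustrative only; convolution edge effects are ignored here and handled exactly in the dataset code) of how the encoder and adaptor rates combine:

def approx_audio_tokens(input_frames: int,
                        encoder_rate: int = 4,
                        adaptor_rate: int = 2) -> int:
    # The encoder shortens the frame axis by encoder_rate and the adaptor
    # by adaptor_rate, so the LLM sees roughly input_frames / 8 audio
    # tokens under this config.
    return input_frames // (encoder_rate * adaptor_rate)

print(approx_audio_tokens(800))  # -> 100 audio tokens for 800 fbank frames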

View File

@@ -64,6 +64,8 @@ class OpenAIDataset(torch.utils.data.Dataset):
         self.max_token_length = kwargs.get("max_token_length", 1024)
         self.batch_size_scale_ratio_max = kwargs.get("batch_size_scale_ratio_max", 1.5)
         self.batch_size_token_max = kwargs.get("batch_size_token_max", 2500)
+        self.audio_adaptor_downsample_rate = kwargs.get("audio_adaptor_downsample_rate", 2)
+        self.audio_encoder_downsample_rate = kwargs.get("audio_encoder_downsample_rate", 4)
 
     def get_source_len(self, index):
         item = self.index_ds[index]
@@ -136,10 +138,13 @@ class OpenAIDataset(torch.utils.data.Dataset):
             speech = speech.permute(0, 2, 1)
             # if speech_lengths > self.batch_size:
             #     continue
-            olens = 1 + (speech_lengths[0].item() - 3 + 2 * 1) // 2
-            olens = 1 + (olens - 3 + 2 * 1) // 2
-            sub_token_len = (olens - 1) // 2 + 1
+            if self.audio_encoder_downsample_rate == 4:
+                olens = 1 + (speech_lengths[0].item() - 3 + 2 * 1) // 2
+                olens = 1 + (olens - 3 + 2 * 1) // 2
+            elif self.audio_encoder_downsample_rate == 1:
+                olens = speech_lengths[0].item()
+            sub_token_len = (olens - 1) // self.audio_adaptor_downsample_rate + 1
             sub_token = [0] * sub_token_len
             fbank_beg_i = [len(source_ids)]
             source_ids += sub_token
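The paired olens lines above are the standard convolution output-length formula, 1 + (L + 2*padding - kernel) // stride, with kernel 3, stride 2, padding 1, applied twice to realize the overall encoder rate of 4; the final line then applies the adaptor's rate. A minimal standalone sketch with hypothetical helper names (note that, as in the code above, only rates 4 and 1 are handled):

def conv_out_len(length: int, kernel: int = 3, stride: int = 2, padding: int = 1) -> int:
    # standard Conv1d/Conv2d output-length formula: floor((L + 2p - k) / s) + 1
    return 1 + (length - kernel + 2 * padding) // stride

def estimate_sub_token_len(speech_len: int, encoder_rate: int = 4, adaptor_rate: int = 2) -> int:
    if encoder_rate == 4:
        olens = conv_out_len(conv_out_len(speech_len))  # two stride-2 convs
    elif encoder_rate == 1:
        olens = speech_len
    return (olens - 1) // adaptor_rate + 1

print(estimate_sub_token_len(1000))  # 1000 frames -> 500 -> 250 -> 125 placeholder tokens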

View File

@@ -498,9 +498,7 @@ class LLMASR2(nn.Module):
         with torch.cuda.amp.autocast(enabled=False):
             # audio encoder
-            encoder_out, encoder_out_lens = self.audio_encoder(
-                speech.permute(0, 2, 1), speech_lengths
-            )
+            encoder_out, encoder_out_lens = self.encode(speech, speech_lengths)
 
             # audio_adaptor
             encoder_out, encoder_out_lens = self.audio_adaptor(encoder_out, encoder_out_lens)
@@ -566,6 +564,12 @@ class LLMASR2(nn.Module):
         loss, stats, weight = force_gatherable((loss, stats, batch_size), loss.device)
         return loss, stats, weight
 
+    def encode(self, speech, speech_lengths):
+        # audio encoder
+        encoder_out, encoder_out_lens = self.audio_encoder(speech.permute(0, 2, 1), speech_lengths)
+        return encoder_out, encoder_out_lens
+
     def data_template(self, data):
         system, user, assistant = [], [], []
         for i, item in enumerate(data):
@@ -721,7 +725,8 @@ class LLMASR2(nn.Module):
             speech = speech.to(torch.float16)
         elif kwargs.get("bf16", False):
             speech = speech.to(torch.bfloat16)
-        encoder_out, encoder_out_lens = self.audio_encoder(speech.permute(0, 2, 1), speech_lengths)
+        # audio encoder
+        encoder_out, encoder_out_lens = self.encode(speech, speech_lengths)
 
         # audio_adaptor
         encoder_out, encoder_out_lens = self.audio_adaptor(encoder_out, encoder_out_lens)
@@ -806,3 +811,21 @@ class LLMASR2(nn.Module):
                 ibest_writer["text_tn"][key[0]] = response_clean
 
         return results, meta_data
+
+
+@tables.register("model_classes", "LLMASR3")
+class LLMASR3(LLMASR2):
+    """ """
+
+    def __init__(
+        self,
+        *args,
+        **kwargs,
+    ):
+        super().__init__(*args, **kwargs)
+
+    def encode(self, speech, speech_lengths):
+        # audio encoder; this variant's encoder takes (batch, time, feat) directly, so no permute
+        encoder_out, encoder_out_lens = self.audio_encoder(speech, speech_lengths)
+        return encoder_out, encoder_out_lens
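Taken together, the changes in this file are a template-method refactor: the training and inference paths now call an overridable encode() hook, so LLMASR3 inherits the whole LLMASR2 pipeline and only changes the encoder's expected input layout. A simplified, self-contained sketch of the pattern; Base, Variant, and the stand-in encoders are illustrative, not the repo's API:

import torch
import torch.nn as nn

class Base(nn.Module):
    # forward() depends only on the encode() hook, so a subclass can swap
    # the encoder's input layout without duplicating the pipeline.
    def __init__(self):
        super().__init__()
        # stand-in encoder that expects (batch, feat_dim, time)
        self.audio_encoder = nn.Conv1d(80, 512, kernel_size=3, stride=2, padding=1)

    def forward(self, speech):  # speech arrives as (batch, time, feat_dim)
        return self.encode(speech)

    def encode(self, speech):
        return self.audio_encoder(speech.permute(0, 2, 1))

class Variant(Base):
    def __init__(self):
        super().__init__()
        # stand-in encoder that consumes (batch, time, feat_dim) directly
        self.audio_encoder = nn.LSTM(80, 512, batch_first=True)

    def encode(self, speech):
        out, _ = self.audio_encoder(speech)  # no permute needed
        return out

x = torch.randn(2, 100, 80)
print(Base()(x).shape, Variant()(x).shape)  # (2, 512, 50) and (2, 100, 512)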

View File

@@ -1042,6 +1042,7 @@ class SenseVoiceSANM(nn.Module):
         self.length_normalized_loss = length_normalized_loss
         self.beam_search = None
         self.activation_checkpoint = kwargs.get("activation_checkpoint", False)
+        self.encoder_output_size = encoder_output_size
 
     def forward(
         self,
@@ -1451,6 +1452,7 @@ class SenseVoiceSANMCTC(nn.Module):
         self.ctc = ctc
         self.length_normalized_loss = length_normalized_loss
+        self.encoder_output_size = encoder_output_size
 
     def forward(
         self,
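Both SenseVoice classes now keep the encoder's hidden size on the model as encoder_output_size. A hypothetical consumer (not part of this commit) could size a downstream projection from the attribute instead of re-reading the config:

import torch.nn as nn

def build_projection(model, llm_dim: int) -> nn.Linear:
    # read the hidden size off the model rather than the config file
    return nn.Linear(model.encoder_output_size, llm_dim)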