FunASR/funasr/datasets/data_sampler.py

import torch


class BatchSampler(torch.utils.data.BatchSampler):
    """Length-sorted batch sampler.

    Samples are grouped into windows of ``sort_size``, sorted by length inside
    each window, and packed into batches so that the padded token count
    (or, with ``batch_size_type == "example"``, the sample count) stays within
    ``batch_size``.
    """

    def __init__(self, dataset=None, args=None, drop_last=True):
        self.drop_last = drop_last
        self.pre_idx = -1  # last consumed sort window; -1 means start from the beginning
        self.dataset = dataset
        self.batch_size_type = args.batch_size_type  # "example" or token-based
        self.batch_size = args.batch_size  # budget per batch (samples or padded tokens)
        self.sort_size = args.sort_size  # number of samples length-sorted together
        self.max_length_token = args.max_length_token  # samples longer than this are skipped
        self.total_samples = len(dataset)

    def __len__(self):
        # Note: this is the number of samples, not the number of batches.
        return self.total_samples

    def __iter__(self):
        batch = []
        max_token = 0  # longest sample in the current batch
        num_sample = 0  # number of samples in the current batch
        iter_num = (self.total_samples - 1) // self.sort_size + 1
        for iter_idx in range(self.pre_idx + 1, iter_num):
            # Collect (index, length) pairs for one sort window.
            datalen_with_index = []
            for i in range(self.sort_size):
                idx = iter_idx * self.sort_size + i
                if idx >= self.total_samples:
                    continue
                if self.batch_size_type == "example":
                    sample_len_cur = 1
                else:
                    idx_map = self.dataset.shuffle_idx[idx]
                    # prompt = self.dataset.indexed_dataset[idx_map]["prompt"]
                    sample_len_cur = self.dataset.indexed_dataset[idx_map]["source_len"] + \
                                     self.dataset.indexed_dataset[idx_map]["target_len"]
                datalen_with_index.append([idx, sample_len_cur])

            # Sort the window by length so padding within a batch is minimized.
            datalen_with_index_sort = sorted(datalen_with_index, key=lambda x: x[1])
            for idx, sample_len_cur in datalen_with_index_sort:
                if sample_len_cur > self.max_length_token:
                    continue
                max_token_cur = max(max_token, sample_len_cur)
                # Padded size the batch would have if this sample were added.
                max_token_padding = (1 + num_sample) * max_token_cur
                if max_token_padding <= self.batch_size:
                    batch.append(idx)
                    max_token = max_token_cur
                    num_sample += 1
                else:
                    # Budget exceeded: emit the current batch and start a new one.
                    yield batch
                    max_token = sample_len_cur
                    num_sample = 1
                    batch = [idx]
        # Note: the trailing partial batch is never yielded here, so drop_last
        # effectively always behaves as True.
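

# --- Usage sketch (illustrative, not part of the original file) ---
# A minimal example of how this sampler might be driven. It assumes `args` is any
# object carrying the attributes read in __init__, and that the dataset exposes
# `shuffle_idx` plus an `indexed_dataset` whose entries have "source_len" and
# "target_len"; the toy dataset below is hypothetical and only mimics that interface.
if __name__ == "__main__":
    import argparse

    class _ToyDataset:
        # Tiny stand-in dataset with just the attributes BatchSampler touches.
        def __init__(self, lengths):
            self.indexed_dataset = [
                {"source_len": n, "target_len": n // 2} for n in lengths
            ]
            self.shuffle_idx = list(range(len(lengths)))

        def __len__(self):
            return len(self.indexed_dataset)

    args = argparse.Namespace(
        batch_size_type="token",  # count padded tokens; "example" would count samples instead
        batch_size=64,            # token budget per batch (num samples * longest sample)
        sort_size=4,              # window of samples that are length-sorted together
        max_length_token=100,     # samples longer than this are skipped
    )
    sampler = BatchSampler(dataset=_ToyDataset([10, 20, 5, 40, 15, 30, 25, 12]), args=args)
    for batch in sampler:
        print(batch)  # lists of dataset indices whose padded size fits the budget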