Merge pull request #445 from zhuzizyf/main

Fix memory and performance issues caused by long-term use of streaming VAD
Authored by zhifu gao on 2023-05-03 08:15:32 +08:00, committed by GitHub
commit 2d2bcdcbd3

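The core of the change, visible in the diff below, is that the streaming VAD no longer keeps the full audio stream in memory: the ever-growing self.data_buf / self.data_buf_all arrays are replaced by plain integer counters (self.data_buf_size / self.data_buf_all_size), so memory and per-chunk work stay flat no matter how long the stream runs. A minimal sketch of that bookkeeping, using a simplified, hypothetical StreamingSampleCounter helper for illustration only (not part of the patch):

import numpy as np

class StreamingSampleCounter:
    """Toy illustration: track sample counts instead of storing the audio itself."""

    def __init__(self) -> None:
        self.data_buf_all_size = 0  # total samples received since the stream started
        self.data_buf_size = 0      # samples still held in the logical buffer

    def feed(self, chunk: np.ndarray) -> None:
        # Old behaviour: np.concatenate((data_buf_all, chunk)) -> memory grows with stream length.
        # New behaviour: only integer counters are updated -> O(1) memory per chunk.
        self.data_buf_all_size += len(chunk)
        self.data_buf_size += len(chunk)

    def pop(self, n_samples: int) -> None:
        # Consuming audio just shrinks the logical buffer size.
        self.data_buf_size = max(0, self.data_buf_size - n_samples)

counter = StreamingSampleCounter()
for _ in range(10_000):                              # simulate a long-running stream
    counter.feed(np.zeros(1600, dtype=np.float32))   # 100 ms chunks at 16 kHz
    counter.pop(1600)
print(counter.data_buf_all_size, counter.data_buf_size)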

@@ -229,10 +229,11 @@ class E2EVadModel():
         self.max_end_sil_frame_cnt_thresh = self.vad_opts.max_end_silence_time - self.vad_opts.speech_to_sil_time_thres
         self.speech_noise_thres = self.vad_opts.speech_noise_thres
         self.scores = None
+        self.idx_pre_chunk = 0
         self.max_time_out = False
         self.decibel = []
-        self.data_buf = None
-        self.data_buf_all = None
+        self.data_buf_size = 0
+        self.data_buf_all_size = 0
         self.waveform = None
         self.ResetDetection()
@@ -259,10 +260,11 @@ class E2EVadModel():
         self.max_end_sil_frame_cnt_thresh = self.vad_opts.max_end_silence_time - self.vad_opts.speech_to_sil_time_thres
         self.speech_noise_thres = self.vad_opts.speech_noise_thres
         self.scores = None
+        self.idx_pre_chunk = 0
         self.max_time_out = False
         self.decibel = []
-        self.data_buf = None
-        self.data_buf_all = None
+        self.data_buf_size = 0
+        self.data_buf_all_size = 0
         self.waveform = None
         self.ResetDetection()
@@ -280,11 +282,11 @@ class E2EVadModel():
     def ComputeDecibel(self) -> None:
         frame_sample_length = int(self.vad_opts.frame_length_ms * self.vad_opts.sample_rate / 1000)
         frame_shift_length = int(self.vad_opts.frame_in_ms * self.vad_opts.sample_rate / 1000)
-        if self.data_buf_all is None:
-            self.data_buf_all = self.waveform[0]  # self.data_buf is pointed to self.waveform[0]
-            self.data_buf = self.data_buf_all
+        if self.data_buf_all_size == 0:
+            self.data_buf_all_size = len(self.waveform[0])
+            self.data_buf_size = self.data_buf_all_size
         else:
-            self.data_buf_all = np.concatenate((self.data_buf_all, self.waveform[0]))
+            self.data_buf_all_size += len(self.waveform[0])
         for offset in range(0, self.waveform.shape[1] - frame_sample_length + 1, frame_shift_length):
             self.decibel.append(
                 10 * math.log10(np.square((self.waveform[0][offset: offset + frame_sample_length])).sum() + \
@@ -294,17 +296,14 @@ class E2EVadModel():
         # scores = self.encoder(feats, in_cache)  # return B * T * D
         self.vad_opts.nn_eval_block_size = scores.shape[1]
         self.frm_cnt += scores.shape[1]  # count total frames
-        if self.scores is None:
-            self.scores = scores  # the first calculation
-        else:
-            self.scores = np.concatenate((self.scores, scores), axis=1)
+        self.scores = scores

     def PopDataBufTillFrame(self, frame_idx: int) -> None:  # need check again
         while self.data_buf_start_frame < frame_idx:
-            if len(self.data_buf) >= int(self.vad_opts.frame_in_ms * self.vad_opts.sample_rate / 1000):
+            if self.data_buf_size >= int(self.vad_opts.frame_in_ms * self.vad_opts.sample_rate / 1000):
                 self.data_buf_start_frame += 1
-                self.data_buf = self.data_buf_all[self.data_buf_start_frame * int(
-                    self.vad_opts.frame_in_ms * self.vad_opts.sample_rate / 1000):]
+                self.data_buf_size = self.data_buf_all_size - self.data_buf_start_frame * int(
+                    self.vad_opts.frame_in_ms * self.vad_opts.sample_rate / 1000)

     def PopDataToOutputBuf(self, start_frm: int, frm_cnt: int, first_frm_is_start_point: bool,
                            last_frm_is_end_point: bool, end_point_is_sent_end: bool) -> None:
@@ -315,8 +314,8 @@ class E2EVadModel():
                                       self.vad_opts.sample_rate * self.vad_opts.frame_in_ms / 1000))
             expected_sample_number += int(extra_sample)
         if end_point_is_sent_end:
-            expected_sample_number = max(expected_sample_number, len(self.data_buf))
-        if len(self.data_buf) < expected_sample_number:
+            expected_sample_number = max(expected_sample_number, self.data_buf_size)
+        if self.data_buf_size < expected_sample_number:
             print('error in calling pop data_buf\n')

         if len(self.output_data_buf) == 0 or first_frm_is_start_point:
@@ -334,10 +333,10 @@ class E2EVadModel():
             data_to_pop = expected_sample_number
         else:
             data_to_pop = int(frm_cnt * self.vad_opts.frame_in_ms * self.vad_opts.sample_rate / 1000)
-        if data_to_pop > len(self.data_buf):
-            print('VAD data_to_pop is bigger than self.data_buf.size()!!!\n')
-            data_to_pop = len(self.data_buf)
-            expected_sample_number = len(self.data_buf)
+        if data_to_pop > self.data_buf_size:
+            print('VAD data_to_pop is bigger than self.data_buf_size!!!\n')
+            data_to_pop = self.data_buf_size
+            expected_sample_number = self.data_buf_size

         cur_seg.doa = 0
         for sample_cpy_out in range(0, data_to_pop):
@@ -420,7 +419,7 @@ class E2EVadModel():
         assert len(self.sil_pdf_ids) == self.vad_opts.silence_pdf_num
         if len(self.sil_pdf_ids) > 0:
             assert len(self.scores) == 1  # only batch_size = 1 is supported
-            sil_pdf_scores = [self.scores[0][t][sil_pdf_id] for sil_pdf_id in self.sil_pdf_ids]
+            sil_pdf_scores = [self.scores[0][t - self.idx_pre_chunk][sil_pdf_id] for sil_pdf_id in self.sil_pdf_ids]
             sum_score = sum(sil_pdf_scores)
             noise_prob = math.log(sum_score) * self.vad_opts.speech_2_noise_ratio
             total_score = 1.0
@@ -502,7 +501,7 @@ class E2EVadModel():
             frame_state = FrameState.kFrameStateInvalid
             frame_state = self.GetFrameState(self.frm_cnt - 1 - i)
             self.DetectOneFrame(frame_state, self.frm_cnt - 1 - i, False)
+        self.idx_pre_chunk += self.scores.shape[1]
         return 0

     def DetectLastFrames(self) -> int:
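The same principle is applied to the frame-level scores: instead of concatenating every chunk's scores, only the current chunk is kept, and self.idx_pre_chunk (advanced by scores.shape[1] once each chunk has been processed) translates absolute frame indices into chunk-local ones, which is why the silence-score lookup becomes self.scores[0][t - self.idx_pre_chunk]. A minimal sketch of that indexing scheme, using a hypothetical ChunkScores helper for illustration only (not part of the patch):

import numpy as np

class ChunkScores:
    """Toy illustration: keep only the latest score chunk plus a running offset."""

    def __init__(self) -> None:
        self.scores = None       # scores of the current chunk only, shape (1, T, D)
        self.idx_pre_chunk = 0   # number of score frames in all previous chunks

    def push_chunk(self, scores: np.ndarray) -> None:
        # Old behaviour: self.scores = np.concatenate((self.scores, scores), axis=1)
        # New behaviour: previous chunks are dropped, only the offset is advanced.
        if self.scores is not None:
            self.idx_pre_chunk += self.scores.shape[1]
        self.scores = scores

    def get_score(self, t: int, pdf_id: int) -> float:
        # Absolute frame index t -> index inside the current chunk.
        return float(self.scores[0][t - self.idx_pre_chunk][pdf_id])

cs = ChunkScores()
cs.push_chunk(np.random.rand(1, 5, 4))   # frames 0..4
cs.push_chunk(np.random.rand(1, 5, 4))   # frames 5..9
print(cs.get_score(7, 0))                # reads chunk-local frame 2 of the current chunk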