Merge pull request #267 from alibaba-damo-academy/dev_sx

fix bug for onnx paraformer-long
zhifu gao 2023-03-20 19:55:42 +08:00 committed by GitHub
commit 837c5001d4
4 changed files with 37 additions and 31 deletions


@@ -62,26 +62,28 @@ class Paraformer():
             am_scores, valid_token_lens = outputs[0], outputs[1]
             if len(outputs) == 4:
                 # for BiCifParaformer Inference
-                us_alphas, us_cif_peak = outputs[2], outputs[3]
+                us_alphas, us_peaks = outputs[2], outputs[3]
             else:
-                us_alphas, us_cif_peak = None, None
+                us_alphas, us_peaks = None, None
         except:
             #logging.warning(traceback.format_exc())
             logging.warning("input wav is silence or noise")
             preds = ['']
         else:
             am_scores, valid_token_lens = am_scores.detach().cpu().numpy(), valid_token_lens.detach().cpu().numpy()
             preds = self.decode(am_scores, valid_token_lens)
-            if us_cif_peak is None:
+            if us_peaks is None:
                 for pred in preds:
                     pred = sentence_postprocess(pred)
                     asr_res.append({'preds': pred})
             else:
-                for pred, us_cif_peak_ in zip(preds, us_cif_peak):
-                    text, tokens = pred
-                    timestamp, timestamp_total = time_stamp_lfr6_onnx(us_cif_peak_, copy.copy(tokens))
+                for pred, us_peaks_ in zip(preds, us_peaks):
+                    raw_tokens = pred
+                    timestamp, timestamp_raw = time_stamp_lfr6_onnx(us_peaks_, copy.copy(raw_tokens))
+                    text_proc, timestamp_proc, _ = sentence_postprocess(raw_tokens, timestamp_raw)
                     # logging.warning(timestamp)
                     if len(self.plot_timestamp_to):
-                        self.plot_wave_timestamp(waveform_list[0], timestamp_total, self.plot_timestamp_to)
-                    asr_res.append({'preds': text, 'timestamp': timestamp})
+                        self.plot_wave_timestamp(waveform_list[0], timestamp, self.plot_timestamp_to)
+                    asr_res.append({'preds': text_proc, 'timestamp': timestamp_proc, "raw_tokens": raw_tokens})
         return asr_res

     def plot_wave_timestamp(self, wav, text_timestamp, dest):

@@ -182,6 +184,6 @@ class Paraformer():
         # Change integer-ids to tokens
         token = self.converter.ids2tokens(token_int)
         token = token[:valid_token_num-self.pred_bias]
-        texts = sentence_postprocess(token)
-        return texts
+        # texts = sentence_postprocess(token)
+        return token
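After this change, decode() hands back raw tokens, and the timestamped branch assembles each result from three pieces: the postprocessed text, the postprocessed timestamps, and the raw token list. A minimal consumption sketch, assuming a constructor of the form Paraformer(model_dir) and wav-path input (both placeholders, not defined by this PR):

# Hypothetical usage; construction and input format are assumptions, only
# the keys of each result dict come from the diff above.
model = Paraformer("/path/to/model_dir")
asr_res = model(["sample.wav"])
for res in asr_res:
    print(res["preds"])       # postprocessed text
    print(res["raw_tokens"])  # tokens as returned by decode(), before postprocessing
    print(res["timestamp"])   # per-token timestamps after sentence_postprocess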


@@ -1,11 +1,11 @@
 import numpy as np

-def time_stamp_lfr6_onnx(us_cif_peak, char_list, begin_time=0.0):
+def time_stamp_lfr6_onnx(us_cif_peak, char_list, begin_time=0.0, total_offset=-1.5):
     if not len(char_list):
         return []
     START_END_THRESHOLD = 5
-    MAX_TOKEN_DURATION = 14
+    MAX_TOKEN_DURATION = 30
     TIME_RATE = 10.0 * 6 / 1000 / 3  # 3 times upsampled
     cif_peak = us_cif_peak.reshape(-1)
     num_frames = cif_peak.shape[-1]

@@ -16,7 +16,7 @@ def time_stamp_lfr6_onnx(us_cif_peak, char_list, begin_time=0.0):
     new_char_list = []
     # for bicif model trained with large data, cif2 actually fires when a character starts
     # so treat the frames between two peaks as the duration of the former token
-    fire_place = np.where(cif_peak>1.0-1e-4)[0] - 1.5  # np format
+    fire_place = np.where(cif_peak>1.0-1e-4)[0] + total_offset  # np format
     num_peak = len(fire_place)
     assert num_peak == len(char_list) + 1  # number of peaks is supposed to be number of tokens + 1
     # begin silence

@@ -27,7 +27,7 @@ def time_stamp_lfr6_onnx(us_cif_peak, char_list, begin_time=0.0):
     # tokens timestamp
     for i in range(len(fire_place)-1):
         new_char_list.append(char_list[i])
-        if MAX_TOKEN_DURATION < 0 or fire_place[i+1] - fire_place[i] < MAX_TOKEN_DURATION:
+        if i == len(fire_place)-2 or MAX_TOKEN_DURATION < 0 or fire_place[i+1] - fire_place[i] < MAX_TOKEN_DURATION:
             timestamp_list.append([fire_place[i]*TIME_RATE, fire_place[i+1]*TIME_RATE])
         else:
             # cut the duration to token and sil of the 0-weight frames last long

@@ -48,11 +48,12 @@ def time_stamp_lfr6_onnx(us_cif_peak, char_list, begin_time=0.0):
         timestamp_list[i][0] = timestamp_list[i][0] + begin_time / 1000.0
         timestamp_list[i][1] = timestamp_list[i][1] + begin_time / 1000.0
     assert len(new_char_list) == len(timestamp_list)
-    res_total = []
+    res_str = ""
     for char, timestamp in zip(new_char_list, timestamp_list):
-        res_total.append([char, timestamp[0], timestamp[1]]) # += "{} {} {};".format(char, timestamp[0], timestamp[1])
+        res_str += "{} {} {};".format(char, timestamp[0], timestamp[1])
     res = []
     for char, timestamp in zip(new_char_list, timestamp_list):
         if char != '<sil>':
             res.append([int(timestamp[0] * 1000), int(timestamp[1] * 1000)])
-    return res, res_total
+    return res_str, res
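Taken together, these hunks make three behavioral changes: the frame offset applied to CIF peaks becomes a parameter (total_offset), the final token is exempted from the MAX_TOKEN_DURATION cut, and the function now returns a flat "char start end;" string alongside the millisecond pairs. The peak-to-time conversion itself is small enough to demo on synthetic data (the peak positions below are made up, not real model output):

import numpy as np

TIME_RATE = 10.0 * 6 / 1000 / 3   # 0.02 s per upsampled frame
total_offset = -1.5

# Pretend cif2 fired at four frames; n peaks delimit n-1 tokens.
cif_peak = np.zeros(60)
cif_peak[[5, 20, 41, 55]] = 1.0
fire_place = np.where(cif_peak > 1.0 - 1e-4)[0] + total_offset

for i in range(len(fire_place) - 1):
    start_s = fire_place[i] * TIME_RATE
    end_s = fire_place[i + 1] * TIME_RATE
    print("token %d: %.3f s - %.3f s" % (i, start_s, end_s))

The remaining two files in the commit apply the same edits to a second copy of the Paraformer class (apparently the onnxruntime variant, judging by its ONNXRuntimeError handler) and a second copy of time_stamp_lfr6_onnx.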


@@ -64,25 +64,28 @@ class Paraformer():
             am_scores, valid_token_lens = outputs[0], outputs[1]
             if len(outputs) == 4:
                 # for BiCifParaformer Inference
-                us_alphas, us_cif_peak = outputs[2], outputs[3]
+                us_alphas, us_peaks = outputs[2], outputs[3]
             else:
-                us_alphas, us_cif_peak = None, None
+                us_alphas, us_peaks = None, None
         except ONNXRuntimeError:
             #logging.warning(traceback.format_exc())
             logging.warning("input wav is silence or noise")
             preds = ['']
         else:
             preds = self.decode(am_scores, valid_token_lens)
-            if us_cif_peak is None:
+            if us_peaks is None:
                 for pred in preds:
                     pred = sentence_postprocess(pred)
                     asr_res.append({'preds': pred})
             else:
-                for pred, us_cif_peak_ in zip(preds, us_cif_peak):
-                    text, tokens = pred
-                    timestamp, timestamp_total = time_stamp_lfr6_onnx(us_cif_peak_, copy.copy(tokens))
+                for pred, us_peaks_ in zip(preds, us_peaks):
+                    raw_tokens = pred
+                    timestamp, timestamp_raw = time_stamp_lfr6_onnx(us_peaks_, copy.copy(raw_tokens))
+                    text_proc, timestamp_proc, _ = sentence_postprocess(raw_tokens, timestamp_raw)
                     # logging.warning(timestamp)
                     if len(self.plot_timestamp_to):
-                        self.plot_wave_timestamp(waveform_list[0], timestamp_total, self.plot_timestamp_to)
-                    asr_res.append({'preds': text, 'timestamp': timestamp})
+                        self.plot_wave_timestamp(waveform_list[0], timestamp, self.plot_timestamp_to)
+                    asr_res.append({'preds': text_proc, 'timestamp': timestamp_proc, "raw_tokens": raw_tokens})
         return asr_res

     def plot_wave_timestamp(self, wav, text_timestamp, dest):

@@ -181,6 +184,6 @@ class Paraformer():
         # Change integer-ids to tokens
         token = self.converter.ids2tokens(token_int)
         token = token[:valid_token_num-self.pred_bias]
-        texts = sentence_postprocess(token)
-        return texts
+        # texts = sentence_postprocess(token)
+        return token


@@ -48,12 +48,12 @@ def time_stamp_lfr6_onnx(us_cif_peak, char_list, begin_time=0.0, total_offset=-1.5):
         timestamp_list[i][0] = timestamp_list[i][0] + begin_time / 1000.0
         timestamp_list[i][1] = timestamp_list[i][1] + begin_time / 1000.0
     assert len(new_char_list) == len(timestamp_list)
-    res_total = []
+    res_str = ""
     for char, timestamp in zip(new_char_list, timestamp_list):
-        res_total.append([char, timestamp[0], timestamp[1]]) # += "{} {} {};".format(char, timestamp[0], timestamp[1])
+        res_str += "{} {} {};".format(char, timestamp[0], timestamp[1])
     res = []
     for char, timestamp in zip(new_char_list, timestamp_list):
         if char != '<sil>':
             res.append([int(timestamp[0] * 1000), int(timestamp[1] * 1000)])
-    return res, res_total
+    return res_str, res
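For reference, the new return contract of time_stamp_lfr6_onnx, demonstrated on made-up characters and times: res_str keeps every entry, silences included, as a flat string, while res keeps only the non-silence tokens as [start_ms, end_ms] pairs.

# Synthetic example of the (res_str, res) return contract.
new_char_list = ['<sil>', '你', '好', '<sil>']
timestamp_list = [[0.0, 0.1], [0.1, 0.38], [0.38, 0.6], [0.6, 0.9]]

res_str = ""
for char, timestamp in zip(new_char_list, timestamp_list):
    res_str += "{} {} {};".format(char, timestamp[0], timestamp[1])

res = []
for char, timestamp in zip(new_char_list, timestamp_list):
    if char != '<sil>':
        res.append([int(timestamp[0] * 1000), int(timestamp[1] * 1000)])

print(res_str)  # <sil> 0.0 0.1;你 0.1 0.38;好 0.38 0.6;<sil> 0.6 0.9;
print(res)      # [[100, 380], [380, 600]]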