mirror of
https://github.com/modelscope/FunASR
synced 2025-09-15 14:48:36 +08:00
fix bug in asr_inference_paraformer_vad_punc and support running without a punc model
This commit is contained in:
parent
bcf6be4c90
commit
5db5950e07
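
In short, this commit makes four changes to inference_modelscope in asr_inference_paraformer_vad_punc: the Text2Punc model is now constructed only when punc_model_file is given, and punctuation restoration is skipped otherwise; the result_segments initializer gains its missing fourth (timestamp) field; segments for which speech2text returns no hypotheses are skipped with continue instead of being padded with a fabricated silence result, which also lets the per-segment RTF bookkeeping be dropped; and result items are appended to asr_result_list regardless of whether an output directory is set, with optional fields included only when populated.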
@@ -529,7 +529,8 @@ def inference_modelscope(
         nbest=nbest,
     )
     speech2text = Speech2Text(**speech2text_kwargs)
-    text2punc = Text2Punc(punc_infer_config, punc_model_file, device=device, dtype=dtype)
+    text2punc = None
+    if punc_model_file is not None:
+        text2punc = Text2Punc(punc_infer_config, punc_model_file, device=device, dtype=dtype)
 
     if output_dir is not None:
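
The hunk above is the core of the "without punc model" support: text2punc defaults to None and the model is instantiated only when a file is supplied. A minimal, self-contained sketch of the pattern (the Text2Punc class and load_punc helper below are stand-ins for illustration, not the FunASR implementation):

    class Text2Punc:
        """Stand-in for FunASR's Text2Punc; the real class loads a model."""
        def __init__(self, infer_config, model_file, device="cpu", dtype="float32"):
            self.model_file = model_file

        def __call__(self, word_lists, split_size):
            # The real model inserts punctuation; this stub just joins words.
            return " ".join(word_lists) + ".", []

    def load_punc(punc_infer_config, punc_model_file, device="cpu", dtype="float32"):
        # load_punc is a hypothetical helper; the guard inside it is the one
        # introduced by this commit: no model file, no model.
        text2punc = None
        if punc_model_file is not None:
            text2punc = Text2Punc(punc_infer_config, punc_model_file, device=device, dtype=dtype)
        return text2punc

    text2punc = load_punc("punc_config.yaml", None)  # run without a punc model
    word_lists = ["hello", "world"]
    text_postprocessed_punc = ""
    if len(word_lists) > 0 and text2punc is not None:
        text_postprocessed_punc, _ = text2punc(word_lists, 20)
    print(text_postprocessed_punc or "(no punc model: punctuated text left empty)")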
@@ -561,37 +562,27 @@ def inference_modelscope(
             inference=True,
         )
 
-        forward_time_total = 0.0
-        length_total = 0.0
         finish_count = 0
         file_count = 1
         lfr_factor = 6
         # 7 .Start for-loop
         asr_result_list = []
         output_path = output_dir_v2 if output_dir_v2 is not None else output_dir
+        writer = None
         if output_path is not None:
             writer = DatadirWriter(output_path)
             ibest_writer = writer[f"1best_recog"]
-            # ibest_writer["punc_dict"][""] = " ".join(punc_infer_config.punc_list)
-            # ibest_writer["token_list"][""] = " ".join(asr_train_config.token_list)
-        else:
-            writer = None
 
         for keys, batch in loader:
             assert isinstance(batch, dict), type(batch)
             assert all(isinstance(s, str) for s in keys), keys
             _bs = len(next(iter(batch.values())))
             assert len(keys) == _bs, f"{len(keys)} != {_bs}"
-            # batch = {k: v for k, v in batch.items() if not k.endswith("_lengths")}
 
-            logging.info("decoding, utt_id: {}".format(keys))
-            # N-best list of (text, token, token_int, hyp_object)
-            time_beg = time.time()
             vad_results = speech2vadsegment(**batch)
-            time_end = time.time()
             fbanks, vadsegments = vad_results[0], vad_results[1]
             for i, segments in enumerate(vadsegments):
-                result_segments = [["", [], [], ]]
+                result_segments = [["", [], [], []]]
                 for j, segment_idx in enumerate(segments):
                     bed_idx, end_idx = int(segment_idx[0] / 10), int(segment_idx[1] / 10)
                     segment = fbanks[:, bed_idx:end_idx, :].to(device)
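
The result_segments change above is presumably the bug named in the commit title: the trailing comma in the old literal does not add a fourth element, so the default segment carried only three fields (text, token, token_int) where the code further down also expects a timestamp slot. A quick check:

    old_init = [["", [], [], ]]    # trailing comma: the inner list has 3 fields
    new_init = [["", [], [], []]]  # 4 fields: text, token, token_int, time_stamp

    for result in (old_init[0], new_init[0]):
        # Same guard the decoding loop applies further down:
        time_stamp = None if len(result) < 4 else result[3]
        print(len(result), time_stamp)
    # 3 None   <- old shape silently drops the timestamp slot
    # 4 []     <- new shape keeps an (empty) timestamp list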
@@ -600,17 +591,8 @@ def inference_modelscope(
                              "end_time": vadsegments[i][j][1]}
                     results = speech2text(**batch)
                     if len(results) < 1:
-                        hyp = Hypothesis(score=0.0, scores={}, states={}, yseq=[])
-                        results = [[" ", ["sil"], [2], 0, 1, 6]] * nbest
-                    time_end = time.time()
-                    forward_time = time_end - time_beg
-                    lfr_factor = results[0][-1]
-                    length = results[0][-2]
-                    forward_time_total += forward_time
-                    length_total += length
-                    logging.info(
-                        "decoding, feature length: {}, forward_time: {:.4f}, rtf: {:.4f}".
-                        format(length, forward_time, 100 * forward_time / (length * lfr_factor)))
+                        continue
                     result_cur = [results[0][:-2]]
                     if j == 0:
                         result_segments = result_cur
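
Previously an empty recognizer output was replaced by a fabricated silence hypothesis so that the timing code below it always had something to index; with that bookkeeping removed, the segment can simply be skipped. A toy contrast of the two behaviours (the recognize stub is illustrative, not the FunASR API):

    def recognize(segment):
        # Illustrative stub: returns no hypotheses for an "empty" segment.
        return [] if not segment else [(segment, list(segment))]

    def decode_old(segments):
        out = []
        for seg in segments:
            results = recognize(seg)
            if len(results) < 1:
                results = [(" ", ["sil"])]  # fabricate a silence placeholder
            out.append(results[0])
        return out

    def decode_new(segments):
        out = []
        for seg in segments:
            results = recognize(seg)
            if len(results) < 1:
                continue  # skip segments with no recognition result
            out.append(results[0])
        return out

    print(decode_old(["abc", ""]))  # [('abc', ['a', 'b', 'c']), (' ', ['sil'])]
    print(decode_new(["abc", ""]))  # [('abc', ['a', 'b', 'c'])]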
@@ -622,54 +604,38 @@ def inference_modelscope(
                 text, token, token_int = result[0], result[1], result[2]
                 time_stamp = None if len(result) < 4 else result[3]
 
-                # Create a directory: outdir/{n}best_recog
+                postprocessed_result = postprocess_utils.sentence_postprocess(token, time_stamp)
+                text_postprocessed = ""
+                time_stamp_postprocessed = ""
+                text_postprocessed_punc = postprocessed_result
+                if len(postprocessed_result) == 3:
+                    text_postprocessed, time_stamp_postprocessed, word_lists = postprocessed_result[0], \
+                                                                               postprocessed_result[1], \
+                                                                               postprocessed_result[2]
+                    text_postprocessed_punc = ""
+                    if len(word_lists) > 0 and text2punc is not None:
+                        text_postprocessed_punc, punc_id_list = text2punc(word_lists, 20)
+
+                item = {'key': key, 'value': text_postprocessed_punc}
+                if text_postprocessed != "":
+                    item['text_postprocessed'] = text_postprocessed
+                if time_stamp_postprocessed != "":
+                    item['time_stamp'] = time_stamp_postprocessed
+
+                asr_result_list.append(item)
+                finish_count += 1
+                # asr_utils.print_progress(finish_count / file_count)
                 if writer is not None:
                     # Write the result to each file
                     ibest_writer["token"][key] = " ".join(token)
                     ibest_writer["token_int"][key] = " ".join(map(str, token_int))
                     ibest_writer["vad"][key] = "{}".format(vadsegments)
 
-                    if text is not None:
-                        postprocessed_result = postprocess_utils.sentence_postprocess(token, time_stamp)
-                        if len(postprocessed_result) == 3:
-                            text_postprocessed, time_stamp_postprocessed, word_lists = postprocessed_result[0], \
-                                                                                       postprocessed_result[1], \
-                                                                                       postprocessed_result[2]
-                            if len(word_lists) > 0:
-                                text_postprocessed_punc, punc_id_list = text2punc(word_lists, 20)
-                                text_postprocessed_punc_time_stamp = json.dumps({"predictions": text_postprocessed_punc,
-                                                                                 "time_stamp": time_stamp_postprocessed},
-                                                                                ensure_ascii=False)
-                            else:
-                                text_postprocessed_punc = ""
-                                punc_id_list = []
-                                text_postprocessed_punc_time_stamp = ""
-
-                        else:
-                            text_postprocessed = ""
-                            time_stamp_postprocessed = ""
-                            word_lists = ""
-                            text_postprocessed_punc_time_stamp = ""
-                            punc_id_list = ""
-                            text_postprocessed_punc = ""
-
-                    item = {'key': key, 'value': text_postprocessed_punc, 'text_postprocessed': text_postprocessed,
-                            'time_stamp': time_stamp_postprocessed, 'token': token}
-                    asr_result_list.append(item)
-                    finish_count += 1
-                    # asr_utils.print_progress(finish_count / file_count)
-                    if writer is not None:
                     ibest_writer["text"][key] = text_postprocessed
-                    ibest_writer["punc_id"][key] = "{}".format(punc_id_list)
-                    ibest_writer["text_with_punc"][key] = text_postprocessed_punc_time_stamp
+                    ibest_writer["text_with_punc"][key] = text_postprocessed_punc
                     if time_stamp_postprocessed is not None:
                         ibest_writer["time_stamp"][key] = "{}".format(time_stamp_postprocessed)
 
-                logging.info("decoding, utt: {}, predictions: {}, time_stamp: {}".format(key, text_postprocessed_punc,
-                                                                                         time_stamp_postprocessed))
+                logging.info("decoding, utt: {}, predictions: {}".format(key, text_postprocessed_punc))
 
-        logging.info("decoding, feature length total: {}, forward_time total: {:.4f}, rtf avg: {:.4f}".
-                     format(length_total, forward_time_total, 100 * forward_time_total / (length_total * lfr_factor+1e-6)))
         return asr_result_list
 
     return _forward
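
One more note on the hunk above: the new code assembles the per-utterance result with mandatory key/value fields first, attaching text_postprocessed and time_stamp only when the postprocessor actually produced them, so runs without a punc model or timestamps yield no empty placeholder fields. A condensed stand-in of that shape (build_item is hypothetical, for illustration):

    def build_item(key, value, text_postprocessed="", time_stamp=""):
        # Hypothetical condensation of the item assembly in the new code:
        # optional fields are attached only when non-empty.
        item = {'key': key, 'value': value}
        if text_postprocessed != "":
            item['text_postprocessed'] = text_postprocessed
        if time_stamp != "":
            item['time_stamp'] = time_stamp
        return item

    print(build_item("utt1", "hello, world.", "hello world", [[0, 500]]))
    # {'key': 'utt1', 'value': 'hello, world.', 'text_postprocessed': 'hello world', 'time_stamp': [[0, 500]]}
    print(build_item("utt2", "hello world"))
    # {'key': 'utt2', 'value': 'hello world'}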