From bbea0265f160b0180df5e7cf9ce1bc6492f06d3a Mon Sep 17 00:00:00 2001 From: "shixian.shi" Date: Tue, 27 Jun 2023 19:17:26 +0800 Subject: [PATCH] tokenize: fix hotword lookup — call find() on the joined text, not join() on the find() result --- funasr/datasets/large_datasets/utils/tokenize.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/funasr/datasets/large_datasets/utils/tokenize.py b/funasr/datasets/large_datasets/utils/tokenize.py index 61a1ad001..6ecfb2b63 100644 --- a/funasr/datasets/large_datasets/utils/tokenize.py +++ b/funasr/datasets/large_datasets/utils/tokenize.py @@ -59,7 +59,7 @@ def tokenize(data, pre_index = None for hw in hw_config['pre_hwlist']: hw = " ".join(seg_tokenize(hw, seg_dict)) - _find = " ".join(text.find(hw)) + _find = " ".join(text).find(hw) if _find != -1: _find = text[:_find].count(" ") # bpe sometimes pre_index = [_find, _find + max(hw.count(" "), 1)]