From 5aec896e02e1403d2cbded5027349759b5668b46 Mon Sep 17 00:00:00 2001
From: "haoneng.lhn"
Date: Mon, 17 Apr 2023 19:01:10 +0800
Subject: [PATCH] update

---
 funasr/datasets/large_datasets/utils/tokenize.py | 4 ++--
 funasr/datasets/preprocessor.py                  | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/funasr/datasets/large_datasets/utils/tokenize.py b/funasr/datasets/large_datasets/utils/tokenize.py
index 5a2f921f4..022d32131 100644
--- a/funasr/datasets/large_datasets/utils/tokenize.py
+++ b/funasr/datasets/large_datasets/utils/tokenize.py
@@ -19,6 +19,7 @@ def forward_segment(text, seg_dict):
 def seg_tokenize(txt, seg_dict):
     out_txt = ""
     for word in txt:
+        word = word.lower()
         if word in seg_dict:
             out_txt += seg_dict[word] + " "
         else:
@@ -41,8 +42,7 @@ def tokenize(data,
 
     if seg_dict is not None:
         assert isinstance(seg_dict, dict)
-        txt = forward_segment("".join(text).lower(), seg_dict)
-        text = seg_tokenize(txt, seg_dict)
+        text = seg_tokenize(text, seg_dict)
 
     length = len(text)
     for i in range(length):
diff --git a/funasr/datasets/preprocessor.py b/funasr/datasets/preprocessor.py
index 1adca0597..20a38314a 100644
--- a/funasr/datasets/preprocessor.py
+++ b/funasr/datasets/preprocessor.py
@@ -48,6 +48,7 @@ def forward_segment(text, dic):
 def seg_tokenize(txt, seg_dict):
     out_txt = ""
     for word in txt:
+        word = word.lower()
         if word in seg_dict:
             out_txt += seg_dict[word] + " "
         else:
@@ -359,7 +360,6 @@ class CommonPreprocessor(AbsPreprocessor):
         if self.split_with_space:
            tokens = text.strip().split(" ")
            if self.seg_dict is not None:
-               tokens = forward_segment("".join(tokens), self.seg_dict)
                tokens = seg_tokenize(tokens, self.seg_dict)
        else:
            tokens = self.tokenizer.text2tokens(text)
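
Note (not part of the patch): after this change, seg_tokenize lowercases each
word individually before the seg_dict lookup, and the forward_segment
maximum-matching pass over the joined, lowercased string is dropped in both
files; the incoming token list is looked up directly. Below is a minimal
sketch of the resulting behavior. The "<unk>" fallback and the return
statement are assumptions, since both hunks truncate at "else:"; the toy
seg_dict is hypothetical.

    def seg_tokenize(txt, seg_dict):
        # txt is a list of tokens; seg_dict maps a lowercased word to its
        # space-separated sub-word pieces.
        out_txt = ""
        for word in txt:
            word = word.lower()  # per-word lowercasing added by this patch
            if word in seg_dict:
                out_txt += seg_dict[word] + " "
            else:
                out_txt += "<unk>" + " "  # assumed fallback for OOV words
        # Assumed return: a token list, consistent with the caller doing
        # text = seg_tokenize(text, seg_dict) followed by len(text).
        return out_txt.strip().split(" ")

    # Hypothetical seg_dict for illustration:
    seg_dict = {"hello": "he llo", "world": "wor ld"}
    print(seg_tokenize(["Hello", "WORLD"], seg_dict))
    # ['he', 'llo', 'wor', 'ld'] -- mixed-case input now matches,
    # which the old whole-string lower() also handled, but now without
    # re-segmenting the joined text via forward_segment.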