mirror of
https://github.com/modelscope/FunASR
synced 2025-09-15 14:48:36 +08:00
52 lines
1.7 KiB
Python
from pathlib import Path
from typing import Iterable
from typing import List
from typing import Union

import sentencepiece as spm

from funasr.tokenizer.abs_tokenizer import BaseTokenizer
from funasr.register import tables


@tables.register("tokenizer_classes", "SentencepiecesTokenizer")
class SentencepiecesTokenizer(BaseTokenizer):
    """Tokenizer backed by a trained SentencePiece model."""

    def __init__(self, bpemodel: Union[Path, str], **kwargs):
        super().__init__(**kwargs)
        self.bpemodel = str(bpemodel)
        # NOTE(kamo):
        # Don't build SentencePieceProcessor in __init__()
        # because it's not picklable and it may cause the following error,
        # "TypeError: can't pickle SwigPyObject objects",
        # when giving it as an argument of "multiprocessing.Process()".
        self.sp = None
        self._build_sentence_piece_processor()

    def __repr__(self):
        return f'{self.__class__.__name__}(model="{self.bpemodel}")'

    def _build_sentence_piece_processor(self):
        # Build SentencePieceProcessor lazily.
        if self.sp is None:
            self.sp = spm.SentencePieceProcessor()
            self.sp.load(self.bpemodel)

    def text2tokens(self, line: str) -> List[str]:
        # Text -> subword piece strings.
        self._build_sentence_piece_processor()
        return self.sp.EncodeAsPieces(line)

    def tokens2text(self, tokens: Iterable[str]) -> str:
        # Subword piece strings -> text.
        self._build_sentence_piece_processor()
        return self.sp.DecodePieces(list(tokens))

    def encode(self, line: str, **kwargs) -> List[int]:
        # Text -> integer token ids.
        self._build_sentence_piece_processor()
        return self.sp.EncodeAsIds(line)

    def decode(self, line: List[int], **kwargs):
        # Integer token ids -> text.
        self._build_sentence_piece_processor()
        return self.sp.DecodeIds(line)

    def get_vocab_size(self):
        return self.sp.GetPieceSize()
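

# Usage sketch: a minimal round-trip through the tokenizer API above. It
# assumes a trained SentencePiece model at the hypothetical path "bpe.model";
# swap in a real model file before running.
if __name__ == "__main__":
    tokenizer = SentencepiecesTokenizer(bpemodel="bpe.model")

    line = "hello world"
    pieces = tokenizer.text2tokens(line)  # subword strings, e.g. ["▁hello", "▁world"]
    ids = tokenizer.encode(line)          # the same segmentation, as integer ids

    # With a typical model, both views decode back to the original text.
    print(tokenizer.tokens2text(pieces))
    print(tokenizer.decode(ids))
    print(tokenizer.get_vocab_size())  # number of pieces in the model's vocabulary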