Example #1
import pytest
import sentencepiece as spm

from espnet2.text.sentencepieces_tokenizer import SentencepiecesTokenizer


@pytest.fixture
def spm_tokenizer(tmp_path, spm_srcs):
    model, vocabs = spm_srcs
    sp = spm.SentencePieceProcessor()
    sp.load(model)

    # Write the vocabulary to a token list file as a side effect of the fixture.
    token_list = tmp_path / "token.list"
    with token_list.open("w") as f:
        for v in vocabs:
            f.write(f"{v}\n")
    return SentencepiecesTokenizer(model=model)
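
Example #1 depends on a companion spm_srcs fixture that is not shown here. Below is a minimal sketch of what such a fixture might look like, assuming only the public sentencepiece training API; the training corpus and vocabulary size are illustrative, not taken from the original.

import string

import pytest
import sentencepiece as spm


@pytest.fixture
def spm_srcs(tmp_path):
    # Hypothetical fixture (not part of the original example): trains a tiny
    # SentencePiece model on the ASCII letters and returns (model_path, vocabs).
    input_text = tmp_path / "text"
    input_text.write_text(string.ascii_letters + "\n")
    model_prefix = tmp_path / "model"
    spm.SentencePieceTrainer.Train(
        f"--input={input_text} "
        f"--model_prefix={model_prefix} "
        f"--vocab_size={len(string.ascii_letters) + 4}"
    )
    model = str(model_prefix) + ".model"
    sp = spm.SentencePieceProcessor()
    sp.load(model)
    # Collect every piece in the trained model as the vocabulary.
    vocabs = [sp.id_to_piece(i) for i in range(sp.get_piece_size())]
    return model, vocabs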
Example #2
from pathlib import Path
from typing import Iterable, Optional, Union

from typeguard import check_argument_types

from espnet2.text.abs_tokenizer import AbsTokenizer
from espnet2.text.char_tokenizer import CharTokenizer
from espnet2.text.phoneme_tokenizer import PhonemeTokenizer
from espnet2.text.sentencepieces_tokenizer import SentencepiecesTokenizer
from espnet2.text.word_tokenizer import WordTokenizer


def build_tokenizer(
    token_type: str,
    bpemodel: Optional[Union[Path, str, Iterable[str]]] = None,
    non_linguistic_symbols: Optional[Union[Path, str, Iterable[str]]] = None,
    remove_non_linguistic_symbols: bool = False,
    space_symbol: str = "<space>",
    delimiter: Optional[str] = None,
    g2p_type: Optional[str] = None,
) -> AbsTokenizer:
    """A helper function to instantiate a tokenizer of the requested type."""
    assert check_argument_types()
    if token_type == "bpe":
        if bpemodel is None:
            raise ValueError('bpemodel is required if token_type = "bpe"')

        if remove_non_linguistic_symbols:
            raise RuntimeError(
                "remove_non_linguistic_symbols is not implemented for token_type=bpe"
            )
        return SentencepiecesTokenizer(bpemodel)

    elif token_type == "word":
        if remove_non_linguistic_symbols and non_linguistic_symbols is not None:
            return WordTokenizer(
                delimiter=delimiter,
                non_linguistic_symbols=non_linguistic_symbols,
                remove_non_linguistic_symbols=True,
            )
        else:
            return WordTokenizer(delimiter=delimiter)

    elif token_type == "char":
        return CharTokenizer(
            non_linguistic_symbols=non_linguistic_symbols,
            space_symbol=space_symbol,
            remove_non_linguistic_symbols=remove_non_linguistic_symbols,
        )

    elif token_type == "phn":
        if g2p_type is None:
            raise ValueError("g2p_type is required if token_type=phn")
        return PhonemeTokenizer(
            g2p_type=g2p_type,
            non_linguistic_symbols=non_linguistic_symbols,
            space_symbol=space_symbol,
            remove_non_linguistic_symbols=remove_non_linguistic_symbols,
        )
    else:
        raise ValueError(
            f"token_type must be one of bpe, word, char or phn: {token_type}"
        )
Example #3
from espnet2.text.sentencepieces_tokenizer import SentencepiecesTokenizer


def test_text2tokens(spm_tokenizer: SentencepiecesTokenizer):
    # Round trip: detokenizing the tokenized text should recover the input.
    assert spm_tokenizer.tokens2text(spm_tokenizer.text2tokens("Hello")) == "Hello"