Example #1
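Exercises the full tokenization path: the sentence is split into SentencePiece BPE pieces, and every piece missing from the monolingual vocabulary is converted to the unknown-token id.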
    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file,
                                     **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
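        # Ids 0-3 are taken by the special tokens, so the five vocab entries
        # get ids 4-8 and every out-of-vocab piece maps to 3 (<unk>).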
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens),
                             input_bpe_tokens)
Example #2
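The fixture behind the other examples: writes a five-token monolingual vocabulary file into a temporary directory, builds a BartphoTokenizer from it plus the SAMPLE_VOCAB SentencePiece model, and saves the tokenizer for later reloading.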
    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(
            self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
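        # Each line is "<token> <count>" in fairseq dict.txt style; only the
        # token (first field) is read back when the tokenizer loads the file.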
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        # Save to tmpdirname so get_tokenizer() can reload via from_pretrained.
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file,
                                     **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)
Example #3
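A small helper that reloads the saved tokenizer from the temporary directory, merging the special-token map into any extra keyword arguments.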
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)
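
Taken together, the three methods form one unittest fixture: setUp (example #2) writes a tiny monolingual vocabulary and saves a tokenizer into tmpdirname, get_tokenizer (example #3) reloads it, and test_full_tokenizer (example #1) checks tokenization and id conversion against it. Below is a minimal standalone sketch of the same flow, assuming a transformers install with sentencepiece available; the SAMPLE_VOCAB path is a placeholder assumption, not part of the original tests.

import os
import tempfile

from transformers.models.bartpho.tokenization_bartpho import (
    VOCAB_FILES_NAMES,
    BartphoTokenizer,
)

# Assumption: any small SentencePiece BPE model file will do; the transformers
# test suite ships one as a fixture.
SAMPLE_VOCAB = "fixtures/test_sentencepiece_bpe.model"

with tempfile.TemporaryDirectory() as tmpdir:
    # Recreate the monolingual vocab file exactly as setUp does:
    # one "<token> <id>" pair per line.
    monolingual_vocab_file = os.path.join(
        tmpdir, VOCAB_FILES_NAMES["monolingual_vocab_file"])
    with open(monolingual_vocab_file, "w", encoding="utf-8") as fp:
        for idx, token in enumerate(["▁This", "▁is", "▁a", "▁t", "est"]):
            fp.write(f"{token} {idx}\n")

    tokenizer = BartphoTokenizer(SAMPLE_VOCAB, monolingual_vocab_file,
                                 unk_token="<unk>")
    tokenizer.save_pretrained(tmpdir)

    # Round-trip through from_pretrained, as get_tokenizer does.
    reloaded = BartphoTokenizer.from_pretrained(tmpdir)
    print(reloaded.tokenize("This is a là test"))
    # Expected, per test_full_tokenizer: ['▁This', '▁is', '▁a', '▁l', 'à', '▁t', 'est']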