Example no. 1
    def test_full_tokenizer(self):
        # self.vocab_file is expected to come from the test fixture
        # (a setUp sketch follows this snippet); lower_case=True folds
        # the input to lowercase before vocabulary lookup.
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])

        # Ids follow the vocab file's line order: <unk>=0, unwanted=4, ","=8, running=7.
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

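The first variant references self.vocab_file without showing where it is created. A minimal setUp sketch, assuming the import path and vocab-file naming of the transformers test suite (adjust for your installed version):

# Sketch only: the import path and the VOCAB_FILES_NAMES key are assumptions
# based on the transformers library layout; TransfoXL was deprecated in
# recent releases, so the module path may differ.
import os
import unittest
from tempfile import TemporaryDirectory

from transformers.models.transfo_xl.tokenization_transfo_xl import (
    VOCAB_FILES_NAMES,
    TransfoXLTokenizer,
)

class TransfoXLTokenizationTest(unittest.TestCase):
    def setUp(self):
        vocab_tokens = [
            "<unk>", "[CLS]", "[SEP]", "want", "unwanted",
            "wa", "un", "running", ",", "low", "l",
        ]
        self._tmpdir = TemporaryDirectory()  # cleaned up in tearDown
        self.vocab_file = os.path.join(
            self._tmpdir.name, VOCAB_FILES_NAMES["vocab_file"]
        )
        # One token per line; line order determines the token ids.
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("\n".join(vocab_tokens) + "\n")

    def tearDown(self):
        self._tmpdir.cleanup()
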
    def test_full_tokenizer(self):
        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
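        # Ids follow list order: <unk>=0, [CLS]=1, [SEP]=2, want=3, unwanted=4,
        # wa=5, un=6, running=7, ","=8, low=9, l=10.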
        with TemporaryDirectory() as tmpdirname:
            vocab_file = os.path.join(tmpdirname,
                                      VOCAB_FILES_NAMES["vocab_file"])
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("\n".join(vocab_tokens) + "\n")

            input_text = "<unk> UNwanted , running"
            output_text = "<unk> unwanted, running"
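            # Note the expected decode: detokenization re-attaches "," to
            # the preceding word.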

            # Run the shared tokenizer checks (e.g. save/reload and
            # encode/decode round trips) on this input/output pair.
            create_and_check_tokenizer_commons(self,
                                               input_text,
                                               output_text,
                                               TransfoXLTokenizer,
                                               tmpdirname,
                                               lower_case=True)

            tokenizer = TransfoXLTokenizer(vocab_file=vocab_file,
                                           lower_case=True)

            tokens = tokenizer.tokenize("<unk> UNwanted , running")
            self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])

            self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens),
                                 [0, 4, 8, 7])
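
As a complementary check (hypothetical, not part of the original test), the inverse id-to-token mapping could be asserted in the same block:

            # Hypothetical addition: convert_ids_to_tokens is the standard
            # inverse of convert_tokens_to_ids on PreTrainedTokenizer.
            self.assertListEqual(
                tokenizer.convert_ids_to_tokens([0, 4, 8, 7]),
                ["<unk>", "unwanted", ",", "running"],
            )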