def test_full_tokenizer(self):
    tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

    tokens = tokenizer.tokenize("<unk> UNwanted , running")
    self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])

    self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])
def test_full_tokenizer(self):
    vocab_tokens = [
        "<unk>", "[CLS]", "[SEP]", "want", "unwanted", "wa", "un",
        "running", ",", "low", "l",
    ]
    with TemporaryDirectory() as tmpdirname:
        vocab_file = os.path.join(tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(vocab_file, "w", encoding='utf-8') as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        input_text = u"<unk> UNwanted , running"
        output_text = u"<unk> unwanted, running"

        create_and_check_tokenizer_commons(self, input_text, output_text,
                                           TransfoXLTokenizer, tmpdirname,
                                           lower_case=True)

        tokenizer = TransfoXLTokenizer(vocab_file=vocab_file, lower_case=True)

        tokens = tokenizer.tokenize(u"<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])

        # The expected ids are the positions of the tokens in vocab_tokens:
        # "<unk>" -> 0, "unwanted" -> 4, "," -> 8, "running" -> 7.
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])
def test_full_tokenizer_moses_numbers(self):
    tokenizer = TransfoXLTokenizer(lower_case=False)

    text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
    tokens_out = [
        "Hello", "(", "bracket", ")", "and", "side", "@-@", "scrolled",
        "[", "and", "]", "Henry", "'s", "$", "5", "@,@", "000", "with",
        "3", "@.@", "34", "m", ".", "What", "'s", "up", "!", "?",
    ]

    self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)

    self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)
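# Note on the "@" placeholders asserted above: TransfoXLTokenizer follows the
# WikiText-103 convention of splitting hyphenated words and large numbers into
# "@-@", "@,@" and "@.@" tokens; convert_tokens_to_string collapses them back,
# which is why the round trip recovers text_in exactly.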
def transformerXLTokenizer(*args, **kwargs):
    """
    Instantiate a Transformer-XL tokenizer adapted from the Vocab class in
    https://github.com/kimiyoung/transformer-xl

    Args:
        pretrained_model_name_or_path: Path to a pretrained model archive
            or one of the pre-trained vocab configs below.
                * transfo-xl-wt103

    Example:
        >>> import torch
        >>> tokenizer = torch.hub.load('huggingface/pytorch-transformers', 'transformerXLTokenizer', 'transfo-xl-wt103')

        >>> text = "Who was Jim Henson ?"
        >>> tokenized_text = tokenizer.tokenize(text)
        >>> indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
    """
    tokenizer = TransfoXLTokenizer.from_pretrained(*args, **kwargs)
    return tokenizer
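# A minimal direct-usage sketch (an illustration, not part of the original
# file): the hub entry point above just forwards to
# TransfoXLTokenizer.from_pretrained, so the same tokenizer can be built
# without torch.hub. Assumes the pytorch_transformers package is installed
# and the 'transfo-xl-wt103' vocab files are downloadable.
#
#     from pytorch_transformers import TransfoXLTokenizer
#
#     tokenizer = TransfoXLTokenizer.from_pretrained('transfo-xl-wt103')
#     tokens = tokenizer.tokenize("Who was Jim Henson ?")
#     ids = tokenizer.convert_tokens_to_ids(tokens)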
def test_full_tokenizer_no_lower(self):
    tokenizer = TransfoXLTokenizer(lower_case=False)

    self.assertListEqual(
        tokenizer.tokenize(u" \tHeLLo ! how \n Are yoU ? "),
        ["HeLLo", "!", "how", "Are", "yoU", "?"])
def test_full_tokenizer_lower(self):
    tokenizer = TransfoXLTokenizer(lower_case=True)

    self.assertListEqual(
        tokenizer.tokenize(u" \tHeLLo ! how \n Are yoU ? "),
        ["hello", "!", "how", "are", "you", "?"])
def get_tokenizer(self, **kwargs):
    kwargs['lower_case'] = True
    return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)