Example #1
    def create_and_check_roberta_for_masked_lm(self, config, input_ids, token_type_ids, input_mask,
                                                sequence_labels, token_labels, choice_labels):
        model = RobertaForMaskedLM(config=config)
        model.eval()
        loss, prediction_scores = model(input_ids, attention_mask=input_mask,
                                        token_type_ids=token_type_ids,
                                        masked_lm_labels=token_labels)
        result = {
            "loss": loss,
            "prediction_scores": prediction_scores,
        }
        self.parent.assertListEqual(
            list(result["prediction_scores"].size()),
            [self.batch_size, self.seq_length, self.vocab_size])
        self.check_loss_output(result)
Example #2
    def test_inference_masked_lm(self):
        model = RobertaForMaskedLM.from_pretrained('roberta-base')

        input_ids = torch.tensor(
            [[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 50265))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = torch.Tensor([[[33.8843, -4.3107, 22.7779],
                                        [4.6533, -2.8099, 13.6252],
                                        [1.8222, -3.6898, 8.8600]]])
        self.assertTrue(
            torch.allclose(output[:, :3, :3], expected_slice, atol=1e-3))
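
The test above only checks the raw prediction scores against a stored slice. As an illustrative addition (not part of the original test; it assumes the standard 'roberta-base' checkpoint and the tuple-style outputs used throughout these examples), the following sketch shows how those scores are typically used to fill a <mask> token:

import torch
from transformers import RobertaForMaskedLM, RobertaTokenizer

tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
model = RobertaForMaskedLM.from_pretrained('roberta-base')
model.eval()

# Encode a sentence containing RoBERTa's mask symbol.
text = "The capital of France is <mask>."
input_ids = torch.tensor([tokenizer.encode(text, add_special_tokens=True)])

with torch.no_grad():
    prediction_scores = model(input_ids)[0]  # shape: (1, seq_len, vocab_size)

# Pick the highest-scoring token at the masked position.
mask_id = tokenizer.convert_tokens_to_ids(tokenizer.mask_token)
mask_pos = (input_ids[0] == mask_id).nonzero()[0].item()
best_id = prediction_scores[0, mask_pos].argmax().item()
print(tokenizer.decode([best_id]))  # expected to print something like " Paris"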
Example #3
    def __init__(self, args):
        super().__init__()

        if args.hfroberta_model_dir is not None:
            # load the RoBERTa model from a local directory
            roberta_model_name = str(args.hfroberta_model_dir) + "/"
            dict_file = roberta_model_name
            print("loading huggingface RoBERTa model from {}".format(roberta_model_name))
        else:
            # load RoBERTa model from huggingface cache
            roberta_model_name = args.hfroberta_model_name
            dict_file = roberta_model_name

        # When using a cased model, make sure to pass do_lower_case=False directly to BaseTokenizer
        do_lower_case = False
        if 'uncased' in roberta_model_name:
            do_lower_case = True

        # Load pre-trained model tokenizer (vocabulary)
        self.tokenizer = RobertaTokenizer.from_pretrained(dict_file)

        # original vocab

        # The following process is based on gpt_connector.

        # RoBERTa also uses byte-level BPE. The bytes_to_unicode function takes all control
        # and whitespace characters in code points 0-255 and shifts them up
        # by 256 to make them printable, so space (code point 32) becomes Ġ (code point 288).
        # (copied from https://github.com/openai/gpt-2/issues/80#issuecomment-487202159).
        #
        # Other control characters will be removed during the vocab-intersection process.
        def convert_word(word):
            if word == ROBERTA_UNK:
                return word
            if word == ROBERTA_MASK:
                return word
            if word == ROBERTA_START_SENTENCE:
                return word
            if word == ROBERTA_END_SENTENCE:
                return word
            if word == ROBERTA_PAD:
                return word

            if word.startswith('Ġ'):  # the token starts with a whitespace
                return word[1:]
            
            return f'_{word}_'  # the token does not start with a whitespace:
                                # it may not be the start of a word,
                                # or it may be at the start of a sentence.

            # need a duplication check?

        _, gpt_vocab = zip(*sorted(self.tokenizer.decoder.items()))
        self.vocab = [convert_word(word) for word in gpt_vocab]
        self._init_inverse_vocab()

        # Get the UNK symbol as it's written in the original RoBERTa vocab.
        unk_index = self.inverse_vocab[ROBERTA_UNK]  # OPENAI_UNK
        self.unk_symbol = self.tokenizer.decoder[unk_index]

        # Get the MASK symbol as it's written in the original RoBERTa vocab.
        mask_index = self.inverse_vocab[ROBERTA_MASK]
        self.mask_symbol = self.tokenizer.decoder[mask_index]

        # Load pre-trained model (weights)
        self.masked_roberta_model = RobertaForMaskedLM.from_pretrained(roberta_model_name)
        self.masked_roberta_model.eval()
        #print(self.masked_roberta_model.config)

        # ... to get hidden states
        self.roberta_model = self.masked_roberta_model.roberta

        # Sanity check.
        #assert len(self.vocab) == self.masked_roberta_model.config.vocab_size
        #assert 0 == self.masked_roberta_model.config.n_special

        self.eos_id = self.inverse_vocab[ROBERTA_END_SENTENCE]  # OPENAI_EOS
        self.model_vocab = self.vocab

        self.pad_id = self.inverse_vocab[ROBERTA_PAD]
        self.unk_index = self.inverse_vocab[ROBERTA_UNK]
        self.mask_index = mask_index
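
The Ġ convention described in the comments above is easy to verify directly. A minimal sketch, assuming the standard 'roberta-base' vocabulary:

from transformers import RobertaTokenizer

tok = RobertaTokenizer.from_pretrained('roberta-base')
print(tok.tokenize("world"))   # expected: ['world']  (no leading space, no Ġ)
print(tok.tokenize(" world"))  # expected: ['Ġworld'] (the leading space becomes Ġ)
print(ord('Ġ'))                # 288 == 32 + 256, i.e. the space code point shifted up by 256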
Example #4
    new_pronoun_index = pronoun_index + len(tokenized_option)
    tokenized_text.pop(new_pronoun_index)
    return tokenized_text


# Load pre-trained model tokenizer (vocabulary)
tokenizer = RobertaTokenizer.from_pretrained('roberta-large')

correct_preds = 0
correct_preds_enhanced = 0
stability_match = 0

all_preds = 0

# Load pre-trained model (weights)
model = RobertaForMaskedLM.from_pretrained('roberta-large')
model.eval()

for q_index, dp_split in wsc_datapoints.iterrows():
    # skip rows whose adverb text is empty or is just the '-' placeholder
    adverb_text = dp_split['text_adverb'].replace(' ', '')
    if adverb_text != '-' and adverb_text:

        # Tokenized input
        correct_answer = dp_split['correct_answer']

        # normalize the original text: lowercase and collapse repeated spaces
        text = dp_split['text_original'].strip().lower()
        text = re.sub(' +', ' ', text)
        print(text, " text")

        text_enhanced = dp_split['text_adverb'].lower()
Example #5
    parser.add_argument("--model_type",
                        default="bert",
                        choices=["bert", "roberta"])
    parser.add_argument("--model_name", default='bert-base-uncased', type=str)
    parser.add_argument(
        "--dump_checkpoint",
        default='serialization_dir/tf_bert-base-uncased_0247911.pth',
        type=str)
    parser.add_argument("--vocab_transform", action='store_true')
    args = parser.parse_args()

    if args.model_type == 'bert':
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = 'bert'
    elif args.model_type == 'roberta':
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = 'roberta'

    state_dict = model.state_dict()
    compressed_sd = {}

    for w in ['word_embeddings', 'position_embeddings']:
        compressed_sd[f'distilbert.embeddings.{w}.weight'] = \
            state_dict[f'{prefix}.embeddings.{w}.weight']
    for w in ['weight', 'bias']:
        compressed_sd[f'distilbert.embeddings.LayerNorm.{w}'] = \
            state_dict[f'{prefix}.embeddings.LayerNorm.{w}']

    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ['weight', 'bias']:
Example #6
def convert_roberta_checkpoint_to_pytorch(roberta_checkpoint_path,
                                          pytorch_dump_folder_path,
                                          classification_head):
    """
    Copy/paste/tweak roberta's weights to our BERT structure.
    """
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    config = BertConfig(
        vocab_size_or_config_json_file=50265,
        hidden_size=roberta.args.encoder_embed_dim,
        num_hidden_layers=roberta.args.encoder_layers,
        num_attention_heads=roberta.args.encoder_attention_heads,
        intermediate_size=roberta.args.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,  # PyTorch default used in fairseq
    )
    if classification_head:
        config.num_labels = roberta.args.num_classes
    print("Our BERT config:", config)

    model = RobertaForSequenceClassification(
        config) if classification_head else RobertaForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    roberta_sent_encoder = roberta.model.decoder.sentence_encoder
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.
    model.roberta.embeddings.LayerNorm.weight = roberta_sent_encoder.emb_layer_norm.weight
    model.roberta.embeddings.LayerNorm.bias = roberta_sent_encoder.emb_layer_norm.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        ### self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (roberta_layer.self_attn.in_proj_weight.shape == torch.Size(
            (3 * config.hidden_size, config.hidden_size)))
        # we use three distinct linear layers so we split the source layer here.
        self_attn.query.weight.data = roberta_layer.self_attn.in_proj_weight[:config.hidden_size, :]
        self_attn.query.bias.data = roberta_layer.self_attn.in_proj_bias[:config.hidden_size]
        self_attn.key.weight.data = roberta_layer.self_attn.in_proj_weight[
            config.hidden_size:2 * config.hidden_size, :]
        self_attn.key.bias.data = roberta_layer.self_attn.in_proj_bias[
            config.hidden_size:2 * config.hidden_size]
        self_attn.value.weight.data = roberta_layer.self_attn.in_proj_weight[2 * config.hidden_size:, :]
        self_attn.value.bias.data = roberta_layer.self_attn.in_proj_bias[2 * config.hidden_size:]

        ### self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert (self_output.dense.weight.shape ==
                roberta_layer.self_attn.out_proj.weight.shape)
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = roberta_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = roberta_layer.self_attn_layer_norm.bias

        ### intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert (
            intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape)
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        ### output
        bert_output: BertOutput = layer.output
        assert (
            bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape)
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        bert_output.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = roberta_layer.final_layer_norm.bias
        #### end of layer

    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads[
            'mnli'].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads[
            'mnli'].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads[
            'mnli'].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads[
            'mnli'].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.decoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.decoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.decoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.decoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.decoder.lm_head.weight
        model.lm_head.bias = roberta.model.decoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads['mnli'](
            roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?",
          "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
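
Once the conversion has finished, the dumped folder can be loaded back with the regular from_pretrained API. A minimal sketch (the folder name below is hypothetical; use whatever was passed as pytorch_dump_folder_path):

from transformers import RobertaForMaskedLM

# Hypothetical dump folder written by convert_roberta_checkpoint_to_pytorch() above.
pytorch_dump_folder_path = "./converted-roberta"

model = RobertaForMaskedLM.from_pretrained(pytorch_dump_folder_path)
model.eval()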
Example #7
def main():
    parser = argparse.ArgumentParser(description="Training")

    parser.add_argument(
        "--dump_path",
        type=str,
        required=True,
        help="The output directory (log, checkpoints, parameters, etc.)")
    parser.add_argument(
        "--data_file",
        type=str,
        required=True,
        help=
        "The binarized file (tokenized + tokens_to_ids) and grouped by sequence."
    )
    parser.add_argument("--token_counts",
                        type=str,
                        required=True,
                        help="The token counts in the data_file for MLM.")
    parser.add_argument("--force",
                        action='store_true',
                        help="Overwrite dump_path if it already exists.")

    parser.add_argument("--vocab_size",
                        default=30522,
                        type=int,
                        help="The vocabulary size.")
    parser.add_argument(
        "--max_position_embeddings",
        default=512,
        type=int,
        help="Maximum sequence length we can model (including [CLS] and [SEP])."
    )
    parser.add_argument(
        "--sinusoidal_pos_embds",
        action='store_false',
        help=
        "If true, the position embeddings are simply fixed with sinusoidal embeddings."
    )
    parser.add_argument("--n_layers",
                        default=6,
                        type=int,
                        help="Number of Transformer blocks.")
    parser.add_argument("--n_heads",
                        default=12,
                        type=int,
                        help="Number of heads in the self-attention module.")
    parser.add_argument(
        "--dim",
        default=768,
        type=int,
        help="Dimension through the network. Must be divisible by n_heads")
    parser.add_argument("--hidden_dim",
                        default=3072,
                        type=int,
                        help="Intermediate dimension in the FFN.")
    parser.add_argument("--dropout", default=0.1, type=float, help="Dropout.")
    parser.add_argument("--attention_dropout",
                        default=0.1,
                        type=float,
                        help="Dropout in self-attention.")
    parser.add_argument("--activation",
                        default='gelu',
                        type=str,
                        help="Activation to use in self-attention")
    parser.add_argument(
        "--tie_weights_",
        action='store_false',
        help=
        "If true, we tie the embeddings matrix with the projection over the vocabulary matrix. Default is true."
    )

    parser.add_argument("--from_pretrained_weights",
                        default=None,
                        type=str,
                        help="Load student initialization checkpoint.")
    parser.add_argument(
        "--from_pretrained_config",
        default=None,
        type=str,
        help="Load student initialization architecture config.")
    parser.add_argument("--teacher_type",
                        default="bert",
                        choices=["bert", "roberta"],
                        help="Teacher type (BERT, RoBERTa).")
    parser.add_argument("--teacher_name",
                        default="bert-base-uncased",
                        type=str,
                        help="The teacher model.")

    parser.add_argument("--temperature",
                        default=2.,
                        type=float,
                        help="Temperature for the softmax temperature.")
    parser.add_argument(
        "--alpha_ce",
        default=0.5,
        type=float,
        help="Linear weight for the distillation loss. Must be >=0.")
    parser.add_argument("--alpha_mlm",
                        default=0.5,
                        type=float,
                        help="Linear weight for the MLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse",
                        default=0.0,
                        type=float,
                        help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos",
        default=0.0,
        type=float,
        help="Linear weight of the cosine embedding loss. Must be >=0.")
    parser.add_argument(
        "--mlm_mask_prop",
        default=0.15,
        type=float,
        help="Proportion of tokens for which we need to make a prediction.")
    parser.add_argument("--word_mask",
                        default=0.8,
                        type=float,
                        help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep",
                        default=0.1,
                        type=float,
                        help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand",
                        default=0.1,
                        type=float,
                        help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing",
        default=0.7,
        type=float,
        help=
        "Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec)."
    )
    parser.add_argument(
        "--restrict_ce_to_mask",
        action='store_true',
        help=
        "If true, compute the distilation loss only the [MLM] prediction distribution."
    )

    parser.add_argument("--n_epoch",
                        type=int,
                        default=3,
                        help="Number of pass on the whole dataset.")
    parser.add_argument("--batch_size",
                        type=int,
                        default=5,
                        help="Batch size (for each process).")
    parser.add_argument(
        "--tokens_per_batch",
        type=int,
        default=-1,
        help=
        "If specified, modify the batches so that they have approximately this number of tokens."
    )
    parser.add_argument(
        "--shuffle",
        action='store_false',
        help="If true, shuffle the sequence order. Default is true.")
    parser.add_argument(
        "--group_by_size",
        action='store_false',
        help=
        "If true, group sequences that have similar length into the same batch. Default is true."
    )

    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=50,
        help="Gradient accumulation for larger training batches.")
    parser.add_argument("--warmup_prop",
                        default=0.05,
                        type=float,
                        help="Linear warmup proportion.")
    parser.add_argument("--weight_decay",
                        default=0.0,
                        type=float,
                        help="Weight deay if we apply some.")
    parser.add_argument("--learning_rate",
                        default=5e-4,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon",
                        default=1e-6,
                        type=float,
                        help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm",
                        default=5.0,
                        type=float,
                        help="Max gradient norm.")
    parser.add_argument("--initializer_range",
                        default=0.02,
                        type=float,
                        help="Random initialization range.")

    parser.add_argument(
        '--fp16',
        action='store_true',
        help=
        "Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit"
    )
    parser.add_argument(
        '--fp16_opt_level',
        type=str,
        default='O1',
        help=
        "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
        "See details at https://nvidia.github.io/apex/amp.html")
    parser.add_argument("--n_gpu",
                        type=int,
                        default=1,
                        help="Number of GPUs in the node.")
    parser.add_argument("--local_rank",
                        type=int,
                        default=-1,
                        help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")

    parser.add_argument("--log_interval",
                        type=int,
                        default=500,
                        help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval",
                        type=int,
                        default=4000,
                        help="Checkpoint interval.")
    args = parser.parse_args()

    ## ARGS ##
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f'Serialization dir {args.dump_path} already exists, but you have not specified '
                    'whether to overwrite it. Use `--force` if you want to overwrite it.')
            else:
                shutil.rmtree(args.dump_path)

        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(
            f'Experiment will be dumped and logged in {args.dump_path}')

        ### SAVE PARAMS ###
        logger.info(f'Param: {args}')
        with open(os.path.join(args.dump_path, 'parameters.json'), 'w') as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)
    assert (args.from_pretrained_weights is None and args.from_pretrained_config is None) or \
           (args.from_pretrained_weights is not None and args.from_pretrained_config is not None)

    ### TOKENIZER ###
    if args.teacher_type == 'bert':
        tokenizer = BertTokenizer.from_pretrained(args.teacher_name)
    elif args.teacher_type == 'roberta':
        tokenizer = RobertaTokenizer.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f'Special tokens {special_tok_ids}')
    args.special_tok_ids = special_tok_ids

    ## DATA LOADER ##
    logger.info(f'Loading data from {args.data_file}')
    with open(args.data_file, 'rb') as fp:
        data = pickle.load(fp)

    assert os.path.isfile(args.token_counts)
    logger.info(
        f'Loading token counts from {args.token_counts} (already pre-computed)'
    )
    with open(args.token_counts, 'rb') as fp:
        counts = pickle.load(fp)
        assert len(counts) == args.vocab_size
    token_probs = np.maximum(counts, 1)**-args.mlm_smoothing
    for idx in special_tok_ids.values():
        token_probs[idx] = 0.  # do not predict special tokens
    token_probs = torch.from_numpy(token_probs)

    train_dataloader = Dataset(params=args, data=data)
    logger.info(f'Data loader created.')

    ## STUDENT ##
    if args.from_pretrained_weights is not None:
        assert os.path.isfile(args.from_pretrained_weights)
        assert os.path.isfile(args.from_pretrained_config)
        logger.info(
            f'Loading pretrained weights from {args.from_pretrained_weights}')
        logger.info(
            f'Loading pretrained config from {args.from_pretrained_config}')
        stu_architecture_config = DistilBertConfig.from_json_file(
            args.from_pretrained_config)
        stu_architecture_config.output_hidden_states = True
        student = DistilBertForMaskedLM.from_pretrained(
            args.from_pretrained_weights, config=stu_architecture_config)
    else:
        args.vocab_size_or_config_json_file = args.vocab_size
        stu_architecture_config = DistilBertConfig(**vars(args),
                                                   output_hidden_states=True)
        student = DistilBertForMaskedLM(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f'cuda:{args.local_rank}')
    logger.info(f'Student loaded.')

    ## TEACHER ##
    if args.teacher_type == 'bert':
        teacher = BertForMaskedLM.from_pretrained(args.teacher_name,
                                                  output_hidden_states=True)
    elif args.teacher_type == 'roberta':
        teacher = RobertaForMaskedLM.from_pretrained(args.teacher_name,
                                                     output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f'cuda:{args.local_rank}')
    logger.info(f'Teacher loaded from {args.teacher_name}.')

    ## DISTILLER ##
    torch.cuda.empty_cache()
    distiller = Distiller(params=args,
                          dataloader=train_dataloader,
                          token_probs=token_probs,
                          student=student,
                          teacher=teacher)
    distiller.train()
    logger.info("Let's go get some drinks.")
Example #8
from pytorch_transformers import RobertaModel, RobertaForMaskedLM, RobertaTokenizer

if __name__ == '__main__':
    _ = RobertaForMaskedLM.from_pretrained('roberta-large',
                                           cache_dir='./hugfacecache')
    _ = RobertaModel.from_pretrained('roberta-large',
                                     cache_dir='./hugfacecache')
    _ = RobertaTokenizer.from_pretrained('roberta-large',
                                         cache_dir='./hugfacecache')

    # _ = BertForMaskedLM.from_pretrained('bert-large-cased', cache_dir='./hugfacecache')
    # _ = BertModel.from_pretrained('bert-large-cased', cache_dir='./hugfacecache')
    # _ = BertTokenizer.from_pretrained('bert-large-cased', cache_dir='./hugfacecache')
    #
    # _ = XLNetLMHeadModel.from_pretrained('xlnet-large-cased', cache_dir='./hugfacecache')
    # _ = XLNetModel.from_pretrained('xlnet-large-cased', cache_dir='./hugfacecache')
    # _ = XLNetTokenizer.from_pretrained('xlnet-large-cased', cache_dir='./hugfacecache')