Example #1
def load_and_cache_examples(args,
                            tokenizer,
                            processor,
                            label_list,
                            mode="train"):
    if args.local_rank not in [-1, 0]:
        # Make sure only the first process in distributed training processes
        # the dataset; the others will use the cache
        torch.distributed.barrier()

    examples = processor.get_examples(args.data_dir, mode)
    features = convert_examples_to_features(
        examples,
        label_list,
        args.max_seq_length,
        tokenizer,
        args.output_mode,
        cls_token_at_end=bool(args.model_type in ['xlnet']),
        # xlnet has a cls token at the end
        cls_token=tokenizer.cls_token,
        cls_token_segment_id=2 if args.model_type in ['xlnet'] else 0,
        sep_token=tokenizer.sep_token,
        sep_token_extra=bool(args.model_type in ['roberta']),
        # roberta uses an extra separator b/w pairs of sentences,
        # cf. github.com/pytorch/fairseq/commit/1684e166e3da03f5b600dbb7855cb98ddfcd0805
        pad_on_left=bool(args.model_type in ['xlnet']),
        # pad on the left for xlnet
        pad_token=(tokenizer.encoder[tokenizer.pad_token]
                   if args.model_type in ['roberta']
                   else tokenizer.vocab[tokenizer.pad_token]),
        pad_token_segment_id=4 if args.model_type in ['xlnet'] else 0,
    )

    if args.local_rank == 0:
        # Make sure only the first process in distributed training processes
        # the dataset; the others will use the cache
        torch.distributed.barrier()

    # Convert to Tensors and build dataset
    all_input_ids = torch.tensor([f.input_ids for f in features],
                                 dtype=torch.long)
    all_entity1 = torch.tensor([f.e1_pos for f in features], dtype=torch.long)
    all_entity2 = torch.tensor([f.e2_pos for f in features], dtype=torch.long)
    all_input_mask = torch.tensor([f.input_mask for f in features],
                                  dtype=torch.long)
    all_segment_ids = torch.tensor([f.segment_ids for f in features],
                                   dtype=torch.long)
    if args.output_mode == "classification":
        all_label_ids = torch.tensor([f.label_id for f in features],
                                     dtype=torch.long)
    elif args.output_mode == "regression":
        all_label_ids = torch.tensor([f.label_id for f in features],
                                     dtype=torch.float)
    else:
        raise ValueError("Unknown output_mode %s" % args.output_mode)

    dataset = TensorDataset(all_input_ids, all_entity1, all_entity2,
                            all_input_mask, all_segment_ids, all_label_ids)
    return dataset
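
A minimal call-site sketch for the function above (hypothetical wiring; assumes args, tokenizer, processor, and label_list are already built by the surrounding script):

from torch.utils.data import DataLoader, RandomSampler

train_dataset = load_and_cache_examples(args, tokenizer, processor,
                                        label_list, mode="train")
# Wrap the TensorDataset in a shuffling DataLoader for training
train_loader = DataLoader(train_dataset,
                          sampler=RandomSampler(train_dataset),
                          batch_size=args.train_batch_size)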
Example #2
def load_and_cache_examples(mode, train_batch_size, eval_batch_size):
    '''
    Build a DataLoader iterator over the cached features.
    :param mode: "train" or "dev"
    :return: DataLoader
    '''
    processor, tokenizer = init_params()

    if args.local_rank not in [-1, 0]:
        # Make sure only the first process in distributed training processes
        # the dataset; the others will use the cache
        torch.distributed.barrier()

    if mode == "train":
        examples = processor.get_train_examples(args.data_dir)
        #t_total
        #num_train_steps = int(
        #    len(examples) / train_batch_size / args.gradient_accumulation_steps * args.num_train_epochs)

        batch_size = train_batch_size

        #logger.info("  Num steps = %d", num_train_steps)
    elif mode == 'dev':
        examples = processor.get_dev_examples(args.data_dir)
        batch_size = eval_batch_size
    else:
        raise ValueError("Invalid mode %s" % mode)

    output_mode = "classification"

    # features: load from cache if present, otherwise rebuild from examples
    try:
        if mode == "train":
            with open(os.path.join(args.data_dir, args.TRAIN_US_FEATURE_FILE),
                      'rb') as f:  #TRAIN_FEATURE_FILE
                features = pickle.load(f)
        else:
            with open(os.path.join(args.data_dir, args.DEV_US_FEATURE_FILE),
                      'rb') as f:  #DEV_FEATURE_FILE
                features = pickle.load(f)
    except (OSError, pickle.UnpicklingError):  # cache missing/corrupt; rebuild
        features = convert_examples_to_features(
            examples,
            args.max_seq_length,
            args.split_num,
            tokenizer,
            mode=mode,
            cls_token_at_end=bool(args.model_type in ['xlnet']),
            # xlnet has a cls token at the end
            cls_token=tokenizer.cls_token,
            sep_token=tokenizer.sep_token,
            cls_token_segment_id=2 if args.model_type in ['xlnet'] else 0,
            pad_on_left=bool(args.model_type in ['xlnet']),
            # pad on the left for xlnet
            pad_token_segment_id=4 if args.model_type in ['xlnet'] else 0)

    logger.info("  Num examples = %d", len(examples))
    logger.info("  Batch size = %d", batch_size)

    # Convert to Tensors and build dataset
    all_input_ids = torch.tensor(select_field(features, 'input_ids'),
                                 dtype=torch.long)
    all_input_mask = torch.tensor(select_field(features, 'input_mask'),
                                  dtype=torch.long)
    all_segment_ids = torch.tensor(select_field(features, 'segment_ids'),
                                   dtype=torch.long)
    if output_mode == "classification":
        all_labels = torch.tensor([int(f.label) for f in features],
                                  dtype=torch.long)
    elif output_mode == "regression":
        all_labels = torch.tensor([float(f.label) for f in features],
                                  dtype=torch.float)

    # dataset
    dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids,
                            all_labels)

    if mode == "train":
        sampler = RandomSampler(dataset)  # roughly equivalent to shuffling
    elif mode == 'dev':
        sampler = SequentialSampler(dataset)
    else:
        raise ValueError("Invalid mode %s" % mode)

    # iterator
    iterator = DataLoader(dataset, sampler=sampler, batch_size=batch_size)
    return iterator
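
A minimal usage sketch (the batch sizes are hypothetical; args and the globals read by init_params are assumed to be configured):

train_iterator = load_and_cache_examples("train",
                                         train_batch_size=32,
                                         eval_batch_size=64)
for input_ids, input_mask, segment_ids, labels in train_iterator:
    # each tensor is [batch_size, ...]; inspect one batch and stop
    break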
Example #3
    def train(global_step, epoch):
        train_features = convert_examples_to_features(train_examples,
                                                      label_list,
                                                      args.max_seq_length,
                                                      tokenizer)
        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", len(train_examples))
        logger.info("  Batch size = %d", args.train_batch_size)
        logger.info("  Num steps = %d", num_train_steps)
        all_input_ids = torch.tensor([f.input_ids for f in train_features],
                                     dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in train_features],
                                      dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in train_features],
                                       dtype=torch.long)
        all_label_ids = torch.tensor([f.label_id for f in train_features],
                                     dtype=torch.long)
        train_data = TensorDataset(all_input_ids, all_input_mask,
                                   all_segment_ids, all_label_ids)

        if args.local_rank == -1:
            train_sampler = RandomSampler(train_data)
        else:
            train_sampler = DistributedSampler(train_data)
        train_dataloader = DataLoader(train_data,
                                      sampler=train_sampler,
                                      batch_size=args.train_batch_size)
        model.train()
        total_loss, total_accuracy = 0, 0
        nb_tr_examples, nb_tr_steps = 0, 0
        for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
            batch = tuple(t.to(device) for t in batch)
            input_ids, input_mask, segment_ids, label_ids = batch

            # Passing label_ids makes the model also return the loss
            # loss = model(input_ids, segment_ids, input_mask, label_ids)
            out, loss = model(input_ids, segment_ids, input_mask,
                              label_ids)  # [batch, 1]
            logits = (out > args.threshold).type(torch.LongTensor)
            logits = logits.detach().cpu().numpy()
            label_ids = label_ids.to('cpu').numpy()
            tmp_train_accuracy = compute_accuracy(logits, label_ids)
            # print(tmp_train_accuracy)

            total_loss += loss.mean().item()
            total_accuracy += tmp_train_accuracy

            if n_gpu > 1:
                loss = loss.mean()  # mean() to average on multi-gpu.
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps

            if args.fp16:
                optimizer.backward(loss)
            else:
                loss.backward()

            # tr_loss += loss.item()
            # nb_tr_examples += input_ids.size(0)
            nb_tr_steps += 1

            if (step + 1) % args.gradient_accumulation_steps == 0:
                # modify learning rate with special warm up BERT uses
                lr_this_step = args.learning_rate * warmup_linear(
                    global_step / (t_total), args.warmup_proportion)
                for param_group in optimizer.param_groups:
                    param_group['lr'] = lr_this_step
                optimizer.step()
                optimizer.zero_grad()
                global_step += 1

            args.log_interval = 300
            if (step + 1) % args.log_interval == 0:
                logger.info("|----epoch {}, eclipse {}/{}, lr {:.4f},"
                            "loss {:.4f}, acc {:.4f}".format(
                                epoch, step + 1, len(train_dataloader),
                                lr_this_step, total_loss / args.log_interval,
                                total_accuracy / args.log_interval))
                total_loss, total_accuracy = 0.0, 0.0

        return global_step
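
The learning-rate update above calls a warmup_linear helper that is not shown. A minimal sketch consistent with the call site (linear warmup then linear decay, as in the old pytorch-pretrained-bert utilities):

def warmup_linear(x, warmup=0.002):
    # x is the fraction of training completed (global_step / t_total)
    if x < warmup:
        return x / warmup  # ramp up linearly during warmup
    return 1.0 - x  # then decay linearly toward zero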
Example #4
def main(debug=True):
    # device
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available()
                              and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    # distribution training
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')
    logger.info(
        "device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".
        format(device, n_gpu, bool(args.local_rank != -1), args.fp16))

    # gradient accumulation
    if args.gradient_accumulation_steps < 1:
        raise ValueError(
            "Invalid gradient_accumulation_steps parameter: {}, should be >= 1"
            .format(args.gradient_accumulation_steps))

    args.train_batch_size = int(args.train_batch_size /
                                args.gradient_accumulation_steps)

    # use the same random seed
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
    if not args.do_train and not args.do_eval:
        raise ValueError(
            "At least one of `do_train` or `do_eval` must be True.")

    # output dir is needed
    #if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
    #    raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
    #os.makedirs(args.output_dir, exist_ok=True)

    # data processor
    processor = QuoraProcessor()
    # processor = MrpcProcessor()
    num_labels = 2
    label_list = processor.get_labels()

    # BertTokenizer
    # tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
    tokenizer = BertTokenizer.from_pretrained(
        "./pre_trained_models/bert-base-uncased-vocab.txt")
    train_examples = None
    num_train_steps = None
    if args.do_train:
        train_examples = processor.get_train_examples(args.data_dir,
                                                      debug=debug,
                                                      debug_length=100)
        num_train_steps = int(
            len(train_examples) / args.train_batch_size /
            args.gradient_accumulation_steps * args.num_train_epochs)
        logger.info("  Loaded %d training examples", len(train_examples))

    model = Bce_model()

    if args.fp16:
        model.half()
    model.to(device)

    # distributed parallel
    if args.local_rank != -1:
        try:
            from apex.parallel import DistributedDataParallel as DDP
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training."
            )

        model = DDP(model)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Prepare optimizer
    param_optimizer = list(model.named_parameters())
    # print(param_optimizer)

    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']

    optimizer_grouped_parameters = [
        {
            'params': [p for n, p in param_optimizer
                       if not any(nd in n for nd in no_decay)],
            'weight_decay': 0.01
        },
        {
            'params': [p for n, p in param_optimizer
                       if any(nd in n for nd in no_decay)],
            'weight_decay': 0.0
        },
    ]

    t_total = num_train_steps

    if args.fp16:
        try:
            from apex.optimizers import FP16_Optimizer
            from apex.optimizers import FusedAdam
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training."
            )

        optimizer = FusedAdam(optimizer_grouped_parameters,
                              lr=args.learning_rate,
                              bias_correction=False,
                              max_grad_norm=1.0)
        if args.loss_scale == 0:
            optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
        else:
            optimizer = FP16_Optimizer(optimizer,
                                       static_loss_scale=args.loss_scale)

    else:
        optimizer = BertAdam(optimizer_grouped_parameters,
                             lr=args.learning_rate,
                             warmup=args.warmup_proportion,
                             t_total=t_total)

    def train(global_step, epoch):
        train_features = convert_examples_to_features(train_examples,
                                                      label_list,
                                                      args.max_seq_length,
                                                      tokenizer)
        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", len(train_examples))
        logger.info("  Batch size = %d", args.train_batch_size)
        logger.info("  Num steps = %d", num_train_steps)
        all_input_ids = torch.tensor([f.input_ids for f in train_features],
                                     dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in train_features],
                                      dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in train_features],
                                       dtype=torch.long)
        all_label_ids = torch.tensor([f.label_id for f in train_features],
                                     dtype=torch.long)
        train_data = TensorDataset(all_input_ids, all_input_mask,
                                   all_segment_ids, all_label_ids)

        if args.local_rank == -1:
            train_sampler = RandomSampler(train_data)
        else:
            train_sampler = DistributedSampler(train_data)
        train_dataloader = DataLoader(train_data,
                                      sampler=train_sampler,
                                      batch_size=args.train_batch_size)
        model.train()
        total_loss, total_accuracy = 0, 0
        nb_tr_examples, nb_tr_steps = 0, 0
        for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
            batch = tuple(t.to(device) for t in batch)
            input_ids, input_mask, segment_ids, label_ids = batch

            # Passing label_ids makes the model also return the loss
            # loss = model(input_ids, segment_ids, input_mask, label_ids)
            out, loss = model(input_ids, segment_ids, input_mask,
                              label_ids)  # [batch, 1]
            logits = (out > args.threshold).type(torch.LongTensor)
            logits = logits.detach().cpu().numpy()
            label_ids = label_ids.to('cpu').numpy()
            tmp_train_accuracy = compute_accuracy(logits, label_ids)
            # print(tmp_train_accuracy)

            total_loss += loss.mean().item()
            total_accuracy += tmp_train_accuracy

            if n_gpu > 1:
                loss = loss.mean()  # mean() to average on multi-gpu.
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps

            if args.fp16:
                optimizer.backward(loss)
            else:
                loss.backward()

            # tr_loss += loss.item()
            # nb_tr_examples += input_ids.size(0)
            nb_tr_steps += 1

            if (step + 1) % args.gradient_accumulation_steps == 0:
                # modify learning rate with special warm up BERT uses
                lr_this_step = args.learning_rate * warmup_linear(
                    global_step / (t_total), args.warmup_proportion)
                for param_group in optimizer.param_groups:
                    param_group['lr'] = lr_this_step
                optimizer.step()
                optimizer.zero_grad()
                global_step += 1

            args.log_interval = 300
            if (step + 1) % args.log_interval == 0:
                logger.info("|----epoch {}, eclipse {}/{}, lr {:.4f},"
                            "loss {:.4f}, acc {:.4f}".format(
                                epoch, step + 1, len(train_dataloader),
                                lr_this_step, total_loss / args.log_interval,
                                total_accuracy / args.log_interval))
                total_loss, total_accuracy = 0.0, 0.0

        return global_step

    if args.do_eval and (args.local_rank == -1
                         or torch.distributed.get_rank() == 0):
        # get dev examples and dev features
        eval_examples = processor.get_dev_examples(args.data_dir,
                                                   debug=debug,
                                                   debug_length=16)
        eval_features = convert_examples_to_features(eval_examples, label_list,
                                                     args.max_seq_length,
                                                     tokenizer)
        logger.info("***** Running evaluation *****")
        logger.info("  Num examples = %d", len(eval_examples))
        logger.info("  Batch size = %d", args.eval_batch_size)
        all_input_ids = torch.tensor([f.input_ids for f in eval_features],
                                     dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in eval_features],
                                      dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in eval_features],
                                       dtype=torch.long)
        all_label_ids = torch.tensor([f.label_id for f in eval_features],
                                     dtype=torch.long)
        eval_data = TensorDataset(all_input_ids, all_input_mask,
                                  all_segment_ids, all_label_ids)
        # Run prediction for full data
        eval_sampler = SequentialSampler(eval_data)
        eval_dataloader = DataLoader(eval_data,
                                     sampler=eval_sampler,
                                     batch_size=args.eval_batch_size)

    def eval():
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        out_all = []
        labels_all = []
        for i, (input_ids, input_mask, segment_ids,
                label_ids) in enumerate(eval_dataloader):
            input_ids = input_ids.to(device)
            input_mask = input_mask.to(device)
            segment_ids = segment_ids.to(device)
            label_ids = label_ids.to(device)
            with torch.no_grad():
                out, tmp_eval_loss = model(input_ids, segment_ids, input_mask,
                                           label_ids)
            eval_loss += tmp_eval_loss.mean().item()
            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1
            # print(out.shape)
            out_all.append(out)
            labels_all.append(label_ids)

        eval_loss = eval_loss / nb_eval_steps
        model.train()
        return eval_loss, out_all, labels_all

    if args.do_train and args.do_eval:
        global_step = 0
        best_acc, best_f1 = 0.0, 0.0
        for epoch in trange(int(args.num_train_epochs), desc="Epoch"):
            # train() returns the updated step count so warmup keeps advancing
            global_step = train(global_step, epoch)
            if args.do_eval and (args.local_rank == -1
                                 or torch.distributed.get_rank() == 0):
                eval_loss, out_all, labels_all = eval()
                out_all = torch.cat(out_all, dim=0)
                labels_all = torch.cat(labels_all, dim=0)
                labels_eval = labels_all.cpu().numpy()
                for threshold in np.linspace(0.2, 0.6, 40):
                    logits_eval = (out_all > threshold).type(torch.LongTensor)
                    logits_eval = logits_eval.detach().cpu().numpy()
                    eval_accuracy = compute_accuracy(logits_eval, labels_eval)
                    eval_f1, eval_precision, eval_recall = compute_f1_precision_recall(
                        logits_eval, labels_eval)
                    if eval_f1 > best_f1:
                        best_f1 = eval_f1
                        # Save a trained model
                        model_to_save = model.module if hasattr(
                            model,
                            'module') else model  # Only save the model it-self
                        output_model_file = os.path.join(
                            args.output_dir, "pytorch_model.bin")
                        torch.save(model_to_save.state_dict(),
                                   output_model_file)
                    logger.info(
                        'epoch {:d}, threshold {:.4f}, accuracy {:.4f}, precision {:.4f}, recall {:.4f}, f1 {:.4f}, best_f1 {:.4f}'
                        .format(epoch, threshold, eval_accuracy,
                                eval_precision, eval_recall, eval_f1, best_f1))

    if args.do_eval and (args.local_rank == -1 or torch.distributed.get_rank()
                         == 0) and not args.do_train:
        # Load a trained model that you have fine-tuned
        output_model_file = os.path.join(args.output_dir, "pytorch_model.bin")
        model_state_dict = torch.load(output_model_file)
        model = BertForSequenceClassification.from_pretrained(
            args.bert_model, state_dict=model_state_dict)
        model.to(device)
        eval_loss, out_all, labels_all = eval()
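
Both loops above assume a compute_accuracy helper. One plausible implementation consistent with how it is called (NumPy inputs; treating a 2-D array as logits and reducing with argmax is an assumption, not the author's code):

import numpy as np

def compute_accuracy(preds, labels):
    # Reduce logits to hard class ids when a 2-D array is passed
    if preds.ndim > 1:
        preds = np.argmax(preds, axis=-1)
    preds, labels = preds.reshape(-1), labels.reshape(-1)
    return (preds == labels).mean()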
Example #5
    def eval():
        eval_examples = processor.get_dev_examples(args.data_dir,
                                                   debug=debug,
                                                   debug_length=100)
        eval_features = convert_examples_to_features(eval_examples, label_list,
                                                     args.max_seq_length,
                                                     tokenizer)
        logger.info("***** Running evaluation *****")
        logger.info("  Num examples = %d", len(eval_examples))
        logger.info("  Batch size = %d", args.eval_batch_size)
        all_input_ids = torch.tensor([f.input_ids for f in eval_features],
                                     dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in eval_features],
                                      dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in eval_features],
                                       dtype=torch.long)
        all_label_ids = torch.tensor([f.label_id for f in eval_features],
                                     dtype=torch.long)
        eval_data = TensorDataset(all_input_ids, all_input_mask,
                                  all_segment_ids, all_label_ids)
        # Run prediction for full data
        eval_sampler = SequentialSampler(eval_data)
        eval_dataloader = DataLoader(eval_data,
                                     sampler=eval_sampler,
                                     batch_size=args.eval_batch_size)

        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        logits_all = []
        labels_all = []
        for i, (input_ids, input_mask, segment_ids,
                label_ids) in enumerate(eval_dataloader):
            input_ids = input_ids.to(device)
            input_mask = input_mask.to(device)
            segment_ids = segment_ids.to(device)
            label_ids = label_ids.to(device)

            with torch.no_grad():
                # Passing label_ids returns the loss; without label_ids the
                # model returns logits of shape [batch, 2]
                tmp_eval_loss = model(input_ids, segment_ids, input_mask,
                                      label_ids)
                logits = model(input_ids, segment_ids, input_mask)
                # print(logits)

            logits = logits.detach().cpu().numpy()
            label_ids = label_ids.to('cpu').numpy()
            tmp_eval_accuracy = compute_accuracy(logits, label_ids)

            eval_loss += tmp_eval_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1
            logits_all.append(logits)
            labels_all.append(label_ids)

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        # return eval_loss, eval_accuracy
        logits_eval = np.concatenate(logits_all, axis=0)
        labels_eval = np.concatenate(labels_all, axis=0)
        # print(logits_eval)
        # print(labels_eval)
        eval_f1, eval_precision, eval_recall = compute_f1_precision_recall(
            logits_eval, labels_eval)
        model.train()
        return eval_loss, eval_accuracy, eval_f1, eval_precision, eval_recall
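
compute_f1_precision_recall is likewise not shown; a minimal sketch built on scikit-learn (an assumption; the original may be hand-rolled):

import numpy as np
from sklearn.metrics import f1_score, precision_score, recall_score

def compute_f1_precision_recall(preds, labels):
    # Accept raw logits ([batch, 2]) or hard predictions; argmax when 2-D
    if preds.ndim > 1:
        preds = np.argmax(preds, axis=-1)
    preds, labels = preds.reshape(-1), labels.reshape(-1)
    return (f1_score(labels, preds),
            precision_score(labels, preds),
            recall_score(labels, preds))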
Example #6
def preprocess():
    '''
    Run prediction on the test set.
    :return:
    '''
    model = load_model(os.path.join(args.ROOT_DIR, args.output_dir),
                       args.model_type)

    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available()
                              and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend='nccl')
        n_gpu = 1

    model.to(device)

    processors = {"sentiment_analysis": SentiAnalysisProcessor}
    task_name = args.task_name.lower()
    processor = processors[task_name]()
    examples = processor.get_test_examples(args.data_dir)

    tokenizer = XLNetTokenizer.from_pretrained(
        os.path.join(args.ROOT_DIR, args.xlnet_model),
        do_lower_case=args.do_lower_case)
    mode = 'test'
    try:
        if mode == 'test':
            with open(os.path.join(args.data_dir, args.TEST_FEATURE_FILE),
                      'rb') as f:  # TRAIN_FEATURE_FILE
                features = pickle.load(f)
    except (OSError, pickle.UnpicklingError):  # cache missing/corrupt; rebuild
        features = convert_examples_to_features(
            examples,
            args.max_seq_length,
            args.split_num,
            tokenizer,
            mode=mode,
            cls_token_at_end=bool(args.model_type in ['xlnet']),
            # xlnet has a cls token at the end
            cls_token=tokenizer.cls_token,
            sep_token=tokenizer.sep_token,
            cls_token_segment_id=2 if args.model_type in ['xlnet'] else 0,
            pad_on_left=bool(args.model_type in ['xlnet']),
            # pad on the left for xlnet
            pad_token_segment_id=4 if args.model_type in ['xlnet'] else 0)

    all_input_ids = torch.tensor(select_field(features, 'input_ids'),
                                 dtype=torch.long)
    all_input_mask = torch.tensor(select_field(features, 'input_mask'),
                                  dtype=torch.long)
    all_segment_ids = torch.tensor(select_field(features, 'segment_ids'),
                                   dtype=torch.long)

    # dataset
    dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids)
    sampler = SequentialSampler(dataset)  # deterministic order for the test set
    dataloader = DataLoader(dataset,
                            sampler=sampler,
                            batch_size=args.per_gpu_train_batch_size *
                            max(1, n_gpu))
    model.eval()
    y_predicts = []
    for input_ids, input_mask, segment_ids in dataloader:
        input_ids = input_ids.to(device)
        input_mask = input_mask.to(device)
        segment_ids = segment_ids.to(device)

        with torch.no_grad():
            logits = model(input_ids=input_ids,
                           token_type_ids=segment_ids,
                           attention_mask=input_mask)[0]
            predicts = model.predict(logits)
        y_predicts.append(torch.from_numpy(predicts))
    eval_predicted = torch.cat(y_predicts, dim=0).cpu().numpy()

    df = pd.read_csv(os.path.join(args.data_dir, args.TEST_CORPUS_FILE))
    df['labels'] = eval_predicted
    df[['id', 'labels']].to_csv('./data/test_final.csv',
                                sep=',',
                                encoding='utf_8_sig',
                                header=True,
                                index=False)
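
The select_field helper used above is assumed to collect one named field from every per-split feature. A minimal sketch matching that usage (the choices_features attribute name is borrowed from similar split-based scripts and is an assumption):

def select_field(features, field):
    # Each feature holds a list of per-split dicts; gather one field from each
    return [[split[field] for split in feature.choices_features]
            for feature in features]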