def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    # Path options.
    parser.add_argument("--output_model_path", default="./models/classifier_model.bin", type=str,
                        help="Path of the output model.")
    parser.add_argument("--output_lossfig_path", default="./models/loss.png", type=str,
                        help="Path of the output loss figure.")

    # Model options.
    parser.add_argument("--batch_size", type=int, default=32,
                        help="Batch size.")
    parser.add_argument("--seq_length", type=int, default=128,
                        help="Sequence length.")

    # Optimizer options.
    parser.add_argument("--learning_rate", type=float, default=2e-5,
                        help="Learning rate.")
    parser.add_argument("--warmup", type=float, default=0.1,
                        help="Warm-up proportion.")

    # Training options.
    parser.add_argument("--dropout", type=float, default=0.5,
                        help="Dropout.")
    parser.add_argument("--epochs_num", type=int, default=5,
                        help="Number of epochs.")
    parser.add_argument("--report_steps", type=int, default=100,
                        help="Report training loss every this many steps.")
    parser.add_argument("--seed", type=int, default=7,
                        help="Random seed.")
    parser.add_argument("--device", type=str, default="cpu",
                        help="Device to use.")

    args = parser.parse_args()

    def set_seed(seed=7):
        random.seed(seed)
        os.environ["PYTHONHASHSEED"] = str(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)
        torch.backends.cudnn.deterministic = True

    set_seed(args.seed)

    # Read the datasets.
    train = pd.read_csv('../data5k/train.tsv', encoding='utf-8', sep='\t')
    dev = pd.read_csv('../data5k/dev.tsv', encoding='utf-8', sep='\t')
    test = pd.read_csv('../data5k/test.tsv', encoding='utf-8', sep='\t')

    # Load BERT vocabulary and tokenizer.
    bert_config = BertConfig('bert_model/bert_config.json')
    BERT_MODEL_PATH = 'bert_model'
    bert_tokenizer = BertTokenizer.from_pretrained(BERT_MODEL_PATH, cache_dir=None,
                                                   do_lower_case=False)

    # Generate the model inputs.
    processor = DataPrecessForSingleSentence(bert_tokenizer=bert_tokenizer)

    # Train dataset.
    seqs, seq_masks, seq_segments = processor.get_input(
        sentences=train['text_a'].tolist(), max_seq_len=args.seq_length)
    labels = train['label'].tolist()
    t_seqs = torch.tensor(seqs, dtype=torch.long)
    t_seq_masks = torch.tensor(seq_masks, dtype=torch.long)
    t_seq_segments = torch.tensor(seq_segments, dtype=torch.long)
    t_labels = torch.tensor(labels, dtype=torch.long)
    train_data = TensorDataset(t_seqs, t_seq_masks, t_seq_segments, t_labels)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(dataset=train_data, sampler=train_sampler,
                                  batch_size=args.batch_size)

    # Dev dataset.
    seqs, seq_masks, seq_segments = processor.get_input(
        sentences=dev['text_a'].tolist(), max_seq_len=args.seq_length)
    labels = dev['label'].tolist()
    t_seqs = torch.tensor(seqs, dtype=torch.long)
    t_seq_masks = torch.tensor(seq_masks, dtype=torch.long)
    t_seq_segments = torch.tensor(seq_segments, dtype=torch.long)
    t_labels = torch.tensor(labels, dtype=torch.long)
    dev_data = TensorDataset(t_seqs, t_seq_masks, t_seq_segments, t_labels)
    dev_sampler = RandomSampler(dev_data)
    dev_dataloader = DataLoader(dataset=dev_data, sampler=dev_sampler,
                                batch_size=args.batch_size)

    # Test dataset.
    seqs, seq_masks, seq_segments = processor.get_input(
        sentences=test['text_a'].tolist(), max_seq_len=args.seq_length)
    labels = test['label'].tolist()
    t_seqs = torch.tensor(seqs, dtype=torch.long)
    t_seq_masks = torch.tensor(seq_masks, dtype=torch.long)
    t_seq_segments = torch.tensor(seq_segments, dtype=torch.long)
    t_labels = torch.tensor(labels, dtype=torch.long)
    test_data = TensorDataset(t_seqs, t_seq_masks, t_seq_segments, t_labels)
    test_sampler = RandomSampler(test_data)
    test_dataloader = DataLoader(dataset=test_data, sampler=test_sampler,
                                 batch_size=args.batch_size)

    # Build the classification model.
    model = BertForSequenceClassification(bert_config, 2)

    # For simplicity, we use the DataParallel wrapper to use multiple GPUs.
    if args.device == 'cpu':
        device = torch.device("cpu")
    else:
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        if torch.cuda.device_count() > 1:
            print("{} GPUs are available. Let's use them.".format(
                torch.cuda.device_count()))
            model = nn.DataParallel(model)
    model = model.to(device)

    # Evaluation function.
    def evaluate(args, is_test, metrics='Acc'):
        if is_test:
            dataset = test_dataloader
            instances_num = test.shape[0]
        else:
            dataset = dev_dataloader
            instances_num = dev.shape[0]
        print("The number of evaluation instances: ", instances_num)

        correct = 0
        model.eval()
        # Confusion matrix.
        confusion = torch.zeros(2, 2, dtype=torch.long)
        # Defined up front so the 'f1' metric never hits a NameError.
        label_1_f1 = 0.0

        for i, batch_data in enumerate(dataset):
            batch_data = tuple(t.to(device) for t in batch_data)
            batch_seqs, batch_seq_masks, batch_seq_segments, batch_labels = batch_data
            with torch.no_grad():
                logits = model(batch_seqs, batch_seq_masks, batch_seq_segments,
                               labels=None)
            pred = logits.softmax(dim=1).argmax(dim=1)
            gold = batch_labels
            for j in range(pred.size()[0]):
                confusion[pred[j], gold[j]] += 1
            correct += torch.sum(pred == gold).item()

        if is_test:
            print("Confusion matrix:")
            print(confusion)
            print("Report precision, recall, and f1:")
            for i in range(confusion.size()[0]):
                p = confusion[i, i].item() / confusion[i, :].sum().item()
                r = confusion[i, i].item() / confusion[:, i].sum().item()
                f1 = 2 * p * r / (p + r)
                if i == 1:
                    label_1_f1 = f1
                print("Label {}: {:.3f}, {:.3f}, {:.3f}".format(i, p, r, f1))
        print("Acc. (Correct/Total): {:.4f} ({}/{}) ".format(
            correct / instances_num, correct, instances_num))
        if metrics == 'Acc':
            return correct / instances_num
        elif metrics == 'f1':
            return label_1_f1
        else:
            return correct / instances_num

    # Training phase.
    print("Start training.")
    instances_num = train.shape[0]
    batch_size = args.batch_size
    train_steps = int(instances_num * args.epochs_num / batch_size) + 1
    print("Batch size: ", batch_size)
    print("The number of training instances:", instances_num)

    # Parameters to optimize: weight decay on everything except biases
    # and LayerNorm parameters.
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [{
        'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
        'weight_decay': 0.01
    }, {
        'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
        'weight_decay': 0.0
    }]
    optimizer = BertAdam(optimizer_grouped_parameters,
                         lr=args.learning_rate,
                         warmup=args.warmup,
                         t_total=train_steps)

    # Track the residual loss of each epoch and the dev accuracy.
    all_loss = []
    all_acc = []
    total_loss = 0.0
    result = 0.0
    best_result = 0.0

    for epoch in range(1, args.epochs_num + 1):
        model.train()
        for step, batch_data in enumerate(train_dataloader):
            batch_data = tuple(t.to(device) for t in batch_data)
            batch_seqs, batch_seq_masks, batch_seq_segments, batch_labels = batch_data
            # When labels are passed, the model returns the loss directly.
            loss = model(batch_seqs, batch_seq_masks, batch_seq_segments,
                         batch_labels)
            loss.backward()
            total_loss += loss.item()
            if (step + 1) % args.report_steps == 0:
                print("Epoch id: {}, Training steps: {}, Avg loss: {:.3f}".format(
                    epoch, step + 1, total_loss / args.report_steps))
                sys.stdout.flush()
                total_loss = 0.
            optimizer.step()
            optimizer.zero_grad()
        all_loss.append(total_loss)
        total_loss = 0.

        print("Start evaluation on dev dataset.")
        result = evaluate(args, False)
        all_acc.append(result)
        if result > best_result:
            best_result = result
            # Save the state dict so it can be restored with load_state_dict() below
            # (saving the whole model object would break that call).
            torch.save(model.state_dict(), args.output_model_path)

        print("Start evaluation on test dataset.")
        evaluate(args, True)

    print('all_loss:', all_loss)
    print('all_acc:', all_acc)

    # Evaluation phase.
    print("Final evaluation on the test dataset.")
    model.load_state_dict(torch.load(args.output_model_path))
    evaluate(args, True)
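
# Hypothetical entry point -- nothing above actually invokes main(). This guard
# and the example command are illustrative assumptions (the script name is not
# taken from the original):
#   python run_classifier.py --device cuda --batch_size 32 --epochs_num 5
if __name__ == "__main__":
    main()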
class ClassificationModel:
    def __init__(self, bert_model=config.bert_model, gpu=False, seed=0):
        self.gpu = gpu
        self.bert_model = bert_model
        self.train_df = data_reader.load_train_dataset(config.data_path)
        self.val_df = data_reader.load_dev_dataset(config.data_path)
        self.test_df = data_reader.load_test_dataset(config.data_path)
        self.num_classes = len(LABELS)
        self.model = None
        self.optimizer = None
        self.tokenizer = BertTokenizer.from_pretrained(self.bert_model)
        # To plot the loss during the training process.
        self.plt_x = []
        self.plt_y = []
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        if self.gpu:
            torch.cuda.manual_seed_all(seed)

    def __init_model(self):
        if self.gpu:
            self.device = torch.device("cuda")
        else:
            self.device = torch.device("cpu")
        self.model.to(self.device)
        # Log CUDA device info and memory usage (torch.cuda.* calls would fail
        # on a CPU device, so keep them behind the type check).
        if self.device.type == 'cuda':
            print(torch.cuda.memory_allocated(self.device))
            print(torch.cuda.get_device_name(0))
            print('Memory Usage:')
            print('Allocated:', round(torch.cuda.memory_allocated(0) / 1024**3, 1), 'GB')
            print('Cached:   ', round(torch.cuda.memory_cached(0) / 1024**3, 1), 'GB')

    def new_model(self):
        self.model = BertForSequenceClassification.from_pretrained(
            self.bert_model, num_labels=self.num_classes)
        self.__init_model()

    def load_model(self, path_model, path_config):
        self.model = BertForSequenceClassification(BertConfig(path_config),
                                                   num_labels=self.num_classes)
        self.model.load_state_dict(torch.load(path_model))
        self.__init_model()

    def save_model(self, path_model, path_config, epoch_n, acc, f1):
        if not os.path.exists(path_model):
            os.makedirs(path_model)
        # epoch_n is an integer, so format it with {} rather than {:.4f}.
        model_save_path = os.path.join(
            path_model, 'model_{}_{:.4f}_{:.4f}'.format(epoch_n, acc, f1))
        torch.save(self.model.state_dict(), model_save_path)
        if not os.path.exists(path_config):
            os.makedirs(path_config)
        model_config_path = os.path.join(path_config, 'config.cf')
        with open(model_config_path, 'w') as f:
            f.write(self.model.config.to_json_string())

    def train(self, epochs, batch_size=config.batch_size, lr=config.lr,
              plot_path=None, model_path=None, config_path=None):
        model_params = list(self.model.named_parameters())
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [{
            'params': [p for n, p in model_params if not any(nd in n for nd in no_decay)],
            'weight_decay': 0.01
        }, {
            'params': [p for n, p in model_params if any(nd in n for nd in no_decay)],
            'weight_decay': 0.0
        }]
        self.optimizer = BertAdam(
            optimizer_grouped_parameters,
            lr=lr,
            warmup=0.1,
            t_total=int(len(self.train_df) / batch_size) * epochs)
        nb_tr_steps = 0

        train_features = data_reader.convert_examples_to_features(
            self.train_df, config.MAX_SEQ_LENGTH, self.tokenizer)
        # Create tensors of all features.
        all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
        all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)
        train_data = TensorDataset(all_input_ids, all_input_mask,
                                   all_segment_ids, all_label_ids)

        # Eval dataloader.
        eval_features = data_reader.convert_examples_to_features(
            self.val_df, config.MAX_SEQ_LENGTH, self.tokenizer)
        all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
        all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)
        eval_data = TensorDataset(all_input_ids, all_input_mask,
                                  all_segment_ids, all_label_ids)
        eval_sampler = SequentialSampler(eval_data)
        eval_dataloader = DataLoader(eval_data, sampler=eval_sampler,
                                     batch_size=batch_size)

        # Class weighting: weight each class inversely to its frequency.
        _, counts = np.unique(self.train_df['label'], return_counts=True)
        class_weights = [sum(counts) / c for c in counts]
        # Assign a weight to each input sample.
        example_weights = [class_weights[e] for e in self.train_df['label']]
        sampler = WeightedRandomSampler(example_weights, len(self.train_df['label']))
        train_dataloader = DataLoader(train_data, sampler=sampler,
                                      batch_size=batch_size)

        self.model.train()
        for e in range(epochs):
            print("Epoch {}".format(e))
            if e != 0:
                f1, acc = self.val(eval_dataloader)
                print("\nF1 score: {}, Accuracy: {}".format(f1, acc))
                if model_path is not None and config_path is not None:
                    self.save_model(model_path, config_path, e, acc, f1)
            for step, batch in enumerate(
                    tqdm(train_dataloader, desc="Iteration")):
                batch = tuple(t.to(self.device) for t in batch)
                input_ids, input_mask, segment_ids, label_ids = batch
                loss = self.model(input_ids, segment_ids, input_mask, label_ids)
                loss.backward()
                # if plot_path is not None:
                #     self.plt_y.append(loss.item())
                #     self.plt_x.append(nb_tr_steps)
                #     self.save_plot(plot_path)
                nb_tr_steps += 1
                self.optimizer.step()
                self.optimizer.zero_grad()
        if self.gpu:
            torch.cuda.empty_cache()

    def val(self, eval_dataloader, batch_size=config.batch_size):
        f1, acc = 0, 0
        nb_eval_examples = 0
        for input_ids, input_mask, segment_ids, gnd_labels in tqdm(
                eval_dataloader, desc="Evaluating"):
            input_ids = input_ids.to(self.device)
            input_mask = input_mask.to(self.device)
            segment_ids = segment_ids.to(self.device)
            with torch.no_grad():
                logits = self.model(input_ids, segment_ids, input_mask)
            predicted_labels = np.argmax(logits.detach().cpu().numpy(), axis=1)
            acc += np.sum(predicted_labels == gnd_labels.numpy())
            # sklearn's f1_score expects (y_true, y_pred).
            tmp_eval_f1 = f1_score(gnd_labels, predicted_labels, average='macro')
            f1 += tmp_eval_f1 * input_ids.size(0)
            nb_eval_examples += input_ids.size(0)
        return f1 / nb_eval_examples, acc / nb_eval_examples

    def save_plot(self, path):
        fig, ax = plt.subplots()
        ax.plot(self.plt_x, self.plt_y)
        ax.set(xlabel='Training steps', ylabel='Loss')
        fig.savefig(path)
        plt.close()

    def create_test_predictions(self, path):
        tests_features = data_reader.convert_examples_to_features(
            self.test_df, config.MAX_SEQ_LENGTH, self.tokenizer)
        all_input_ids = torch.tensor([f.input_ids for f in tests_features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in tests_features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in tests_features], dtype=torch.long)
        all_label_ids = torch.tensor([f.label_id for f in tests_features], dtype=torch.long)
        all_sample_ids = [f.sample_id for f in tests_features]
        test_data = TensorDataset(all_input_ids, all_input_mask,
                                  all_segment_ids, all_label_ids)
        test_sampler = SequentialSampler(test_data)
        test_dataloader = DataLoader(test_data, sampler=test_sampler, batch_size=16)
        predictions = []
        # LABELS is a dict, so invert it via items().
        inverse_labels = {v: k for k, v in LABELS.items()}
        for input_ids, input_mask, segment_ids, gnd_labels in tqdm(
                test_dataloader, desc="Evaluating"):
            input_ids = input_ids.to(self.device)
            input_mask = input_mask.to(self.device)
            segment_ids = segment_ids.to(self.device)
            with torch.no_grad():
                # Without labels, BertForSequenceClassification returns only the logits.
                logits = self.model(input_ids, segment_ids, input_mask)
            predictions += [
                inverse_labels[p]
                for p in list(np.argmax(logits.detach().cpu().numpy(), axis=1))
            ]
        with open(path, "w") as csv_file:
            writer = csv.writer(csv_file, delimiter=',')
            for i, prediction in enumerate(predictions):
                writer.writerow([all_sample_ids[i], prediction])
        return predictions
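
# Hypothetical usage of the class above -- the checkpoint directories, epoch
# count, and output path are illustrative assumptions, not taken from the
# original project.
clf = ClassificationModel(gpu=True, seed=42)
clf.new_model()                                   # pretrained BERT + fresh classifier head
clf.train(epochs=3,
          model_path='./checkpoints',             # save_model() writes model_<epoch>_<acc>_<f1> here
          config_path='./checkpoints')            # and config.cf here
clf.create_test_predictions('./predictions.csv')  # one (sample_id, label) row per test example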
def train_unfixed():
    # Configuration file.
    cf = Config('./config.yaml')
    # Use the GPU if one is available.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Training data.
    train_data = NewsDataset("./data/cnews_final_train.txt", cf.max_seq_len)
    train_dataloader = DataLoader(train_data, batch_size=cf.batch_size, shuffle=True)
    # Test data.
    test_data = NewsDataset("./data/cnews_final_test.txt", cf.max_seq_len)
    test_dataloader = DataLoader(test_data, batch_size=cf.batch_size, shuffle=True)

    # Model.
    config = BertConfig("./output/pytorch_bert_config.json")
    model = BertForSequenceClassification(config, num_labels=cf.num_labels)
    model.load_state_dict(torch.load("./output/pytorch_model.bin"))

    # Optimizer: BertAdam over all parameters, with weight decay on everything
    # except biases and LayerNorm parameters.
    for param in model.parameters():
        param.requires_grad = True
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [{
        'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
        'weight_decay': 0.01
    }, {
        'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
        'weight_decay': 0.0
    }]
    num_train_optimization_steps = int(len(train_data) / cf.batch_size) * cf.epoch
    optimizer = BertAdam(optimizer_grouped_parameters,
                         lr=cf.lr,
                         t_total=num_train_optimization_steps)

    # Move the model to the target device.
    model.to(device)
    # Parallelize across GPUs if more than one is available.
    if torch.cuda.device_count() > 1:
        model = torch.nn.DataParallel(model)

    # Training.
    start_time = time.time()
    total_batch = 0             # Total number of batches seen.
    best_acc_val = 0.0          # Best validation accuracy so far.
    last_improved = 0           # Batch at which the last improvement happened.
    require_improvement = 1500  # Stop early after 1500 batches without improvement.

    # Get the current validation accuracy.
    model.eval()
    _, best_acc_val = evaluate(model, test_dataloader, device)
    flag = False

    model.train()
    for epoch_id in range(cf.epoch):
        print("Epoch %d" % epoch_id)
        for step, batch in enumerate(
                tqdm(train_dataloader, desc="batch", total=len(train_dataloader))):
            label_id = batch['label_id'].squeeze(1).to(device)
            word_ids = batch['word_ids'].to(device)
            segment_ids = batch['segment_ids'].to(device)
            word_mask = batch['word_mask'].to(device)

            loss = model(word_ids, segment_ids, word_mask, label_id)
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()

            total_batch += 1
            if total_batch % cf.print_per_batch == 0:
                model.eval()
                with torch.no_grad():
                    loss_train, acc_train = get_model_loss_acc(
                        model, word_ids, segment_ids, word_mask, label_id)
                    loss_val, acc_val = evaluate(model, test_dataloader, device)

                if acc_val > best_acc_val:
                    # Save the best result.
                    best_acc_val = acc_val
                    last_improved = total_batch
                    torch.save(model.state_dict(), "./output/pytorch_model.bin")
                    with open("./output/pytorch_bert_config.json", 'w') as f:
                        f.write(model.config.to_json_string())
                    improved_str = "*"
                else:
                    improved_str = ""

                time_dif = get_time_dif(start_time)
                msg = 'Iter: {0:>6}, Train Loss: {1:>6.2}, Train Acc: {2:>7.2%},' \
                      + ' Val Loss: {3:>6.2}, Val Acc: {4:>7.2%}, Time: {5} {6}'
                print(msg.format(total_batch, loss_train, acc_train,
                                 loss_val, acc_val, time_dif, improved_str))
                model.train()

            if total_batch - last_improved > require_improvement:
                print("No improvement for a long time; stopping training early.")
                flag = True
                break
        if flag:
            break
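
# train_unfixed() relies on an evaluate(model, dataloader, device) helper that
# is defined elsewhere (as is get_model_loss_acc). A minimal sketch of what it
# might look like, assuming the same batch-dict keys as the training loop and
# that the model returns the loss when labels are given and logits otherwise --
# the real project may compute these differently.
def evaluate(model, dataloader, device):
    loss_total, correct, total = 0.0, 0, 0
    with torch.no_grad():
        for batch in dataloader:
            label_id = batch['label_id'].squeeze(1).to(device)
            word_ids = batch['word_ids'].to(device)
            segment_ids = batch['segment_ids'].to(device)
            word_mask = batch['word_mask'].to(device)
            # With labels the model returns the loss; without, the logits.
            loss = model(word_ids, segment_ids, word_mask, label_id)
            logits = model(word_ids, segment_ids, word_mask)
            loss_total += loss.item() * label_id.size(0)
            correct += (logits.argmax(dim=1) == label_id).sum().item()
            total += label_id.size(0)
    return loss_total / total, correct / total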
class ClassificationModel:
    def __init__(self, task, val=0.1, bert_model=BERT_MODEL, gpu=False, seed=0):
        self.gpu = gpu
        self.task = task
        self.bert_model = bert_model
        self.x_train, self.y_train = load_train_dataset(self.task)
        # Sample validation indices once so that examples and labels stay
        # paired (sampling x and y independently would mismatch them).
        val_idx = np.random.choice(len(self.x_train),
                                   size=int(val * len(self.x_train)),
                                   replace=False)
        self.x_val = np.asarray(self.x_train)[val_idx]
        self.y_val = np.asarray(self.y_train)[val_idx]
        self.x_test_ids, self.x_test = load_test_dataset(self.task)
        self.num_classes = len(TASK_LABELS[task])
        self.model = None
        self.optimizer = None
        self.tokenizer = BertTokenizer.from_pretrained(self.bert_model)
        self.plt_x = []
        self.plt_y = []
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        if self.gpu:
            torch.cuda.manual_seed_all(seed)

    def __init_model(self):
        if self.gpu:
            self.device = torch.device("cuda")
            print("Start learning with GPU")
        else:
            self.device = torch.device("cpu")
            print("Start learning with CPU")
        self.model.to(self.device)
        # torch.cuda.memory_allocated() only accepts CUDA devices.
        if self.gpu:
            print(torch.cuda.memory_allocated(self.device))

    def new_model(self):
        self.model = BertForSequenceClassification.from_pretrained(
            self.bert_model, num_labels=self.num_classes)
        self.__init_model()

    def load_model(self, path_model, path_config):
        self.model = BertForSequenceClassification(BertConfig(path_config),
                                                   num_labels=self.num_classes)
        self.model.load_state_dict(torch.load(path_model))
        self.__init_model()

    def save_model(self, path_model, path_config):
        torch.save(self.model.state_dict(), path_model)
        with open(path_config, 'w') as f:
            f.write(self.model.config.to_json_string())

    # noinspection PyArgumentList
    def train(self, epochs, plot_path, batch_size=32, lr=5e-5,
              model_path=None, config_path=None):
        model_params = list(self.model.named_parameters())
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [{
            'params': [p for n, p in model_params if not any(nd in n for nd in no_decay)],
            'weight_decay': 0.01
        }, {
            'params': [p for n, p in model_params if any(nd in n for nd in no_decay)],
            'weight_decay': 0.0
        }]
        self.optimizer = BertAdam(optimizer_grouped_parameters,
                                  lr=lr,
                                  warmup=0.1,
                                  t_total=int(len(self.x_train) / batch_size) * epochs)

        train_features = convert_examples_to_features(self.x_train, self.y_train,
                                                      MAX_SEQ_LENGTH, self.tokenizer)
        all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
        all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)
        train_data = TensorDataset(all_input_ids, all_input_mask,
                                   all_segment_ids, all_label_ids)

        # Weight each class inversely to its frequency, then sample accordingly.
        _, counts = np.unique(self.y_train, return_counts=True)
        class_weights = [sum(counts) / c for c in counts]
        example_weights = [class_weights[e] for e in self.y_train]
        sampler = WeightedRandomSampler(example_weights, len(self.y_train))
        train_dataloader = DataLoader(train_data, sampler=sampler,
                                      batch_size=batch_size)

        self.model.train()
        nb_tr_steps = 0
        for e in range(epochs):
            print("Epoch {e}".format(e=e))
            f1, acc = self.val()
            print("\nF1 score: {f1}, Accuracy: {acc}".format(f1=f1, acc=acc))
            if model_path is not None and config_path is not None:
                self.save_model(model_path, config_path)
            for step, batch in enumerate(
                    tqdm(train_dataloader, desc="Iteration")):
                batch = tuple(t.to(self.device) for t in batch)
                input_ids, input_mask, segment_ids, label_ids = batch
                loss = self.model(input_ids, segment_ids, input_mask, label_ids)
                loss.backward()
                self.plt_y.append(loss.item())
                self.plt_x.append(nb_tr_steps)
                self.save_plot(plot_path)
                nb_tr_steps += 1
                self.optimizer.step()
                self.optimizer.zero_grad()
        if self.gpu:
            torch.cuda.empty_cache()

    def val(self, batch_size=32, test=False):
        eval_features = convert_examples_to_features(self.x_val, self.y_val,
                                                     MAX_SEQ_LENGTH, self.tokenizer)
        all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
        all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)
        eval_data = TensorDataset(all_input_ids, all_input_mask,
                                  all_segment_ids, all_label_ids)
        eval_sampler = SequentialSampler(eval_data)
        eval_dataloader = DataLoader(eval_data, sampler=eval_sampler,
                                     batch_size=batch_size)

        f1, acc = 0, 0
        nb_eval_examples = 0
        for input_ids, input_mask, segment_ids, gnd_labels in tqdm(
                eval_dataloader, desc="Evaluating"):
            input_ids = input_ids.to(self.device)
            input_mask = input_mask.to(self.device)
            segment_ids = segment_ids.to(self.device)
            with torch.no_grad():
                logits = self.model(input_ids, segment_ids, input_mask)
            predicted_labels = np.argmax(logits.detach().cpu().numpy(), axis=1)
            acc += np.sum(predicted_labels == gnd_labels.numpy())
            # sklearn's f1_score expects (y_true, y_pred).
            tmp_eval_f1 = f1_score(gnd_labels, predicted_labels, average='macro')
            f1 += tmp_eval_f1 * input_ids.size(0)
            nb_eval_examples += input_ids.size(0)
        return f1 / nb_eval_examples, acc / nb_eval_examples

    def save_plot(self, path):
        import matplotlib.pyplot as plt
        fig, ax = plt.subplots()
        ax.plot(self.plt_x, self.plt_y)
        ax.set(xlabel='Training steps', ylabel='Loss')
        fig.savefig(path)
        plt.close()

    def create_test_predictions(self, path):
        # Test labels are unknown, so fill label_id with a dummy value of -1.
        eval_features = convert_examples_to_features(self.x_test,
                                                     [-1] * len(self.x_test),
                                                     MAX_SEQ_LENGTH, self.tokenizer)
        all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
        all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)
        eval_data = TensorDataset(all_input_ids, all_input_mask,
                                  all_segment_ids, all_label_ids)
        eval_sampler = SequentialSampler(eval_data)
        eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=16)
        predictions = []
        inverse_labels = {v: k for k, v in TASK_LABELS[self.task].items()}
        for input_ids, input_mask, segment_ids, gnd_labels in tqdm(
                eval_dataloader, desc="Evaluating"):
            input_ids = input_ids.to(self.device)
            input_mask = input_mask.to(self.device)
            segment_ids = segment_ids.to(self.device)
            with torch.no_grad():
                logits = self.model(input_ids, segment_ids, input_mask)
            predictions += [
                inverse_labels[p]
                for p in list(np.argmax(logits.detach().cpu().numpy(), axis=1))
            ]
        with open(path, "w") as csv_file:
            writer = csv.writer(csv_file, delimiter=',')
            for i, prediction in enumerate(predictions):
                writer.writerow([int(self.x_test_ids[i]), prediction])
        return predictions
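
# Hypothetical driver for the task-based class above -- the task name, epoch
# count, and file paths are illustrative assumptions, not taken from the
# original project. Note that save_plot() rewrites the loss figure on every
# training step, which is simple but slow for long runs.
model = ClassificationModel(task='A', val=0.1, gpu=True, seed=0)
model.new_model()
model.train(epochs=3,
            plot_path='loss.png',       # loss curve, updated each step
            model_path='model.bin',     # save_model() writes the state dict here
            config_path='config.json')  # and the BERT config here
model.create_test_predictions('predictions.csv')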