def main(args): print(20 * "=", " Preparing for training ", 20 * "=") if not os.path.exists(args.result): os.makedirs(args.result) tokenizer = tokenization.BertTokenizer(args.vocab, do_lower_case=True) # -------------------- Data loading ------------------- # print("\t* Loading testing data...") # train_data = LCQMC_dataset(args.train_file, args.vocab_file, args.max_length, test_flag=False) test_data = DataProcessForSentence(tokenizer, args.test_file, args.max_length, test_flag=True) test_loader = DataLoader(test_data, batch_size=args.batch_size) # -------------------- Model definition ------------------- # print("\t* Building model...") model = Roberta_pooling(args).to(args.device) all_predict = predict(model, test_loader, args) index = np.array([], dtype=int) for i in range(len(all_predict)): index = np.append(index, i) # ---------------------生成文件-------------------------- df_test = pd.DataFrame(columns=['index', 'prediction']) df_test['index'] = index df_test['prediction'] = all_predict df_test.to_csv(args.submit_example_path, index=False, columns=['index', 'prediction'], sep='\t')
n_gpu = torch.cuda.device_count()
print("device %s n_gpu %d" % (device, n_gpu))
print("device: {} n_gpu: {} 16-bits training: {}".format(device, n_gpu, args.float16))

# load the bert setting
if 'albert' not in args.bert_config_file:
    bert_config = BertConfig.from_json_file(args.bert_config_file)
else:
    if 'google' in args.bert_config_file:
        bert_config = AlbertConfig.from_json_file(args.bert_config_file)
    else:
        bert_config = ALBertConfig.from_json_file(args.bert_config_file)

# load data
print('loading data...')
tokenizer = tokenization.BertTokenizer(vocab_file=args.vocab_file, do_lower_case=True)
assert args.vocab_size == len(tokenizer.vocab)

if not os.path.exists(args.test_dir1) or not os.path.exists(args.test_dir2):
    json2features(args.test_file, [args.test_dir1, args.test_dir2], tokenizer,
                  is_training=False, max_seq_length=args.max_seq_length)
if not os.path.exists(args.test_dir1):
    json2features(input_file=args.test_file,
                  output_files=[args.test_dir1, args.test_dir2],
                  tokenizer=tokenizer,
                  is_training=False,
                  repeat_limit=3,
                  max_query_length=96,
                  max_seq_length=args.max_seq_length,
                  doc_stride=128)

test_examples = json.load(open(args.test_dir1, 'r'))
test_features = json.load(open(args.test_dir2, 'r'))
dev_steps_per_epoch = len(test_features) // args.n_batch
if len(test_features) % args.n_batch != 0:
    dev_steps_per_epoch += 1

def main(args): print(20 * "=", " Preparing for training ", 20 * "=") # 保存模型的路径 if not os.path.exists(args.target_dir): os.makedirs(args.target_dir) tokenizer = tokenization.BertTokenizer(args.vocab, do_lower_case=True) # tokenizer = BertTokenizer.from_pretrained(args.vocab) # -------------------- Data loading ------------------- # print("\t* Loading training data...") # train_data = LCQMC_dataset(args.train_file, args.vocab_file, args.max_length, test_flag=False) train_data = DataProcessForSentence(tokenizer, args.train_file, args.max_length, test_flag=False) train_loader = DataLoader(train_data, batch_size=args.batch_size, shuffle=True) print("\t* Loading valid data...") dev_data = DataProcessForSentence(tokenizer, args.dev_file, args.max_length, test_flag=False) dev_loader = DataLoader(dev_data, batch_size=args.batch_size, shuffle=True) # -------------------- Model definition ------------------- # print("\t* Building model...") # model = Bert_model(args).to(args.device) model = Roberta_pooling(args).to(args.device) # -------------------- Preparation for training ------------------- # criterion = nn.CrossEntropyLoss() # 交叉熵损失函数 # 列出所有需要更新权重的参数 param_optimizer = list(model.named_parameters()) # 不需要权重衰减的 no_decay = ['bias', 'LearyNorm.bias', 'LayerNorm.weight'] # 不是这几种类型的就需要进行权重衰减,这里中是不需要进行权重衰减的,保持正常的梯度更新即可 optimizer_grouped_parameters = [{ 'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01 }, { 'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.00 }] optimizer_params = {'lr': args.lr, 'eps': 1e-6, 'correct_bias': False} optimizer = transformers.AdamW(optimizer_grouped_parameters, **optimizer_params) scheduler = transformers.get_linear_schedule_with_warmup( optimizer, num_warmup_steps=0.5, num_training_steps=len(train_loader) * args.epochs) best_score = 0.0 start_epoch = 1 epochs_count = [] train_losses = [] valid_losses = [] # Continuing training from a checkpoint if one was given as argument if args.checkpoint: # 从文件中加载checkpoint数据, 从而继续训练模型 checkpoints = torch.load(args.checkpoint) start_epoch = checkpoints["epoch"] + 1 best_score = checkpoints["best_score"] print("\t* Training will continue on existing model from epoch {}...". format(start_epoch)) model.load_state_dict(checkpoints["model"]) # 模型部分 optimizer.load_state_dict(checkpoints["optimizer"]) epochs_count = checkpoints["epochs_count"] train_losses = checkpoints["train_losses"] valid_losses = checkpoints["valid_losses"] # 这里改为只有从以前加载的checkpoint中才进行计算 valid, Compute loss and accuracy before starting (or resuming) training. _, valid_loss, valid_accuracy, auc = validate(model, dev_loader, criterion, args) print( "\t* Validation loss before training: {:.4f}, accuracy: {:.4f}%, auc: {:.4f}" .format(valid_loss, (valid_accuracy * 100), auc)) # -------------------- Training epochs ------------------- # print("\n", 20 * "=", "Training Bert model on device: {}".format(args.device), 20 * "=") patience_counter = 0 for epoch in range(start_epoch, args.epochs + 1): epochs_count.append(epoch) # -------------------- train -------------------------- print("* Training epoch {}:".format(epoch)) epoch_time, epoch_loss, epoch_accuracy = train(model, train_loader, optimizer, scheduler, criterion, args) train_losses.append(epoch_loss) print("-> Training time: {:.4f}s, loss = {:.4f}, accuracy: {:.4f}%". 
format(epoch_time, epoch_loss, (epoch_accuracy * 100))) # -------------------- valid -------------------------- print("* Validation for epoch {}:".format(epoch)) epoch_time, epoch_loss, epoch_accuracy, epoch_auc = validate( model, train_loader, criterion, args) valid_losses.append(epoch_loss) print( "-> Valid. time: {:.4f}s, loss: {:.4f}, accuracy: {:.4f}%, auc: {:.4f}\n" .format(epoch_time, epoch_loss, (epoch_accuracy * 100), epoch_auc)) # Update the optimizer's learning rate with the scheduler. scheduler.step(epoch_accuracy) # Early stopping on validation accuracy. if epoch_accuracy < best_score: patience_counter += 1 else: best_score = epoch_accuracy patience_counter = 0 # 保存最好的结果,需要保存的参数,这些参数在checkpoint中都能找到 torch.save( { "epoch": epoch, "model": model.state_dict(), "best_score": best_score, "epochs_count": epochs_count, "train_losses": train_losses, "valid_losses": valid_losses }, os.path.join(args.target_dir, "pooling_bert_best.bin")) if patience_counter >= args.patience: print("-> Early stopping: patience limit reached, stopping...") break del model
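# Note: `train` and `validate` are helper functions imported from elsewhere in
# this repository and are not shown in this file. The loop above assumes the
# following contracts (inferred from the call sites, stated here for reference):
#   train(model, dataloader, optimizer, scheduler, criterion, args)
#       -> (elapsed_seconds, avg_loss, accuracy)
#   validate(model, dataloader, criterion, args)
#       -> (elapsed_seconds, avg_loss, accuracy, auc)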
def main():
    parser = argparse.ArgumentParser()

    ## Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
    parser.add_argument(
        "--bert_model",
        default=None,
        type=str,
        required=False,
        help="Bert pre-trained model selected in the list: bert-base-uncased, "
        "bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, "
        "bert-base-multilingual-cased, bert-base-chinese.")
    # trained_model_file
    parser.add_argument("--trained_model_dir",
                        default=None,
                        type=str,
                        help="trained model for eval or predict")
    parser.add_argument("--task_name",
                        default=None,
                        type=str,
                        required=True,
                        help="The name of the task to train.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.")

    ## Other parameters
    parser.add_argument(
        "--cache_dir",
        default="",
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help="The maximum total input sequence length after WordPiece tokenization. \n"
        "Sequences longer than this will be truncated, and sequences shorter \n"
        "than this will be padded.")
    parser.add_argument("--do_train",
                        action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--my_tokenization",
                        action='store_true',
                        help="Whether to use the custom tokenization when converting examples to features.")
    parser.add_argument("--do_eval",
                        action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument("--do_test",
                        action='store_true',
                        help="Whether to run eval on the test set.")
    parser.add_argument("--do_lower_case",
                        default=False,
                        help="Set this flag if you are using an uncased model.")
    parser.add_argument("--train_batch_size",
                        default=32,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument("--eval_batch_size",
                        default=8,
                        type=int,
                        help="Total batch size for eval.")
    parser.add_argument("--learning_rate",
                        default=5e-5,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--num_train_epochs",
                        default=3.0,
                        type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument(
        "--warmup_proportion",
        default=0.1,
        type=float,
        help="Proportion of training to perform linear learning rate warmup for. "
        "E.g., 0.1 = 10%% of training.")
    parser.add_argument("--no_cuda",
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument("--local_rank",
                        type=int,
                        default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    parser.add_argument(
        '--gradient_accumulation_steps',
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument(
        '--fp16',
        action='store_true',
        help="Whether to use 16-bit float precision instead of 32-bit")
    parser.add_argument(
        '--loss_scale',
        type=float,
        default=0,
        help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
        "0 (default value): dynamic loss scaling.\n"
        "Positive power of 2: static loss scaling value.\n")
    parser.add_argument('--server_ip',
                        type=str,
                        default='',
                        help="Can be used for distant debugging.")
    parser.add_argument('--server_port',
                        type=str,
                        default='',
                        help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port),
                            redirect_output=True)
        ptvsd.wait_for_attach()

    processors = {
        "cola": ColaProcessor,
        "mnli": MnliProcessor,
        "mrpc": MrpcProcessor,
        "lcqmc": LcqmcProcessor,
        "sst-2": Sst2Processor,
        "text-clf": TextClfProcessor,
        "xnli": XnliProcessor
    }

    num_labels_task = {
        "cola": 2,
        "sst-2": 2,
        "mnli": 3,
        "mrpc": 2,
        "lcqmc": 2,
        "xnli": 3,
        "text-clf": 0
    }

    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')
    logger.info(
        "device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
            device, n_gpu, bool(args.local_rank != -1), args.fp16))

    if args.gradient_accumulation_steps < 1:
        raise ValueError(
            "Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
                args.gradient_accumulation_steps))

    args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    if not args.do_train and not args.do_eval and not args.do_test:
        raise ValueError("At least one of `do_train` or `do_eval(test)` must be True.")

    if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train:
        raise ValueError(
            "Output directory ({}) already exists and is not empty.".format(args.output_dir))
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    task_name = args.task_name.lower()
    if task_name not in processors:
        raise ValueError("Task not found: %s" % (task_name))

    processor = processors[task_name]()
    num_labels = num_labels_task[task_name]
    label_list = processor.get_labels()

    if args.bert_model:
        tokenizer = tokenization.BertTokenizer(
            vocab_file=os.path.join(args.bert_model, 'vocab.txt'),
            do_lower_case=True)
    elif args.trained_model_dir:
        tokenizer = tokenization.BertTokenizer(
            vocab_file=os.path.join(args.trained_model_dir, 'vocab.txt'),
            do_lower_case=True)
    logger.info('vocab size is %d' % (len(tokenizer.vocab)))

    label_map_reverse = {}
    train_examples = None
    num_train_optimization_steps = None
    if args.do_train:
        train_examples = processor.get_train_examples(args.data_dir)
        if task_name == 'text-clf':
            num_labels = len(label_list)
            label_map = {label: i for i, label in enumerate(label_list)}
            label_file = os.path.join(args.output_dir, "label_map_training.txt")
            with open(label_file, "w") as writer:
                for (k, v) in label_map.items():
                    writer.write(str(k))
                    writer.write('\t')
                    writer.write(str(v))
                    writer.write('\n')
            label_map_reverse = {v: k for k, v in label_map.items()}
        num_train_optimization_steps = int(
            len(train_examples) / args.train_batch_size /
            args.gradient_accumulation_steps) * args.num_train_epochs
        if args.local_rank != -1:
            num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()
    else:
        train_examples = processor.get_train_examples(args.data_dir)
        label_map = {label: i for i, label in enumerate(label_list)}
        label_map_reverse = {v: k for k, v in label_map.items()}
        num_labels = len(label_list)

    # Prepare model
    cache_dir = args.cache_dir if args.cache_dir else os.path.join(
        str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed_{}'.format(args.local_rank))
    if args.trained_model_dir:
        config = BertConfig(os.path.join(args.trained_model_dir, 'bert_config.json'))
        model = BertForSequenceClassification(config, num_labels=num_labels)
        model.load_state_dict(
            torch.load(os.path.join(args.trained_model_dir, 'pytorch_model.bin')))
        logger.info('finish trained model loading!')
    elif args.bert_model:
        # model = BertForSequenceClassification.from_pretrained(args.bert_model,
        #                                                       cache_dir=cache_dir,
        #                                                       num_labels=num_labels)
        print('init model...')
        bert_config = BertConfig.from_json_file(os.path.join(args.bert_model, 'bert_config.json'))
        model = BertForSequenceClassification(bert_config, num_labels=num_labels)
        utils.torch_show_all_params(model)
        utils.torch_init_model(model, os.path.join(args.bert_model, 'pytorch_model.bin'))
    if args.fp16:
        model.half()
    model.to(device)
    if args.local_rank != -1:
        try:
            from apex.parallel import DistributedDataParallel as DDP
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
        model = DDP(model)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Prepare optimizer
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [{
        'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
        'weight_decay': 0.01
    }, {
        'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
        'weight_decay': 0.0
    }]
    if args.fp16:
        try:
            from apex.optimizers import FP16_Optimizer
            from apex.optimizers import FusedAdam
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
        optimizer = FusedAdam(optimizer_grouped_parameters,
                              lr=args.learning_rate,
                              bias_correction=False,
                              max_grad_norm=1.0)
        if args.loss_scale == 0:
            optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
        else:
            optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
    else:
        optimizer = BertAdam(optimizer_grouped_parameters,
                             lr=args.learning_rate,
                             warmup=args.warmup_proportion,
                             t_total=num_train_optimization_steps)

    global_step = 0
    nb_tr_steps = 0
    tr_loss = 0
    if args.do_train:
        train_features = convert_examples_to_features(
            train_examples, label_list, args.max_seq_length, tokenizer,
            my_tokenization=args.my_tokenization)
        logger.info("***** Running training *****")
        logger.info(" Num examples = %d", len(train_examples))
        logger.info(" Batch size = %d", args.train_batch_size)
        logger.info(" Num steps = %d", num_train_optimization_steps)
        all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
        all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)
        train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
        if args.local_rank == -1:
            train_sampler = RandomSampler(train_data)
        else:
            train_sampler = DistributedSampler(train_data)
        train_dataloader = DataLoader(train_data,
                                      sampler=train_sampler,
                                      batch_size=args.train_batch_size)

        num_epoch = 0
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            model.train()
            tr_loss = 0
            nb_tr_examples, nb_tr_steps = 0, 0
            for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
                batch = tuple(t.to(device) for t in batch)
                input_ids, input_mask, segment_ids, label_ids = batch
                loss = model(input_ids, segment_ids, input_mask, label_ids)
                if n_gpu > 1:
                    loss = loss.mean()  # mean() to average on multi-gpu.
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps

                if args.fp16:
                    optimizer.backward(loss)
                else:
                    loss.backward()

                tr_loss += loss.item()
                nb_tr_examples += input_ids.size(0)
                nb_tr_steps += 1
                if (step + 1) % args.gradient_accumulation_steps == 0:
                    if args.fp16:
                        # modify learning rate with the special warmup BERT uses
                        # if args.fp16 is False, BertAdam is used, which handles this automatically
                        lr_this_step = args.learning_rate * warmup_linear(
                            global_step / num_train_optimization_steps, args.warmup_proportion)
                        for param_group in optimizer.param_groups:
                            param_group['lr'] = lr_this_step
                    optimizer.step()
                    optimizer.zero_grad()
                    global_step += 1

            num_epoch += 1

            ## begin to evaluate
            eval_all_result = []
            eval_examples = processor.get_dev_examples(args.data_dir)
            eval_features = convert_examples_to_features(
                eval_examples, label_list, args.max_seq_length, tokenizer,
                my_tokenization=args.my_tokenization)
            logger.info("***** Running %d -th evaluation *****" % num_epoch)
            logger.info(" Num examples = %d", len(eval_examples))
            logger.info(" Batch size = %d", args.eval_batch_size)
            all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
            all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
            all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
            all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)
            eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
            # Run prediction for full data
            eval_sampler = SequentialSampler(eval_data)
            eval_dataloader = DataLoader(eval_data,
                                         sampler=eval_sampler,
                                         batch_size=args.eval_batch_size)

            model.eval()
            eval_loss, eval_accuracy = 0, 0
            nb_eval_steps, nb_eval_examples = 0, 0
            for input_ids, input_mask, segment_ids, label_ids in tqdm(
                    eval_dataloader, desc="Evaluating"):
                input_ids = input_ids.to(device)
                input_mask = input_mask.to(device)
                segment_ids = segment_ids.to(device)
                label_ids = label_ids.to(device)

                with torch.no_grad():
                    tmp_eval_loss = model(input_ids, segment_ids, input_mask, label_ids)
                    logits = model(input_ids, segment_ids, input_mask)

                logits = logits.detach().cpu().numpy()
                label_ids = label_ids.to('cpu').numpy()
                tmp_eval_accuracy, batch_result = accuracy(logits, label_ids)
                for i in range(input_ids.size()[0]):
                    eval_all_result.append(batch_result[i])

                eval_loss += tmp_eval_loss.mean().item()
                eval_accuracy += tmp_eval_accuracy

                nb_eval_examples += input_ids.size(0)
                nb_eval_steps += 1

            eval_loss = eval_loss / nb_eval_steps
            eval_accuracy = eval_accuracy / nb_eval_examples
            loss = tr_loss / nb_tr_steps if args.do_train else None
            result = {
                'eval_loss': eval_loss,
                'eval_accuracy': eval_accuracy,
                'global_step': global_step,
                'loss': loss
            }

            output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
            epoch_eval_result_file = os.path.join(
                args.output_dir, str(num_epoch) + "th_epoch_eval_results.txt")
            with open(output_eval_file, "a") as writer:
                logger.info("***** %d th epoch eval results *****" % num_epoch)
                writer.write("***%d th epoch result***" % num_epoch)
                for key in sorted(result.keys()):
                    logger.info(" %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))
            if num_epoch < 0:
                continue
            with open(epoch_eval_result_file, "w") as writer:
                for element in eval_all_result:
                    tokens_sample = 'Text'
                    result_sample = element
                    writer.write(str(tokens_sample))
                    writer.write('\t')
                    for ele in result_sample:
                        writer.write(label_map_reverse[ele])
                        writer.write('\t')
                    writer.write('\n')

            # Save a trained model and the associated configuration
            model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model itself
            output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME + '.ep' + str(num_epoch))
            torch.save(model_to_save.state_dict(), output_model_file)
            output_config_file = os.path.join(args.output_dir, CONFIG_NAME)
            with open(output_config_file, 'w') as f:
                f.write(model_to_save.config.to_json_string())
            model.to(device)

    logger.info('%s' % str(args.do_eval))
    if args.do_eval and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
        eval_all_result = []
        stime = datetime.datetime.now()
        eval_examples = processor.get_dev_examples(args.data_dir)
        eval_features = convert_examples_to_features(
            eval_examples, label_list, args.max_seq_length, tokenizer,
            my_tokenization=args.my_tokenization)
        logger.info("***** Running evaluation *****")
        logger.info(" Num examples = %d", len(eval_examples))
        logger.info(" Batch size = %d", args.eval_batch_size)
        all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
        all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)
        eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
        # Run prediction for full data
        eval_sampler = SequentialSampler(eval_data)
        eval_dataloader = DataLoader(eval_data,
                                     sampler=eval_sampler,
                                     batch_size=args.eval_batch_size)

        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0

        for input_ids, input_mask, segment_ids, label_ids in tqdm(
                eval_dataloader, desc="Evaluating"):
            input_ids = input_ids.to(device)
            input_mask = input_mask.to(device)
            segment_ids = segment_ids.to(device)
            label_ids = label_ids.to(device)

            with torch.no_grad():
                tmp_eval_loss = model(input_ids, segment_ids, input_mask, label_ids)
                logits = model(input_ids, segment_ids, input_mask)

            logits = logits.detach().cpu().numpy()
            label_ids = label_ids.to('cpu').numpy()
            tmp_eval_accuracy, batch_result = accuracy(logits, label_ids)
            for i in range(input_ids.size()[0]):
                eval_all_result.append(batch_result[i])

            eval_loss += tmp_eval_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {
            'eval_loss': eval_loss,
            'eval_accuracy': eval_accuracy,
            'global_step': global_step,
            'loss': loss
        }

        output_eval_file = os.path.join(args.output_dir, "eval_metrics.txt")
        eval_result_file = os.path.join(args.output_dir, "eval_all_results.txt")
        with open(output_eval_file, "a") as writer:
            logger.info("***** Final Eval results *****")
            for key in sorted(result.keys()):
                logger.info(" %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
        etime = datetime.datetime.now()
        logger.info('eval process cost time: %s' % str(etime - stime))
        with open(eval_result_file, "w") as writer:
            logger.info("***** eval_all_results *****")
            for element in eval_all_result:
                tokens_sample = 'Text'
                result_sample = element
                writer.write(str(tokens_sample))
                writer.write('\t')
                for ele in result_sample:
                    writer.write(label_map_reverse[ele])
                    writer.write('\t')
                writer.write('\n')

    if args.do_test:
        eval_all_result = []
        eval_examples = processor.get_test_examples(args.data_dir)
        eval_features = convert_examples_to_features(eval_examples, label_list,
                                                     args.max_seq_length, tokenizer)
        logger.info("***** Running testing *****")
        logger.info(" Num examples = %d", len(eval_examples))
        logger.info(" Batch size = %d", args.eval_batch_size)
        all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
        all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)
        eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
        # Run prediction for full data
        eval_sampler = SequentialSampler(eval_data)
        eval_dataloader = DataLoader(eval_data,
                                     sampler=eval_sampler,
                                     batch_size=args.eval_batch_size)

        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0

        for input_ids, input_mask, segment_ids, label_ids in tqdm(
                eval_dataloader, desc="Evaluating"):
            input_ids = input_ids.to(device)
            input_mask = input_mask.to(device)
            segment_ids = segment_ids.to(device)
            label_ids = label_ids.to(device)

            with torch.no_grad():
                tmp_eval_loss = model(input_ids, segment_ids, input_mask, label_ids)
                logits = model(input_ids, segment_ids, input_mask)

            logits = logits.detach().cpu().numpy()
            label_ids = label_ids.to('cpu').numpy()
            tmp_eval_accuracy, batch_result = accuracy(logits, label_ids)
            for i in range(input_ids.size()[0]):
                eval_all_result.append(batch_result[i])

            eval_loss += tmp_eval_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {
            'eval_loss': eval_loss,
            'eval_accuracy': eval_accuracy,
            'global_step': global_step,
            'loss': loss
        }

        output_test_file = os.path.join(args.output_dir, "test_results.txt")
        test_metric_file = os.path.join(args.output_dir, "test_metric.txt")
        with open(test_metric_file, "w") as writer:
            for key in sorted(result.keys()):
                logger.info(" %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
        with open(output_test_file, "w") as writer:
            for element in eval_all_result:
                result_sample = element
                for ele in result_sample:
                    writer.write(str(ele))
                    writer.write('\t')
                writer.write('\n')
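# Reference only: `warmup_linear`, used in the fp16 branch of the training loop
# above, is imported from the repository's optimization utilities and is not
# defined in this file. The sketch below (hypothetical name
# `_warmup_linear_reference`) shows the behaviour assumed here, following the
# pytorch_pretrained_bert convention: linear warmup to 1.0 over the first
# `warmup` fraction of training, then linear decay toward 0.
def _warmup_linear_reference(x, warmup=0.002):
    # x is the fraction of training completed (global_step / total_steps).
    if x < warmup:
        return x / warmup
    return 1.0 - x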
def main():
    parser = argparse.ArgumentParser()

    ## Required parameters
    parser.add_argument("--gpu_ids", default='0', type=str, required=True)
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
    parser.add_argument("--task_name", default='c3', type=str, required=True)
    parser.add_argument(
        "--bert_config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained BERT model. \n"
        "This specifies the model architecture.")
    parser.add_argument(
        "--vocab_file",
        default=None,
        type=str,
        required=True,
        help="The vocabulary file that the BERT model was trained on.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model checkpoints will be written.")

    ## Other parameters
    parser.add_argument(
        "--init_checkpoint",
        default='check_points/pretrain_models/albert_xxlarge_google_zh_v1121/pytorch_model.pth',
        type=str,
        help="Initial checkpoint (usually from a pre-trained BERT model).")
    parser.add_argument(
        "--do_lower_case",
        default=True,
        action='store_true',
        help="Whether to lower case the input text. True for uncased models, False for cased models.")
    parser.add_argument(
        "--max_seq_length",
        default=512,
        type=int,
        help="The maximum total input sequence length after WordPiece tokenization. \n"
        "Sequences longer than this will be truncated, and sequences shorter \n"
        "than this will be padded.")
    parser.add_argument("--do_train",
                        default=False,
                        action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--do_eval",
                        default=False,
                        action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument("--train_batch_size",
                        default=16,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument("--eval_batch_size",
                        default=16,
                        type=int,
                        help="Total batch size for eval.")
    parser.add_argument("--learning_rate",
                        default=2e-5,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--schedule",
                        default='warmup_linear',
                        type=str,
                        help='schedule')
    parser.add_argument("--weight_decay_rate",
                        default=0.01,
                        type=float,
                        help='weight_decay_rate')
    parser.add_argument('--clip_norm', type=float, default=1.0)
    parser.add_argument("--num_train_epochs",
                        default=8.0,
                        type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument(
        "--warmup_proportion",
        default=0.1,
        type=float,
        help="Proportion of training to perform linear learning rate warmup for. "
        "E.g., 0.1 = 10%% of training.")
    parser.add_argument("--no_cuda",
                        default=False,
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument('--float16', action='store_true', default=False)
    parser.add_argument("--local_rank",
                        type=int,
                        default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument('--seed',
                        type=int,
                        default=422,
                        help="random seed for initialization")
    parser.add_argument(
        '--gradient_accumulation_steps',
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument('--setting_file', type=str, default='setting.txt')
    parser.add_argument('--log_file', type=str, default='log.txt')

    args = parser.parse_args()
    args.setting_file = os.path.join(args.output_dir, args.setting_file)
    args.log_file = os.path.join(args.output_dir, args.log_file)
    os.makedirs(args.output_dir, exist_ok=True)
    with open(args.setting_file, 'wt') as opt_file:
        opt_file.write('------------ Options -------------\n')
        print('------------ Options -------------')
        for k in args.__dict__:
            v = args.__dict__[k]
            opt_file.write('%s: %s\n' % (str(k), str(v)))
            print('%s: %s' % (str(k), str(v)))
        opt_file.write('-------------- End ----------------\n')
        print('------------ End -------------')

    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_ids
    if os.path.exists(args.log_file):
        os.remove(args.log_file)

    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')
    logger.info("device %s n_gpu %d distributed training %r", device, n_gpu,
                bool(args.local_rank != -1))

    if args.gradient_accumulation_steps < 1:
        raise ValueError(
            "Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
                args.gradient_accumulation_steps))

    args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    processor = c3Processor(args.data_dir)
    label_list = processor.get_labels()

    tokenizer = tokenization.BertTokenizer(vocab_file=args.vocab_file,
                                           do_lower_case=args.do_lower_case)

    train_examples = None
    num_train_steps = None
    if args.do_train:
        train_examples = processor.get_train_examples()
        num_train_steps = int(
            len(train_examples) / n_class / args.train_batch_size /
            args.gradient_accumulation_steps * args.num_train_epochs)

    if 'albert' in args.bert_config_file:
        if 'google' in args.bert_config_file:
            bert_config = AlbertConfig.from_json_file(args.bert_config_file)
            model = AlbertForMultipleChoice(bert_config, num_choices=n_class)
        else:
            bert_config = ALBertConfig.from_json_file(args.bert_config_file)
            model = ALBertForMultipleChoice(bert_config, num_choices=n_class)
    else:
        bert_config = BertConfig.from_json_file(args.bert_config_file)
        model = BertForMultipleChoice(bert_config, num_choices=n_class)

    if args.max_seq_length > bert_config.max_position_embeddings:
        raise ValueError(
            "Cannot use sequence length {} because the BERT model was only trained up to sequence length {}"
            .format(args.max_seq_length, bert_config.max_position_embeddings))

    if args.init_checkpoint is not None:
        utils.torch_show_all_params(model)
        utils.torch_init_model(model, args.init_checkpoint)
    if args.float16:
        model.half()
    model.to(device)

    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)

    optimizer = get_optimization(model=model,
                                 float16=args.float16,
                                 learning_rate=args.learning_rate,
                                 total_steps=num_train_steps,
                                 schedule=args.schedule,
                                 warmup_rate=args.warmup_proportion,
                                 max_grad_norm=args.clip_norm,
                                 weight_decay_rate=args.weight_decay_rate,
                                 opt_pooler=True)  # multi_choice must update pooler

    global_step = 0
    eval_dataloader = None
    if args.do_eval:
        eval_examples = processor.get_dev_examples()
        feature_dir = os.path.join(args.data_dir, 'dev_features{}.pkl'.format(args.max_seq_length))
        if os.path.exists(feature_dir):
            eval_features = pickle.load(open(feature_dir, 'rb'))
        else:
            eval_features = convert_examples_to_features(eval_examples, label_list,
                                                         args.max_seq_length, tokenizer)
            with open(feature_dir, 'wb') as w:
                pickle.dump(eval_features, w)

        input_ids = []
        input_mask = []
        segment_ids = []
        label_id = []
        for f in eval_features:
            input_ids.append([])
            input_mask.append([])
            segment_ids.append([])
            for i in range(n_class):
                input_ids[-1].append(f[i].input_ids)
                input_mask[-1].append(f[i].input_mask)
                segment_ids[-1].append(f[i].segment_ids)
            label_id.append(f[0].label_id)

        all_input_ids = torch.tensor(input_ids, dtype=torch.long)
        all_input_mask = torch.tensor(input_mask, dtype=torch.long)
        all_segment_ids = torch.tensor(segment_ids, dtype=torch.long)
        all_label_ids = torch.tensor(label_id, dtype=torch.long)

        eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
        if args.local_rank == -1:
            eval_sampler = SequentialSampler(eval_data)
        else:
            eval_sampler = DistributedSampler(eval_data)
        eval_dataloader = DataLoader(eval_data,
                                     sampler=eval_sampler,
                                     batch_size=args.eval_batch_size)

    if args.do_train:
        best_accuracy = 0

        feature_dir = os.path.join(args.data_dir, 'train_features{}.pkl'.format(args.max_seq_length))
        if os.path.exists(feature_dir):
            train_features = pickle.load(open(feature_dir, 'rb'))
        else:
            train_features = convert_examples_to_features(train_examples, label_list,
                                                          args.max_seq_length, tokenizer)
            with open(feature_dir, 'wb') as w:
                pickle.dump(train_features, w)

        logger.info("***** Running training *****")
        logger.info(" Num examples = %d", len(train_examples))
        logger.info(" Batch size = %d", args.train_batch_size)
        logger.info(" Num steps = %d", num_train_steps)

        input_ids = []
        input_mask = []
        segment_ids = []
        label_id = []
        for f in train_features:
            input_ids.append([])
            input_mask.append([])
            segment_ids.append([])
            for i in range(n_class):
                input_ids[-1].append(f[i].input_ids)
                input_mask[-1].append(f[i].input_mask)
                segment_ids[-1].append(f[i].segment_ids)
            label_id.append(f[0].label_id)

        all_input_ids = torch.tensor(input_ids, dtype=torch.long)
        all_input_mask = torch.tensor(input_mask, dtype=torch.long)
        all_segment_ids = torch.tensor(segment_ids, dtype=torch.long)
        all_label_ids = torch.tensor(label_id, dtype=torch.long)

        train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
        if args.local_rank == -1:
            train_sampler = RandomSampler(train_data)
        else:
            train_sampler = DistributedSampler(train_data)
        train_dataloader = DataLoader(train_data,
                                      sampler=train_sampler,
                                      batch_size=args.train_batch_size,
                                      drop_last=True)
        steps_per_epoch = int(num_train_steps / args.num_train_epochs)

        for ie in range(int(args.num_train_epochs)):
            model.train()
            tr_loss = 0
            nb_tr_examples, nb_tr_steps = 0, 0
            with tqdm(total=int(steps_per_epoch), desc='Epoch %d' % (ie + 1)) as pbar:
                for step, batch in enumerate(train_dataloader):
                    batch = tuple(t.to(device) for t in batch)
                    input_ids, input_mask, segment_ids, label_ids = batch
                    loss = model(input_ids, segment_ids, input_mask, label_ids)
                    if n_gpu > 1:
                        loss = loss.mean()  # mean() to average on multi-gpu.
                    if args.gradient_accumulation_steps > 1:
                        loss = loss / args.gradient_accumulation_steps
                    tr_loss += loss.item()

                    if args.float16:
                        optimizer.backward(loss)
                        # modify learning rate with the special warmup BERT uses
                        # if args.fp16 is False, BertAdam is used and handles this automatically
                        lr_this_step = args.learning_rate * warmup_linear(
                            global_step / num_train_steps, args.warmup_proportion)
                        for param_group in optimizer.param_groups:
                            param_group['lr'] = lr_this_step
                    else:
                        loss.backward()

                    nb_tr_examples += input_ids.size(0)
                    if (step + 1) % args.gradient_accumulation_steps == 0:
                        optimizer.step()  # We have accumulated enough gradients
                        model.zero_grad()
                        global_step += 1
                        nb_tr_steps += 1
                        pbar.set_postfix({'loss': '{0:1.5f}'.format(tr_loss / (nb_tr_steps + 1e-5))})
                        pbar.update(1)

            if args.do_eval:
                model.eval()
                eval_loss, eval_accuracy = 0, 0
                nb_eval_steps, nb_eval_examples = 0, 0
                logits_all = []
                for input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader):
                    input_ids = input_ids.to(device)
                    input_mask = input_mask.to(device)
                    segment_ids = segment_ids.to(device)
                    label_ids = label_ids.to(device)

                    with torch.no_grad():
                        tmp_eval_loss, logits = model(input_ids,
                                                      segment_ids,
                                                      input_mask,
                                                      label_ids,
                                                      return_logits=True)

                    logits = logits.detach().cpu().numpy()
                    label_ids = label_ids.cpu().numpy()
                    for i in range(len(logits)):
                        logits_all += [logits[i]]

                    tmp_eval_accuracy = accuracy(logits, label_ids.reshape(-1))

                    eval_loss += tmp_eval_loss.mean().item()
                    eval_accuracy += tmp_eval_accuracy

                    nb_eval_examples += input_ids.size(0)
                    nb_eval_steps += 1

                eval_loss = eval_loss / nb_eval_steps
                eval_accuracy = eval_accuracy / nb_eval_examples

                if args.do_train:
                    result = {
                        'eval_loss': eval_loss,
                        'eval_accuracy': eval_accuracy,
                        'global_step': global_step,
                        'loss': tr_loss / nb_tr_steps
                    }
                else:
                    result = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy}

                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info(" %s = %s", key, str(result[key]))
                with open(args.log_file, 'a') as aw:
                    aw.write("-------------------global steps:{}-------------------\n".format(global_step))
                    aw.write(str(json.dumps(result, indent=2)) + '\n')

                if eval_accuracy >= best_accuracy:
                    torch.save(model.state_dict(),
                               os.path.join(args.output_dir, "model_best.pt"))
                    best_accuracy = eval_accuracy

        model.load_state_dict(torch.load(os.path.join(args.output_dir, "model_best.pt")))
        torch.save(model.state_dict(), os.path.join(args.output_dir, "model.pt"))

    model.load_state_dict(torch.load(os.path.join(args.output_dir, "model.pt")))

    if args.do_eval:
        logger.info("***** Running evaluation *****")
        logger.info(" Num examples = %d", len(eval_examples))
        logger.info(" Batch size = %d", args.eval_batch_size)

        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        logits_all = []
        for input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader):
            input_ids = input_ids.to(device)
            input_mask = input_mask.to(device)
            segment_ids = segment_ids.to(device)
            label_ids = label_ids.to(device)

            with torch.no_grad():
                tmp_eval_loss, logits = model(input_ids,
                                              segment_ids,
                                              input_mask,
                                              label_ids,
                                              return_logits=True)

            logits = logits.detach().cpu().numpy()
            label_ids = label_ids.cpu().numpy()
            for i in range(len(logits)):
                logits_all += [logits[i]]

            tmp_eval_accuracy = accuracy(logits, label_ids.reshape(-1))

            eval_loss += tmp_eval_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples

        result = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy}

        output_eval_file = os.path.join(args.output_dir, "results_dev.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info(" %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
        output_eval_file = os.path.join(args.output_dir, "logits_dev.txt")
        with open(output_eval_file, "w") as f:
            for i in range(len(logits_all)):
                for j in range(len(logits_all[i])):
                    f.write(str(logits_all[i][j]))
                    if j == len(logits_all[i]) - 1:
                        f.write("\n")
                    else:
                        f.write(" ")

        test_examples = processor.get_test_examples()
        feature_dir = os.path.join(args.data_dir, 'test_features{}.pkl'.format(args.max_seq_length))
        if os.path.exists(feature_dir):
            test_features = pickle.load(open(feature_dir, 'rb'))
        else:
            test_features = convert_examples_to_features(test_examples, label_list,
                                                         args.max_seq_length, tokenizer)
            with open(feature_dir, 'wb') as w:
                pickle.dump(test_features, w)

        logger.info("***** Running testing *****")
        logger.info(" Num examples = %d", len(test_examples))
        logger.info(" Batch size = %d", args.eval_batch_size)

        input_ids = []
        input_mask = []
        segment_ids = []
        label_id = []
        for f in test_features:
            input_ids.append([])
            input_mask.append([])
            segment_ids.append([])
            for i in range(n_class):
                input_ids[-1].append(f[i].input_ids)
                input_mask[-1].append(f[i].input_mask)
                segment_ids[-1].append(f[i].segment_ids)
            label_id.append(f[0].label_id)

        all_input_ids = torch.tensor(input_ids, dtype=torch.long)
        all_input_mask = torch.tensor(input_mask, dtype=torch.long)
        all_segment_ids = torch.tensor(segment_ids, dtype=torch.long)
        all_label_ids = torch.tensor(label_id, dtype=torch.long)

        test_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
        if args.local_rank == -1:
            test_sampler = SequentialSampler(test_data)
        else:
            test_sampler = DistributedSampler(test_data)
        test_dataloader = DataLoader(test_data,
                                     sampler=test_sampler,
                                     batch_size=args.eval_batch_size)

        model.eval()
        test_loss, test_accuracy = 0, 0
        nb_test_steps, nb_test_examples = 0, 0
        logits_all = []
        for input_ids, input_mask, segment_ids, label_ids in tqdm(test_dataloader):
            input_ids = input_ids.to(device)
            input_mask = input_mask.to(device)
            segment_ids = segment_ids.to(device)
            label_ids = label_ids.to(device)

            with torch.no_grad():
                tmp_test_loss, logits = model(input_ids,
                                              segment_ids,
                                              input_mask,
                                              label_ids,
                                              return_logits=True)

            logits = logits.detach().cpu().numpy()
            label_ids = label_ids.to('cpu').numpy()
            for i in range(len(logits)):
                logits_all += [logits[i]]

            tmp_test_accuracy = accuracy(logits, label_ids.reshape(-1))

            test_loss += tmp_test_loss.mean().item()
            test_accuracy += tmp_test_accuracy

            nb_test_examples += input_ids.size(0)
            nb_test_steps += 1

        test_loss = test_loss / nb_test_steps
        test_accuracy = test_accuracy / nb_test_examples

        result = {'test_loss': test_loss, 'test_accuracy': test_accuracy}

        output_test_file = os.path.join(args.output_dir, "results_test.txt")
        with open(output_test_file, "w") as writer:
            logger.info("***** Test results *****")
            for key in sorted(result.keys()):
                logger.info(" %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
        output_test_file = os.path.join(args.output_dir, "logits_test.txt")
        with open(output_test_file, "w") as f:
            for i in range(len(logits_all)):
                for j in range(len(logits_all[i])):
                    f.write(str(logits_all[i][j]))
                    if j == len(logits_all[i]) - 1:
                        f.write("\n")
                    else:
                        f.write(" ")

        # The test submission order can't be changed
        submission_test = os.path.join(args.output_dir, "submission_test.json")
        test_preds = [int(np.argmax(logits_)) for logits_ in logits_all]
        with open(submission_test, "w") as f:
            json.dump(test_preds, f)
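# Reference only: `accuracy` used in the evaluation and test loops above is
# imported from elsewhere in the repository. The sketch below (hypothetical name
# `_accuracy_reference`) shows the behaviour those loops assume: it returns the
# count of correct argmax predictions in a batch, which is accumulated and then
# divided by the total number of examples.
import numpy as np

def _accuracy_reference(out, labels):
    # out: (batch, num_choices) logits; labels: (batch,) gold choice indices.
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)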