def main(text):
    args = Config()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # Set seed
    seed_everything(args.seed)
    # Prepare NER task
    args.task_name = args.task_name.lower()
    processor = processors[args.task_name]()
    label_list = processor.get_labels()
    id2label = {i: label for i, label in enumerate(label_list)}
    # label2id = {label: i for i, label in enumerate(label_list)}
    num_labels = len(label_list)
    args.model_type = args.model_type.lower()
    config_class, model_class, tokenizer_class = BertConfig, BertCrfForNer, CNerTokenizer
    config = config_class.from_pretrained(args.model_name_or_path,
                                          num_labels=num_labels,
                                          cache_dir=None)
    if args.do_predict:
        tokenizer = tokenizer_class.from_pretrained(
            args.output_dir, do_lower_case=args.do_lower_case)
        checkpoints = [args.output_dir]
        for checkpoint in checkpoints:
            model = model_class.from_pretrained(checkpoint, config=config)
            model.to(device)
            result = predict(args, text, id2label, model, tokenizer)
            for i in result:
                print(i)
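# The helpers used above (Config, seed_everything, processors, BertCrfForNer,
# CNerTokenizer, predict) are defined elsewhere in this project. As one illustration,
# here is a minimal sketch of what a seed_everything utility typically does; this is an
# assumption, not the project's exact implementation:
import os
import random

import numpy as np
import torch


def seed_everything(seed=42):
    """Fix the common sources of randomness so runs are reproducible."""
    random.seed(seed)
    os.environ["PYTHONHASHSEED"] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)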
def run(args):
    seed_everything(args.seed)
    if args.do_train == 1:
        # print('Splitting the training and validation data')
        # split_data(args=args, file_name_or_path=args.file_name_or_path)
        print('Loading training and validation data')
        train_data, train_dataloader = load_dataset(
            args=args, model_name_or_path=args.model_name_or_path, type='train')
        valid_data, valid_dataloader = load_dataset(
            args=args, model_name_or_path=args.model_name_or_path, type='valid')
        print('Training and validation data loaded')
        print('Starting training')
        train(args=args,
              model_name_or_path=args.model_name_or_path,
              train_data=train_data,
              train_dataloader=train_dataloader,
              valid_data=valid_data,
              valid_dataloader=valid_dataloader)
        print('Training finished')
    if args.do_predict == 1:
        print('Loading test data')
        pre_data, predict_dataloader = load_dataset(
            args=args, model_name_or_path=args.model_name_or_path, type='test')
        print('Test data loaded')
        print('Starting prediction')
        preds, predict_label = predict(
            predict_model_name_or_path=args.predict_model_name_or_path,
            pre_data=pre_data,
            pre_dataloader=predict_dataloader)
        print('Prediction finished')
        print('Formatting the output')
        # Class names kept in the original Chinese ("non-cyber case" / "cyber case")
        target_names = ['非涉网案件', '涉网案件']
        outputlines = get_output(preds=preds,
                                 predict_label=predict_label,
                                 pre_data=pre_data,
                                 target_names=target_names)
        print('Applying rule-based filtering')
        relu = net_relus()
        predict_result = relu.net_relus(
            nonet_keyword_list=config['nonet_keyword_list'],
            predict_result=outputlines)
        print('Writing predictions to file')
        # predict_result = outputlines
        write_pre_result_to_file(args=args, output_lines=predict_result)
        print('Predictions written')
def run(args):
    seed_everything(args.seed)
    if args.do_train == 1:
        logger.info('Splitting the training and validation data')
        split_data(args=args, file_name_or_path=args.file_name_or_path)
        logger.info('Loading training and validation data')
        train_data, train_dataloader = load_dataset(
            args=args, model_name_or_path=args.model_name_or_path, type='train')
        valid_data, valid_dataloader = load_dataset(
            args=args, model_name_or_path=args.model_name_or_path, type='valid')
        logger.info('Training and validation data loaded')
        logger.info('Starting training')
        train(args=args,
              model_name_or_path=args.model_name_or_path,
              train_data=train_data,
              train_dataloader=train_dataloader,
              valid_data=valid_data,
              valid_dataloader=valid_dataloader)
        logger.info('Training finished')
    if args.do_predict == 1:
        logger.info('Loading test data')
        pre_data, predict_dataloader = load_dataset(
            args=args, model_name_or_path=args.model_name_or_path, type='test')
        logger.info('Test data loaded')
        logger.info('Starting prediction')
        preds, predict_label = predict(
            predict_model_name_or_path=args.predict_model_name_or_path,
            pre_data=pre_data,
            pre_dataloader=predict_dataloader)
        logger.info('Prediction finished')
        logger.info('Writing predictions to file')
        write_pre_result_to_file(args=args,
                                 preds=preds,
                                 predict_label=predict_label,
                                 pre_data=pre_data)
        logger.info('Predictions written')
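# Hypothetical invocation sketch for the run() entry point above. The flag names are
# taken from the attributes run() actually reads; the defaults are illustrative only,
# and the project's real Config/argparse setup may differ.
import argparse

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--do_train", type=int, default=1)
    parser.add_argument("--do_predict", type=int, default=0)
    parser.add_argument("--model_name_or_path", type=str, default="bert-base-chinese")
    parser.add_argument("--predict_model_name_or_path", type=str, default="./outputs")
    parser.add_argument("--file_name_or_path", type=str, default="./data/all.txt")
    run(parser.parse_args())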
def main():
    parser = ArgumentParser()
    ## Required parameters
    parser.add_argument("--do_data", default=True, action='store_true')
    parser.add_argument('--data_name', default='albert', type=str)
    parser.add_argument('--max_ngram', default=3, type=int)
    parser.add_argument("--do_lower_case", default=False, action='store_true')
    parser.add_argument('--seed', default=42, type=int)
    # parser.add_argument("--file_num", type=int, default=10,
    #                     help="Number of dynamic masking to pregenerate (with different masks)")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--short_seq_prob", type=float, default=0.1,
                        help="Probability of making a short sentence as a training example")
    parser.add_argument("--masked_lm_prob", type=float, default=0.15,
                        help="Probability of masking each token for the LM task")
    parser.add_argument("--max_predictions_per_seq", type=int, default=20,  # 128 * 0.15
                        help="Maximum number of tokens to mask in each sequence")
    args = parser.parse_args()
    seed_everything(args.seed)
    from configs.base import config
    args.vocab_path = config['albert_vocab_path']
    args.data_dir = config['data_dir']
    logger.info("pregenerate training data parameters:\n %s", args)
    tokenizer = BertTokenizer(vocab_file=args.vocab_path,
                              do_lower_case=args.do_lower_case)
    small_path = config['data_dir'] / "corpus/small"
    files = sorted(
        [f for f in small_path.iterdir() if f.exists() and '.txt' in str(f)])
    file_path = files[0].absolute()
    max_seq_len = args.max_seq_len
    train(file_path, tokenizer, max_seq_len)
    print(" dataloader ok! ")
    sys.exit(0)
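# The configs.base config dict imported above is assumed to provide pathlib.Path values
# for the keys this script reads (it is divided with "/" below). A hypothetical shape,
# with placeholder paths, purely for illustration:
from pathlib import Path

config = {
    "albert_vocab_path": Path("pretrain/albert/vocab.txt"),
    "data_dir": Path("dataset"),
}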
def main(): parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--task_name", default=None, type=str, help="The name of the task to train selected in the list: " + ", ".join(processors.keys())) parser.add_argument( "--data_dir", default=None, type=str, help= "The input data dir. Should contain the training files for the CoNLL-2003 NER task.", ) parser.add_argument( "--model_type", default=None, type=str, help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()), ) parser.add_argument( "--model_name_or_path", default=None, type=str, help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS), ) parser.add_argument( "--output_dir", default=None, type=str, help= "The output directory where the model predictions and checkpoints will be written.", ) # Other parameters parser.add_argument('--markup', default='bios', type=str, choices=['bios', 'bio']) parser.add_argument( "--labels", default="", type=str, help= "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.", ) parser.add_argument( "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name") parser.add_argument( "--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--cache_dir", default="", type=str, help= "Where do you want to store the pre-trained models downloaded from s3", ) parser.add_argument( "--train_max_seq_length", default=128, type=int, help= "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded.", ) parser.add_argument( "--eval_max_seq_length", default=512, type=int, help= "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded.", ) parser.add_argument("--do_train", default=None, type=bool, help="Whether to run training.") parser.add_argument("--do_eval", default=None, type=bool, help="Whether to run eval on the dev set.") parser.add_argument("--do_predict", default=None, type=bool, help="Whether to run predictions on the test set.") parser.add_argument( "--evaluate_during_training", action="store_true", help="Whether to run evaluation during training at each logging step.", ) parser.add_argument( "--do_lower_case", default=None, type=bool, help="Set this flag if you are using an uncased model.") parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.") parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.") parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help= "Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--weight_decay", default=0.01, type=float, help="Weight decay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.") parser.add_argument( "--max_steps", default=-1, type=int, help= "If > 0: set total number of training steps to perform. 
Override num_train_epochs.", ) parser.add_argument( "--warmup_proportion", default=0.1, type=float, help= "Proportion of training to perform linear learning rate warmup for,E.g., 0.1 = 10% of training." ) parser.add_argument("--logging_steps", type=int, default=50, help="Log every X updates steps.") parser.add_argument("--save_steps", type=int, default=50, help="Save checkpoint every X updates steps.") parser.add_argument( "--eval_all_checkpoints", action="store_true", help= "Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number", ) parser.add_argument( '--predict_all_checkpoints', action="store_true", help= "Predict all checkpoints starting with the same prefix as model_name ending and ending with step number", ) parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available") parser.add_argument( "--overwrite_output_dir", default=None, type=bool, help="Overwrite the content of the output directory 将输出目录覆写") parser.add_argument( "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets") parser.add_argument("--seed", type=int, default=42, help="random seed for initialization") parser.add_argument( "--fp16", action="store_true", help= "Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", ) parser.add_argument("--fp16_opt_level",type=str,default="O1",\ help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html",) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument("--server_ip", type=str, default="", help="For distant debugging.") parser.add_argument("--server_port", type=str, default="", help="For distant debugging.") args = parser.parse_args() # 参数修改 # 主要必须修改labels, 位置在processors文件夹ner_seq文件中 args.task_name = 'cner' args.model_type = 'bert' args.model_name_or_path = '/root/models/chinese/bert/pytorch/bert-base-chinese' args.do_train = True args.do_eval = True args.do_predict = True args.do_lower_case = True args.data_dir = '/root/A/违法主体识别/train_data' args.train_max_seq_length = 150 args.eval_max_seq_length = 150 args.per_gpu_train_batch_size = 4 args.per_gpu_eval_batch_size = 4 args.learning_rate = 2e-5 args.num_train_epochs = 5.0 args.logging_steps = 300 args.saving_steps = 600 args.output_dir = './outputs' args.overwrite_output_dir = True args.seed = 42 if not os.path.exists(args.output_dir): os.mkdir(args.output_dir) args.output_dir = os.path.join(args.output_dir, args.model_type) if not os.path.exists(args.output_dir): os.mkdir(args.output_dir) init_logger(log_file=args.output_dir + '/{}-{}-{}.log'.format( args.model_type, args.task_name, time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime()))) if os.path.exists(args.output_dir) and os.listdir( args.output_dir ) and args.do_train and not args.overwrite_output_dir: raise ValueError( "Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome." 
.format(args.output_dir)) # Setup distant debugging if needed if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = torch.cuda.device_count() else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend="nccl") args.n_gpu = 1 args.device = device logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16, ) # Set seed seed_everything(args.seed) # Prepare NER task args.task_name = args.task_name.lower() if args.task_name not in processors: raise ValueError("Task not found: %s" % (args.task_name)) processor = processors[args.task_name]() label_list = processor.get_labels() args.id2label = {i: label for i, label in enumerate(label_list)} args.label2id = {label: i for i, label in enumerate(label_list)} num_labels = len(label_list) # Load pretrained model and tokenizer if args.local_rank not in [-1, 0]: torch.distributed.barrier( ) # Make sure only the first process in distributed training will download model & vocab args.model_type = args.model_type.lower() config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] config = config_class.from_pretrained( args.config_name if args.config_name else args.model_name_or_path, num_labels=num_labels, cache_dir=args.cache_dir if args.cache_dir else None, ) tokenizer = tokenizer_class.from_pretrained( args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case, cache_dir=args.cache_dir if args.cache_dir else None, ) model = model_class.from_pretrained( args.model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), config=config, cache_dir=args.cache_dir if args.cache_dir else None, label2id=args.label2id, device=args.device) if args.local_rank == 0: torch.distributed.barrier( ) # Make sure only the first process in distributed training will download model & vocab model.to(args.device) logger.info("Training/evaluation parameters %s", args) tokenizer = tokenizer_class.from_pretrained( args.output_dir, do_lower_case=args.do_lower_case) checkpoint = args.output_dir logger.info("Predict the following checkpoints: %s", checkpoint) model = model_class.from_pretrained(checkpoint, config=config, label2id=args.label2id, device=args.device) model.to(args.device) global _args, _model, _tokenizer _args, _model, _tokenizer = args, model, tokenizer
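# MODEL_CLASSES above maps a --model_type string to a (config, model, tokenizer) triple.
# A minimal, self-contained sketch of that table using stock transformers classes; the
# project's real entry pairs its own BertCrfForNer / CNerTokenizer classes instead:
from transformers import BertConfig, BertForTokenClassification, BertTokenizer

MODEL_CLASSES = {
    "bert": (BertConfig, BertForTokenClassification, BertTokenizer),
}

config_class, model_class, tokenizer_class = MODEL_CLASSES["bert"]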
def train(args, train_features, model, tokenizer, use_crf): """ Train the model """ # Prepare optimizer and schedule (linear warmup and decay) no_decay = ["bias", "LayerNorm.weight"] bert_param_optimizer = list(model.bert.named_parameters()) if args.model_encdec == 'bert2crf': crf_param_optimizer = list(model.crf.named_parameters()) linear_param_optimizer = list(model.classifier.named_parameters()) optimizer_grouped_parameters = [{ 'params': [ p for n, p in bert_param_optimizer if not any(nd in n for nd in no_decay) ], 'weight_decay': args.weight_decay, 'lr': args.learning_rate }, { 'params': [ p for n, p in bert_param_optimizer if any(nd in n for nd in no_decay) ], 'weight_decay': 0.0, 'lr': args.learning_rate }, { 'params': [ p for n, p in crf_param_optimizer if not any(nd in n for nd in no_decay) ], 'weight_decay': args.weight_decay, 'lr': args.crf_learning_rate }, { 'params': [ p for n, p in crf_param_optimizer if any(nd in n for nd in no_decay) ], 'weight_decay': 0.0, 'lr': args.crf_learning_rate }, { 'params': [ p for n, p in linear_param_optimizer if not any(nd in n for nd in no_decay) ], 'weight_decay': args.weight_decay, 'lr': args.crf_learning_rate }, { 'params': [ p for n, p in linear_param_optimizer if any(nd in n for nd in no_decay) ], 'weight_decay': 0.0, 'lr': args.crf_learning_rate }] elif args.model_encdec == 'bert2gru': gru_param_optimizer = list(model.decoder.named_parameters()) linear_param_optimizer = list(model.clsdense.named_parameters()) optimizer_grouped_parameters = [{ 'params': [ p for n, p in bert_param_optimizer if not any(nd in n for nd in no_decay) ], 'weight_decay': args.weight_decay, 'lr': args.learning_rate }, { 'params': [ p for n, p in bert_param_optimizer if any(nd in n for nd in no_decay) ], 'weight_decay': 0.0, 'lr': args.learning_rate }, { 'params': [ p for n, p in gru_param_optimizer if not any(nd in n for nd in no_decay) ], 'weight_decay': args.weight_decay, 'lr': args.crf_learning_rate }, { 'params': [ p for n, p in gru_param_optimizer if any(nd in n for nd in no_decay) ], 'weight_decay': 0.0, 'lr': args.crf_learning_rate }, { 'params': [ p for n, p in linear_param_optimizer if not any(nd in n for nd in no_decay) ], 'weight_decay': args.weight_decay, 'lr': args.crf_learning_rate }, { 'params': [ p for n, p in linear_param_optimizer if any(nd in n for nd in no_decay) ], 'weight_decay': 0.0, 'lr': args.crf_learning_rate }] elif args.model_encdec == 'bert2soft': linear_param_optimizer = list(model.classifier.named_parameters()) optimizer_grouped_parameters = [{ 'params': [ p for n, p in bert_param_optimizer if not any(nd in n for nd in no_decay) ], 'weight_decay': args.weight_decay, 'lr': args.learning_rate }, { 'params': [ p for n, p in bert_param_optimizer if any(nd in n for nd in no_decay) ], 'weight_decay': 0.0, 'lr': args.learning_rate }, { 'params': [ p for n, p in linear_param_optimizer if not any(nd in n for nd in no_decay) ], 'weight_decay': args.weight_decay, 'lr': args.crf_learning_rate }, { 'params': [ p for n, p in linear_param_optimizer if any(nd in n for nd in no_decay) ], 'weight_decay': 0.0, 'lr': args.crf_learning_rate }] elif args.model_encdec == 'multi2point': # gru_param_optimizer = list(model.decoder.named_parameters()) linear_param_optimizer = list(model.pointer.named_parameters()) optimizer_grouped_parameters = [{ 'params': [ p for n, p in bert_param_optimizer if not any(nd in n for nd in no_decay) ], 'weight_decay': args.weight_decay, 'lr': args.learning_rate }, { 'params': [ p for n, p in bert_param_optimizer if any(nd in n 
for nd in no_decay) ], 'weight_decay': 0.0, 'lr': args.learning_rate }, { 'params': [ p for n, p in linear_param_optimizer if not any(nd in n for nd in no_decay) ], 'weight_decay': args.weight_decay, 'lr': args.point_learning_rate }, { 'params': [ p for n, p in linear_param_optimizer if any(nd in n for nd in no_decay) ], 'weight_decay': 0.0, 'lr': args.point_learning_rate }] t_total = len(train_features) // args.batch_size * args.num_train_epochs args.warmup_steps = int(t_total * args.warmup_proportion) optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total) # Check if saved optimizer or scheduler states exist if os.path.isfile(os.path.join( args.model_name_or_path, "optimizer.pt")) and os.path.isfile( os.path.join(args.model_name_or_path, "scheduler.pt")): # Load in optimizer and scheduler states optimizer.load_state_dict( torch.load(os.path.join(args.model_name_or_path, "optimizer.pt"))) scheduler.load_state_dict( torch.load(os.path.join(args.model_name_or_path, "scheduler.pt"))) logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_features)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info(" Instantaneous batch size per GPU = %d", args.batch_size) logger.info( " Total train batch size (w. parallel, distributed & accumulation) = %d", args.batch_size * (torch.distributed.get_world_size() if args.local_rank != -1 else 1), ) # logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) global_step = 0 tr_loss, logging_loss = 0.0, 0.0 pre_result = {} model.zero_grad() seed_everything( args.seed ) # Added here for reproductibility (even between python 2 and 3) total_step = 0 best_spanf = -1 test_results = {} for ep in range(int(args.num_train_epochs)): pbar = ProgressBar(n_total=len(train_features) // args.batch_size, desc='Training') if ep == int(args.num_train_epochs) - 1: eval_features = load_and_cache_examples(args, args.data_type, tokenizer, data_type='dev') train_features.extend(eval_features) step = 0 for batch in batch_generator(features=train_features, batch_size=args.batch_size, use_crf=use_crf, answer_seq_len=args.answer_seq_len): batch_input_ids, batch_input_mask, batch_segment_ids, batch_label_ids, batch_multi_span_label, batch_context_mask, batch_start_position, batch_end_position, batch_raw_labels, _, batch_example = batch model.train() if args.model_encdec == 'bert2crf' or args.model_encdec == 'bert2gru' or args.model_encdec == 'bert2soft': batch_inputs = tuple(t.to(args.device) for t in batch[0:6]) inputs = { "input_ids": batch_inputs[0], "attention_mask": batch_inputs[1], "token_type_ids": batch_inputs[2], "context_mask": batch_inputs[5], "labels": batch_inputs[3], "testing": False } elif args.model_encdec == 'multi2point': batch_inputs = tuple(t.to(args.device) for t in batch[0:5]) inputs = { "input_ids": batch_inputs[0], "attention_mask": batch_inputs[1], "token_type_ids": batch_inputs[2], "span_label": batch_inputs[4], "testing": False } outputs = model(**inputs) loss = outputs[ 0] # model outputs are always tuple in pytorch-transformers (see doc) if args.n_gpu > 1: loss = loss.mean( ) # mean() to average on multi-gpu parallel training loss.backward() if step % 15 == 0: pbar(step, {'epoch': ep, 'loss': loss.item()}) step += 1 tr_loss += loss.item() 
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) scheduler.step() # Update learning rate schedule optimizer.step() model.zero_grad() global_step += 1 if args.local_rank in [ -1, 0 ] and args.logging_steps > 0 and global_step % args.logging_steps == 0: # Log metrics print("start evalue") if args.local_rank == -1: # Only evaluate when single GPU otherwise metrics may not average well results = evaluate(args=args, model=model, tokenizer=tokenizer, prefix="dev", use_crf=use_crf) span_f = results['span_f'] if span_f > best_spanf: output_dir = os.path.join(args.output_dir, "checkpoint-bestf") if os.path.exists(output_dir): shutil.rmtree(output_dir) print('remove file', args.output_dir) print('\n\n eval results:', results) test_results = evaluate(args=args, model=model, tokenizer=tokenizer, prefix="test", use_crf=use_crf) print('\n\n test results', test_results) print('\n epoch = :', ep) best_spanf = span_f os.makedirs(output_dir) # print('dir = ', output_dir) model_to_save = ( model.module if hasattr(model, "module") else model ) # Take care of distributed/parallel training model_to_save.save_pretrained(output_dir) torch.save( args, os.path.join(output_dir, "training_args.bin")) logger.info("Saving model checkpoint to %s", output_dir) tokenizer.save_vocabulary(output_dir) torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt")) torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt")) logger.info( "Saving optimizer and scheduler states to %s", output_dir) np.random.seed() np.random.shuffle(train_features) logger.info("\n") # if 'cuda' in str(args.device): torch.cuda.empty_cache() return global_step, tr_loss / global_step, test_results
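# The loop above advances the scheduler before the optimizer; since PyTorch 1.1 the
# documented order is optimizer.step() first, then scheduler.step(). A generic sketch of
# one update step under that assumption (not the project's exact loop; the model is any
# Hugging Face-style module that returns the loss first when labels are passed):
import torch


def training_step(model, inputs, optimizer, scheduler, max_grad_norm=1.0):
    model.train()
    loss = model(**inputs)[0]
    loss.backward()
    torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)
    optimizer.step()   # update the parameters
    scheduler.step()   # then advance the learning-rate schedule
    optimizer.zero_grad()
    return loss.item()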
def train_model():
    metrics = {}
    device = torch.device(
        "cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
    args.n_gpu = 1
    if args.model_encdec == 'bert2gru' or args.model_encdec == 'bert2soft' or args.model_encdec == 'multi2point':
        use_crf = False
    elif args.model_encdec == 'bert2crf':
        use_crf = True
    args.device = device
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s",
        args.local_rank,
        device,
        args.n_gpu,
        bool(args.local_rank != -1),
    )
    # Set seed
    seed_everything(args.seed)
    # Prepare NER task
    args.data_type = args.data_type.lower()
    if args.data_type not in processors:
        raise ValueError("Task not found: %s" % (args.data_type))
    processor = processors[args.data_type]()
    label_list = processor.get_labels()  # label list for this task: [B, I, O]
    args.id2label = {i: label for i, label in enumerate(label_list)}  # e.g. {0: 'O', 1: 'B', 2: 'I'}
    args.label2id = {label: i for i, label in enumerate(label_list)}  # e.g. {'O': 0, 'B': 1, 'I': 2}
    num_labels = len(label_list)  # number of labels

    # Load pretrained model and tokenizer
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab
    args.model_type = args.model_type.lower()
    config_class, model_class, tokenizer_class = MODEL_CLASSES[
        args.model_type]  # e.g. BertConfig, BertCrfForNer, EcaTokenizer
    config = config_class.from_pretrained(
        args.config_name if args.config_name else args.model_name_or_path,
        num_labels=num_labels,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    tokenizer = tokenizer_class.from_pretrained(
        args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
        do_lower_case=args.do_lower_case,
        cache_dir=args.cache_dir if args.cache_dir else None,
    )
    model = model_class.from_pretrained(
        args.model_name_or_path,
        from_tf=bool(".ckpt" in args.model_name_or_path),
        config=config,
        cache_dir=args.cache_dir if args.cache_dir else None)  # the model
    if args.local_rank == 0:
        torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab
    model.to(args.device)
    logger.info("Training/evaluation parameters %s", args)

    # Training
    train_features = load_and_cache_examples(args,
                                             args.data_type,
                                             tokenizer,
                                             data_type='train')
    global_step, tr_loss, metrics = train(args,
                                          train_features,
                                          model,
                                          tokenizer,
                                          use_crf=use_crf)
    logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
    return metrics
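# Hypothetical minimal processor illustrating the contract train_model() relies on:
# an entry in the processors dict whose get_labels() returns the task's label list
# (the project's real processors also load the train/dev/test examples). This toy
# class is not part of the original code.
class BioTaskProcessor:
    def get_labels(self):
        return ["B", "I", "O"]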
def main(): parser = argparse.ArgumentParser() parser.add_argument( "--data_dir", default=None, type=str, required=True, help= "The input data dir. Should contain the .tsv files (or other data files) for the task." ) parser.add_argument("--model_type", default=None, type=str, required=True, help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys())) parser.add_argument( "--model_name_or_path", default=None, type=str, required=True, help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS)) parser.add_argument( "--task_name", default=None, type=str, required=True, help="The name of the task to train selected in the list: " + ", ".join(processors.keys())) parser.add_argument( "--output_dir", default=None, type=str, required=True, help= "The output directory where the model predictions and checkpoints will be written." ) parser.add_argument( "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name") parser.add_argument( "--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name") parser.add_argument( "--cache_dir", default="", type=str, help= "Where do you want to store the pre-trained models downloaded from s3") parser.add_argument( "--max_seq_length", default=128, type=int, help= "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded.") parser.add_argument("--do_train", action='store_true', help="Whether to run training.") parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.") parser.add_argument( "--do_predict", action='store_true', help="Whether to run the model in inference mode on the test set.") parser.add_argument( "--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.") parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.") parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.") parser.add_argument( '--gradient_accumulation_steps', type=int, default=1, help= "Number of updates steps to accumulate before performing a backward/update pass." ) parser.add_argument("--learning_rate", default=1e-6, type=float, help="The initial learning rate for Adam.") parser.add_argument("--weight_decay", default=0.01, type=float, help="Weight deay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.") parser.add_argument( "--max_steps", default=-1, type=int, help= "If > 0: set total number of training steps to perform. Override num_train_epochs." ) parser.add_argument( "--warmup_proportion", default=0.1, type=float, help= "Proportion of training to perform linear learning rate warmup for,E.g., 0.1 = 10% of training." 
) parser.add_argument('--logging_steps', type=int, default=10, help="Log every X updates steps.") parser.add_argument('--save_steps', type=int, default=1000, help="Save checkpoint every X updates steps.") parser.add_argument( "--eval_all_checkpoints", action='store_true', help= "Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number" ) parser.add_argument("--no_cuda", action='store_true', default=False, help="Avoid using CUDA when available") parser.add_argument('--overwrite_output_dir', action='store_true', help="Overwrite the content of the output directory") parser.add_argument( '--overwrite_cache', action='store_true', help="Overwrite the cached training and evaluation sets") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") args = parser.parse_args() args.output_dir = args.output_dir + '{}'.format(args.model_type) if not os.path.exists(args.output_dir): os.mkdir(args.output_dir) init_logger(log_file=args.output_dir + '/{}-{}.log'.format(args.model_type, args.task_name)) if os.path.exists(args.output_dir) and os.listdir( args.output_dir ) and args.do_train and not args.overwrite_output_dir: raise ValueError( "Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome." .format(args.output_dir)) if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = torch.cuda.device_count() print("args.n_gpu=", args.n_gpu) else: torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend='nccl') args.n_gpu = 1 args.device = device print("运算设备:", args.device) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s", args.local_rank, device, args.n_gpu, bool(args.local_rank != -1)) seed_everything(args.seed) args.task_name = args.task_name.lower() if args.task_name not in processors: raise ValueError("Task not found: %s" % (args.task_name)) processor = processors[args.task_name]() args.output_mode = output_modes[args.task_name] label_list = processor.get_labels() num_labels = len(label_list) if args.local_rank not in [-1, 0]: torch.distributed.barrier() args.model_type = args.model_type.lower() config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] config = config_class.from_pretrained( args.config_name if args.config_name else args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name) tokenizer = tokenizer_class.from_pretrained( args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case) model = model_class.from_pretrained( args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config) if args.local_rank == 0: torch.distributed.barrier() model.to(args.device) logger.info("Training/evaluation parameters %s", args) # Training if args.do_train: train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, data_type='train') global_step, tr_loss = train(args, train_dataset, model, tokenizer) logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) logger.info("Saving model 
checkpoint to %s", args.output_dir) model_to_save = model.module if hasattr(model, 'module') else model model_to_save.save_pretrained(args.output_dir) tokenizer.save_pretrained(args.output_dir) torch.save(args, os.path.join(args.output_dir, 'training_args.bin')) model = model_class.from_pretrained(args.output_dir) model.to(args.device) # Evaluation results = {} if args.do_eval and args.local_rank in [-1, 0]: tokenizer = tokenizer_class.from_pretrained( args.output_dir, do_lower_case=args.do_lower_case) checkpoints = [args.output_dir] if args.eval_all_checkpoints: checkpoints = list( os.path.dirname(c) for c in sorted( glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))) logging.getLogger("transformers.modeling_utils").setLevel( logging.WARN) logger.info("Evaluate the following checkpoints: %s", checkpoints) for checkpoint in checkpoints: global_step = checkpoint.split( '-')[-1] if len(checkpoints) > 1 else "" prefix = checkpoint.split( '/')[-1] if checkpoint.find('checkpoint') != -1 else "" model = model_class.from_pretrained(checkpoint) model.to(args.device) result = evaluate( args, model, tokenizer, prefix=prefix, ) result = dict( (k + '_{}'.format(global_step), v) for k, v in result.items()) results.update(result) output_eval_file = os.path.join(args.output_dir, "checkpoint_eval_results.txt") with open(output_eval_file, "w") as writer: for key in sorted(results.keys()): writer.write("%s = %s\n" % (key, str(results[key]))) # Predict if args.do_predict and args.local_rank in [-1, 0]: tokenizer = tokenizer_class.from_pretrained( args.output_dir, do_lower_case=args.do_lower_case) checkpoints = [args.output_dir] if args.eval_all_checkpoints: checkpoints = list( os.path.dirname(c) for c in sorted( glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))) logging.getLogger("transformers.modeling_utils").setLevel( logging.WARN) logger.info("Evaluate the following checkpoints: %s", checkpoints) for checkpoint in checkpoints: prefix = checkpoint.split( '/')[-1] if checkpoint.find('checkpoint') != -1 else "" model = model_class.from_pretrained(checkpoint) model.to(args.device) predict(args, model, tokenizer, prefix=prefix)
def train(args, model_name_or_path, train_data, train_dataloader, valid_data,
          valid_dataloader):
    pro = processer()
    labellist = pro.get_labels()
    trainloss = TrainLoss()
    # ***** Load the model *****
    model = BertForSequenceClassification
    config = BertConfig.from_pretrained(model_name_or_path,
                                        num_labels=len(labellist))
    model = model.from_pretrained(model_name_or_path, config=config)
    # ***** Move the model to the device *****
    if torch.cuda.is_available():
        # single-GPU computation
        torch.cuda.set_device(0)
        device = torch.device('cuda', 0)  # set the GPU device id
    else:
        device = torch.device('cpu')
    model.to(device)
    # ***** Optimizer and schedule *****
    t_total = len(train_dataloader
                  ) // args.gradient_accumulation_steps * args.num_train_epochs
    warmup_steps = int(t_total * args.warmup_proportion)
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [{
        'params': [
            p for n, p in model.named_parameters()
            if not any(nd in n for nd in no_decay)
        ],
        'weight_decay': args.weight_decay
    }, {
        'params': [
            p for n, p in model.named_parameters()
            if any(nd in n for nd in no_decay)
        ],
        'weight_decay': 0.0
    }]
    optimizer = AdamW(optimizer_grouped_parameters,
                      lr=args.learning_rate,
                      eps=args.adam_epsilon)
    scheduler = WarmupLinearSchedule(optimizer,
                                     warmup_steps=warmup_steps,
                                     t_total=t_total)
    # ***** Log the training setup *****
    logger.info("***** Running training *****")
    logger.info(" Num examples = %d", len(train_data))
    logger.info(" Num Epochs = %d", args.num_train_epochs)
    logger.info(" Instantaneous batch size per GPU = %d", args.train_batch_size)
    logger.info(" Gradient Accumulation steps = %d",
                args.gradient_accumulation_steps)
    logger.info(" Total optimization steps = %d", t_total)
    # ***** Start training *****
    tr_loss, logging_loss = 0.0, 0.0
    model.zero_grad()
    seed_everything(args.seed)
    for num in range(args.num_train_epochs):
        train_all_steps = 0
        train_steps = []
        train_losses = []
        global_step = 0
        logger.info(f'****************Train epoch-{num}****************')
        pbar = ProgressBar(n_total=len(train_dataloader), desc='Train')
        for step, batch in enumerate(train_dataloader):
            # record the step for plotting the loss curve
            train_all_steps += 1
            train_steps.append(train_all_steps)
            model.train()
            # feed the batch to the model
            batch = tuple(t.to(device) for t in batch)
            inputs = {
                'input_ids': batch[0],
                'attention_mask': batch[1],
                'token_type_ids': batch[2],
                'labels': batch[3]
            }
            outputs = model(**inputs)  # the model computes the loss from the logits and the labels, so outputs already contains the loss value
            # back-propagate the loss
            loss = outputs[0]
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(),
                                           args.max_grad_norm)  # gradient clipping
            # record the loss for plotting the loss curve
            train_losses.append(loss.detach().cpu().numpy())
            # optimizer update
            pbar(step, {'loss': loss.item()})
            tr_loss += loss.item()
            if (step + 1) % args.gradient_accumulation_steps == 0:
                optimizer.step()  # optimizer update
                scheduler.step()  # update the learning-rate schedule
                model.zero_grad()
                global_step += 1
        # Save a checkpoint after each training epoch
        output_dir = os.path.join(args.output_dir,
                                  f'model_checkpoint_epoch_{num}')
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        print('')  # keep the progress-bar output off the next log line
        # logger.info(f'save model checkpoint-{global_step} to {output_dir} ')
        model.save_pretrained(output_dir)  # save the model
        # Plot a loss curve for this training epoch
        trainloss.train_loss(steps=train_steps,
                             losses=train_losses,
                             epoch=num,
                             args=args,
                             type='train',
                             max_step=train_all_steps)
        # ***** Validate after each training epoch *****
        print('')
        logger.info(f'****************Valid epoch-{num}****************')
        logger.info(" Num examples = %d", len(valid_data))
        logger.info(" Batch size = %d", args.valid_batch_size)
        valid_steps, valid_losses, valid_all_steps = valid(
            args=args,
            model=model,
            device=device,
            valid_data=valid_data,
            valid_dataloader=valid_dataloader)
        trainloss.train_loss(steps=valid_steps,
                             losses=valid_losses,
                             epoch=num,
                             args=args,
                             type='valid',
                             max_steps=valid_all_steps)
        # Empty the CUDA cache after every epoch
        if 'cuda' in str(device):
            torch.cuda.empty_cache()
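# A compact, reusable sketch of the bias/LayerNorm weight-decay split that the train()
# functions in this file build inline. It works for any torch.nn.Module and is only an
# illustration of the pattern, not project code:
def build_param_groups(model, weight_decay=0.01):
    """Split parameters into decayed and non-decayed groups."""
    no_decay = ("bias", "LayerNorm.weight")
    return [
        {"params": [p for n, p in model.named_parameters()
                    if not any(nd in n for nd in no_decay)],
         "weight_decay": weight_decay},
        {"params": [p for n, p in model.named_parameters()
                    if any(nd in n for nd in no_decay)],
         "weight_decay": 0.0},
    ]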
def main(): args = deal_parser() if not os.path.exists(args.output_dir): os.mkdir(args.output_dir) args.output_dir = args.output_dir + '{}'.format(args.model_type) if not os.path.exists(args.output_dir): os.mkdir(args.output_dir) init_logger(log_file=args.output_dir + '/{}-{}.log'.format(args.model_type, args.task_name)) if os.path.exists(args.output_dir) and os.listdir( args.output_dir ) and args.do_train and not args.overwrite_output_dir: raise ValueError( "Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome." .format(args.output_dir)) # Setup distant debugging if needed if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = torch.cuda.device_count() else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend='nccl') args.n_gpu = 1 args.device = device # Setup logging logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16) # Set seed seed_everything(args.seed) # Prepare GLUE task args.task_name = args.task_name.lower() if args.task_name not in processors: raise ValueError("Task not found: %s" % (args.task_name)) processor = processors[args.task_name]() args.output_mode = output_modes[args.task_name] label_list = processor.get_labels() num_labels = len(label_list) # Load pretrained model and tokenizer if args.local_rank not in [-1, 0]: torch.distributed.barrier( ) # Make sure only the first process in distributed training will download model & vocab args.model_type = args.model_type.lower() if args.local_rank == 0: torch.distributed.barrier( ) # Make sure only the first process in distributed training will download model & vocab config = BertConfig.from_pretrained( args.config_name if args.config_name else args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name) # albert model # tokenizer = tokenization_albert.FullTokenizer(vocab_file=args.vocab_file, do_lower_case=args.do_lower_case,spm_model_file=args.spm_model_file) # model = AlbertForSequenceClassification.from_pretrained(args.model_name_or_path,from_tf=bool('.ckpt' in args.model_name_or_path),config=config) # bert model tokenizer = tokenization_bert.BertTokenizer( vocab_file=args.vocab_file, do_lower_case=args.do_lower_case, spm_model_file=args.spm_model_file) model = BertForSequenceClassification.from_pretrained( args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config) model.to(args.device) logger.info("Training/evaluation parameters %s", args) # Training if args.do_train: train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, data_type='train') global_step, tr_loss = train(args, train_dataset, model, tokenizer) logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained() if 
args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): # Create output directory if needed if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) logger.info("Saving model checkpoint to %s", args.output_dir) # Save a trained model, configuration and tokenizer using `save_pretrained()`. # They can then be reloaded using `from_pretrained()` model_to_save = model.module if hasattr( model, 'module') else model # Take care of distributed/parallel training model_to_save.save_pretrained(args.output_dir) # Good practice: save your training arguments together with the trained model torch.save(args, os.path.join(args.output_dir, 'training_args.bin')) # Evaluation results = [] if args.do_eval and args.local_rank in [-1, 0]: # albert model #tokenizer = tokenization_albert.FullTokenizer(vocab_file=args.vocab_file,do_lower_case=args.do_lower_case,spm_model_file=args.spm_model_file) # bert model tokenizer = tokenization_bert.BertTokenizer( vocab_file=args.vocab_file, do_lower_case=args.do_lower_case, spm_model_file=args.spm_model_file) checkpoints = [(0, args.output_dir)] if args.eval_all_checkpoints: checkpoints = list( os.path.dirname(c) for c in sorted( glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))) checkpoints = [(int(checkpoint.split('-')[-1]), checkpoint) for checkpoint in checkpoints if checkpoint.find('checkpoint') != -1] checkpoints = sorted(checkpoints, key=lambda x: x[0]) logger.info("Evaluate the following checkpoints: %s", checkpoints) for _, checkpoint in checkpoints: global_step = checkpoint.split( '-')[-1] if len(checkpoints) > 1 else "" prefix = checkpoint.split( '/')[-1] if checkpoint.find('checkpoint') != -1 else "" # albert model # model = AlbertForSequenceClassification.from_pretrained(checkpoint) # bert model model = BertForSequenceClassification.from_pretrained(checkpoint) model.to(args.device) result = evaluate(args, model, tokenizer, prefix=prefix) results.extend([(k + '_{}'.format(global_step), v) for k, v in result.items()]) output_eval_file = os.path.join(args.output_dir, "checkpoint_eval_results.txt") with open(output_eval_file, "w") as writer: for key, value in results: writer.write("%s = %s\n" % (key, str(value))) # Test results = [] if args.do_predict and args.local_rank in [-1, 0]: # albert model # tokenizer = tokenization_albert.FullTokenizer(vocab_file=args.vocab_file,do_lower_case=args.do_lower_case,spm_model_file=args.spm_model_file) # bert model tokenizer = tokenization_bert.BertTokenizer( vocab_file=args.vocab_file, do_lower_case=args.do_lower_case, spm_model_file=args.spm_model_file) checkpoints = [(0, args.output_dir)] if args.predict_all_checkpoints: checkpoints = list( os.path.dirname(c) for c in sorted( glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))) checkpoints = [(int(checkpoint.split('-')[-1]), checkpoint) for checkpoint in checkpoints if checkpoint.find('checkpoint') != -1] checkpoints = sorted(checkpoints, key=lambda x: x[0]) logger.info("Test the following checkpoints: %s", checkpoints) for _, checkpoint in checkpoints: global_step = checkpoint.split( '-')[-1] if len(checkpoints) > 1 else "" prefix = checkpoint.split( '/')[-1] if checkpoint.find('checkpoint') != -1 else "" # albert model # model = AlbertForSequenceClassification.from_pretrained(checkpoint) # bert model model = BertForSequenceClassification.from_pretrained(checkpoint) model.to(args.device) result = test(args, model, tokenizer, prefix=prefix) results.extend([(k + 
'_{}'.format(global_step), v) for k, v in result.items()]) output_test_file = os.path.join(args.output_dir, "checkpoint_test_results.txt") with open(output_test_file, "w") as writer: for key, value in results: writer.write("%s = %s\n" % (key, str(value)))
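# Worked example of the schedule arithmetic used by the train() functions in this file:
# with 10,000 batches per epoch, 1 gradient-accumulation step and 3 epochs,
# t_total = 10000 // 1 * 3 = 30000 update steps, and warmup_proportion = 0.1 gives
# int(30000 * 0.1) = 3000 warmup steps. A small helper capturing that arithmetic
# (illustrative, not part of the original code):
def schedule_steps(num_batches, gradient_accumulation_steps, num_train_epochs,
                   warmup_proportion):
    t_total = num_batches // gradient_accumulation_steps * num_train_epochs
    warmup_steps = int(t_total * warmup_proportion)
    return t_total, warmup_steps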
def train(args, train_dataset, model, tokenizer, config): """ Train the model """ args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = RandomSampler( train_dataset) if args.local_rank == -1 else DistributedSampler( train_dataset) train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size, collate_fn=collate_fn) if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = args.max_steps // ( len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: t_total = len( train_dataloader ) // args.gradient_accumulation_steps * args.num_train_epochs # Prepare optimizer and schedule (linear warmup and decay) no_decay = ["bias", "LayerNorm.weight"] bert_param_optimizer = list(model.bert.named_parameters()) crf_param_optimizer = list(model.crf.named_parameters()) linear_param_optimizer = list(model.classifier.named_parameters()) optimizer_grouped_parameters = [{ 'params': [ p for n, p in bert_param_optimizer if not any(nd in n for nd in no_decay) ], 'weight_decay': args.weight_decay, 'lr': args.learning_rate }, { 'params': [ p for n, p in bert_param_optimizer if any(nd in n for nd in no_decay) ], 'weight_decay': 0.0, 'lr': args.learning_rate }, { 'params': [ p for n, p in crf_param_optimizer if not any(nd in n for nd in no_decay) ], 'weight_decay': args.weight_decay, 'lr': args.crf_learning_rate }, { 'params': [p for n, p in crf_param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0, 'lr': args.crf_learning_rate }, { 'params': [ p for n, p in linear_param_optimizer if not any(nd in n for nd in no_decay) ], 'weight_decay': args.weight_decay, 'lr': args.crf_learning_rate }, { 'params': [ p for n, p in linear_param_optimizer if any(nd in n for nd in no_decay) ], 'weight_decay': 0.0, 'lr': args.crf_learning_rate }] args.warmup_steps = int(t_total * args.warmup_proportion) optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total) # Check if saved optimizer or scheduler states exist if os.path.isfile(os.path.join( args.model_name_or_path, "optimizer.pt")) and os.path.isfile( os.path.join(args.model_name_or_path, "scheduler.pt")): # Load in optimizer and scheduler states optimizer.load_state_dict( torch.load(os.path.join(args.model_name_or_path, "optimizer.pt"))) scheduler.load_state_dict( torch.load(os.path.join(args.model_name_or_path, "scheduler.pt"))) if args.fp16: try: from apex import amp except ImportError: raise ImportError( "Please install apex from https://www.github.com/nvidia/apex to use fp16 training." ) model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True) # Train! logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) logger.info( " Total train batch size (w. 
parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1), ) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) global_step = 0 steps_trained_in_current_epoch = 0 # Check if continuing training from a checkpoint if os.path.exists(args.model_name_or_path ) and "checkpoint" in args.model_name_or_path: # set global_step to gobal_step of last saved checkpoint from model path global_step = int(args.model_name_or_path.split("-")[-1].split("/")[0]) epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps) steps_trained_in_current_epoch = global_step % ( len(train_dataloader) // args.gradient_accumulation_steps) logger.info( " Continuing training from checkpoint, will skip to saved global_step" ) logger.info(" Continuing training from epoch %d", epochs_trained) logger.info(" Continuing training from global step %d", global_step) logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch) tr_loss, logging_loss = 0.0, 0.0 model.zero_grad() seed_everything( args.seed ) # Added here for reproductibility (even between python 2 and 3) # train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]) best_eval_p = 0.0 best_eval_r = 0.0 best_eval_f1 = 0.0 for _ in range(int(args.num_train_epochs)): # pbar = ProgressBar(n_total=len(train_dataloader), desc='Training') epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) for step, batch in enumerate(epoch_iterator): # Skip past any already trained steps if resuming training if steps_trained_in_current_epoch > 0: steps_trained_in_current_epoch -= 1 continue model.train() batch = tuple(t.to(args.device) for t in batch) inputs = { "input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3], 'input_lens': batch[4] } if args.model_type != "distilbert": # XLM and RoBERTa don"t use segment_ids inputs["token_type_ids"] = (batch[2] if args.model_type in ["bert", "xlnet"] else None) outputs = model(**inputs) loss = outputs[ 0] # model outputs are always tuple in pytorch-transformers (see doc) if args.n_gpu > 1: loss = loss.mean( ) # mean() to average on multi-gpu parallel training if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() # pbar(step, {'loss': loss.item()}) tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0: if args.fp16: torch.nn.utils.clip_grad_norm_( amp.master_params(optimizer), args.max_grad_norm) else: torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) scheduler.step() # Update learning rate schedule optimizer.step() model.zero_grad() global_step += 1 # if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: # # Log metrics # print(" ") # if args.local_rank == -1: # # Only evaluate when single GPU otherwise metrics may not average well # evaluate(args, model, tokenizer) # if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: # # Save model checkpoint # output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step)) # if not os.path.exists(output_dir): # os.makedirs(output_dir) # model_to_save = ( # 
model.module if hasattr(model, "module") else model # ) # Take care of distributed/parallel training # model_to_save.save_pretrained(output_dir) # torch.save(args, os.path.join(output_dir, "training_args.bin")) # logger.info("Saving model checkpoint to %s", output_dir) # tokenizer.save_vocabulary(output_dir) # torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt")) # torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt")) # logger.info("Saving optimizer and scheduler states to %s", output_dir) # best eval results = evaluate(args, model, tokenizer) if results["f1"] > best_eval_f1: best_eval_p = results['acc'] best_eval_r = results['recall'] best_eval_f1 = results["f1"] ## 保存最好模型 args.best_eval_output_dir = os.path.join(args.output_dir) if not os.path.exists(args.best_eval_output_dir): os.makedirs(args.best_eval_output_dir) model_to_save = ( model.module if hasattr(model, "module") else model ) # Take care of distributed/parallel training model_to_save.save_pretrained(args.best_eval_output_dir) torch.save( args, os.path.join(args.best_eval_output_dir, "training_args.bin")) logger.info("eval results: p:{:.4f} r:{:.4f} f1:{:.4f}".format( best_eval_p, best_eval_r, best_eval_f1)) logger.info("Saving step:{} as best model to {}".format( global_step, args.best_eval_output_dir)) tokenizer.save_vocabulary(args.best_eval_output_dir) torch.save(optimizer.state_dict(), os.path.join(args.best_eval_output_dir, "optimizer.pt")) torch.save(scheduler.state_dict(), os.path.join(args.best_eval_output_dir, "scheduler.pt")) # logger.info("Saving optimizer and scheduler states to %s", args.best_eval_output_dir) # config_file = os.path.join(args.best_eval_output_dir, "best_config.json") # json.dump(config, config_file) logger.info("\n") if 'cuda' in str(args.device): torch.cuda.empty_cache() if args.do_predict: config_class, model_class, tokenizer_class = MODEL_CLASSES[ args.model_type] model = model_class.from_pretrained(args.best_eval_output_dir, config=config) model.to(args.device) predict(args, model, tokenizer) return global_step, tr_loss / global_step
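# Generic sketch of the "save the best checkpoint" pattern used above, including the
# model.module unwrap needed under DataParallel/DistributedDataParallel. This is a
# sketch of the pattern, not the project's exact helper:
import os

import torch


def save_checkpoint(model, tokenizer, args, output_dir):
    os.makedirs(output_dir, exist_ok=True)
    model_to_save = model.module if hasattr(model, "module") else model
    model_to_save.save_pretrained(output_dir)
    tokenizer.save_vocabulary(output_dir)
    torch.save(args, os.path.join(output_dir, "training_args.bin"))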
def main(): parser = ArgumentParser() ## Required parameters parser.add_argument("--data_dir", default=None, type=str, required=True) parser.add_argument("--vocab_path", default=None, type=str, required=True) parser.add_argument("--output_dir", default=None, type=str, required=True) parser.add_argument('--data_name', default='albert', type=str) parser.add_argument('--max_ngram', default=3, type=int) parser.add_argument("--do_data", default=False, action='store_true') parser.add_argument("--do_split", default=False, action='store_true') parser.add_argument("--do_lower_case", default=False, action='store_true') parser.add_argument('--seed', default=42, type=int) parser.add_argument("--line_per_file", default=1000000000, type=int) parser.add_argument("--file_num", type=int, default=10, help="Number of dynamic masking to pregenerate (with different masks)") parser.add_argument("--max_seq_len", type=int, default=128) parser.add_argument("--short_seq_prob", type=float, default=0.1, help="Probability of making a short sentence as a training example") parser.add_argument("--masked_lm_prob", type=float, default=0.15, help="Probability of masking each token for the LM task") parser.add_argument("--max_predictions_per_seq", type=int, default=20, # 128 * 0.15 help="Maximum number of tokens to mask in each sequence") args = parser.parse_args() seed_everything(args.seed) args.data_dir = Path(args.data_dir) if not os.path.exists(args.output_dir): os.mkdir(args.output_dir) init_logger(log_file=args.output_dir +"pregenerate_training_data_ngram.log") logger.info("pregenerate training data parameters:\n %s", args) tokenizer = BertTokenizer(vocab_file=args.vocab_path, do_lower_case=args.do_lower_case) # split big file if args.do_split: corpus_path =args.data_dir / "corpus/corpus.txt" split_save_path = args.data_dir / "/corpus/train" if not split_save_path.exists(): split_save_path.mkdir(exist_ok=True) line_per_file = args.line_per_file command = f'split -a 4 -l {line_per_file} -d {corpus_path} {split_save_path}/shard_' os.system(f"{command}") # generator train data if args.do_data: data_path = args.data_dir / "corpus/train" files = sorted([f for f in data_path.parent.iterdir() if f.exists() and '.txt' in str(f)]) for idx in range(args.file_num): logger.info(f"pregenetate {args.data_name}_file_{idx}.json") save_filename = data_path / f"{args.data_name}_file_{idx}.json" num_instances = 0 with save_filename.open('w') as fw: for file_idx in range(len(files)): file_path = files[file_idx] file_examples = create_training_instances(input_file=file_path, tokenizer=tokenizer, max_seq_len=args.max_seq_len, max_ngram=args.max_ngram, short_seq_prob=args.short_seq_prob, masked_lm_prob=args.masked_lm_prob, max_predictions_per_seq=args.max_predictions_per_seq) file_examples = [json.dumps(instance) for instance in file_examples] for instance in file_examples: fw.write(instance + '\n') num_instances += 1 metrics_file = data_path / f"{args.data_name}_file_{idx}_metrics.json" print(f"num_instances: {num_instances}") with metrics_file.open('w') as metrics_file: metrics = { "num_training_examples": num_instances, "max_seq_len": args.max_seq_len } metrics_file.write(json.dumps(metrics))
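# Sketch of reading back one of the JSON-lines shards written by the do_data branch
# above (the file naming comes from the code; the per-instance schema is whatever
# create_training_instances emits, so it is not spelled out here):
import json
from pathlib import Path


def read_instances(shard_path):
    with Path(shard_path).open() as fr:
        return [json.loads(line) for line in fr if line.strip()]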
def main(): parser = argparse.ArgumentParser() ## Required parameters parser.add_argument("--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain the .tsv files (or other data files) for the task.") parser.add_argument("--model_type", default=None, type=str, required=True, help="Model type selected in the list: ") parser.add_argument("--model_name_or_path", default=None, type=str, required=True, help="Path to pre-trained model or shortcut name selected in the list") parser.add_argument("--task_name", default=None, type=str, required=True, help="The name of the task to train selected in the list: " + ", ".join(processors.keys())) parser.add_argument("--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.") parser.add_argument("--vocab_file",default='', type=str) parser.add_argument("--spm_model_file",default='',type=str) ## Other parameters parser.add_argument("--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name") parser.add_argument("--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name") parser.add_argument("--cache_dir", default="", type=str, help="Where do you want to store the pre-trained models downloaded from s3") parser.add_argument("--max_seq_length", default=512, type=int, help="The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded.") parser.add_argument("--do_train", action='store_true', help="Whether to run training.") parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.") parser.add_argument("--output_eval", action='store_true', help="Whether to write output result.") parser.add_argument("--do_predict", action='store_true', help="Whether to run the model in inference mode on the test set.") parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.") parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.") parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.") parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight deay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.") parser.add_argument("--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. 
Override num_train_epochs.") parser.add_argument("--warmup_proportion", default=0.1, type=float, help="Proportion of training to perform linear learning rate warmup for,E.g., 0.1 = 10% of training.") parser.add_argument('--logging_steps', type=int, default=10, help="Log every X updates steps.") parser.add_argument('--save_steps', type=int, default=1000, help="Save checkpoint every X updates steps.") parser.add_argument("--eval_all_checkpoints", action='store_true', help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number") parser.add_argument("--no_cuda", action='store_true', help="Avoid using CUDA when available") parser.add_argument('--overwrite_output_dir', action='store_true', help="Overwrite the content of the output directory") parser.add_argument('--overwrite_cache', action='store_true', help="Overwrite the cached training and evaluation sets") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument('--fp16', action='store_true', help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit") parser.add_argument('--fp16_opt_level', type=str, default='O1', help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html") parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.") parser.add_argument('--server_port', type=str, default='', help="For distant debugging.") parser.add_argument("--label_with_bi", action='store_true', help="Label with B/I") args = parser.parse_args() if not os.path.exists(args.output_dir): os.mkdir(args.output_dir) args.output_dir = args.output_dir + '{}'.format(args.model_type) if not os.path.exists(args.output_dir): os.mkdir(args.output_dir) init_logger(log_file=args.output_dir + '/{}-{}.log'.format(args.model_type, args.task_name)) if os.path.exists(args.output_dir) and os.listdir( args.output_dir) and args.do_train and not args.overwrite_output_dir: raise ValueError( "Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.".format( args.output_dir)) # Setup distant debugging if needed if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = torch.cuda.device_count() else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend='nccl') args.n_gpu = 1 args.device = device # Setup logging logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16) # Set seed seed_everything(args.seed) # Prepare GLUE task args.task_name = args.task_name.lower() if args.task_name != "ner": raise ValueError("Task error: %s, must be ner" % (args.task_name)) processor = processors[args.task_name]() args.output_mode = output_modes[args.task_name] label_list = processor.get_labels_ner(args.data_dir, args.label_with_bi) num_labels = len(label_list) # Load pretrained model and tokenizer if args.local_rank not in [-1, 0]: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab args.model_type = args.model_type.lower() config = AlbertConfig.from_pretrained(args.config_name if args.config_name else args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name) tokenizer = tokenization_albert.FullTokenizer(vocab_file=args.vocab_file, do_lower_case=args.do_lower_case, spm_model_file=args.spm_model_file) model =AlbertFocalLossForNer.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config) if args.local_rank == 0: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab model.to(args.device) logger.info("Training/evaluation parameters %s", args) # Training if args.do_train: train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, data_type='train') global_step, tr_loss = train(args, train_dataset, label_list, model, tokenizer) logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained() if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): # Create output directory if needed if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) logger.info("Saving model checkpoint to %s", args.output_dir) # Save a trained model, configuration and tokenizer using `save_pretrained()`. 
# They can then be reloaded using `from_pretrained()` model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training model_to_save.save_pretrained(args.output_dir) # Good practice: save your training arguments together with the trained model torch.save(args, os.path.join(args.output_dir, 'training_args.bin')) # Evaluation results = [] if args.do_eval and args.local_rank in [-1, 0]: tokenizer = tokenization_albert.FullTokenizer(vocab_file=args.vocab_file, do_lower_case=args.do_lower_case, spm_model_file=args.spm_model_file) checkpoints = [(0,args.output_dir)] if args.eval_all_checkpoints: checkpoints = list( os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))) checkpoints = [(int(checkpoint.split('-')[-1]),checkpoint) for checkpoint in checkpoints if checkpoint.find('checkpoint') != -1] checkpoints = sorted(checkpoints,key =lambda x:x[0]) logger.info("Evaluate the following checkpoints: %s", checkpoints) for _,checkpoint in checkpoints: global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else "" prefix = checkpoint.split('/')[-1] if checkpoint.find('checkpoint') != -1 else "" model = AlbertFocalLossForNer.from_pretrained(checkpoint) model.to(args.device) result = evaluate(args, model, tokenizer, label_list, prefix=prefix) results.extend([(k + '_{}'.format(global_step), v) for k, v in result.items()]) output_eval_file = os.path.join(args.output_dir, "checkpoint_eval_results.txt") with open(output_eval_file, "w") as writer: for key,value in results: writer.write("%s = %s\n" % (key, str(value))) if args.do_predict and args.local_rank in [-1, 0]: tokenizer = tokenization_albert.FullTokenizer(vocab_file=args.vocab_file, do_lower_case=args.do_lower_case, spm_model_file=args.spm_model_file) result = evaluate(args, model, tokenizer, label_list, prefix="") output_eval_file = os.path.join(args.output_dir, "checkpoint_eval_results.txt") with open(output_eval_file, "w") as writer: for key,value in result.items(): writer.write("%s = %s\n" % (key, str(value)))
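Checkpoint discovery for --eval_all_checkpoints recurs across these scripts. The idea in isolation, as a sketch; the default weights_name of pytorch_model.bin is an assumption (transformers' usual WEIGHTS_NAME):

import glob
import os

def list_checkpoints(output_dir, weights_name="pytorch_model.bin"):
    """Return (step, path) pairs for every checkpoint-* directory that holds
    a weights file, sorted by step (sketch of the glob/sort logic above)."""
    dirs = {os.path.dirname(p) for p in
            glob.glob(os.path.join(output_dir, "**", weights_name), recursive=True)}
    numbered = [(int(d.split("-")[-1]), d) for d in dirs if "checkpoint" in d]
    return sorted(numbered, key=lambda x: x[0])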
def train(args, train_dataset, model, tokenizer): writer = SummaryWriter(args.summary_dir) """ Train the model """ args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = RandomSampler( train_dataset) if args.local_rank == -1 else DistributedSampler( train_dataset) train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size, collate_fn=collate_fn) if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = args.max_steps // ( len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: t_total = len( train_dataloader ) // args.gradient_accumulation_steps * args.num_train_epochs # Prepare optimizer and schedule (linear warmup and decay) no_decay = ["bias", "LayerNorm.weight"] bert_parameters = model.bert.named_parameters() start_parameters = model.start_fc.named_parameters() end_parameters = model.end_fc.named_parameters() optimizer_grouped_parameters = [ { "params": [ p for n, p in bert_parameters if not any(nd in n for nd in no_decay) ], "weight_decay": args.weight_decay, 'lr': args.learning_rate }, { "params": [p for n, p in bert_parameters if any(nd in n for nd in no_decay)], "weight_decay": 0.0, 'lr': args.learning_rate }, { "params": [ p for n, p in start_parameters if not any(nd in n for nd in no_decay) ], "weight_decay": args.weight_decay, 'lr': 0.001 }, { "params": [ p for n, p in start_parameters if any(nd in n for nd in no_decay) ], "weight_decay": 0.0, 'lr': 0.001 }, { "params": [ p for n, p in end_parameters if not any(nd in n for nd in no_decay) ], "weight_decay": args.weight_decay, 'lr': 0.001 }, { "params": [p for n, p in end_parameters if any(nd in n for nd in no_decay)], "weight_decay": 0.0, 'lr': 0.001 }, ] # optimizer_grouped_parameters = [ # {"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], # "weight_decay": args.weight_decay, }, # {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0}, # ] if args.warmup_steps == -1: args.warmup_steps = int(t_total * args.warmup_proportion) # args.warmup_steps = int(t_total * args.warmup_proportion) optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total) # scheduler = get_limit_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, # num_training_steps=t_total, limit_lr=7e-6) # Check if saved optimizer or scheduler states exist if os.path.isfile(os.path.join( args.model_name_or_path, "optimizer.pt")) and os.path.isfile( os.path.join(args.model_name_or_path, "scheduler.pt")): # Load in optimizer and scheduler states optimizer.load_state_dict( torch.load(os.path.join(args.model_name_or_path, "optimizer.pt"))) scheduler.load_state_dict( torch.load(os.path.join(args.model_name_or_path, "scheduler.pt"))) if args.fp16: try: from apex import amp except ImportError: raise ImportError( "Please install apex from https://www.github.com/nvidia/apex to use fp16 training." 
) model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True) # Train! logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) logger.info( " Total train batch size (w. parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1), ) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) global_step = 0 steps_trained_in_current_epoch = 0 # Check if continuing training from a checkpoint if os.path.exists(args.model_name_or_path ) and "checkpoint" in args.model_name_or_path: # set global_step to gobal_step of last saved checkpoint from model path global_step = int(args.model_name_or_path.split("-")[-1].split("/")[0]) epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps) steps_trained_in_current_epoch = global_step % ( len(train_dataloader) // args.gradient_accumulation_steps) logger.info( " Continuing training from checkpoint, will skip to saved global_step" ) logger.info(" Continuing training from epoch %d", epochs_trained) logger.info(" Continuing training from global step %d", global_step) logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch) best_eval_f1 = 0 tr_loss, logging_loss = 0.0, 0.0 if args.do_adv: fgm = FGM(model, emb_name=args.adv_name, epsilon=args.adv_epsilon) model.zero_grad() seed_everything( args.seed ) # Added here for reproductibility (even between python 2 and 3) for epoch in range(int(args.num_train_epochs)): pbar = ProgressBar(n_total=len(train_dataloader), desc='Training') for step, batch in enumerate(train_dataloader): # Skip past any already trained steps if resuming training if steps_trained_in_current_epoch > 0: steps_trained_in_current_epoch -= 1 continue model.train() batch = tuple(t.to(args.device) for t in batch) inputs = { "input_ids": batch[0], "attention_mask": batch[1], "start_positions": batch[3], "end_positions": batch[4] } if args.model_type != "distilbert": # XLM and RoBERTa don"t use segment_ids inputs["token_type_ids"] = (batch[2] if args.model_type in ["bert", "xlnet"] else None) outputs = model(**inputs) loss = outputs[ 0] # model outputs are always tuple in pytorch-transformers (see doc) if args.n_gpu > 1: loss = loss.mean( ) # mean() to average on multi-gpu parallel training if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() if args.do_adv: fgm.attack() loss_adv = model(**inputs)[0] if args.n_gpu > 1: loss_adv = loss_adv.mean() loss_adv.backward() fgm.restore() cur_lr = optimizer.state_dict()['param_groups'][0]['lr'] # cur_lr = scheduler.get_lr() pbar(step, { 'epoch': int(epoch), 'loss': loss.item(), 'lr': cur_lr }) tr_loss += loss.item() if (step + 1) % 
args.gradient_accumulation_steps == 0: writer.add_scalar('loss', loss.item(), global_step=global_step) writer.add_scalar('lr', cur_lr, global_step=global_step) if args.fp16: torch.nn.utils.clip_grad_norm_( amp.master_params(optimizer), args.max_grad_norm) else: torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) scheduler.step() # Update learning rate schedule optimizer.step() model.zero_grad() global_step += 1 if args.local_rank in [ -1, 0 ] and args.logging_steps > 0 and global_step % args.logging_steps == 0: # Log metrics logger.warn("\n") if args.local_rank == -1: # Only evaluate when single GPU otherwise metrics may not average well prefix = f'epoch {epoch} global_step {global_step}' results = evaluate(args, model, tokenizer, prefix=prefix) if results['f1'] > best_eval_f1: best_eval_f1 = results['f1'] output_dir = os.path.join(args.output_dir, "best_eval_checkpoint") if not os.path.exists(output_dir): os.makedirs(output_dir) model_to_save = ( model.module if hasattr(model, "module") else model ) # Take care of distributed/parallel training model_to_save.save_pretrained(output_dir) torch.save( args, os.path.join(output_dir, "training_args.bin")) logger.info("Saving model checkpoint to %s", output_dir) tokenizer.save_vocabulary(output_dir) torch.save( optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt")) torch.save( scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt")) logger.info( "Saving optimizer and scheduler states to %s", output_dir) results['checkoutpoint'] = global_step results['epoch'] = epoch # results.update(vars(args)) json.dump(results, open( os.path.join(output_dir, 'eval_result.txt'), 'w'), ensure_ascii=False, indent=4) # if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: # # Save model checkpoint # output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step)) # if not os.path.exists(output_dir): # os.makedirs(output_dir) # model_to_save = ( # model.module if hasattr(model, "module") else model # ) # Take care of distributed/parallel training # model_to_save.save_pretrained(output_dir) # torch.save(args, os.path.join(output_dir, "training_args.bin")) # tokenizer.save_vocabulary(output_dir) # logger.info("Saving model checkpoint to %s", output_dir) # torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt")) # torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt")) # logger.info("Saving optimizer and scheduler states to %s", output_dir) logger.warn("\n") if 'cuda' in str(args.device): torch.cuda.empty_cache() return global_step, tr_loss / global_step
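The `fgm.attack()` / `fgm.restore()` calls in the loop above implement FGM adversarial training on the embedding layer. Below is a minimal sketch of such an FGM class following the common recipe; the repository's own implementation may differ in detail.

import torch

class FGM:
    """Fast Gradient Method: perturb the embedding weights along the gradient
    before a second forward/backward pass, then restore them (sketch)."""
    def __init__(self, model, emb_name="word_embeddings", epsilon=1.0):
        self.model = model
        self.emb_name = emb_name      # substring identifying the embedding parameters
        self.epsilon = epsilon
        self.backup = {}

    def attack(self):
        for name, param in self.model.named_parameters():
            if param.requires_grad and self.emb_name in name:
                self.backup[name] = param.data.clone()
                norm = torch.norm(param.grad)
                if norm != 0 and not torch.isnan(norm):
                    param.data.add_(self.epsilon * param.grad / norm)

    def restore(self):
        for name, param in self.model.named_parameters():
            if param.requires_grad and self.emb_name in name:
                param.data = self.backup[name]
        self.backup = {}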
def main(): args = get_argparse().parse_args() if not os.path.exists(args.output_dir): os.mkdir(args.output_dir) args.output_dir = args.output_dir + '{}'.format(args.model_type) if not os.path.exists(args.output_dir): os.mkdir(args.output_dir) time_ = time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime()) init_logger(log_file=args.output_dir + f'/{args.model_type}-{args.task_name}-{time_}.log') if os.path.exists(args.output_dir) and os.listdir( args.output_dir ) and args.do_train and not args.overwrite_output_dir: raise ValueError( "Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome." .format(args.output_dir)) # Setup distant debugging if needed if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = torch.cuda.device_count() else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend="nccl") args.n_gpu = 1 args.device = device logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16, ) # Set seed seed_everything(args.seed) # Prepare NER task args.task_name = args.task_name.lower() if args.task_name not in processors: raise ValueError("Task not found: %s" % (args.task_name)) processor = processors[args.task_name]() label_list = processor.get_labels() args.id2label = {i: label for i, label in enumerate(label_list)} args.label2id = {label: i for i, label in enumerate(label_list)} num_labels = len(label_list) # Load pretrained model and tokenizer if args.local_rank not in [-1, 0]: torch.distributed.barrier( ) # Make sure only the first process in distributed training will download model & vocab args.model_type = args.model_type.lower() config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] config = config_class.from_pretrained(args.model_name_or_path, num_labels=num_labels) config.soft_label = True config.loss_type = args.loss_type tokenizer = tokenizer_class.from_pretrained( args.model_name_or_path, do_lower_case=args.do_lower_case) model = model_class.from_pretrained(args.model_name_or_path, config=config) if args.local_rank == 0: torch.distributed.barrier( ) # Make sure only the first process in distributed training will download model & vocab model.to(args.device) logger.info("Training/evaluation parameters %s", args) # Training if args.do_train: train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, data_type='train') global_step, tr_loss = train(args, train_dataset, model, tokenizer) logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained() if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): # Create output directory if needed if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) logger.info("Saving 
model checkpoint to %s", args.output_dir) # Save a trained model, configuration and tokenizer using `save_pretrained()`. # They can then be reloaded using `from_pretrained()` model_to_save = (model.module if hasattr(model, "module") else model ) # Take care of distributed/parallel training model_to_save.save_pretrained(args.output_dir) tokenizer.save_vocabulary(args.output_dir) # Good practice: save your training arguments together with the trained model torch.save(args, os.path.join(args.output_dir, "training_args.bin")) # Evaluation results = {} if args.do_eval and args.local_rank in [-1, 0]: tokenizer = tokenizer_class.from_pretrained( args.output_dir, do_lower_case=args.do_lower_case) checkpoints = [args.output_dir] if args.eval_all_checkpoints: checkpoints = list( os.path.dirname(c) for c in sorted( glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True))) logging.getLogger("pytorch_transformers.modeling_utils").setLevel( logging.WARN) # Reduce logging logger.info("Evaluate the following checkpoints: %s", checkpoints) for checkpoint in checkpoints: global_step = checkpoint.split( "-")[-1] if len(checkpoints) > 1 else "" prefix = checkpoint.split( '/')[-1] if checkpoint.find('checkpoint') != -1 else "" model = model_class.from_pretrained(checkpoint) model.to(args.device) result = evaluate(args, model, tokenizer, prefix=prefix) if global_step: result = { "{}_{}".format(global_step, k): v for k, v in result.items() } results.update(result) output_eval_file = os.path.join(args.output_dir, "eval_results.txt") with open(output_eval_file, "w") as writer: for key in sorted(results.keys()): writer.write("{} = {}\n".format(key, str(results[key]))) if args.do_predict and args.local_rank in [-1, 0]: tokenizer = tokenizer_class.from_pretrained( args.output_dir, do_lower_case=args.do_lower_case) checkpoints = [args.output_dir] if args.predict_checkpoints > 0: checkpoints = list( os.path.dirname(c) for c in sorted( glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))) logging.getLogger("transformers.modeling_utils").setLevel( logging.WARN) # Reduce logging checkpoints = [ x for x in checkpoints if x.split('-')[-1] == str(args.predict_checkpoints) ] logger.info("Predict the following checkpoints: %s", checkpoints) for checkpoint in checkpoints: prefix = checkpoint.split( '/')[-1] if checkpoint.find('checkpoint') != -1 else "" model = model_class.from_pretrained(checkpoint) model.to(args.device) predict(args, model, tokenizer, prefix=prefix)
def train(args, train_dataset, model, tokenizer): """ Train the model """ """ Train the model """ if args.local_rank in [-1, 0]: tb_writer = SummaryWriter() args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = RandomSampler( train_dataset) if args.local_rank == -1 else DistributedSampler( train_dataset) train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size, collate_fn=xlnet_collate_fn if args.model_type in ['xlnet'] else collate_fn) if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = args.max_steps // ( len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: t_total = len( train_dataloader ) // args.gradient_accumulation_steps * args.num_train_epochs args.warmup_steps = int(t_total * args.warmup_proportion) # Prepare optimizer and schedule (linear warmup and decay) no_decay = ['bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ { "params": [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay) ], "weight_decay": args.weight_decay, }, { "params": [ p for n, p in model.named_parameters() if any(nd in n for nd in no_decay) ], "weight_decay": 0.0 }, ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total) if args.fp16: try: from apex import amp except ImportError: raise ImportError( "Please install apex from https://www.github.com/nvidia/apex to use fp16 training." ) model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True) # Train! logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) logger.info( " Total train batch size (w. 
parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1)) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) global_step = 0 epochs_trained = 0 steps_trained_in_current_epoch = 0 # Check if continuing training from a checkpoint if os.path.exists(args.model_name_or_path): # set global_step to global_step of last saved checkpoint from model path try: global_step = int( args.model_name_or_path.split("-")[-1].split("/")[0]) except ValueError: global_step = 0 epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps) steps_trained_in_current_epoch = global_step % ( len(train_dataloader) // args.gradient_accumulation_steps) logger.info( " Continuing training from checkpoint, will skip to saved global_step" ) logger.info(" Continuing training from epoch %d", epochs_trained) logger.info(" Continuing training from global step %d", global_step) logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch) tr_loss, logging_loss = 0.0, 0.0 model.zero_grad() seed_everything( args.seed ) # Added here for reproductibility (even between python 2 and 3) for _ in range(int(args.num_train_epochs)): pbar = ProgressBar(n_total=len(train_dataloader), desc='Training') for step, batch in enumerate(train_dataloader): # Skip past any already trained steps if resuming training if steps_trained_in_current_epoch > 0: steps_trained_in_current_epoch -= 1 continue model.train() batch = tuple(t.to(args.device) for t in batch) inputs = { 'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3] } if args.model_type != 'distilbert': inputs['token_type_ids'] = batch[2] if args.model_type in [ 'bert', 'xlnet', 'albert', 'roberta' ] else None # XLM, DistilBERT don't use segment_ids outputs = model(**inputs) loss = outputs[ 0] # model outputs are always tuple in transformers (see doc) if args.n_gpu > 1: loss = loss.mean( ) # mean() to average on multi-gpu parallel training if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() pbar(step, {'loss': loss.item()}) tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0: if args.fp16: torch.nn.utils.clip_grad_norm_( amp.master_params(optimizer), args.max_grad_norm) else: torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) optimizer.step() scheduler.step() # Update learning rate schedule model.zero_grad() global_step += 1 if args.local_rank in [ -1, 0 ] and args.logging_steps > 0 and global_step % args.logging_steps == 0: print(" ") logs = {} # Log metrics if ( args.local_rank == -1 and args.evaluate_during_training ): # Only evaluate when single GPU otherwise metrics may not average well results = evaluate(args, model, tokenizer) for key, value in results.items(): eval_key = "eval_{}".format(key) logs[eval_key] = value loss_scalar = (tr_loss - logging_loss) / args.logging_steps learning_rate_scalar = scheduler.get_lr()[0] logs["learning_rate"] = learning_rate_scalar logs["loss"] = loss_scalar logging_loss = tr_loss for key, value in logs.items(): tb_writer.add_scalar(key, value, global_step) print(json.dumps({**logs, **{"step": global_step}})) if args.local_rank in [ -1, 0 ] and args.save_steps > 0 and global_step % 
args.save_steps == 0: # Save model checkpoint output_dir = os.path.join( args.output_dir, "checkpoint-{}".format(global_step)) if not os.path.exists(output_dir): os.makedirs(output_dir) model_to_save = ( model.module if hasattr(model, "module") else model ) # Take care of distributed/parallel training model_to_save.save_pretrained(output_dir) tokenizer.save_pretrained(output_dir) torch.save(args, os.path.join(output_dir, "training_args.bin")) logger.info("Saving model checkpoint to %s", output_dir) torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt")) torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt")) logger.info("Saving optimizer and scheduler states to %s", output_dir) print(" ") if 'cuda' in str(args.device): torch.cuda.empty_cache() if args.local_rank in [-1, 0]: tb_writer.close() return global_step, tr_loss / global_step
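Both train() functions rely on get_linear_schedule_with_warmup with warmup_steps = warmup_proportion * t_total. For reference, an equivalent schedule written directly with LambdaLR; a sketch, not the transformers source:

from torch.optim.lr_scheduler import LambdaLR

def linear_warmup_then_decay(optimizer, num_warmup_steps, num_training_steps):
    """LR rises linearly from 0 over the warmup steps, then decays linearly
    back to 0 at num_training_steps (mirrors get_linear_schedule_with_warmup)."""
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return current_step / max(1, num_warmup_steps)
        return max(0.0, (num_training_steps - current_step) /
                        max(1, num_training_steps - num_warmup_steps))
    return LambdaLR(optimizer, lr_lambda)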
def main(): args = get_argparse() if not os.path.exists(args.output_dir): os.mkdir(args.output_dir) args.output_dir = args.output_dir + '{}'.format(args.model_type) if not os.path.exists(args.output_dir): os.mkdir(args.output_dir) time_ = time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime()) init_logger(log_file=args.output_dir + f'/{args.model_type}-{args.task_name}-{time_}.log') if os.path.exists(args.output_dir) and os.listdir( args.output_dir) and args.do_train and not args.overwrite_output_dir: raise ValueError( "Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format( args.output_dir)) # Setup distant debugging if needed if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = torch.cuda.device_count() else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend='nccl') args.n_gpu = 1 args.device = device # Setup logging logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16) # Set seed seed_everything(args.seed) # Prepare GLUE task args.task_name = args.task_name.lower() if args.task_name not in processors: raise ValueError("Task not found: %s" % (args.task_name)) processor = processors[args.task_name]() args.output_mode = output_modes[args.task_name] label_list = processor.get_labels() num_labels = len(label_list) # Load pretrained model and tokenizer if args.local_rank not in [-1, 0]: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab args.model_type = args.model_type.lower() config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] config = config_class.from_pretrained( args.config_name if args.config_name else args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name, cache_dir=args.cache_dir if args.cache_dir else None, ) tokenizer = tokenizer_class.from_pretrained( args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case, cache_dir=args.cache_dir if args.cache_dir else None, ) model = model_class.from_pretrained( args.model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), config=config, cache_dir=args.cache_dir if args.cache_dir else None, ) if args.local_rank == 0: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab model.to(args.device) logger.info("Training/evaluation parameters %s", args) # Training if args.do_train: train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, data_type='train') global_step, tr_loss = train(args, train_dataset, model, tokenizer) logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained() if args.do_train and (args.local_rank == 
-1 or torch.distributed.get_rank() == 0): # Create output directory if needed if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) logger.info("Saving model checkpoint to %s", args.output_dir) # Save a trained model, configuration and tokenizer using `save_pretrained()`. # They can then be reloaded using `from_pretrained()` model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training model_to_save.save_pretrained(args.output_dir) # Good practice: save your training arguments together with the trained model torch.save(args, os.path.join(args.output_dir, 'training_args.bin'))
    with torch.no_grad():
        for batch_idx, (data, target) in enumerate(valid_loader):
            data, target = data.to(device), target.to(device)
            output = model(data)
            loss = loss_fn(output, target).item()  # sum up batch loss
            pred = output.argmax(dim=1, keepdim=True)  # get the index of the max log-probability
            correct = pred.eq(target.view_as(pred)).sum().item()
            valid_loss.update(loss, n=data.size(0))
            valid_acc.update(correct, n=1)
            count += data.size(0)
            pbar(step=batch_idx)
    return {'valid_loss': valid_loss.avg, 'valid_acc': valid_acc.sum / count}


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='CIFAR10')
    parser.add_argument("--model", type=str, default='ResNet50')
    parser.add_argument("--task", type=str, default='image')
    parser.add_argument("--epochs", default=50, type=int)
    parser.add_argument('--batch_size', default=256, type=int)
    parser.add_argument("--seed", default=42, type=int)
    parser.add_argument('--norm_type', default='bn', choices=['bn', 'enb0', 'ens0'])
    args = parser.parse_args()
    seed_everything(args.seed)
    loaders = data_loader(args)
    model = resnet18(args.norm_type)
    device = torch.device("cuda")
    model.to(device)
    train(args, loaders, model)
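valid_loss and valid_acc above are running-statistics trackers. A minimal AverageMeter with the .update()/.avg/.sum interface the snippet assumes; the project's own class may differ:

class AverageMeter:
    """Track a running sum/average, as assumed by valid_loss / valid_acc above."""
    def __init__(self):
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, val, n=1):
        self.sum += val * n
        self.count += n
        self.avg = self.sum / max(1, self.count)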
def main(): parser = argparse.ArgumentParser() ## Required parameters parser.add_argument("--data_dir", default='dataset/car_data', type=str, required=False, help="输入数据文件地址") parser.add_argument("--model_type", default='albert', type=str, required=False, help="模型种类") parser.add_argument("--model_name_or_path", default='prev_trained_model/albert_chinese_small', type=str, required=False, help="模型参数文件地址") parser.add_argument("--task_name", default='car', type=str, required=False, help="那个种类数据" + ", ".join(processors.keys())) parser.add_argument("--output_dir", default='outputs', type=str, required=False, help="输出文件地址") parser.add_argument("--vocab_file", default='prev_trained_model/albert_chinese_small/vocab.txt', type=str) ## Other parameters parser.add_argument("--config_name", default="", type=str, help="配置文件地址") parser.add_argument("--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name") parser.add_argument("--cache_dir", default="", type=str, help="Where do you want to store the pre-trained models downloaded from s3") parser.add_argument("--max_seq_length", default=512, type=int, help="句子最大长度") parser.add_argument("--do_train", action='store_true', help="训练") parser.add_argument("--do_eval", action='store_true', help="验证") parser.add_argument("--do_predict", action='store_true', help="预测") parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.") parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="批量大小") parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int, help="验证批量大小") parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="Adam学习率") parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight deay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.") parser.add_argument("--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. 
Override num_train_epochs.") parser.add_argument("--warmup_proportion", default=0.1, type=float, help="Proportion of training to perform linear learning rate warmup for,E.g., 0.1 = 10% of training.") parser.add_argument('--logging_steps', type=int, default=10, help="Log every X updates steps.") parser.add_argument('--save_steps', type=int, default=1000, help="每多少部保存一次") parser.add_argument("--eval_all_checkpoints",type=str,default='do',# action='store_true', help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number") parser.add_argument("--no_cuda", type=int, default=0, # action='store_true', help="GPU") parser.add_argument('--overwrite_output_dir', action='store_true', help="Overwrite the content of the output directory") parser.add_argument('--overwrite_cache', action='store_true', help="Overwrite the cached training and evaluation sets") parser.add_argument('--seed', type=int, default=42, help="随机种子") parser.add_argument('--fp16', action='store_true', help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit") parser.add_argument('--fp16_opt_level', type=str, default='O1', help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html") parser.add_argument("--local_rank", type=int, default=0, help="For distributed training: local_rank") args = parser.parse_args() if not os.path.exists(args.output_dir): os.mkdir(args.output_dir) type_task = args.model_type + '_' + '{}'.format(args.task_name) if not os.path.exists(os.path.join(args.output_dir, type_task)): os.mkdir(os.path.join(args.output_dir, type_task)) init_logger(log_file=args.output_dir + '/{}-{}.log'.format(args.model_type, args.task_name)) # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = torch.cuda.device_count() else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) # torch.distributed.init_process_group(backend='nccl') args.n_gpu = 1 args.device = device # Setup logging logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16) # Set seed seed_everything(args.seed) # Prepare GLUE task args.task_name = args.task_name.lower() if args.task_name not in processors: raise ValueError("Task not found: %s" % (args.task_name)) processor = processors[args.task_name]() args.output_mode = output_modes[args.task_name] label_list = processor.get_labels() num_labels = len(label_list) args.model_type = args.model_type.lower() config = AlbertConfig.from_pretrained(args.config_name if args.config_name else args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name) tokenizer = tokenization_albert.FullTokenizer(vocab_file=args.vocab_file, do_lower_case=args.do_lower_case, ) model =AlbertForSequenceClassification.from_pretrained(args.model_name_or_path, config=config) #if args.local_rank == 0: # torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab model.to(args.device) logger.info("Training/evaluation parameters %s", args) # Training # args.do_train = True if args.do_train: train_dataset = load_and_cache_examples(args, args.task_name, 
tokenizer, data_type='train') global_step, tr_loss = train(args, train_dataset, model, tokenizer) logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained() if args.do_train:# and (args.local_rank == -1 or torch.distributed.get_rank() == 0): # Create output directory if needed if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) logger.info("Saving model checkpoint to %s", args.output_dir) # Save a trained model, configuration and tokenizer using `save_pretrained()`. # They can then be reloaded using `from_pretrained()` model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training model_to_save.save_pretrained(args.output_dir) # Good practice: save your training arguments together with the trained model torch.save(args, os.path.join(args.output_dir, 'training_args.bin')) # Evaluation # args.do_eval = True results = [] if args.do_eval and args.local_rank in [-1, 0]: tokenizer = tokenization_albert.FullTokenizer(vocab_file=args.vocab_file, do_lower_case=args.do_lower_case, ) checkpoints = [(0,args.output_dir)] if args.eval_all_checkpoints: checkpoints = list( os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))) checkpoints = [(int(checkpoint.split('-')[-1]),checkpoint) for checkpoint in checkpoints if checkpoint.find('checkpoint') != -1] checkpoints = sorted(checkpoints,key =lambda x:x[0]) logger.info("Evaluate the following checkpoints: %s", checkpoints) for _,checkpoint in checkpoints: global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else "" prefix = checkpoint.split('/')[-1] if checkpoint.find('checkpoint') != -1 else "" model =AlbertForSequenceClassification.from_pretrained(checkpoint) model.to(args.device) result = evaluate(args, model, tokenizer, prefix=prefix) results.extend([(k + '_{}'.format(global_step), v) for k, v in result.items()]) output_eval_file = os.path.join(args.output_dir, "checkpoint_eval_results.txt") with open(output_eval_file, "w") as writer: for key,value in results: writer.write("%s = %s\n" % (key, str(value))) # args.do_predict = True predict_results = [] if args.do_predict and args.local_rank in [-1, 0]: tokenizer = tokenization_albert.FullTokenizer(vocab_file=args.vocab_file, do_lower_case=args.do_lower_case, ) # checkpoints_path = os.path.join(args.output_dir, 'checkpoint-4000') checkpoints = [(0, args.output_dir)] if args.eval_all_checkpoints: checkpoints = list( os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))) checkpoints = [(int(checkpoint.split('-')[-1]), checkpoint) for checkpoint in checkpoints if checkpoint.find('checkpoint') != -1] checkpoints = sorted(checkpoints, key=lambda x: x[0]) logger.info("Evaluate the following checkpoints: %s", checkpoints) checkpoints = [checkpoints[-1]] for _, checkpoint in checkpoints: global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else "" prefix = checkpoint.split('/')[-1] if checkpoint.find('checkpoint') != -1 else "" model = AlbertForSequenceClassification.from_pretrained(checkpoint) model.to(args.device) result = predict(args, model, tokenizer, prefix=prefix) predict_results.extend([(k + '_{}'.format(global_step), v) for k, v in result.items()]) output_eval_file = os.path.join(args.output_dir, "checkpoint_eval_results.txt") with open(output_eval_file, "w") as writer: 
for key, value in predict_results: writer.write("%s = %s\n" % (key, str(value)))
    torch.cuda.set_device(args.local_rank)
    device = torch.device("cuda", args.local_rank)
    args.n_gpu = 1
    # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
    torch.distributed.init_process_group(backend='nccl')
    logger.info(
        f"device: {device} , distributed training: {bool(args.local_rank != -1)}, 16-bits training: {args.fp16}"
    )
    if args.gradient_accumulation_steps < 1:
        raise ValueError(
            f"Invalid gradient_accumulation_steps parameter: {args.gradient_accumulation_steps}, should be >= 1"
        )
    args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
    seed_everything(args.seed)
    tokenizer = BertTokenizer.from_pretrained(args.vocab_path,
                                              do_lower_case=args.do_lower_case)
    total_train_examples = samples_per_epoch * args.epochs
    num_train_optimization_steps = int(total_train_examples / args.train_batch_size /
                                       args.gradient_accumulation_steps)
    if args.local_rank != -1:
        num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()
    args.warmup_steps = int(num_train_optimization_steps * args.warmup_proportion)
    bert_config = AlbertConfig.from_pretrained(args.config_path)
    model = AlbertForPreTraining(config=bert_config)
def main(): args = get_argparse().parse_args() if not os.path.exists(args.output_dir): os.mkdir(args.output_dir) args.output_dir = args.output_dir + '{}'.format(args.model_type) if not os.path.exists(args.output_dir): os.mkdir(args.output_dir) time_ = time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime()) init_logger(log_file=args.output_dir + f'/{args.model_type}-{args.task_name}-{time_}.log') if os.path.exists(args.output_dir) and os.listdir( args.output_dir ) and args.do_train and not args.overwrite_output_dir: raise ValueError( "Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome." .format(args.output_dir)) seed_everything(args.seed) # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = torch.cuda.device_count() else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend="nccl") args.n_gpu = 1 args.device = device # Prepare NER task args.task_name = args.task_name.lower() if args.task_name not in processors: raise ValueError("Task not found: %s" % (args.task_name)) processor = processors[args.task_name]() label_list = processor.get_labels() args.id2label = {i: label for i, label in enumerate(label_list)} args.label2id = {label: i for i, label in enumerate(label_list)} args.num_labels = len(label_list) tokenizer = CNerTokenizer.from_pretrained( args.model_name_or_path, do_lower_case=args.do_lower_case, cache_dir=args.cache_dir if args.cache_dir else None, ) args.vocab_size = tokenizer.vocab_size model = BiLSTMForNer(args) model.to(args.device) logger.info("Training/evaluation parameters %s", args) # Training if args.do_train: train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, data_type='train') global_step, tr_loss = train(args, train_dataset, model, tokenizer) logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained() if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): # Create output directory if needed if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) logger.info("Saving model checkpoint to %s", args.output_dir) # Save a trained model, configuration and tokenizer using `save_pretrained()`. 
# They can then be reloaded using `from_pretrained()` model_to_save = (model.module if hasattr(model, "module") else model ) # Take care of distributed/parallel training # model_to_save.save_pretrained(args.output_dir) save_model(model_to_save, args.output_dir) tokenizer.save_vocabulary(args.output_dir) # Good practice: save your training arguments together with the trained model torch.save(args, os.path.join(args.output_dir, "training_args.bin")) # Evaluation results = {} if args.do_eval and args.local_rank in [-1, 0]: tokenizer = CNerTokenizer.from_pretrained( args.output_dir, do_lower_case=args.do_lower_case) checkpoints = [args.output_dir] if args.eval_all_checkpoints: checkpoints = list( os.path.dirname(c) for c in sorted( glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True))) logging.getLogger("pytorch_transformers.modeling_utils").setLevel( logging.WARN) # Reduce logging logger.info("Evaluate the following checkpoints: %s", checkpoints) for checkpoint in checkpoints: global_step = checkpoint.split( "-")[-1] if len(checkpoints) > 1 else "" prefix = checkpoint.split( '/')[-1] if checkpoint.find('checkpoint') != -1 else "" model = load_model(model, checkpoint) model.to(args.device) result = evaluate(args, model, tokenizer, prefix=prefix) if global_step: result = { "{}_{}".format(global_step, k): v for k, v in result.items() } results.update(result) output_eval_file = os.path.join(args.output_dir, "eval_results.txt") with open(output_eval_file, "a") as writer: writer.write("task_name: {}, model_type: {}\n".format( args.task_name, args.model_type)) for key in sorted(results.keys()): writer.write("{} = {}\n".format(key, str(results[key]))) writer.write("\n") if args.do_predict and args.local_rank in [-1, 0]: tokenizer = CNerTokenizer.from_pretrained( args.output_dir, do_lower_case=args.do_lower_case) checkpoints = [args.output_dir] if args.predict_checkpoints > 0: checkpoints = list( os.path.dirname(c) for c in sorted( glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))) logging.getLogger("transformers.modeling_utils").setLevel( logging.WARN) # Reduce logging checkpoints = [ x for x in checkpoints if x.split('-')[-1] == str(args.predict_checkpoints) ] logger.info("Predict the following checkpoints: %s", checkpoints) for checkpoint in checkpoints: prefix = checkpoint.split( '/')[-1] if checkpoint.find('checkpoint') != -1 else "" model = load_model(model, checkpoint) model.to(args.device) predict(args, model, tokenizer, prefix=prefix)
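Because BiLSTMForNer is a plain nn.Module rather than a transformers PreTrainedModel, this script swaps save_pretrained for save_model/load_model helpers. A minimal sketch of what those helpers might look like, assuming the usual state_dict round trip and the pytorch_model.bin filename implied by the WEIGHTS_NAME glob above:

import os
import torch

WEIGHTS_NAME = "pytorch_model.bin"  # assumed to match the WEIGHTS_NAME used in the glob

def save_model(model, output_dir):
    """Persist a plain nn.Module as a state_dict (sketch of save_model above)."""
    os.makedirs(output_dir, exist_ok=True)
    torch.save(model.state_dict(), os.path.join(output_dir, WEIGHTS_NAME))

def load_model(model, checkpoint_dir):
    """Load weights back into an already-constructed model (sketch of load_model above)."""
    state_dict = torch.load(os.path.join(checkpoint_dir, WEIGHTS_NAME), map_location="cpu")
    model.load_state_dict(state_dict)
    return model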
def main():
    args = get_argparse().parse_args()
    # Model output directory
    if not os.path.exists(args.output_dir):
        os.mkdir(args.output_dir)
    args.output_dir = args.output_dir + '{}'.format(args.model_type)
    if not os.path.exists(args.output_dir):
        os.mkdir(args.output_dir)
    # CUDA, GPU
    if torch.cuda.is_available() and not args.no_cuda:
        args.device = torch.device("cuda:0")
    else:
        args.device = torch.device("cpu")
    # Log the parameters
    time_ = time.strftime("%Y-%m-%d", time.localtime())
    init_logger(log_file=args.output_dir + f'/{args.model_type}-{args.task_name}-{time_}.log')
    logger.info("=" * 20 + " args " + "=" * 20)
    for para in args.__dict__:
        msg = para + " = " + str(args.__dict__[para])
        logger.info(msg)
    # Set seed
    seed_everything(args.seed)
    # Prepare NER task
    processor = NerProcessor()
    label_list = processor.get_labels()
    args.id2label = {i: label for i, label in enumerate(label_list)}
    args.label2id = {label: i for i, label in enumerate(label_list)}
    num_labels = len(label_list)
    num_labels = int((num_labels + 1) / 2)  # collapse B-/I- tags to entity-level labels
    pad_token_label_id = CrossEntropyLoss().ignore_index  # -100
    # Load pretrained model and tokenizer
    args.model_type = args.model_type.lower()
    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path,
                                          num_labels=num_labels,
                                          cache_dir=args.cache_dir if args.cache_dir else None)
    tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
                                                do_lower_case=args.do_lower_case,
                                                cache_dir=args.cache_dir if args.cache_dir else None)
    setattr(config, 'soft_label', args.soft_label)
    model = model_class(config=config)
    # Training
    if args.do_train:
        if args.continue_train:
            model = model_class.from_pretrained(args.continue_train_checkpoint, config=config)
            print(f"Continue training from {args.continue_train_checkpoint}")
        # Base pretrained encoder
        elif args.model_type.lower() == "electra":
            model.BaseModel = ElectraModel.from_pretrained(args.model_name_or_path)
            logger.info(f"Loading Electra from {args.model_name_or_path}...")
        elif args.model_type.lower() == "bert":
            model.BaseModel = BertModel.from_pretrained(args.model_name_or_path)
            logger.info(f"Loading Bert from {args.model_name_or_path}...")
        elif args.model_type.lower() == "albert":
            model.BaseModel = AlbertModel.from_pretrained(args.model_name_or_path)
            logger.info(f"Loading AlBert from {args.model_name_or_path}...")
        print(model)
        model.to(args.device)
        train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, label_list,
                                                pad_token_label_id, data_type='train')
        global_step, lr_loss = train(args, train_dataset, model, tokenizer, label_list, pad_token_label_id)
        logger.info(" global_step = %s, average loss = %s", global_step, lr_loss)
        # Save
        logger.info("Saving model checkpoint to %s", args.output_dir)
        model.save_pretrained(args.output_dir)
        tokenizer.save_vocabulary(args.output_dir)
        torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
    # Test set
    if args.do_predict:
        tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
        checkpoints = [args.output_dir]
        logger.info("Predict the following checkpoints: %s", checkpoints)
        for checkpoint in checkpoints:
            prefix = checkpoint.split('/')[-1] if checkpoint.find('checkpoint') != -1 else ""
            model = model_class.from_pretrained(checkpoint, config=config)
            model.to(args.device)
            predict(args, model, tokenizer, label_list, pad_token_label_id, prefix=prefix)
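The `int((num_labels + 1) / 2)` line collapses a BIO tag set to entity-level classes for the span model: each entity type contributes a B- and an I- tag, plus a single O. A small worked example:

bio_labels = ['O', 'B-PER', 'I-PER', 'B-ORG', 'I-ORG']       # 5 BIO tags (illustrative)
num_span_labels = (len(bio_labels) + 1) // 2                  # (5 + 1) // 2 = 3
entity_labels = ['O'] + sorted({lab.split('-', 1)[1] for lab in bio_labels if '-' in lab})
print(num_span_labels, entity_labels)                         # 3 ['O', 'ORG', 'PER']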
def main(): parser = ArgumentParser() ## Required parameters parser.add_argument( "--data_dir", default="dataset", type=str, help= "The input data dir. Should contain the .tsv files (or other data files) for the task." ) parser.add_argument("--config_path", default="prev_trained_model/electra_small/config.json", type=str) parser.add_argument("--vocab_path", default="prev_trained_model/electra_small/vocab.txt", type=str) parser.add_argument( "--output_dir", default="outputs", type=str, help= "The output directory where the model predictions and checkpoints will be written." ) parser.add_argument("--model_path", default='prev_trained_model/electra_small', type=str) parser.add_argument('--data_name', default='electra', type=str) parser.add_argument( "--file_num", type=int, default=10, help="Number of dynamic masking to pregenerate (with different masks)") parser.add_argument( "--reduce_memory", action="store_true", help= "Store training data as on-disc memmaps to massively reduce memory usage" ) parser.add_argument("--epochs", type=int, default=4, help="Number of epochs to train for") parser.add_argument( "--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.") parser.add_argument('--num_eval_steps', default=100) parser.add_argument('--num_save_steps', default=2000) parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") parser.add_argument("--weight_decay", default=0.01, type=float, help="Weight deay if we apply some.") parser.add_argument("--no_cuda", action='store_true', help="Whether not to use CUDA when available") parser.add_argument( '--gradient_accumulation_steps', type=int, default=1, help= "Number of updates steps to accumulate before performing a backward/update pass." ) parser.add_argument("--train_batch_size", default=128, type=int, help="Total batch size for training.") parser.add_argument("--gen_weight", default=1.0, type=float, help='masked language modeling / generator loss') parser.add_argument("--disc_weight", default=50, type=float, help='discriminator loss') parser.add_argument('--untied_generator', action='store_true', help='tie all generator/discriminator weights?') parser.add_argument('--temperature', default=0, type=float, help='temperature for sampling from generator') parser.add_argument( '--loss_scale', type=float, default=0, help= "Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n" "0 (default value): dynamic loss scaling.\n" "Positive power of 2: static loss scaling value.\n") parser.add_argument("--warmup_proportion", default=0.1, type=float, help="Linear warmup over warmup_steps.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument('--max_grad_norm', default=1.0, type=float) parser.add_argument("--learning_rate", default=0.000176, type=float, help="The initial learning rate for Adam.") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument( '--fp16_opt_level', type=str, default='O2', help= "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." 
"See details at https://nvidia.github.io/apex/amp.html") parser.add_argument( '--fp16', action='store_true', help="Whether to use 16-bit float precision instead of 32-bit") parser.add_argument('--continue_train', default='', help="continue train path") args = parser.parse_args() args.data_dir = Path(args.data_dir) args.output_dir = Path(args.output_dir) pregenerated_data = args.data_dir / "corpus/train" init_logger(log_file=str(args.output_dir / "train_albert_model.log")) assert pregenerated_data.is_dir(), \ "--pregenerated_data should point to the folder of files made by prepare_lm_data_mask.py!" samples_per_epoch = 0 for i in range(args.file_num): data_file = pregenerated_data / f"{args.data_name}_file_{i}.json" metrics_file = pregenerated_data / f"{args.data_name}_file_{i}_metrics.json" if data_file.is_file() and metrics_file.is_file(): metrics = json.loads(metrics_file.read_text()) samples_per_epoch += metrics['num_training_examples'] else: if i == 0: exit("No training data was found!") print( f"Warning! There are fewer epochs of pregenerated data ({i}) than training epochs ({args.epochs})." ) print( "This script will loop over the available data, but training diversity may be negatively impacted." ) break logger.info(f"samples_per_epoch: {samples_per_epoch}") if args.local_rank == -1 or args.no_cuda: device = torch.device(f"cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) args.n_gpu = 1 # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.distributed.init_process_group(backend='nccl') logger.info( f"device: {device} , distributed training: {bool(args.local_rank != -1)}, 16-bits training: {args.fp16}" ) if args.gradient_accumulation_steps < 1: raise ValueError( f"Invalid gradient_accumulation_steps parameter: {args.gradient_accumulation_steps}, should be >= 1" ) args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps seed_everything(args.seed) tokenizer = BertTokenizer.from_pretrained(args.vocab_path, do_lower_case=args.do_lower_case) total_train_examples = samples_per_epoch * args.epochs num_train_optimization_steps = int(total_train_examples / args.train_batch_size / args.gradient_accumulation_steps) if args.local_rank != -1: num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size( ) args.warmup_steps = int(num_train_optimization_steps * args.warmup_proportion) bert_config = ElectraConfig.from_pretrained(args.config_path, gen_weight=args.gen_weight, temperature=args.temperature, disc_weight=args.disc_weight) model = ElectraForPreTraining(config=bert_config) if args.continue_train: print(f"Continue train from {args.continue_train}") model = model.from_pretrained(args.continue_train) elif args.model_path: print("载入预训练模型") model.generator = AutoModel.from_pretrained(args.model_path + "/G") model.electra = AutoModel.from_pretrained(args.model_path + "/D") # print(model) model.to(device) # Prepare optimizer param_optimizer = list(model.named_parameters()) no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [{ 'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay }, { 'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0 }] optimizer = AdamW(params=optimizer_grouped_parameters, 
lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=num_train_optimization_steps) # optimizer = Lamb(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) # if args.model_path: # optimizer.load_state_dict(torch.load(args.model_path + "/optimizer.bin")) if args.fp16: try: from apex import amp except ImportError: raise ImportError( "Please install apex from https://www.github.com/nvidia/apex to use fp16 training." ) model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) if args.n_gpu > 1: # model = BalancedDataParallel(gpu0_bsz=32,dim=0,model).to(device) model = torch.nn.DataParallel(model) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[args.local_rank], output_device=args.local_rank) global_step = 0 g_metric = LMAccuracy() d_metric = AccuracyThresh() tr_g_acc = AverageMeter() tr_d_acc = AverageMeter() tr_loss = AverageMeter() tr_g_loss = AverageMeter() tr_d_loss = AverageMeter() train_logs = {} logger.info("***** Running training *****") logger.info(f" Num examples = {total_train_examples}") logger.info(f" Batch size = {args.train_batch_size}") logger.info(f" Num steps = {num_train_optimization_steps}") logger.info(f" warmup_steps = {args.warmup_steps}") logger.info(f" Num workable gpus = {args.n_gpu}") start_time = time.time() seed_everything(args.seed) # Added here for reproducibility for epoch in range(args.epochs): for idx in range(args.file_num): epoch_dataset = PregeneratedDataset( file_id=idx, training_path=pregenerated_data, tokenizer=tokenizer, reduce_memory=args.reduce_memory, data_name=args.data_name) if args.local_rank == -1: train_sampler = RandomSampler(epoch_dataset) else: train_sampler = DistributedSampler(epoch_dataset) train_dataloader = DataLoader(epoch_dataset, sampler=train_sampler, batch_size=args.train_batch_size) model.train() nb_tr_examples, nb_tr_steps = 0, 0 for step, batch in enumerate(train_dataloader): batch = tuple(t.to(device) for t in batch) input_ids, input_mask, segment_ids, lm_label_ids = batch outputs = model(input_ids=input_ids, token_type_ids=segment_ids, attention_mask=input_mask, masked_lm_labels=lm_label_ids) loss, g_loss, d_loss, d_logits, g_logits, is_replaced_label = outputs active_indices = input_mask.view(-1) == 1 active_logits = d_logits.view(-1)[active_indices] active_labels = is_replaced_label.view(-1)[active_indices] g_metric(logits=g_logits.view(-1, bert_config.vocab_size), target=lm_label_ids.view(-1)) d_metric(logits=active_logits.view(-1, 1), target=active_labels) if args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu. 
g_loss = g_loss.mean() d_loss = d_loss.mean() if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() nb_tr_steps += 1 tr_g_acc.update(g_metric.value(), n=input_ids.size(0)) tr_d_acc.update(d_metric.value(), n=input_ids.size(0)) tr_loss.update(loss.item(), n=1) tr_g_loss.update(g_loss.item(), n=1) tr_d_loss.update(d_loss.item(), n=1) if (step + 1) % args.gradient_accumulation_steps == 0: if args.fp16: torch.nn.utils.clip_grad_norm_( amp.master_params(optimizer), args.max_grad_norm) else: torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) scheduler.step() optimizer.step() optimizer.zero_grad() global_step += 1 if global_step % args.num_eval_steps == 0: now = time.time() eta = now - start_time if eta > 3600: eta_format = ('%d:%02d:%02d' % (eta // 3600, (eta % 3600) // 60, eta % 60)) elif eta > 60: eta_format = '%d:%02d' % (eta // 60, eta % 60) else: eta_format = '%ds' % eta train_logs['loss'] = tr_loss.avg train_logs['g_acc'] = tr_g_acc.avg train_logs['d_acc'] = tr_d_acc.avg train_logs['g_loss'] = tr_g_loss.avg train_logs['d_loss'] = tr_d_loss.avg show_info = f'[Training]:[{epoch}/{args.epochs}]{global_step}/{num_train_optimization_steps} ' \ f'- ETA: {eta_format}' + "-".join( [f' {key}: {value:.4f} ' for key, value in train_logs.items()]) logger.info(show_info) tr_g_acc.reset() tr_d_acc.reset() tr_loss.reset() tr_g_loss.reset() tr_d_loss.reset() start_time = now if global_step % args.num_save_steps == 0: if args.local_rank in [-1, 0] and args.num_save_steps > 0: # Save model checkpoint output_dir = args.output_dir / f'lm-checkpoint-{global_step}' if not output_dir.exists(): output_dir.mkdir() # save model model_to_save = model.module if hasattr( model, 'module' ) else model # Take care of distributed/parallel training model_to_save.save_pretrained(str(output_dir)) torch.save(args, str(output_dir / 'training_args.bin')) logger.info("Saving model checkpoint to %s", output_dir) model.module.generator.save_pretrained( str(output_dir / "G")) logger.info("Saving generator model checkpoint to %s", output_dir / "G") model.module.electra.save_pretrained( str(output_dir / "D")) logger.info("Saving electra model checkpoint to %s", output_dir / "D") torch.save(optimizer.state_dict(), str(output_dir / "optimizer.bin")) # save config output_config_file = output_dir / CONFIG_NAME output_config_file_D = output_dir / "D" / CONFIG_NAME output_config_file_G = output_dir / "G" / CONFIG_NAME with open(str(output_config_file), 'w') as f: f.write(model_to_save.config.to_json_string()) with open(str(output_config_file_D), 'w') as f: f.write( model.module.electra.config.to_json_string()) with open(str(output_config_file_G), 'w') as f: f.write( model.module.generator.config.to_json_string()) # save vocab tokenizer.save_vocabulary(output_dir)
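# Note: the accumulation branch above calls scheduler.step() before optimizer.step().
# Since PyTorch 1.1 the learning-rate scheduler is expected to step after the optimizer,
# otherwise the first value of the schedule is effectively skipped and PyTorch emits a
# UserWarning. A self-contained sketch of the conventional ordering with gradient
# accumulation (toy model and shapes, illustrative only):
import torch

def sketch_update_order(accum_steps=2, total_batches=8):
    model = torch.nn.Linear(4, 2)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
    scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lambda step: 0.95 ** step)
    for step in range(total_batches):
        loss = model(torch.randn(3, 4)).sum() / accum_steps
        loss.backward()
        if (step + 1) % accum_steps == 0:
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
            optimizer.step()       # apply the accumulated gradients first
            scheduler.step()       # then advance the warmup/decay schedule
            optimizer.zero_grad()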
def train(args, train_dataset, model, tokenizer, label_list, pad_token_label_id): """ Train the model """ # 载入数据 args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = RandomSampler(train_dataset) train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size, collate_fn=collate_fn) # 总训练步数 t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs # 优化器 if args.optimizer.lower() == "adamw": no_decay = ["bias", "LayerNorm.weight"] # bert_parameters = eval('model.{}'.format(args.model_type)).named_parameters() base_model_param_optimizer = list(model.BaseModel.named_parameters()) start_parameters = model.start_fc.named_parameters() end_parameters = model.end_fc.named_parameters() args.bert_lr = args.bert_lr if args.bert_lr else args.learning_rate args.start_lr = args.start_lr if args.start_lr else args.learning_rate args.end_lr = args.end_lr if args.end_lr else args.learning_rate optimizer_grouped_parameters = [ {"params": [p for n, p in base_model_param_optimizer if not any(nd in n for nd in no_decay)], "weight_decay": args.weight_decay, "lr": args.bert_lr}, {"params": [p for n, p in base_model_param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0, "lr": args.bert_lr}, {"params": [p for n, p in start_parameters if not any(nd in n for nd in no_decay)], "weight_decay": args.weight_decay, "lr": args.start_lr}, {"params": [p for n, p in start_parameters if any(nd in n for nd in no_decay)], "weight_decay": 0.0, "lr": args.start_lr}, {"params": [p for n, p in end_parameters if not any(nd in n for nd in no_decay)], "weight_decay": args.weight_decay, "lr": args.end_lr}, {"params": [p for n, p in end_parameters if any(nd in n for nd in no_decay)], "weight_decay": 0.0, "lr": args.end_lr} ] if "lstm" in args.model_type.lower(): lstm_param_optimizer = list(model.Bilstm.named_parameters()) optimizer_grouped_parameters.extend([{'params': [p for n, p in lstm_param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay, 'lr': args.crf_learning_rate}, {'params': [p for n, p in lstm_param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0,'lr': args.crf_learning_rate}]) optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) else: optimizer = Lamb(model.parameters()) # 学习率 args.warmup_steps = int(t_total * args.warmup_proportion) # 学习率预热 scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total) # 继续训练 if args.continue_train and \ os.path.isfile(os.path.join(args.continue_train_checkpoint, "optimizer.pt")) and \ os.path.isfile(os.path.join(args.continue_train_checkpoint, "scheduler.pt")): # Load in optimizer and scheduler states optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt"))) scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt"))) if args.fp16: try: from apex import amp logger.info("using fp16 !!!") except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # 多GPU训练 (should be after apex fp16 initialization) if args.n_gpu > 1: model = torch.nn.DataParallel(model) # adversarial_training if args.adv_training == 'fgm': adv = FGM(model=model, param_name='word_embeddings') elif args.adv_training == 'pgd': adv = 
PGD(model=model, param_name='word_embeddings') # Train! logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps, ) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) global_step = 0 tr_loss, logging_loss = 0.0, 0.0 model.zero_grad() seed_everything(args.seed) # 复现 for _ in range(int(args.num_train_epochs)): logger.info(f"############### Epoch_{_} ###############") pbar = ProgressBar(n_total=len(train_dataloader), desc='Training') for step, batch in enumerate(train_dataloader): model.train() batch = tuple(t.to(args.device) for t in batch) inputs = {"input_ids": batch[0], "attention_mask": batch[1], "start_positions": batch[5], "end_positions": batch[6]} if args.model_type != "distilbert": # XLM and RoBERTa don"t use segment_ids inputs["token_type_ids"] = (batch[2] if args.model_type in ["bert", "xlnet"] else None) outputs = model(**inputs) loss = outputs[0] if args.n_gpu > 1: loss = loss.mean() if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() if args.adv_training: adv.adversarial_training(args, inputs, optimizer) pbar(step, {'loss': loss.item()}) tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0: if args.fp16: torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) scheduler.step() optimizer.step() model.zero_grad() global_step += 1 if args.logging_steps > 0 and global_step % args.logging_steps == 0: # 验证集 logger.info("\n global_step: %s", global_step) logger.info("average tr_loss: %s", tr_loss/global_step) evaluate(args, model, tokenizer, label_list, pad_token_label_id) if args.save_steps > 0 and global_step % args.save_steps == 0: # 保存模型 logger.info("global_step: %s 模型已保存!", global_step) output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step)) logger.info("Saving model checkpoint to %s", output_dir) if not os.path.exists(output_dir): os.makedirs(output_dir) model.save_pretrained(output_dir) torch.save(args, os.path.join(output_dir, "training_args.bin")) tokenizer.save_vocabulary(output_dir) torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt")) torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt")) logger.info("\n") if 'cuda' in str(args.device): torch.cuda.empty_cache() return global_step, tr_loss / global_step
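# The adversarial-training hook above (adv = FGM(...)/PGD(...) with
# param_name='word_embeddings') is only referenced here; its implementation lives
# elsewhere in the repo. A minimal FGM sketch of the usual formulation -- perturb the
# embedding weights along the gradient direction, run a second forward/backward pass,
# then restore the weights. The class and method names below are illustrative, not the
# repo's actual API:
import torch

class SimpleFGM:
    def __init__(self, model, param_name="word_embeddings", epsilon=1.0):
        self.model = model
        self.param_name = param_name
        self.epsilon = epsilon
        self.backup = {}

    def attack(self):
        for name, param in self.model.named_parameters():
            if param.requires_grad and self.param_name in name and param.grad is not None:
                self.backup[name] = param.data.clone()
                norm = torch.norm(param.grad)
                if norm != 0 and not torch.isnan(norm):
                    param.data.add_(self.epsilon * param.grad / norm)

    def restore(self):
        for name, param in self.model.named_parameters():
            if name in self.backup:
                param.data = self.backup[name]
        self.backup = {}

# Typical use after the ordinary loss.backward():
#   fgm.attack(); model(**inputs)[0].backward(); fgm.restore()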
def train(args, train_dataset, model, tokenizer): """ Train the model """ args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = RandomSampler( train_dataset) if args.local_rank == -1 else DistributedSampler( train_dataset) train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size, collate_fn=collate_fn) if args.max_steps > 0: num_training_steps = args.max_steps args.num_train_epochs = args.max_steps // ( len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: num_training_steps = len( train_dataloader ) // args.gradient_accumulation_steps * args.num_train_epochs args.warmup_steps = int(num_training_steps * args.warmup_proportion) # Prepare optimizer and schedule (linear warmup and decay) no_decay = ['bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [{ 'params': [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay) ], 'weight_decay': args.weight_decay }, { 'params': [ p for n, p in model.named_parameters() if any(nd in n for nd in no_decay) ], 'weight_decay': 0.0 }] # optimizer = Lamb(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) optimizer = AdamW(params=optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=num_training_steps) if args.fp16: try: from apex import amp except ImportError: raise ImportError( "Please install apex from https://www.github.com/nvidia/apex to use fp16 training." ) model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True) # Train! logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) logger.info( " Total train batch size (w. 
parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1)) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", num_training_steps) global_step = 0 tr_loss, logging_loss = 0.0, 0.0 model.zero_grad() seed_everything( args.seed ) # Added here for reproductibility (even between python 2 and 3) for _ in range(int(args.num_train_epochs)): pbar = ProgressBar(n_total=len(train_dataloader), desc='Training') for step, batch in enumerate(train_dataloader): model.train() batch = tuple(t.to(args.device) for t in batch) inputs = { 'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3], 'token_type_ids': batch[2] } outputs = model(**inputs) loss = outputs[ 0] # model outputs are always tuple in transformers (see doc) if args.n_gpu > 1: loss = loss.mean( ) # mean() to average on multi-gpu parallel training if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: loss.backward() torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0: optimizer.step() scheduler.step() # Update learning rate schedule model.zero_grad() global_step += 1 if args.local_rank in [ -1, 0 ] and args.logging_steps > 0 and global_step % args.logging_steps == 0: # Log metrics if args.local_rank == -1: # Only evaluate when single GPU otherwise metrics may not average well evaluate(args, model, tokenizer) if args.local_rank in [ -1, 0 ] and args.save_steps > 0 and global_step % args.save_steps == 0: # Save model checkpoint output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step)) if not os.path.exists(output_dir): os.makedirs(output_dir) model_to_save = model.module if hasattr( model, 'module' ) else model # Take care of distributed/parallel training model_to_save.save_pretrained(output_dir) torch.save(args, os.path.join(output_dir, 'training_args.bin')) logger.info("Saving model checkpoint to %s", output_dir) pbar(step, {'loss': loss.item()}) print(" ") if 'cuda' in str(args.device): torch.cuda.empty_cache() return global_step, tr_loss / global_step
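# This train() relies on NVIDIA apex.amp for mixed precision. PyTorch's native
# torch.cuda.amp provides the same pattern without the extra dependency; the sketch
# below is that native equivalent, not what this script actually uses:
import torch

def native_amp_update(model, optimizer, scaler, batch, max_grad_norm=1.0):
    """One mixed-precision update with torch.cuda.amp in place of apex.amp."""
    with torch.cuda.amp.autocast():
        loss = model(**batch)[0]
    scaler.scale(loss).backward()
    scaler.unscale_(optimizer)                     # so clipping sees unscaled gradients
    torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)
    scaler.step(optimizer)
    scaler.update()
    optimizer.zero_grad()

# scaler = torch.cuda.amp.GradScaler()  # create once, before the training loop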
def main(): if not os.path.exists(args.output_dir): os.mkdir(args.output_dir) # type_task = args.model_type + '_' + '{}'.format(args.task_name) # if not os.path.exists(os.path.join(args.output_dir, type_task)): # os.mkdir(os.path.join(args.output_dir, type_task)) init_logger(log_file=args.output_dir + '/{}-{}.log'.format(args.model_type, args.task_name)) if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) args.n_gpu = 1 args.device = device # logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", # args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16) seed_everything(args.seed) args.task_name = args.task_name.lower() if args.task_name not in processors: raise ValueError("Task not found: %s" % (args.task_name)) processor = processors[args.task_name] args.output_mode = output_modes[args.task_name] label_list = processor.get_labels() num_labels = len(label_list) # if args.local_rank not in [-1, 0]: # torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab args.model_type = args.model_type.lower() config = AlbertConfig.from_pretrained( args.config_name if args.config_name else args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name) tokenizer = tokenization_albert.FullTokenizer( vocab_file=args.vocab_file, do_lower_case=args.do_lower_case, ) model = AlbertForSequenceClassification.from_pretrained( args.model_name_or_path, config=config) model.to(args.device) logger.info("Training/evaluation parameters %s", args) # Training args.do_train = True if args.do_train: train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, data_type='train') global_step, tr_loss = train(args, train_dataset, model, tokenizer) logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) if args.do_train: if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) logger.info("Saving model checkpoint to %s", args.output_dir) model_to_save = model.module if hasattr( model, 'module') else model # Take care of distributed/parallel training model_to_save.save_pretrained(args.output_dir) torch.save(args, os.path.join(args.output_dir, 'training_args.bin')) # Evaluation args.do_eval = True results = [] if args.do_eval and args.local_rank in [-1, 0]: tokenizer = tokenization_albert.FullTokenizer( vocab_file=args.vocab_file, do_lower_case=args.do_lower_case, ) checkpoints = [(0, args.output_dir)] if args.eval_all_checkpoints: checkpoints = list( os.path.dirname(c) for c in sorted( glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))) checkpoints = [(int(checkpoint.split('-')[-1]), checkpoint) for checkpoint in checkpoints if checkpoint.find('checkpoint') != -1] checkpoints = sorted(checkpoints, key=lambda x: x[0]) logger.info("Evaluate the following checkpoints: %s", checkpoints) for _, checkpoint in checkpoints: global_step = checkpoint.split( '-')[-1] if len(checkpoints) > 1 else "" prefix = checkpoint.split( '/')[-1] if checkpoint.find('checkpoint') != -1 else "" model = AlbertForSequenceClassification.from_pretrained(checkpoint) model.to(args.device) result = evaluate(args, model, tokenizer, prefix=prefix) results.extend([(k + '_{}'.format(global_step), v) for k, v in 
result.items()]) output_eval_file = os.path.join(args.output_dir, "checkpoint_eval_results.txt") with open(output_eval_file, "w") as writer: for key, value in results: writer.write("%s = %s\n" % (key, str(value))) args.do_predict = True predict_results = [] if args.do_predict and args.local_rank in [-1, 0]: tokenizer = tokenization_albert.FullTokenizer( vocab_file=args.vocab_file, do_lower_case=args.do_lower_case, ) checkpoints = [(0, args.output_dir)] if args.eval_all_checkpoints: checkpoints = list( os.path.dirname(c) for c in sorted( glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))) checkpoints = [(int(checkpoint.split('-')[-1]), checkpoint) for checkpoint in checkpoints if checkpoint.find('checkpoint') != -1] checkpoints = sorted(checkpoints, key=lambda x: x[0]) logger.info("Predict with the following checkpoints: %s", checkpoints) checkpoints = [checkpoints[-1]] for _, checkpoint in checkpoints: global_step = checkpoint.split( '-')[-1] if len(checkpoints) > 1 else "" prefix = checkpoint.split( '/')[-1] if checkpoint.find('checkpoint') != -1 else "" model = AlbertForSequenceClassification.from_pretrained(checkpoint) model.to(args.device) result = predict(args, model, tokenizer, prefix=prefix) predict_results.extend([(k + '_{}'.format(global_step), v) for k, v in result.items()]) output_predict_file = os.path.join(args.output_dir, "checkpoint_predict_results.txt") with open(output_predict_file, "w") as writer: for key, value in predict_results: writer.write("%s = %s\n" % (key, str(value)))
def main(): parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--task_name", default=None, type=str, required=True, help="The name of the task to train selected in the list: " + ", ".join(processors.keys())) parser.add_argument( "--data_dir", default=None, type=str, required=True, help= "The input data dir. Should contain the training files for the CoNLL-2003 NER task.", ) parser.add_argument( "--model_type", default=None, type=str, required=True, help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()), ) parser.add_argument( "--model_name_or_path", default=None, type=str, required=True, help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS), ) parser.add_argument( "--output_dir", default=None, type=str, required=True, help= "The output directory where the model predictions and checkpoints will be written.", ) # Other parameters parser.add_argument('--markup', default='bios', type=str, choices=['bios', 'bio']) parser.add_argument('--loss_type', default='ce', type=str, choices=['lsr', 'focal', 'ce']) parser.add_argument( "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name") parser.add_argument( "--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--cache_dir", default="", type=str, help= "Where do you want to store the pre-trained models downloaded from s3", ) parser.add_argument( "--train_max_seq_length", default=128, type=int, help= "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded.", ) parser.add_argument( "--eval_max_seq_length", default=512, type=int, help= "The maximum total input sequence length after tokenization. 
Sequences longer " "than this will be truncated, sequences shorter will be padded.", ) parser.add_argument("--do_train", action="store_true", help="Whether to run training.") parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.") parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.") parser.add_argument( "--evaluate_during_training", action="store_true", help="Whether to run evaluation during training at each logging step.", ) parser.add_argument( "--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model.") parser.add_argument("--do_adv", action="store_true", help="Whether to adversarial training.") parser.add_argument('--adv_epsilon', default=1.0, type=float, help="Epsilon for adversarial.") parser.add_argument('--adv_name', default='word_embeddings', type=str, help="name for adversarial layer.") parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.") parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.") parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help= "Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--weight_decay", default=0.01, type=float, help="Weight decay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.") parser.add_argument( "--max_steps", default=-1, type=int, help= "If > 0: set total number of training steps to perform. Override num_train_epochs.", ) parser.add_argument( "--warmup_proportion", default=0.1, type=float, help= "Proportion of training to perform linear learning rate warmup for,E.g., 0.1 = 10% of training." ) parser.add_argument("--logging_steps", type=int, default=50, help="Log every X updates steps.") parser.add_argument("--save_steps", type=int, default=50, help="Save checkpoint every X updates steps.") parser.add_argument( "--eval_all_checkpoints", action="store_true", help= "Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number", ) parser.add_argument( '--predict_checkpoints', action="store_true", help= "Predict checkpoints starting with the same prefix as model_name ending and ending with step number", ) parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available") parser.add_argument("--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory") parser.add_argument( "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets") parser.add_argument("--seed", type=int, default=42, help="random seed for initialization") parser.add_argument( "--fp16", action="store_true", help= "Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", ) parser.add_argument( "--fp16_opt_level", type=str, default="O1", help= "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." 
"See details at https://nvidia.github.io/apex/amp.html", ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument("--server_ip", type=str, default="", help="For distant debugging.") parser.add_argument("--server_port", type=str, default="", help="For distant debugging.") args = parser.parse_args() if not os.path.exists(args.output_dir): os.mkdir(args.output_dir) args.output_dir = args.output_dir + '{}'.format(args.model_type) if not os.path.exists(args.output_dir): os.mkdir(args.output_dir) time_ = time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime()) init_logger(log_file=args.output_dir + f'/{args.model_type}-{args.task_name}-{time_}.log') if os.path.exists(args.output_dir) and os.listdir( args.output_dir ) and args.do_train and not args.overwrite_output_dir: raise ValueError( "Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome." .format(args.output_dir)) # Setup distant debugging if needed if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = torch.cuda.device_count() else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend="nccl") args.n_gpu = 1 args.device = device logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16, ) # Set seed seed_everything(args.seed) # Prepare NER task args.task_name = args.task_name.lower() if args.task_name not in processors: raise ValueError("Task not found: %s" % (args.task_name)) processor = processors[args.task_name]() label_list = processor.get_labels() args.id2label = {i: label for i, label in enumerate(label_list)} args.label2id = {label: i for i, label in enumerate(label_list)} num_labels = len(label_list) # Load pretrained model and tokenizer if args.local_rank not in [-1, 0]: torch.distributed.barrier( ) # Make sure only the first process in distributed training will download model & vocab args.model_type = args.model_type.lower() config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] config = config_class.from_pretrained( args.config_name if args.config_name else args.model_name_or_path, num_labels=num_labels, loss_type=args.loss_type, cache_dir=args.cache_dir if args.cache_dir else None, ) tokenizer = tokenizer_class.from_pretrained( args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case, cache_dir=args.cache_dir if args.cache_dir else None, ) model = model_class.from_pretrained( args.model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), config=config, cache_dir=args.cache_dir if args.cache_dir else None, ) if args.local_rank == 0: torch.distributed.barrier( ) # Make sure only the first process in distributed training will download model & vocab model.to(args.device) logger.info("Training/evaluation parameters %s", args) # Training if 
args.do_train: train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, data_type='train') global_step, tr_loss = train(args, train_dataset, model, tokenizer) logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained() if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): # Create output directory if needed if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) logger.info("Saving model checkpoint to %s", args.output_dir) # Save a trained model, configuration and tokenizer using `save_pretrained()`. # They can then be reloaded using `from_pretrained()` model_to_save = (model.module if hasattr(model, "module") else model ) # Take care of distributed/parallel training model_to_save.save_pretrained(args.output_dir) tokenizer.save_vocabulary(args.output_dir) # Good practice: save your training arguments together with the trained model torch.save(args, os.path.join(args.output_dir, "training_args.bin")) # Evaluation results = {} if args.do_eval and args.local_rank in [-1, 0]: tokenizer = tokenizer_class.from_pretrained( args.output_dir, do_lower_case=args.do_lower_case) checkpoints = [args.output_dir] if args.eval_all_checkpoints: checkpoints = list( os.path.dirname(c) for c in sorted( glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True))) logging.getLogger("pytorch_transformers.modeling_utils").setLevel( logging.WARN) # Reduce logging logger.info("Evaluate the following checkpoints: %s", checkpoints) for checkpoint in checkpoints: global_step = checkpoint.split( "-")[-1] if len(checkpoints) > 1 else "" prefix = checkpoint.split( '/')[-1] if checkpoint.find('checkpoint') != -1 else "" model = model_class.from_pretrained(checkpoint) model.to(args.device) result = evaluate(args, model, tokenizer, prefix=prefix) if global_step: result = { "{}_{}".format(global_step, k): v for k, v in result.items() } results.update(result) output_eval_file = os.path.join(args.output_dir, "eval_results.txt") with open(output_eval_file, "w") as writer: for key in sorted(results.keys()): writer.write("{} = {}\n".format(key, str(results[key]))) if args.do_predict and args.local_rank in [-1, 0]: tokenizer = tokenizer_class.from_pretrained( args.output_dir, do_lower_case=args.do_lower_case) checkpoints = [args.output_dir] if args.predict_checkpoints > 0: checkpoints = list( os.path.dirname(c) for c in sorted( glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True))) logging.getLogger("transformers.modeling_utils").setLevel( logging.WARN) # Reduce logging checkpoints = [ x for x in checkpoints if x.split('-')[-1] == str(args.predict_checkpoints) ] logger.info("Predict the following checkpoints: %s", checkpoints) for checkpoint in checkpoints: prefix = checkpoint.split( '/')[-1] if checkpoint.find('checkpoint') != -1 else "" model = model_class.from_pretrained(checkpoint) model.to(args.device) predict(args, model, tokenizer, prefix=prefix)
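# Note: --predict_checkpoints is declared with action="store_true" in the argument
# parser above, but the prediction branch compares it with "> 0" and matches checkpoint
# directories against str(args.predict_checkpoints); with a boolean that string is
# "True" and can never equal a step suffix, so passing the flag empties the checkpoint
# list. A hedged fix is to declare it as an integer step id instead:
from argparse import ArgumentParser

demo_parser = ArgumentParser()
demo_parser.add_argument(
    "--predict_checkpoints", type=int, default=0,
    help="Global step of the checkpoint to predict with (0 = use output_dir as-is).")
demo_args = demo_parser.parse_args(["--predict_checkpoints", "1500"])   # example invocation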
def train(args, train_dataset, model, tokenizer): """ Train the model """ args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = RandomSampler(train_dataset) train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size, collate_fn=collate_fn) if args.max_steps > 0: num_training_steps = args.max_steps args.num_train_epochs = args.max_steps // ( len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: num_training_steps = len( train_dataloader ) // args.gradient_accumulation_steps * args.num_train_epochs args.warmup_steps = int(num_training_steps * args.warmup_proportion) no_decay = ['bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [{ 'params': [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay) ], 'weight_decay': args.weight_decay }, { 'params': [ p for n, p in model.named_parameters() if any(nd in n for nd in no_decay) ], 'weight_decay': 0.0 }] optimizer = AdamW(params=optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=num_training_steps) if args.n_gpu > 1: model = torch.nn.DataParallel(model) logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", num_training_steps) global_step = 0 tr_loss, logging_loss = 0.0, 0.0 model.zero_grad() seed_everything( args.seed ) # Added here for reproductibility (even between python 2 and 3) for _ in range(int(args.num_train_epochs)): pbar = ProgressBar(n_total=len(train_dataloader), desc='Training') for step, batch in enumerate(train_dataloader): model.train() batch = tuple(t.to(args.device) for t in batch) inputs = { 'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3] } inputs['token_type_ids'] = batch[2] outputs = model(**inputs) loss = outputs[0] if args.n_gpu > 1: loss = loss.mean() if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps loss.backward() torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0: optimizer.step() scheduler.step() model.zero_grad() global_step += 1 if args.local_rank in [ -1, 0 ] and args.logging_steps > 0 and global_step % args.logging_steps == 0: # Log metrics if args.local_rank == -1: evaluate(args, model, tokenizer) # if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: # output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step)) # if not os.path.exists(output_dir): # os.makedirs(output_dir) # model_to_save = model.module if hasattr(model, # 'module') else model # model_to_save.save_pretrained(output_dir) # torch.save(args, os.path.join(output_dir, 'training_args.bin')) # logger.info("Saving model checkpoint to %s", output_dir) pbar(step, {'loss': loss.item()}) print(" ") if 'cuda' in str(args.device): torch.cuda.empty_cache() # if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step)) if not os.path.exists(output_dir): os.makedirs(output_dir) 
model_to_save = model.module if hasattr(model, 'module') else model model_to_save.save_pretrained(output_dir) torch.save(args, os.path.join(output_dir, 'training_args.bin')) logger.info("Saving model checkpoint to %s", output_dir) return global_step, tr_loss / global_step
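# The final checkpoint written above can be reloaded for inference with
# from_pretrained(), and the pickled argparse namespace with torch.load(). A hedged
# sketch -- the directory and model class are placeholders for whatever this run saved:
import os
import torch
from transformers import AlbertForSequenceClassification

checkpoint_dir = "outputs/albert/checkpoint-1000"        # hypothetical path
saved_args = torch.load(os.path.join(checkpoint_dir, "training_args.bin"))
model = AlbertForSequenceClassification.from_pretrained(checkpoint_dir)
model.eval()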
def main(): args = get_argparse().parse_args() if not os.path.exists(args.output_dir): os.mkdir(args.output_dir) args.output_dir = args.output_dir + '{}'.format(args.model_type) if not os.path.exists(args.output_dir): os.mkdir(args.output_dir) time_ = time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime()) init_logger(log_file=args.output_dir + f'/{args.model_type}-{args.task_name}-{time_}.log') if os.path.exists(args.output_dir) and os.listdir( args.output_dir ) and args.do_train and not args.overwrite_output_dir: raise ValueError( "Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome." .format(args.output_dir)) # Setup distant debugging if needed if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = torch.cuda.device_count() else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend="nccl") args.n_gpu = 1 args.device = device logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16, ) # Set seed seed_everything(args.seed) # Prepare NER task args.task_name = args.task_name.lower() if args.task_name not in processors: raise ValueError("Task not found: %s" % (args.task_name)) processor = processors[args.task_name]() label_list = processor.get_labels() args.id2label = {i: label for i, label in enumerate(label_list)} args.label2id = {label: i for i, label in enumerate(label_list)} num_labels = len(label_list) # Load pretrained model and tokenizer if args.local_rank not in [-1, 0]: torch.distributed.barrier( ) # Make sure only the first process in distributed training will download model & vocab args.model_type = args.model_type.lower() config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] config = config_class.from_pretrained( args.config_name if args.config_name else args.model_name_or_path, num_labels=num_labels, cache_dir=args.cache_dir if args.cache_dir else None) tokenizer = tokenizer_class.from_pretrained( args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case, cache_dir=args.cache_dir if args.cache_dir else None) model = model_class.from_pretrained( args.model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), config=config, cache_dir=args.cache_dir if args.cache_dir else None) if args.local_rank == 0: torch.distributed.barrier( ) # Make sure only the first process in distributed training will download model & vocab model.to(args.device) logger.info("Training/evaluation parameters %s", args) # Training if args.do_train: train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, data_type='train') global_step, tr_loss = train(args, train_dataset, model, tokenizer, config) logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
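# seed_everything() is called in every entry point but defined elsewhere in the repo.
# A minimal sketch of what such a helper typically does (an assumption, not the repo's
# exact implementation):
import os
import random

import numpy as np
import torch

def seed_everything(seed=42):
    random.seed(seed)
    os.environ["PYTHONHASHSEED"] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)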
def train(args, train_dataset, model, tokenizer): """ Train the model """ ################################################################### # 初始化一个pandas DataFrame进行训练日志的存储 df_path = args.output_dir + "/bert" + "df_log.pickle" if not os.path.isfile(df_path): df = pd.DataFrame(columns=[ "epoch", "train_loss", "train_auc", "test_loss", "test_auc" ]) df.to_pickle(df_path) print("log DataFrame created!") ################################################################# args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = RandomSampler( train_dataset) if args.local_rank == -1 else DistributedSampler( train_dataset) train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size, collate_fn=collate_fn) t_total = len(train_dataloader ) // args.gradient_accumulation_steps * args.num_train_epochs args.warmup_steps = int(t_total * args.warmup_proportion) no_decay = ['bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [{ 'params': [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay) ], 'weight_decay': args.weight_decay }, { 'params': [ p for n, p in model.named_parameters() if any(nd in n for nd in no_decay) ], 'weight_decay': 0.0 }] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total) if args.n_gpu > 1: model = torch.nn.DataParallel(model) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True) global_step = 0 tr_loss, logging_loss = 0.0, 0.0 model.zero_grad() seed_everything(args.seed) for _ in range(int(args.num_train_epochs)): all_predictions, all_labels = [], [] pbar = ProgressBar(n_total=len(train_dataloader), desc='Train') for step, batch in enumerate(train_dataloader): model.train() batch = tuple(t.to(args.device) for t in batch) inputs = { 'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3] } if args.model_type != 'distilbert': inputs['token_type_ids'] = batch[2] if args.model_type in [ 'bert', 'xlnet', 'albert', 'roberta' ] else None # XLM, DistilBERT don't use segment_ids outputs = model(**inputs) loss = outputs[0] ############################################################################################################################# # 提取预测的结果和标记, 并存到all_predictions, all_labels里 # 用来计算auc # 存储所有预测的结果和标记, 用来计算auc predictions = fl.softmax(outputs[1], dim=-1)[:, 1] predictions = predictions.detach().cpu().numpy().reshape( -1).tolist() labels = batch[3].cpu().numpy().reshape(-1).tolist() all_predictions.extend(predictions) all_labels.extend(labels) # 计算auc fpr, tpr, thresholds = metrics.roc_curve(y_true=all_labels, y_score=all_predictions) auc = metrics.auc(fpr, tpr) ############################################################################################################################# if args.n_gpu > 1: loss = loss.mean() if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps loss.backward() torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) tr_loss += loss.item() ############################################################################### log_dic = { "epoch": _, "train_loss": tr_loss / (step + 1), "train_auc": auc, "test_loss": 0, "test_auc": 0 } if step % 10 == 0: print((str({k: v for k, v in log_dic.items() if v != 0}))) df = pd.read_pickle(df_path) 
df = df.append([log_dic]) df.reset_index(inplace=True, drop=True) df.to_pickle(df_path) ############################################################################## if (step + 1) % args.gradient_accumulation_steps == 0: optimizer.step() scheduler.step() model.zero_grad() global_step += 1 # flag if args.local_rank in [ -1, 0 ] and args.logging_steps > 0 and global_step % args.logging_steps == 0: if args.local_rank == -1: results = evaluate(args, model, tokenizer) # Save a model checkpoint every args.save_steps steps if args.local_rank in [ -1, 0 ] and args.save_steps > 0 and global_step % args.save_steps == 0: output_dir = os.path.join( args.output_dir, 'checkpoint-{}'.format(global_step)) if not os.path.exists(output_dir): os.makedirs(output_dir) model_to_save = model.module if hasattr( model, 'module' ) else model # Take care of distributed/parallel training model_to_save.save_pretrained(output_dir) torch.save(args, os.path.join(output_dir, 'training_args.bin')) logger.info("Saving model checkpoint to %s", output_dir) tokenizer.save_vocabulary(vocab_path=output_dir) pbar(step, {'loss': loss.item()}) #################################################################################################################### global all_auc global threshold all_auc.append(auc) best_auc = max(all_auc) if all_auc[-1] < best_auc: # AUC got worse: count towards early stopping and decay the learning rate threshold += 1 args.learning_rate *= 0.8 optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) predict(args, model, tokenizer, prefix="") if threshold >= 5: print("epoch {} has the best AUC".format( 0 + np.argmax(np.array(all_auc)))) print("early stop!") break # NOTE: this only exits the inner dataloader loop else: threshold = 0 # reset the patience counter when the AUC improves ##################################################################################################################### print(" ") if 'cuda' in str(args.device): torch.cuda.empty_cache() return global_step, tr_loss / global_step
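# The AUC bookkeeping in the loop above recomputes metrics.roc_curve + metrics.auc over
# every prediction accumulated so far; sklearn's roc_auc_score returns the same value in
# one call. A small self-contained check with toy labels and scores:
from sklearn import metrics

labels = [0, 0, 1, 1]
scores = [0.1, 0.4, 0.35, 0.8]
fpr, tpr, _ = metrics.roc_curve(y_true=labels, y_score=scores)
assert abs(metrics.auc(fpr, tpr) - metrics.roc_auc_score(labels, scores)) < 1e-12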