def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path, is_albert):
    config_path = os.path.abspath(bert_config_file)
    tf_path = os.path.abspath(tf_checkpoint_path)
    print("Converting TensorFlow checkpoint from {} with config at {}".format(tf_path, config_path))

    # Load weights from the TF checkpoint
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        print("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array)

    # Initialise the PyTorch model
    if is_albert:
        config = ALBertConfig.from_json_file(bert_config_file)
        print("Building PyTorch model from configuration: {}".format(str(config)))
        model = ALBertForPreTraining(config)
    else:
        config = BertConfig.from_json_file(bert_config_file)
        print("Building PyTorch model from configuration: {}".format(str(config)))
        model = BertForPreTraining(config)

    for name, array in zip(names, arrays):
        name = name.split('/')
        # global_step is optimizer bookkeeping only
        if name[0] == 'global_step':
            continue
        # adam_v and adam_m are the AdamWeightDecayOptimizer moment estimates (m and v),
        # which are not required for using the pretrained model
        if any(n in ["adam_v", "adam_m"] for n in name):
            print("Skipping {}".format("/".join(name)))
            continue
        # Walk the TF variable name and follow the matching PyTorch sub-modules
        pointer = model
        for m_name in name:
            if re.fullmatch(r'[A-Za-z]+_\d+', m_name):
                l = re.split(r'_(\d+)', m_name)
            else:
                l = [m_name]
            if l[0] == 'kernel' or l[0] == 'gamma':
                pointer = getattr(pointer, 'weight')
            elif l[0] == 'output_bias' or l[0] == 'beta':
                pointer = getattr(pointer, 'bias')
            elif l[0] == 'output_weights':
                pointer = getattr(pointer, 'weight')
            else:
                pointer = getattr(pointer, l[0])
            if len(l) >= 2:
                num = int(l[1])
                pointer = pointer[num]
        if m_name[-11:] == '_embeddings':
            pointer = getattr(pointer, 'weight')
        elif m_name[-13:] == '_embeddings_2':
            pointer = getattr(pointer, 'weight')
            array = np.transpose(array)
        elif m_name == 'kernel':
            array = np.transpose(array)
        try:
            assert pointer.shape == array.shape
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        print("Initialize PyTorch weight {}".format(name))
        pointer.data = torch.from_numpy(array)

    # Save the PyTorch model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
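# A minimal CLI sketch (not part of the original excerpt) showing how
# convert_tf_checkpoint_to_pytorch above might be invoked from the command line.
# The flag names and example paths below are illustrative assumptions, not the
# repo's actual interface.
if __name__ == "__main__":
    import argparse

    cli = argparse.ArgumentParser()
    cli.add_argument("--tf_checkpoint_path", required=True,
                     help="Path to the TensorFlow checkpoint prefix, e.g. .../albert_model.ckpt")
    cli.add_argument("--bert_config_file", required=True,
                     help="Path to the bert_config.json describing the architecture")
    cli.add_argument("--pytorch_dump_path", required=True,
                     help="Where to write the converted PyTorch state dict (pytorch_model.pth)")
    cli.add_argument("--is_albert", action="store_true",
                     help="Use the ALBERT classes instead of the BERT ones")
    cli_args = cli.parse_args()

    convert_tf_checkpoint_to_pytorch(cli_args.tf_checkpoint_path,
                                     cli_args.bert_config_file,
                                     cli_args.pytorch_dump_path,
                                     cli_args.is_albert)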
# use some global vars for convenience
args = parser.parse_args()
args.checkpoint_dir += ('/epoch{}_batch{}_lr{}_warmup{}_anslen{}/'.format(
    args.train_epochs, args.n_batch, args.lr, args.warmup_rate, args.max_ans_length))
args = utils.check_args(args)
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_ids
device = torch.device("cuda")
n_gpu = torch.cuda.device_count()
print("device %s n_gpu %d" % (device, n_gpu))
print("device: {} n_gpu: {} 16-bits training: {}".format(device, n_gpu, args.float16))

# load the bert setting
if 'albert' not in args.bert_config_file:
    bert_config = BertConfig.from_json_file(args.bert_config_file)
else:
    bert_config = ALBertConfig.from_json_file(args.bert_config_file)

# load data
print('loading data...')
tokenizer = tokenization.BertTokenizer(vocab_file=args.vocab_file, do_lower_case=True)
assert args.vocab_size == len(tokenizer.vocab)

if not os.path.exists(args.train_dir):
    json2features(args.train_file,
                  [args.train_dir.replace('_features_', '_examples_'), args.train_dir],
                  tokenizer,
                  is_training=True,
                  max_seq_length=bert_config.max_position_embeddings)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--gpu_ids", default='0,1,2,3,4,5,6,7', type=str)
    parser.add_argument("--model_name", default='albert_xxlarge_google_zh')
    parser.add_argument("--bert_config_file",
                        default='check_points/pretrain_models/albert_xxlarge_google_zh_v1121/bert_config.json')
    parser.add_argument("--vocab_file",
                        default='check_points/pretrain_models/albert_xlarge_zh/vocab.txt')
    parser.add_argument("--init_checkpoint",
                        default='check_points/pretrain_models/albert_xxlarge_google_zh_v1121/pytorch_model.pth')
    parser.add_argument("--input_dir", default='dataset/CHID')
    parser.add_argument("--output_dir", default='check_points/CHID')

    ## Other parameters
    parser.add_argument("--train_file", default='./origin_data/CHID/train.json', type=str,
                        help="CHID json for training, e.g. train.json")
    parser.add_argument("--train_ans_file", default='./origin_data/CHID/train_answer.json', type=str,
                        help="CHID answer file for training, e.g. train_answer.json")
    parser.add_argument("--predict_file", default='./origin_data/CHID/dev.json', type=str,
                        help="CHID json for predictions, e.g. dev.json or test.json")
    parser.add_argument("--predict_ans_file", default='origin_data/CHID/dev_answer.json', type=str,
                        help="CHID answer file for evaluating predictions, e.g. dev_answer.json")
    parser.add_argument("--max_seq_length", default=64, type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. Sequences "
                             "longer than this will be truncated, and sequences shorter than this will be padded.")
    parser.add_argument("--max_num_choices", default=10, type=int,
                        help="The maximum number of candidate answers; choice lists shorter than this will be padded.")
    parser.add_argument("--do_train", default=True, action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--do_predict", default=True, action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument("--train_batch_size", default=32, type=int,
                        help="Total batch size for training.")
    parser.add_argument("--predict_batch_size", default=16, type=int,
                        help="Total batch size for predictions.")
    parser.add_argument("--learning_rate", default=2e-5, type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--num_train_epochs", default=3.0, type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--warmup_proportion", default=0.06, type=float,
                        help="Proportion of training to perform linear learning rate warmup for. "
                             "E.g., 0.1 = 10%% of training.")
    parser.add_argument("--no_cuda", default=False, action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument('--seed', type=int, default=422,
                        help="random seed for initialization")
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
                        help="Number of update steps to accumulate before performing a backward/update pass.")
    parser.add_argument("--do_lower_case", default=True, action='store_true',
                        help="Whether to lower case the input text. True for uncased models, False for cased models.")
    parser.add_argument("--local_rank", type=int, default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument('--fp16', default=True, action='store_true',
                        help="Whether to use 16-bit float precision instead of 32-bit")
    parser.add_argument('--loss_scale', type=float, default=0,
                        help="Loss scaling to improve fp16 numeric stability. Only used when fp16 is set to True.\n"
                             "0 (default value): dynamic loss scaling.\n"
                             "Positive power of 2: static loss scaling value.\n")

    args = parser.parse_args()
    args.output_dir = os.path.join(args.output_dir, args.model_name)
    print(args)
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_ids

    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')
    print("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
        device, n_gpu, bool(args.local_rank != -1), args.fp16))

    if args.gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
            args.gradient_accumulation_steps))
    args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    if not args.do_train and not args.do_predict:
        raise ValueError("At least one of `do_train` or `do_predict` must be True.")
    if args.do_train:
        if not args.train_file:
            raise ValueError("If `do_train` is True, then `train_file` must be specified.")
    if args.do_predict:
        if not args.predict_file:
            raise ValueError("If `do_predict` is True, then `predict_file` must be specified.")

    if not os.path.exists(args.output_dir):
        # raise ValueError("Output directory () already exists and is not empty.")
        os.makedirs(args.output_dir, exist_ok=True)

    tokenizer = BertTokenizer(vocab_file=args.vocab_file, do_lower_case=args.do_lower_case)

    if args.do_train:
        print('ready for train dataset')
        train_example_file = os.path.join(args.input_dir,
                                          'train_examples_{}.pkl'.format(str(args.max_seq_length)))
        train_feature_file = os.path.join(args.input_dir,
                                          'train_features_{}.pkl'.format(str(args.max_seq_length)))
        train_features = generate_input(args.train_file, args.train_ans_file,
                                        train_example_file, train_feature_file, tokenizer,
                                        max_seq_length=args.max_seq_length,
                                        max_num_choices=args.max_num_choices,
                                        is_training=True)

        dev_example_file = os.path.join(args.input_dir,
                                        'dev_examples_{}.pkl'.format(str(args.max_seq_length)))
        dev_feature_file = os.path.join(args.input_dir,
                                        'dev_features_{}.pkl'.format(str(args.max_seq_length)))
        eval_features = generate_input(args.predict_file, None,
                                       dev_example_file, dev_feature_file, tokenizer,
                                       max_seq_length=args.max_seq_length,
                                       max_num_choices=args.max_num_choices,
                                       is_training=False)

        print("train features {}".format(len(train_features)))
        num_train_steps = int(len(train_features) / args.train_batch_size /
                              args.gradient_accumulation_steps * args.num_train_epochs)
        print("loaded train dataset")
        print("Num generated examples = {}".format(len(train_features)))
        print("Batch size = {}".format(args.train_batch_size))
        print("Num train steps = {}".format(num_train_steps))

        all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
        all_input_masks = torch.tensor([f.input_masks for f in train_features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
        all_choice_masks = torch.tensor([f.choice_masks for f in train_features], dtype=torch.long)
        all_labels = torch.tensor([f.label for f in train_features], dtype=torch.long)

        train_data = TensorDataset(all_input_ids, all_input_masks, all_segment_ids,
                                   all_choice_masks, all_labels)
        if args.local_rank == -1:
            train_sampler = RandomSampler(train_data)
        else:
            train_sampler = DistributedSampler(train_data)
        train_dataloader = DataLoader(train_data,
                                      sampler=train_sampler,
                                      batch_size=args.train_batch_size,
                                      drop_last=True)

        # Prepare model
        if 'albert' in args.model_name:
            if 'google' in args.model_name:
                bert_config = AlbertConfig.from_json_file(args.bert_config_file)
                model = reset_model(args, bert_config, AlbertForMultipleChoice)
            else:
                bert_config = ALBertConfig.from_json_file(args.bert_config_file)
                model = reset_model(args, bert_config, ALBertForMultipleChoice)
        else:
            bert_config = BertConfig.from_json_file(args.bert_config_file)
            model = reset_model(args, bert_config, BertForMultipleChoice)
        model = model.to(device)
        if n_gpu > 1:
            model = torch.nn.DataParallel(model)

        optimizer = get_optimization(model,
                                     float16=args.fp16,
                                     learning_rate=args.learning_rate,
                                     total_steps=num_train_steps,
                                     schedule='warmup_linear',
                                     warmup_rate=args.warmup_proportion,
                                     weight_decay_rate=0.01,
                                     max_grad_norm=1.0,
                                     opt_pooler=True)  # multi_choice must update pooler

        global_step = 0
        best_acc = 0
        acc = 0
        for i in range(int(args.num_train_epochs)):
            num_step = 0
            average_loss = 0
            model.train()
            model.zero_grad()  # equivalent to optimizer.zero_grad()
            steps_per_epoch = num_train_steps // args.num_train_epochs
            with tqdm(total=int(steps_per_epoch), desc='Epoch %d' % (i + 1)) as pbar:
                for step, batch in enumerate(train_dataloader):
                    if n_gpu == 1:
                        batch = tuple(t.to(device) for t in batch)  # multi-gpu does scattering itself
                    input_ids, input_masks, segment_ids, choice_masks, labels = batch
                    if step == 0 and i == 0:
                        print('shape of input_ids: {}'.format(input_ids.shape))
                        print('shape of labels: {}'.format(labels.shape))
                    loss = model(input_ids=input_ids,
                                 token_type_ids=segment_ids,
                                 attention_mask=input_masks,
                                 labels=labels)
                    if n_gpu > 1:
                        loss = loss.mean()  # mean() to average on multi-gpu.
                    if args.gradient_accumulation_steps > 1:
                        loss = loss / args.gradient_accumulation_steps

                    if args.fp16:
                        optimizer.backward(loss)
                        # modify learning rate with the special warm up BERT uses;
                        # if args.fp16 is False, BertAdam is used and handles this automatically
                        lr_this_step = args.learning_rate * warmup_linear(global_step / num_train_steps,
                                                                          args.warmup_proportion)
                        for param_group in optimizer.param_groups:
                            param_group['lr'] = lr_this_step
                    else:
                        loss.backward()

                    if (step + 1) % args.gradient_accumulation_steps == 0:
                        optimizer.step()
                        optimizer.zero_grad()
                        global_step += 1

                    average_loss += loss.item()
                    num_step += 1
                    pbar.set_postfix({'loss': '{0:1.5f}'.format(average_loss / (num_step + 1e-5))})
                    pbar.update(1)

            if args.do_predict and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
                print("***** Running predictions *****")
                print("Num split examples = {}".format(len(eval_features)))
                print("Batch size = {}".format(args.predict_batch_size))

                all_example_ids = [f.example_id for f in eval_features]
                all_tags = [f.tag for f in eval_features]
                all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
                all_input_masks = torch.tensor([f.input_masks for f in eval_features], dtype=torch.long)
                all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
                all_choice_masks = torch.tensor([f.choice_masks for f in eval_features], dtype=torch.long)
                all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)

                eval_data = TensorDataset(all_input_ids, all_input_masks, all_segment_ids,
                                          all_choice_masks, all_example_index)
                # Run prediction for full data
                eval_sampler = SequentialSampler(eval_data)
                eval_dataloader = DataLoader(eval_data,
                                             sampler=eval_sampler,
                                             batch_size=args.predict_batch_size)

                model.eval()
                all_results = []
                print("Start evaluating")
                for input_ids, input_masks, segment_ids, choice_masks, example_indices in tqdm(
                        eval_dataloader, desc="Evaluating", disable=None):
                    if len(all_results) == 0:
                        print('shape of input_ids: {}'.format(input_ids.shape))
                    input_ids = input_ids.to(device)
                    input_masks = input_masks.to(device)
                    segment_ids = segment_ids.to(device)
                    with torch.no_grad():
                        batch_logits = model(input_ids=input_ids,
                                             token_type_ids=segment_ids,
                                             attention_mask=input_masks,
                                             labels=None)
                    for j, example_index in enumerate(example_indices):
                        logits = batch_logits[j].detach().cpu().tolist()
                        eval_feature = eval_features[example_index.item()]
                        unique_id = int(eval_feature.unique_id)
                        all_results.append(RawResult(unique_id=unique_id,
                                                     example_id=all_example_ids[unique_id],
                                                     tag=all_tags[unique_id],
                                                     logit=logits))
                print("prediction is over")

                predict_file = 'dev_predictions.json'
                print('decode raw results')
                tmp_predict_file = os.path.join(args.output_dir, "raw_predictions.pkl")
                output_prediction_file = os.path.join(args.output_dir, predict_file)
                results = get_final_predictions(all_results, tmp_predict_file, g=True)
                write_predictions(results, output_prediction_file)
                print('predictions saved to {}'.format(output_prediction_file))

                if args.predict_ans_file:
                    acc = evaluate(args.predict_ans_file, output_prediction_file)
                    print(f'{args.predict_file} prediction accuracy: {acc}')

            # Save the model after this epoch if it is the best so far
            if not args.do_predict or acc > best_acc:
                best_acc = acc
                output_model_file = os.path.join(args.output_dir, "best_checkpoint.bin")
                print('save trained model to {}'.format(output_model_file))
                model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model itself
                torch.save(model_to_save.state_dict(), output_model_file)
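# The fp16 branch above rescales the learning rate by hand via warmup_linear,
# which is not defined in this excerpt. The sketch below shows the usual
# definition of this helper in pytorch_pretrained_bert-style optimization code;
# treat it as an assumption about the schedule, not the repo's exact implementation.
def warmup_linear(x, warmup=0.002):
    # Linear warmup from 0 to 1 over the first `warmup` fraction of training,
    # then linear decay from 1 towards 0 for the remainder.
    if x < warmup:
        return x / warmup
    return 1.0 - x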
def main():
    parser = argparse.ArgumentParser()

    ## Required parameters
    parser.add_argument("--gpu_ids", default='0,1,2,3', type=str)
    parser.add_argument("--data_dir", default='origin_data/C3', type=str)
    parser.add_argument("--task_name", default='c3', type=str)
    parser.add_argument("--bert_config_file",
                        # albert_xxlarge_google_zh_v1121
                        # roberta_wwm_ext_large
                        default='check_points/pretrain_models/albert_xxlarge_google_zh_v1121/bert_config.json',
                        type=str)
    parser.add_argument("--vocab_file",
                        default='check_points/pretrain_models/google_bert_base/vocab.txt',
                        type=str)
    parser.add_argument("--output_dir",
                        default='check_points/c3/albert_xxlarge_google_zh_v1121',
                        type=str)

    ## Other parameters
    parser.add_argument("--init_checkpoint",
                        default='check_points/pretrain_models/albert_xxlarge_google_zh_v1121/pytorch_model.pth',
                        type=str,
                        help="Initial checkpoint (usually from a pre-trained BERT model).")
    parser.add_argument("--do_lower_case", default=True, action='store_true',
                        help="Whether to lower case the input text. True for uncased models, False for cased models.")
    parser.add_argument("--max_seq_length", default=512, type=int,
                        help="The maximum total input sequence length after WordPiece tokenization.\n"
                             "Sequences longer than this will be truncated, and sequences shorter\n"
                             "than this will be padded.")
    parser.add_argument("--do_train", default=True, action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--do_eval", default=True, action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument("--train_batch_size", default=16, type=int,
                        help="Total batch size for training.")
    parser.add_argument("--eval_batch_size", default=16, type=int,
                        help="Total batch size for eval.")
    parser.add_argument("--learning_rate", default=2e-5, type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--schedule", default='warmup_linear', type=str,
                        help="Learning rate schedule.")
    parser.add_argument("--weight_decay_rate", default=0.01, type=float,
                        help="Weight decay rate.")
    parser.add_argument('--clip_norm', type=float, default=1.0)
    parser.add_argument("--num_train_epochs", default=8.0, type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--warmup_proportion", default=0.05, type=float,
                        help="Proportion of training to perform linear learning rate warmup for. "
                             "E.g., 0.1 = 10%% of training.")
    parser.add_argument("--no_cuda", default=False, action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument('--float16', type=bool, default=True)
    parser.add_argument("--local_rank", type=int, default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument('--seed', type=int, default=345,
                        help="random seed for initialization")
    parser.add_argument('--gradient_accumulation_steps', type=int, default=4,
                        help="Number of update steps to accumulate before performing a backward/update pass.")
    parser.add_argument('--setting_file', type=str, default='setting.txt')
    parser.add_argument('--log_file', type=str, default='log.txt')

    args = parser.parse_args()
    args.setting_file = os.path.join(args.output_dir, args.setting_file)
    args.log_file = os.path.join(args.output_dir, args.log_file)
    os.makedirs(args.output_dir, exist_ok=True)
    with open(args.setting_file, 'wt') as opt_file:
        opt_file.write('------------ Options -------------\n')
        print('------------ Options -------------')
        for k in args.__dict__:
            v = args.__dict__[k]
            opt_file.write('%s: %s\n' % (str(k), str(v)))
            print('%s: %s' % (str(k), str(v)))
        opt_file.write('-------------- End ----------------\n')
        print('------------ End -------------')

    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_ids
    if os.path.exists(args.log_file):
        os.remove(args.log_file)

    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')
    logger.info("device %s n_gpu %d distributed training %r", device, n_gpu, bool(args.local_rank != -1))

    if args.gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
            args.gradient_accumulation_steps))
    args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    processor = c3Processor(args.data_dir)
    label_list = processor.get_labels()

    tokenizer = tokenization.BertTokenizer(vocab_file=args.vocab_file, do_lower_case=args.do_lower_case)

    train_examples = None
    num_train_steps = None
    if args.do_train:
        train_examples = processor.get_train_examples()
        num_train_steps = int(len(train_examples) / n_class / args.train_batch_size /
                              args.gradient_accumulation_steps * args.num_train_epochs)

    if 'albert' in args.bert_config_file:
        if 'google' in args.bert_config_file:
            bert_config = AlbertConfig.from_json_file(args.bert_config_file)
            model = AlbertForMultipleChoice(bert_config, num_choices=n_class)
        else:
            bert_config = ALBertConfig.from_json_file(args.bert_config_file)
            model = ALBertForMultipleChoice(bert_config, num_choices=n_class)
    else:
        bert_config = BertConfig.from_json_file(args.bert_config_file)
        model = BertForMultipleChoice(bert_config, num_choices=n_class)

    if args.max_seq_length > bert_config.max_position_embeddings:
        raise ValueError(
            "Cannot use sequence length {} because the BERT model was only trained up to sequence length {}".format(
                args.max_seq_length, bert_config.max_position_embeddings))

    if args.init_checkpoint is not None:
        utils.torch_show_all_params(model)
        utils.torch_init_model(model, args.init_checkpoint)
    if args.float16:
        model.half()
    model.to(device)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(model,
                                                          device_ids=[args.local_rank],
                                                          output_device=args.local_rank)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)

    optimizer = get_optimization(model=model,
                                 float16=args.float16,
                                 learning_rate=args.learning_rate,
                                 total_steps=num_train_steps,
                                 schedule=args.schedule,
                                 warmup_rate=args.warmup_proportion,
                                 max_grad_norm=args.clip_norm,
                                 weight_decay_rate=args.weight_decay_rate,
                                 opt_pooler=True)  # multi_choice must update pooler

    global_step = 0
    eval_dataloader = None
    if args.do_eval:
        eval_examples = processor.get_dev_examples()
        feature_dir = os.path.join(args.data_dir, 'dev_features{}.pkl'.format(args.max_seq_length))
        if os.path.exists(feature_dir):
            eval_features = pickle.load(open(feature_dir, 'rb'))
        else:
            eval_features = convert_examples_to_features(eval_examples, label_list,
                                                         args.max_seq_length, tokenizer)
            with open(feature_dir, 'wb') as w:
                pickle.dump(eval_features, w)

        input_ids = []
        input_mask = []
        segment_ids = []
        label_id = []
        for f in eval_features:
            input_ids.append([])
            input_mask.append([])
            segment_ids.append([])
            for i in range(n_class):
                input_ids[-1].append(f[i].input_ids)
                input_mask[-1].append(f[i].input_mask)
                segment_ids[-1].append(f[i].segment_ids)
            label_id.append(f[0].label_id)

        all_input_ids = torch.tensor(input_ids, dtype=torch.long)
        all_input_mask = torch.tensor(input_mask, dtype=torch.long)
        all_segment_ids = torch.tensor(segment_ids, dtype=torch.long)
        all_label_ids = torch.tensor(label_id, dtype=torch.long)

        eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
        if args.local_rank == -1:
            eval_sampler = SequentialSampler(eval_data)
        else:
            eval_sampler = DistributedSampler(eval_data)
        eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

    if args.do_train:
        best_accuracy = 0

        feature_dir = os.path.join(args.data_dir, 'train_features{}.pkl'.format(args.max_seq_length))
        if os.path.exists(feature_dir):
            train_features = pickle.load(open(feature_dir, 'rb'))
        else:
            train_features = convert_examples_to_features(train_examples, label_list,
                                                          args.max_seq_length, tokenizer)
            with open(feature_dir, 'wb') as w:
                pickle.dump(train_features, w)

        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", len(train_examples))
        logger.info("  Batch size = %d", args.train_batch_size)
        logger.info("  Num steps = %d", num_train_steps)

        input_ids = []
        input_mask = []
        segment_ids = []
        label_id = []
        for f in train_features:
            input_ids.append([])
            input_mask.append([])
            segment_ids.append([])
            for i in range(n_class):
                input_ids[-1].append(f[i].input_ids)
                input_mask[-1].append(f[i].input_mask)
                segment_ids[-1].append(f[i].segment_ids)
            label_id.append(f[0].label_id)

        all_input_ids = torch.tensor(input_ids, dtype=torch.long)
        all_input_mask = torch.tensor(input_mask, dtype=torch.long)
        all_segment_ids = torch.tensor(segment_ids, dtype=torch.long)
        all_label_ids = torch.tensor(label_id, dtype=torch.long)

        train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
        if args.local_rank == -1:
            train_sampler = RandomSampler(train_data)
        else:
            train_sampler = DistributedSampler(train_data)
        train_dataloader = DataLoader(train_data,
                                      sampler=train_sampler,
                                      batch_size=args.train_batch_size,
                                      drop_last=True)
        steps_per_epoch = int(num_train_steps / args.num_train_epochs)

        for ie in range(int(args.num_train_epochs)):
            model.train()
            tr_loss = 0
            nb_tr_examples, nb_tr_steps = 0, 0
            with tqdm(total=int(steps_per_epoch), desc='Epoch %d' % (ie + 1)) as pbar:
                for step, batch in enumerate(train_dataloader):
                    batch = tuple(t.to(device) for t in batch)
                    input_ids, input_mask, segment_ids, label_ids = batch
                    loss = model(input_ids, segment_ids, input_mask, label_ids)
                    if n_gpu > 1:
                        loss = loss.mean()  # mean() to average on multi-gpu.
                    if args.gradient_accumulation_steps > 1:
                        loss = loss / args.gradient_accumulation_steps
                    tr_loss += loss.item()

                    if args.float16:
                        optimizer.backward(loss)
                        # modify learning rate with the special warm up BERT uses;
                        # if args.float16 is False, BertAdam is used and handles this automatically
                        lr_this_step = args.learning_rate * warmup_linear(global_step / num_train_steps,
                                                                          args.warmup_proportion)
                        for param_group in optimizer.param_groups:
                            param_group['lr'] = lr_this_step
                    else:
                        loss.backward()

                    nb_tr_examples += input_ids.size(0)
                    if (step + 1) % args.gradient_accumulation_steps == 0:
                        optimizer.step()  # We have accumulated enough gradients
                        model.zero_grad()
                        global_step += 1

                    nb_tr_steps += 1
                    pbar.set_postfix({'loss': '{0:1.5f}'.format(tr_loss / (nb_tr_steps + 1e-5))})
                    pbar.update(1)

            if args.do_eval:
                model.eval()
                eval_loss, eval_accuracy = 0, 0
                nb_eval_steps, nb_eval_examples = 0, 0
                logits_all = []
                for input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader):
                    input_ids = input_ids.to(device)
                    input_mask = input_mask.to(device)
                    segment_ids = segment_ids.to(device)
                    label_ids = label_ids.to(device)

                    with torch.no_grad():
                        tmp_eval_loss, logits = model(input_ids, segment_ids, input_mask, label_ids,
                                                      return_logits=True)

                    logits = logits.detach().cpu().numpy()
                    label_ids = label_ids.cpu().numpy()
                    for i in range(len(logits)):
                        logits_all += [logits[i]]

                    tmp_eval_accuracy = accuracy(logits, label_ids.reshape(-1))

                    eval_loss += tmp_eval_loss.mean().item()
                    eval_accuracy += tmp_eval_accuracy

                    nb_eval_examples += input_ids.size(0)
                    nb_eval_steps += 1

                eval_loss = eval_loss / nb_eval_steps
                eval_accuracy = eval_accuracy / nb_eval_examples

                if args.do_train:
                    result = {'eval_loss': eval_loss,
                              'eval_accuracy': eval_accuracy,
                              'global_step': global_step,
                              'loss': tr_loss / nb_tr_steps}
                else:
                    result = {'eval_loss': eval_loss,
                              'eval_accuracy': eval_accuracy}

                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info("  %s = %s", key, str(result[key]))
                with open(args.log_file, 'a') as aw:
                    aw.write("-------------------global steps:{}-------------------\n".format(global_step))
                    aw.write(str(json.dumps(result, indent=2)) + '\n')

                if eval_accuracy >= best_accuracy:
                    torch.save(model.state_dict(), os.path.join(args.output_dir, "model_best.pt"))
                    best_accuracy = eval_accuracy

        model.load_state_dict(torch.load(os.path.join(args.output_dir, "model_best.pt")))
        torch.save(model.state_dict(), os.path.join(args.output_dir, "model.pt"))

    model.load_state_dict(torch.load(os.path.join(args.output_dir, "model.pt")))

    if args.do_eval:
        logger.info("***** Running evaluation *****")
        logger.info("  Num examples = %d", len(eval_examples))
        logger.info("  Batch size = %d", args.eval_batch_size)

        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        logits_all = []
        for input_ids, input_mask, segment_ids, label_ids in eval_dataloader:
            input_ids = input_ids.to(device)
            input_mask = input_mask.to(device)
            segment_ids = segment_ids.to(device)
            label_ids = label_ids.to(device)

            with torch.no_grad():
                tmp_eval_loss, logits = model(input_ids, segment_ids, input_mask, label_ids,
                                              return_logits=True)

            logits = logits.detach().cpu().numpy()
            label_ids = label_ids.cpu().numpy()
            for i in range(len(logits)):
                logits_all += [logits[i]]

            tmp_eval_accuracy = accuracy(logits, label_ids.reshape(-1))

            eval_loss += tmp_eval_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples

        result = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy}

        output_eval_file = os.path.join(args.output_dir, "results_dev.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))

        output_eval_file = os.path.join(args.output_dir, "logits_dev.txt")
        with open(output_eval_file, "w") as f:
            for i in range(len(logits_all)):
                for j in range(len(logits_all[i])):
                    f.write(str(logits_all[i][j]))
                    if j == len(logits_all[i]) - 1:
                        f.write("\n")
                    else:
                        f.write(" ")

        # Run testing on the test split
        test_examples = processor.get_test_examples()
        feature_dir = os.path.join(args.data_dir, 'test_features{}.pkl'.format(args.max_seq_length))
        if os.path.exists(feature_dir):
            test_features = pickle.load(open(feature_dir, 'rb'))
        else:
            test_features = convert_examples_to_features(test_examples, label_list,
                                                         args.max_seq_length, tokenizer)
            with open(feature_dir, 'wb') as w:
                pickle.dump(test_features, w)

        logger.info("***** Running testing *****")
        logger.info("  Num examples = %d", len(test_examples))
        logger.info("  Batch size = %d", args.eval_batch_size)

        input_ids = []
        input_mask = []
        segment_ids = []
        label_id = []
        for f in test_features:
            input_ids.append([])
            input_mask.append([])
            segment_ids.append([])
            for i in range(n_class):
                input_ids[-1].append(f[i].input_ids)
                input_mask[-1].append(f[i].input_mask)
                segment_ids[-1].append(f[i].segment_ids)
            label_id.append(f[0].label_id)

        all_input_ids = torch.tensor(input_ids, dtype=torch.long)
        all_input_mask = torch.tensor(input_mask, dtype=torch.long)
        all_segment_ids = torch.tensor(segment_ids, dtype=torch.long)
        all_label_ids = torch.tensor(label_id, dtype=torch.long)

        test_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
        if args.local_rank == -1:
            test_sampler = SequentialSampler(test_data)
        else:
            test_sampler = DistributedSampler(test_data)
        test_dataloader = DataLoader(test_data, sampler=test_sampler, batch_size=args.eval_batch_size)

        model.eval()
        test_loss, test_accuracy = 0, 0
        nb_test_steps, nb_test_examples = 0, 0
        logits_all = []
        for input_ids, input_mask, segment_ids, label_ids in test_dataloader:
            input_ids = input_ids.to(device)
            input_mask = input_mask.to(device)
            segment_ids = segment_ids.to(device)
            label_ids = label_ids.to(device)

            with torch.no_grad():
                tmp_test_loss, logits = model(input_ids, segment_ids, input_mask, label_ids,
                                              return_logits=True)

            logits = logits.detach().cpu().numpy()
            label_ids = label_ids.to('cpu').numpy()
            for i in range(len(logits)):
                logits_all += [logits[i]]

            tmp_test_accuracy = accuracy(logits, label_ids.reshape(-1))

            test_loss += tmp_test_loss.mean().item()
            test_accuracy += tmp_test_accuracy

            nb_test_examples += input_ids.size(0)
            nb_test_steps += 1

        test_loss = test_loss / nb_test_steps
        test_accuracy = test_accuracy / nb_test_examples

        result = {'test_loss': test_loss, 'test_accuracy': test_accuracy}

        output_test_file = os.path.join(args.output_dir, "results_test.txt")
        with open(output_test_file, "w") as writer:
            logger.info("***** Test results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))

        output_test_file = os.path.join(args.output_dir, "logits_test.txt")
        with open(output_test_file, "w") as f:
            for i in range(len(logits_all)):
                for j in range(len(logits_all[i])):
                    f.write(str(logits_all[i][j]))
                    if j == len(logits_all[i]) - 1:
                        f.write("\n")
                    else:
                        f.write(" ")
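# Assumed module-level entry point for the C3 fine-tuning script above; the
# original guard is not shown in this excerpt, so this is a sketch.
if __name__ == "__main__":
    main()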