def main():
    parser = ArgumentParser()
    parser.add_argument('--data_name', default='albert', type=str)
    parser.add_argument("--file_num", type=int, default=10,
                        help="Number of pregenerated files (each with a different dynamic mask)")  # TODO: --file_num is not a good name for this
    parser.add_argument("--reduce_memory", action="store_true",
                        help="Store training data as on-disk memmaps to massively reduce memory usage")
    parser.add_argument("--epochs", type=int, default=4,
                        help="Number of epochs to train for")
    parser.add_argument('--share_type', default='all', type=str,
                        choices=['all', 'attention', 'ffn', 'None'])
    parser.add_argument('--num_eval_steps', default=100, type=int)
    parser.add_argument('--num_save_steps', default=200, type=int)
    parser.add_argument("--local_rank", type=int, default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
                        help="Number of update steps to accumulate before performing a backward/update pass.")
    parser.add_argument("--train_batch_size", default=4, type=int,
                        help="Total batch size for training.")
    parser.add_argument('--loss_scale', type=float, default=0,
                        help="Loss scaling to improve fp16 numeric stability. Only used when fp16 is set to True.\n"
                             "0 (default value): dynamic loss scaling.\n"
                             "Positive power of 2: static loss scaling value.\n")
    parser.add_argument("--warmup_proportion", default=0.1, type=float,
                        help="Proportion of total steps used for linear warmup.")
    parser.add_argument("--adam_epsilon", default=1e-8, type=float,
                        help="Epsilon for Adam optimizer.")
    parser.add_argument('--max_grad_norm', default=1.0, type=float)
    parser.add_argument("--learning_rate", default=0.00176, type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument('--seed', type=int, default=42,
                        help="random seed for initialization")
    parser.add_argument('--fp16_opt_level', type=str, default='O2',
                        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', 'O3']. "
                             "See details at https://nvidia.github.io/apex/amp.html")
    parser.add_argument('--fp16', action='store_true',
                        help="Whether to use 16-bit float precision instead of 32-bit")
    args = parser.parse_args()

    pregenerated_data = config['data_dir'] / "corpus/train"
    assert pregenerated_data.is_dir(), \
        "config['data_dir']/corpus/train should point to the folder of files made by prepare_lm_data_mask.py!"

    # Count the training examples across the pregenerated files.
    samples_per_epoch = 0
    for i in range(args.file_num):
        data_file = pregenerated_data / f"{args.data_name}_file_{i}.json"
        metrics_file = pregenerated_data / f"{args.data_name}_file_{i}_metrics.json"  # TODO: where is this file generated?
        if data_file.is_file() and metrics_file.is_file():
            metrics = json.loads(metrics_file.read_text())
            samples_per_epoch += metrics['num_training_examples']
        else:
            if i == 0:
                exit("No training data was found!")
            print(f"Warning! There are fewer pregenerated data files ({i}) than requested ({args.file_num}).")
            print("This script will loop over the available data, but training diversity may be negatively impacted.")
            break
    logger.info(f"samples_per_epoch: {samples_per_epoch}")

    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')
    logger.info(f"device: {device} , distributed training: {bool(args.local_rank != -1)}, "
                f"16-bits training: {args.fp16}, share_type: {args.share_type}")

    if args.gradient_accumulation_steps < 1:
        raise ValueError(
            f"Invalid gradient_accumulation_steps parameter: {args.gradient_accumulation_steps}, should be >= 1")
    args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps

    seed_everything(args.seed)
    tokenizer = BertTokenizer(vocab_file=config['albert_vocab_path'])

    total_train_examples = samples_per_epoch * args.epochs
    num_train_optimization_steps = int(
        total_train_examples / args.train_batch_size / args.gradient_accumulation_steps)
    if args.local_rank != -1:
        num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()
    args.warmup_steps = int(num_train_optimization_steps * args.warmup_proportion)

    bert_config = BertConfig.from_pretrained(str(config['albert_config_path']), share_type=args.share_type)
    model = BertForPreTraining(config=bert_config)
    # model = BertForMaskedLM.from_pretrained(config['checkpoint_dir'] / 'checkpoint-580000')
    model.to(device)

    # Prepare optimizer: no weight decay for biases and LayerNorm parameters.
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
         'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
         'weight_decay': 0.0}
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    # optimizer = Lamb(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    lr_scheduler = WarmupLinearSchedule(optimizer,
                                        warmup_steps=args.warmup_steps,
                                        t_total=num_train_optimization_steps)
    if args.fp16:
        try:
            from apex import amp
            model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")

    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank)

    global_step = 0
    mask_metric = LMAccuracy()
    sop_metric = LMAccuracy()
    tr_mask_acc = AverageMeter()
    tr_sop_acc = AverageMeter()
    tr_loss = AverageMeter()
    tr_mask_loss = AverageMeter()
    tr_sop_loss = AverageMeter()
    loss_fct = CrossEntropyLoss(ignore_index=-1)
    train_logs = {}

    logger.info("***** Running training *****")
    logger.info(f"  Num examples = {total_train_examples}")
    logger.info(f"  Batch size = {args.train_batch_size}")
    logger.info(f"  Num steps = {num_train_optimization_steps}")
    logger.info(f"  warmup_steps = {args.warmup_steps}")

    start_time = time.time()
    seed_everything(args.seed)  # Added here for reproducibility
    for epoch in range(args.epochs):
        for idx in range(args.file_num):
            epoch_dataset = PregeneratedDataset(file_id=idx,
                                                training_path=pregenerated_data,
                                                tokenizer=tokenizer,
                                                reduce_memory=args.reduce_memory,
                                                data_name=args.data_name)
            if args.local_rank == -1:
                train_sampler = RandomSampler(epoch_dataset)
            else:
                train_sampler = DistributedSampler(epoch_dataset)
            train_dataloader = DataLoader(epoch_dataset,
                                          sampler=train_sampler,
                                          batch_size=args.train_batch_size)
            model.train()
            nb_tr_examples, nb_tr_steps = 0, 0
            for step, batch in enumerate(train_dataloader):
                batch = tuple(t.to(device) for t in batch)
                input_ids, input_mask, segment_ids, lm_label_ids, is_next = batch
                outputs = model(input_ids=input_ids,
                                token_type_ids=segment_ids,
                                attention_mask=input_mask)
                prediction_scores = outputs[0]
                seq_relationship_score = outputs[1]

                masked_lm_loss = loss_fct(prediction_scores.view(-1, bert_config.vocab_size),
                                          lm_label_ids.view(-1))
                next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2),
                                              is_next.view(-1))
                loss = masked_lm_loss + next_sentence_loss

                mask_metric(logits=prediction_scores.view(-1, bert_config.vocab_size),
                            target=lm_label_ids.view(-1))
                sop_metric(logits=seq_relationship_score.view(-1, 2),
                           target=is_next.view(-1))

                if args.n_gpu > 1:
                    loss = loss.mean()  # mean() to average on multi-gpu.
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps
                if args.fp16:
                    with amp.scale_loss(loss, optimizer) as scaled_loss:
                        scaled_loss.backward()
                else:
                    loss.backward()

                nb_tr_steps += 1
                tr_mask_acc.update(mask_metric.value(), n=input_ids.size(0))
                tr_sop_acc.update(sop_metric.value(), n=input_ids.size(0))
                tr_loss.update(loss.item(), n=1)
                tr_mask_loss.update(masked_lm_loss.item(), n=1)
                tr_sop_loss.update(next_sentence_loss.item(), n=1)

                if (step + 1) % args.gradient_accumulation_steps == 0:
                    if args.fp16:
                        torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
                    else:
                        torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                    optimizer.step()
                    lr_scheduler.step()  # step the schedule after the optimizer update
                    optimizer.zero_grad()
                    global_step += 1

                if global_step % args.num_eval_steps == 0:
                    now = time.time()
                    elapsed = now - start_time  # time since the last log line, not a true ETA
                    if elapsed > 3600:
                        eta_format = '%d:%02d:%02d' % (elapsed // 3600, (elapsed % 3600) // 60, elapsed % 60)
                    elif elapsed > 60:
                        eta_format = '%d:%02d' % (elapsed // 60, elapsed % 60)
                    else:
                        eta_format = '%ds' % elapsed
                    train_logs['loss'] = tr_loss.avg
                    train_logs['mask_acc'] = tr_mask_acc.avg
                    train_logs['sop_acc'] = tr_sop_acc.avg
                    train_logs['mask_loss'] = tr_mask_loss.avg
                    train_logs['sop_loss'] = tr_sop_loss.avg
                    show_info = f'[Training]:[{epoch}/{args.epochs}]{global_step}/{num_train_optimization_steps} ' \
                                f'- ETA: {eta_format}' + "-".join(
                                    [f' {key}: {value:.4f} ' for key, value in train_logs.items()])
                    logger.info(show_info)
                    tr_mask_acc.reset()
                    tr_sop_acc.reset()
                    tr_loss.reset()
                    tr_mask_loss.reset()
                    tr_sop_loss.reset()
                    start_time = now

                if global_step % args.num_save_steps == 0:
                    if args.local_rank in [-1, 0] and args.num_save_steps > 0:
                        # Save model checkpoint
                        output_dir = config['checkpoint_dir'] / f'lm-checkpoint-{global_step}'
                        if not output_dir.exists():
                            output_dir.mkdir()
                        # save model
                        model_to_save = model.module if hasattr(model, 'module') else model  # Take care of distributed/parallel training
                        model_to_save.save_pretrained(str(output_dir))
                        torch.save(args, str(output_dir / 'training_args.bin'))
                        logger.info("Saving model checkpoint to %s", output_dir)
                        # save config
                        output_config_file = output_dir / CONFIG_NAME
                        with open(str(output_config_file), 'w') as f:
                            f.write(model_to_save.config.to_json_string())
                        # save vocab
                        tokenizer.save_vocabulary(output_dir)
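

# Assumed entry point: the original file's __main__ guard is not shown in this
# section, so this is only a minimal sketch of how main() would be invoked.
if __name__ == '__main__':
    main()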
def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
    n_gpu = torch.cuda.device_count()
    print("device: {}, n_gpu: {}, 16-bits training: {}".format(device, n_gpu, args.fp16))

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    if args.gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
            args.gradient_accumulation_steps))
    args.train_batch_size = \
        args.train_batch_size // args.gradient_accumulation_steps

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")
    if args.do_train:
        assert (args.train_file is not None) and (args.dev_file is not None)
    if args.eval_test:
        assert args.test_file is not None
    else:
        assert args.dev_file is not None

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    if args.do_train:
        logger.addHandler(logging.FileHandler(os.path.join(args.output_dir, "train.log"), 'w'))
    else:
        logger.addHandler(logging.FileHandler(os.path.join(args.output_dir, "eval.log"), 'w'))
    print(args)

    # tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    tokenizer = BertTokenizer(os.path.join(args.model, "vocab.txt"),
                              do_lower_case=False)  # BertWordPieceTokenizer would use lowercase=True

    if args.do_train or (not args.eval_test):
        with open(args.dev_file) as f:
            dataset_json = json.load(f)
        eval_dataset = dataset_json['data']
        eval_examples = read_squad_examples(
            input_file=args.dev_file, is_training=False,
            version_2_with_negative=args.version_2_with_negative)
        eval_features = convert_examples_to_features(
            examples=eval_examples,
            tokenizer=tokenizer,
            max_seq_length=args.max_seq_length,
            doc_stride=args.doc_stride,
            max_query_length=args.max_query_length,
            is_training=False)
        print("***** Dev *****")
        print("  Num orig examples = %d" % len(eval_examples))
        print("  Num split examples = %d" % len(eval_features))
        print("  Batch size = %d" % args.eval_batch_size)
        all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
        all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
        eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_example_index)
        eval_dataloader = DataLoader(eval_data, batch_size=args.eval_batch_size)

    if args.do_train:
        train_examples = read_squad_examples(
            input_file=args.train_file, is_training=True,
            version_2_with_negative=args.version_2_with_negative)
        train_features = convert_examples_to_features(
            examples=train_examples,
            tokenizer=tokenizer,
            max_seq_length=args.max_seq_length,
            doc_stride=args.doc_stride,
            max_query_length=args.max_query_length,
            is_training=True)

        if args.train_mode == 'sorted' or args.train_mode == 'random_sorted':
            train_features = sorted(train_features, key=lambda f: np.sum(f.input_mask))
        else:
            random.shuffle(train_features)

        all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
        all_start_positions = torch.tensor([f.start_position for f in train_features], dtype=torch.long)
        all_end_positions = torch.tensor([f.end_position for f in train_features], dtype=torch.long)
        train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids,
                                   all_start_positions, all_end_positions)
        train_dataloader = DataLoader(train_data, batch_size=args.train_batch_size)
        train_batches = [batch for batch in train_dataloader]

        num_train_optimization_steps = \
            len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
        print("***** Train *****")
        print("  Num orig examples = %d" % len(train_examples))
        print("  Num split examples = %d" % len(train_features))
        print("  Batch size = %d" % args.train_batch_size)
        print("  Num steps = %d" % num_train_optimization_steps)

        eval_step = max(1, len(train_batches) // args.eval_per_epoch)
        best_result = None
        # If no learning rate is given, sweep over a list of candidate learning rates.
        lrs = [args.learning_rate] if args.learning_rate else \
            [1e-6, 2e-6, 3e-6, 5e-6, 1e-5, 2e-5, 3e-5, 5e-5]
        for lr in lrs:
            model = BertForQuestionAnswering.from_pretrained(args.model)
            if args.fp16:
                model.half()
            model.to(device)
            if n_gpu > 1:
                model = torch.nn.DataParallel(model)

            # Prepare optimizer: drop the pooler and apply no weight decay to biases/LayerNorm.
            param_optimizer = list(model.named_parameters())
            param_optimizer = [n for n in param_optimizer if 'pooler' not in n[0]]
            no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
            optimizer_grouped_parameters = [
                {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                 'weight_decay': 0.01},
                {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
                 'weight_decay': 0.0}
            ]

            if args.fp16:
                try:
                    from apex.optimizers import FP16_Optimizer
                    from apex.optimizers import FusedAdam
                except ImportError:
                    raise ImportError("Please install apex from https://www.github.com/nvidia/apex "
                                      "to use distributed and fp16 training.")
                optimizer = FusedAdam(optimizer_grouped_parameters,
                                      lr=lr,
                                      bias_correction=False,
                                      max_grad_norm=1.0)
                if args.loss_scale == 0:
                    optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
                else:
                    optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
            else:
                optimizer = AdamW(optimizer_grouped_parameters, lr=lr)

            tr_loss = 0
            nb_tr_examples = 0
            nb_tr_steps = 0
            global_step = 0
            start_time = time.time()
            for epoch in range(int(args.num_train_epochs)):
                model.train()
                print("Start epoch #{} (lr = {})...".format(epoch, lr))
                if args.train_mode == 'random' or args.train_mode == 'random_sorted':
                    random.shuffle(train_batches)
                for step, batch in enumerate(train_batches):
                    if n_gpu == 1:
                        batch = tuple(t.to(device) for t in batch)
                    input_ids, input_mask, segment_ids, start_positions, end_positions = batch
                    # print('TENSORS', input_ids.size(), segment_ids.size(), input_mask.size(),
                    #       start_positions.size(), end_positions.size())
                    outputs = model(input_ids=input_ids,
                                    attention_mask=input_mask,
                                    token_type_ids=segment_ids,
                                    start_positions=start_positions,
                                    end_positions=end_positions,
                                    return_dict=False)
                    loss = outputs[0]
                    if n_gpu > 1:
                        loss = loss.mean()
                    if args.gradient_accumulation_steps > 1:
                        loss = loss / args.gradient_accumulation_steps

                    tr_loss += loss.item()
                    nb_tr_examples += input_ids.size(0)
                    nb_tr_steps += 1

                    if args.fp16:
                        optimizer.backward(loss)
                    else:
                        loss.backward()
                    if (step + 1) % args.gradient_accumulation_steps == 0:
                        if args.fp16:
                            # FusedAdam has no built-in schedule, so set the warmup-adjusted lr by hand.
                            lr_this_step = lr * \
                                warmup_linear(global_step / num_train_optimization_steps, args.warmup_proportion)
                            for param_group in optimizer.param_groups:
                                param_group['lr'] = lr_this_step
                        optimizer.step()
                        optimizer.zero_grad()
                        global_step += 1

                    if (step + 1) % eval_step == 0:
                        print('Epoch: {}, Step: {} / {}, used_time = {:.2f}s, loss = {:.6f}'.format(
                            epoch, step + 1, len(train_batches),
                            time.time() - start_time, tr_loss / nb_tr_steps))

                        save_model = False
                        if args.do_eval:
                            result, _, _ = \
                                evaluate(args, model, device, eval_dataset,
                                         eval_dataloader, eval_examples, eval_features)
                            model.train()
                            result['global_step'] = global_step
                            result['epoch'] = epoch
                            result['learning_rate'] = lr
                            result['batch_size'] = args.train_batch_size
                            if (best_result is None) or (result[args.eval_metric] > best_result[args.eval_metric]):
                                best_result = result
                                save_model = True
                                print("!!! Best dev %s (lr=%s, epoch=%d): %.2f" %
                                      (args.eval_metric, str(lr), epoch, result[args.eval_metric]))
                        else:
                            save_model = True
                        if save_model:
                            model_to_save = model.module if hasattr(model, 'module') else model
                            output_model_file = os.path.join(args.output_dir, "pytorch_model.bin")
                            output_config_file = os.path.join(args.output_dir, "config.json")
                            torch.save(model_to_save.state_dict(), output_model_file)
                            model_to_save.config.to_json_file(output_config_file)
                            tokenizer.save_vocabulary(args.output_dir)
                            if best_result:
                                with open(os.path.join(args.output_dir, "eval_results.txt"), "w") as writer:
                                    for key in sorted(best_result.keys()):
                                        writer.write("%s = %s\n" % (key, str(best_result[key])))

    if args.do_eval:
        if args.eval_test:
            with open(args.test_file) as f:
                dataset_json = json.load(f)
            eval_dataset = dataset_json['data']
            eval_examples = read_squad_examples(
                input_file=args.test_file, is_training=False,
                version_2_with_negative=args.version_2_with_negative)
            eval_features = convert_examples_to_features(
                examples=eval_examples,
                tokenizer=tokenizer,
                max_seq_length=args.max_seq_length,
                doc_stride=args.doc_stride,
                max_query_length=args.max_query_length,
                is_training=False)
            print("***** Test *****")
            print("  Num orig examples = %d" % len(eval_examples))
            print("  Num split examples = %d" % len(eval_features))
            print("  Batch size = %d" % args.eval_batch_size)
            all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
            all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
            all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
            all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
            eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_example_index)
            eval_dataloader = DataLoader(eval_data, batch_size=args.eval_batch_size)

        model = BertForQuestionAnswering.from_pretrained(args.output_dir)
        if args.fp16:
            model.half()
        model.to(device)

        na_prob_thresh = 1.0
        if args.version_2_with_negative:
            eval_result_file = os.path.join(args.output_dir, "eval_results.txt")
            if os.path.isfile(eval_result_file):
                with open(eval_result_file) as f:
                    for line in f.readlines():
                        if line.startswith('best_f1_thresh'):
                            na_prob_thresh = float(line.strip().split()[-1])
                            print("na_prob_thresh = %.6f" % na_prob_thresh)

        result, preds, _ = \
            evaluate(args, model, device, eval_dataset, eval_dataloader,
                     eval_examples, eval_features,
                     na_prob_thresh=na_prob_thresh,
                     pred_only=args.eval_test)
        with open(os.path.join(args.output_dir, "predictions.json"), "w") as writer:
            writer.write(json.dumps(preds, indent=4) + "\n")
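

# main(args) above expects an argparse.Namespace that is built elsewhere in the file.
# The parser below is only a sketch reconstructed from the attributes main() reads;
# every default value is an assumption, and the evaluate() helper may read further
# attributes that are not listed here.
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--model', type=str, required=True,
                        help="Path to the pretrained model directory (must contain vocab.txt)")
    parser.add_argument('--output_dir', type=str, required=True)
    parser.add_argument('--train_file', type=str, default=None)
    parser.add_argument('--dev_file', type=str, default=None)
    parser.add_argument('--test_file', type=str, default=None)
    parser.add_argument('--do_train', action='store_true')
    parser.add_argument('--do_eval', action='store_true')
    parser.add_argument('--eval_test', action='store_true')
    parser.add_argument('--eval_per_epoch', type=int, default=10)
    parser.add_argument('--eval_metric', type=str, default='f1')
    parser.add_argument('--train_mode', type=str, default='random_sorted',
                        choices=['sorted', 'random', 'random_sorted'])
    parser.add_argument('--train_batch_size', type=int, default=32)
    parser.add_argument('--eval_batch_size', type=int, default=8)
    parser.add_argument('--num_train_epochs', type=float, default=3.0)
    parser.add_argument('--learning_rate', type=float, default=None,
                        help="If unset, the script sweeps over a list of candidate learning rates")
    parser.add_argument('--warmup_proportion', type=float, default=0.1)
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1)
    parser.add_argument('--max_seq_length', type=int, default=384)
    parser.add_argument('--doc_stride', type=int, default=128)
    parser.add_argument('--max_query_length', type=int, default=64)
    parser.add_argument('--version_2_with_negative', action='store_true')
    parser.add_argument('--loss_scale', type=float, default=0)
    parser.add_argument('--fp16', action='store_true')
    parser.add_argument('--no_cuda', action='store_true')
    parser.add_argument('--seed', type=int, default=42)
    main(parser.parse_args())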
# Scratch exploration of the transformers BertTokenizer special-token API on the ATIS data.
y = "<BOS> embedding what is the flight number <EOS>"

tokenizer = BertTokenizer("data/atis/token.vocab",
                          bos_token="<BOS>",
                          eos_token="<EOS>",
                          model_max_length=50)
print(tokenizer.SPECIAL_TOKENS_ATTRIBUTES)
print(tokenizer.encode(y))
print(tokenizer.encode_plus(y))
print(tokenizer.prepare_for_model(tokenizer.encode(y), return_tensors="pt"))
ids = tokenizer.encode_plus(y)
print(tokenizer.decode(tokenizer.encode(y)))
tokenizer.save_pretrained("data/atis/save")
tokenizer.save_vocabulary("data/atis/save/saved")

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased", bos_token="<BOS>", eos_token="<EOS>")
print(tokenizer.tokenize("i like tea"))
special_tokens = {"bos_token": "<BOS>", "eos_token": "<EOS>"}
tokenizer.add_special_tokens(special_tokens)
print(tokenizer.bos_token_id)
print(tokenizer.eos_token_id)
print(tokenizer.all_special_ids)
print(tokenizer.special_tokens_map)
print(tokenizer.additional_special_tokens)
y = "<BOS> I like embeddings <EOS> [SEP] i like tea"
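
# Not part of the original snippet: if the tokens added above are fed to a model, the
# model's embedding matrix must be resized to cover the enlarged vocabulary. The model
# name below is only an illustration.
from transformers import BertModel

model = BertModel.from_pretrained("bert-base-uncased")
model.resize_token_embeddings(len(tokenizer))  # account for <BOS>/<EOS> added above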
- clinical BERT is used for the WordPiece vocabulary
"""
with open('data/vocab.pk', 'rb') as fd:
    vocab = pickle.load(fd)

# Metadata tokens (section headers, categories) occupy the tail of the pickled vocabulary.
split_pt = min(vocab.section_start_vocab_id, vocab.category_start_vocab_id)
metadata_tokens = vocab.i2w[split_pt:] + ['digitparsed']

with open('data/clinic_bert_vocab.txt', 'r') as fd:
    clinic_bert_tokens = fd.readlines()
clinic_bert_tokens = list(set(map(_standardize, clinic_bert_tokens)))

# Write a combined WordPiece vocabulary: clinical BERT tokens plus the metadata tokens.
vocab_fn = 'data/clinic_bert_plus_metadata_vocab.txt'
with open(vocab_fn, 'w') as fd:
    fd.write('\n'.join(clinic_bert_tokens + metadata_tokens))

tokenizer = BertTokenizer(vocab_fn, never_split=metadata_tokens, do_basic_tokenize=False)
# Add metadata as `additional_special_tokens` so that they do not get subdivided into word pieces
special_tokens_dict = {
    'cls_token': '[CLS]', 'sep_token': '[SEP]', 'unk_token': '[UNK]',
    'bos_token': '[BOS]', 'eos_token': '[EOS]', 'pad_token': '[PAD]',
    'mask_token': '[MASK]', 'additional_special_tokens': metadata_tokens
}
num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
print('We have added', num_added_toks, 'tokens')

example_sentence = 'header=HPI example clinical text with tricky autoimmunihistory word'
print('Tokenization of sentence: {}'.format(example_sentence))
print(tokenizer.tokenize(example_sentence))

out_fn = 'data/bert_tokenizer_vocab.pth'
print('Generated BERT tokenizer vocabulary and saving to {}'.format(out_fn))
tokenizer.save_vocabulary(out_fn)
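
# Quick sanity check (not in the original script): because the metadata tokens are
# registered as additional_special_tokens (and listed in never_split), the tokenizer
# should keep each of them as a single piece rather than splitting into word pieces.
for tok in metadata_tokens[:5]:
    pieces = tokenizer.tokenize(tok)
    print(tok, '->', pieces)  # expected: one unsplit token per metadata entry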