def main(): parser = argparse.ArgumentParser() ## Required parameters parser.add_argument( "--data_dir", default=None, type=str, required=True, help= "The input data dir. Should contain the .tsv files (or other data files) for the task." ) parser.add_argument("--ernie_model", default=None, type=str, required=True, help="Ernie pre-trained model") parser.add_argument( "--output_dir", default=None, type=str, required=True, help= "The output directory where the model predictions and checkpoints will be written." ) parser.add_argument("--model_name_or_path", default='/data1', type=str) ## Other parameters parser.add_argument( "--max_seq_length", default=128, type=int, help= "The maximum total input sequence length after WordPiece tokenization. \n" "Sequences longer than this will be truncated, and sequences shorter \n" "than this will be padded.") parser.add_argument("--do_train", default=False, action='store_true', help="Whether to run training.") parser.add_argument("--do_eval", default=False, action='store_true', help="Whether to run eval on the dev set.") parser.add_argument( "--do_lower_case", default=False, action='store_true', help="Set this flag if you are using an uncased model.") parser.add_argument("--train_batch_size", default=32, type=int, help="Total batch size for training.") parser.add_argument("--eval_batch_size", default=8, type=int, help="Total batch size for eval.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.") parser.add_argument( "--warmup_proportion", default=0.1, type=float, help= "Proportion of training to perform linear learning rate warmup for. " "E.g., 0.1 = 10%% of training.") parser.add_argument("--no_cuda", default=False, action='store_true', help="Whether not to use CUDA when available") parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument( '--gradient_accumulation_steps', type=int, default=1, help= "Number of updates steps to accumulate before performing a backward/update pass." ) parser.add_argument( '--fp16', default=False, action='store_true', help="Whether to use 16-bit float precision instead of 32-bit") parser.add_argument( '--loss_scale', type=float, default=0, help= "Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n" "0 (default value): dynamic loss scaling.\n" "Positive power of 2: static loss scaling value.\n") parser.add_argument('--threshold', type=float, default=.3) args = parser.parse_args() if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") n_gpu = torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) n_gpu = 1 # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.distributed.init_process_group(backend='nccl') logger.info( "device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}". 
format(device, n_gpu, bool(args.local_rank != -1), args.fp16)) if args.gradient_accumulation_steps < 1: raise ValueError( "Invalid gradient_accumulation_steps parameter: {}, should be >= 1" .format(args.gradient_accumulation_steps)) random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if n_gpu > 0: torch.cuda.manual_seed_all(args.seed) processor = TypingProcessor() tokenizer_label = BertTokenizer_label.from_pretrained( args.ernie_model, do_lower_case=args.do_lower_case) tokenizer = BertTokenizer.from_pretrained(args.ernie_model, do_lower_case=args.do_lower_case) _, label_list, _ = processor.get_train_examples(args.data_dir) label_list = sorted(label_list) #class_weight = [min(d[x], 100) for x in label_list] #logger.info(class_weight) # S = [] # for l in label_list: # s = [] # for ll in label_list: # if ll in l: # s.append(1.) # else: # s.append(0.) # S.append(s) # vecs = [] # vecs.append([0]*100) # with open("kg_embed/entity2vec.vec", 'r') as fin: # for line in fin: # vec = line.strip().split('\t') # vec = [float(x) for x in vec] # vecs.append(vec) # embed = torch.FloatTensor(vecs) # embed = torch.nn.Embedding.from_pretrained(embed) # logger.info("Shape of entity embedding: "+str(embed.weight.size())) # del vecs filenames = os.listdir(args.output_dir) filenames = [x for x in filenames if "pytorch_model.bin_" in x] file_mark = [] for x in filenames: file_mark.append([x, True]) file_mark.append([x, False]) for x, mark in file_mark: print(x, mark) output_model_file = os.path.join(args.output_dir, x) model_state_dict = torch.load(output_model_file) bert_model = BertModel.from_pretrained(args.model_name_or_path) model = BertForEntityTyping(bert_model, len(label_list)) model.load_state_dict(model_state_dict) #model, _ = BertForEntityTyping.from_pretrained(args.ernie_model, state_dict=model_state_dict, num_labels=len(label_list)) model.to(device) if mark: eval_examples = processor.get_dev_examples(args.data_dir) else: eval_examples = processor.get_test_examples(args.data_dir) eval_features = convert_examples_to_features(eval_examples, label_list, args.max_seq_length, tokenizer_label, tokenizer, args.threshold) logger.info("***** Running evaluation *****") logger.info(" Num examples = %d", len(eval_examples)) logger.info(" Batch size = %d", args.eval_batch_size) # zeros = [0 for _ in range(args.max_seq_length)] # zeros_ent = [0 for _ in range(100)] # zeros_ent = [zeros_ent for _ in range(args.max_seq_length)] all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long) all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long) all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long) all_span_mask = torch.tensor([f.span_mask for f in eval_features], dtype=torch.float) all_labels = torch.tensor([f.labels for f in eval_features], dtype=torch.float) eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_span_mask, all_labels) # Run prediction for full data eval_sampler = SequentialSampler(eval_data) eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size) model.eval() eval_loss, eval_accuracy = 0, 0 nb_eval_steps, nb_eval_examples = 0, 0 pred = [] true = [] for input_ids, input_mask, segment_ids, span_mask, labels in eval_dataloader: input_ids = input_ids.to(device) input_mask = input_mask.to(device) segment_ids = segment_ids.to(device) span_mask = span_mask.to(device) labels = labels.to(device) with torch.no_grad(): tmp_eval_loss = 
model(input_ids, segment_ids, input_mask, span_mask, labels)
                logits = model(input_ids, segment_ids, input_mask, span_mask)

            logits = logits.detach().cpu().numpy()
            labels = labels.to('cpu').numpy()
            tmp_eval_accuracy, tmp_pred, tmp_true = accuracy(logits, labels)
            pred.extend(tmp_pred)
            true.extend(tmp_true)

            eval_loss += tmp_eval_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples

        def f1(p, r):
            if r == 0.:
                return 0.
            return 2 * p * r / float(p + r)

        def loose_macro(true, pred):
            num_entities = len(true)
            p = 0.
            r = 0.
            for true_labels, predicted_labels in zip(true, pred):
                if len(predicted_labels) > 0:
                    p += len(set(predicted_labels).intersection(
                        set(true_labels))) / float(len(predicted_labels))
                if len(true_labels):
                    r += len(set(predicted_labels).intersection(
                        set(true_labels))) / float(len(true_labels))
            precision = p / num_entities
            recall = r / num_entities
            return precision, recall, f1(precision, recall)

        def loose_micro(true, pred):
            num_predicted_labels = 0.
            num_true_labels = 0.
            num_correct_labels = 0.
            for true_labels, predicted_labels in zip(true, pred):
                num_predicted_labels += len(predicted_labels)
                num_true_labels += len(true_labels)
                num_correct_labels += len(
                    set(predicted_labels).intersection(set(true_labels)))
            if num_predicted_labels > 0:
                precision = num_correct_labels / num_predicted_labels
            else:
                precision = 0.
            recall = num_correct_labels / num_true_labels
            return precision, recall, f1(precision, recall)

        result = {
            'eval_loss': eval_loss,
            'eval_accuracy': eval_accuracy,
            'macro': loose_macro(true, pred),
            'micro': loose_micro(true, pred)
        }

        if mark:
            output_eval_file = os.path.join(
                args.output_dir, "eval_results_{}.txt".format(x.split("_")[-1]))
        else:
            output_eval_file = os.path.join(
                args.output_dir, "test_results_{}.txt".format(x.split("_")[-1]))
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
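
# Added illustration (not part of the original script): the nested f1 / loose_macro /
# loose_micro definitions above operate on per-mention label sets. This standalone toy
# example mirrors those definitions so their behaviour can be sanity-checked in
# isolation; the label names are made up.
def _demo_loose_metrics():
    def f1(p, r):
        return 0. if p + r == 0. else 2 * p * r / (p + r)

    true = [{"person", "artist"}, {"location"}]
    pred = [{"person"}, {"location", "city"}]

    # loose macro: average per-mention precision and recall
    p = sum(len(t & pr) / len(pr) for t, pr in zip(true, pred) if pr) / len(true)
    r = sum(len(t & pr) / len(t) for t, pr in zip(true, pred) if t) / len(true)
    print("loose macro:", p, r, f1(p, r))                # 0.75, 0.75, 0.75

    # loose micro: pool intersection / prediction / gold counts over all mentions
    correct = sum(len(t & pr) for t, pr in zip(true, pred))
    p_mi = correct / sum(len(pr) for pr in pred)
    r_mi = correct / sum(len(t) for t in true)
    print("loose micro:", p_mi, r_mi, f1(p_mi, r_mi))    # ~0.667 each
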
def main(): parser = argparse.ArgumentParser() ## Required parameters parser.add_argument( "--data_dir", default=None, type=str, required=True, help= "The input data dir. Should contain the .tsv files (or other data files) for the task." ) parser.add_argument("--ernie_model", default=None, type=str, required=True, help="Ernie pre-trained model") parser.add_argument( "--output_dir", default=None, type=str, required=True, help= "The output directory where the model predictions and checkpoints will be written." ) ## Other parameters parser.add_argument( "--max_seq_length", default=128, type=int, help= "The maximum total input sequence length after WordPiece tokenization. \n" "Sequences longer than this will be truncated, and sequences shorter \n" "than this will be padded.") parser.add_argument("--do_train", default=False, action='store_true', help="Whether to run training.") parser.add_argument("--do_eval", default=False, action='store_true', help="Whether to run eval on the dev set.") parser.add_argument( "--do_lower_case", default=False, action='store_true', help="Set this flag if you are using an uncased model.") parser.add_argument("--train_batch_size", default=16, type=int, help="Total batch size for training.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.") parser.add_argument( "--warmup_proportion", default=0.1, type=float, help= "Proportion of training to perform linear learning rate warmup for. " "E.g., 0.1 = 10%% of training.") parser.add_argument("--no_cuda", default=False, action='store_true', help="Whether not to use CUDA when available") parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument( '--gradient_accumulation_steps', type=int, default=1, help= "Number of updates steps to accumulate before performing a backward/update pass." ) parser.add_argument( '--fp16', default=False, action='store_true', help="Whether to use 16-bit float precision instead of 32-bit") parser.add_argument( '--loss_scale', type=float, default=0, help= "Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n" "0 (default value): dynamic loss scaling.\n" "Positive power of 2: static loss scaling value.\n") parser.add_argument('--threshold', type=float, default=.3) parser.add_argument("--vec_file", default=None, type=str, required=True, help="File with embeddings") parser.add_argument("--qid_file", default=None, type=str, required=True, help="File with qid mapping") parser.add_argument("--use_lim_ents", default=None, type=str, required=True, help="Whether to use limited entities") args = parser.parse_args() processors = FewrelProcessor num_labels_task = 80 if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") n_gpu = torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) n_gpu = 1 # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.distributed.init_process_group(backend='nccl') logger.info( "device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}". 
format(device, n_gpu, bool(args.local_rank != -1), args.fp16)) if args.gradient_accumulation_steps < 1: raise ValueError( "Invalid gradient_accumulation_steps parameter: {}, should be >= 1" .format(args.gradient_accumulation_steps)) args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps) random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if n_gpu > 0: torch.cuda.manual_seed_all(args.seed) if not args.do_train: raise ValueError( "At least one of `do_train` or `do_eval` must be True.") if os.path.exists(args.output_dir) and os.listdir( args.output_dir) and args.do_train: raise ValueError( "Output directory ({}) already exists and is not empty.".format( args.output_dir)) os.makedirs(args.output_dir, exist_ok=True) processor = processors() num_labels = num_labels_task label_list = None tokenizer = BertTokenizer.from_pretrained(args.ernie_model, do_lower_case=args.do_lower_case) train_examples = None num_train_steps = None train_examples, label_list = processor.get_train_examples(args.data_dir) num_train_steps = int( len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps * args.num_train_epochs) # Prepare model model, _ = BertForSequenceClassification.from_pretrained( args.ernie_model, cache_dir=PYTORCH_PRETRAINED_BERT_CACHE / 'distributed_{}'.format(args.local_rank), num_labels=num_labels) # if args.fp16: # model.half() model.to(device) if args.local_rank != -1: try: from apex.parallel import DistributedDataParallel as DDP except ImportError: raise ImportError( "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training." ) model = DDP(model) elif n_gpu > 1: model = torch.nn.DataParallel(model) # Prepare optimizer param_optimizer = list(model.named_parameters()) no_grad = [ 'bert.encoder.layer.11.output.dense_ent', 'bert.encoder.layer.11.output.LayerNorm_ent' ] param_optimizer = [(n, p) for n, p in param_optimizer if not any(nd in n for nd in no_grad)] no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [{ 'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01 }, { 'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0 }] t_total = num_train_steps if args.local_rank != -1: t_total = t_total // torch.distributed.get_world_size() if args.fp16: try: #from apex.optimizers import FP16_Optimizer from apex.optimizers import FusedAdam from apex import amp except ImportError: raise ImportError( "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training." ) optimizer = FusedAdam(optimizer_grouped_parameters, lr=args.learning_rate, bias_correction=False) if args.loss_scale == 0: model, optimizer = amp.initialize(model, optimizer, opt_level="O2") # optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True) else: model, optimizer = amp.initialize(model, optimizer, opt_level="O2", loss_scale=args.loss_scale) else: optimizer = BertAdam(optimizer_grouped_parameters, lr=args.learning_rate, warmup=args.warmup_proportion, t_total=t_total) global_step = 0 if args.do_train: train_features = convert_examples_to_features( train_examples, label_list, args.max_seq_length, tokenizer, args.threshold, args.qid_file) # check for limited ents lim_ents = [] lim_check = (args.use_lim_ents == "y") if lim_check: lim_ents = lim_ent_map(0, "kg_embeddings/dbp_eid_2_wd_eid.txt") logger.info( "Limited entities flag is on. 
Count of unique entities considered: " + str(len(lim_ents))) vecs = [] vecs.append([0] * 100) # CLS lineindex = 1 uid_map = {} logger.info("Reading embeddings file.") with open(args.vec_file, 'r') as fin: for line in fin: vec = line.strip().split('\t') # first element is unique id uniqid = int(vec[0]) # map line index to unique id uid_map[uniqid] = lineindex # increment line index lineindex = lineindex + 1 if (lim_check and (uniqid in lim_ents)) or not lim_check: vec = [float(x) for x in vec[1:101]] else: vec = vecs[0] vecs.append(vec) embed = torch.FloatTensor(vecs) embed = torch.nn.Embedding.from_pretrained(embed) #embed = torch.nn.Embedding(5041175, 100) logger.info("Shape of entity embedding: " + str(embed.weight.size())) del vecs logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_examples)) logger.info(" Batch size = %d", args.train_batch_size) logger.info(" Num steps = %d", num_train_steps) all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long) all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long) all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long) all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long) all_ent = torch.tensor([f.input_ent for f in train_features], dtype=torch.long) all_ent_masks = torch.tensor([f.ent_mask for f in train_features], dtype=torch.long) train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_ent, all_ent_masks, all_label_ids) if args.local_rank == -1: train_sampler = RandomSampler(train_data) else: train_sampler = DistributedSampler(train_data) train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size) output_loss_file = os.path.join(args.output_dir, "loss") loss_fout = open(output_loss_file, 'w') model.train() for _ in trange(int(args.num_train_epochs), desc="Epoch"): tr_loss = 0 nb_tr_examples, nb_tr_steps = 0, 0 for step, batch in enumerate( tqdm(train_dataloader, desc="Iteration")): batch = tuple( t.to(device) if i != 3 else t for i, t in enumerate(batch)) input_ids, input_mask, segment_ids, input_ent, ent_mask, label_ids = batch input_ent = embed(input_ent + 1).to(device) # -1 -> 0 loss = model(input_ids, segment_ids, input_mask, input_ent.half(), ent_mask, label_ids) if n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu. if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: try: from apex import amp except ImportError: raise ImportError( "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training." 
)
                    with amp.scale_loss(loss, optimizer) as scaled_loss:
                        scaled_loss.backward()
                else:
                    loss.backward()

                loss_fout.write("{}\n".format(loss.item()))
                tr_loss += loss.item()
                nb_tr_examples += input_ids.size(0)
                nb_tr_steps += 1
                if (step + 1) % args.gradient_accumulation_steps == 0:
                    # modify learning rate with special warm up BERT uses
                    lr_this_step = args.learning_rate * warmup_linear(
                        global_step / t_total, args.warmup_proportion)
                    for param_group in optimizer.param_groups:
                        param_group['lr'] = lr_this_step
                    optimizer.step()
                    optimizer.zero_grad()
                    global_step += 1

            model_to_save = model.module if hasattr(model, 'module') else model
            output_model_file = os.path.join(
                args.output_dir, "pytorch_model.bin_{}".format(global_step))
            torch.save(model_to_save.state_dict(), output_model_file)

        # Save a trained model
        model_to_save = model.module if hasattr(
            model, 'module') else model  # Only save the model itself
        output_model_file = os.path.join(args.output_dir, "pytorch_model.bin")
        torch.save(model_to_save.state_dict(), output_model_file)
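
# Added sketch (assumption-flagged, not in the original script): in the training loop
# above, entity ids use -1 for padding and row 0 of the embedding table built from
# args.vec_file is an all-zero vector, so `embed(input_ent + 1)` sends padded positions
# to zeros and real ids to their rows. A minimal check of that convention:
def _demo_entity_padding_lookup():
    import torch
    table = torch.zeros(4, 100)            # row 0 reserved for padding / CLS
    table[1:] = torch.randn(3, 100)        # rows 1..3 stand in for real entity vectors
    embed = torch.nn.Embedding.from_pretrained(table)
    input_ent = torch.tensor([[2, -1, -1]])    # one real entity id, two padded slots
    out = embed(input_ent + 1)                 # shift -1 -> 0 before the lookup
    assert torch.all(out[0, 1:] == 0)          # padded slots come back as zero vectors
    return out.shape                           # torch.Size([1, 3, 100])
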
def main(): parser = argparse.ArgumentParser() ## Required parameters parser.add_argument( "--data_dir", default=None, type=str, required=True, help= "The input data dir. Should contain the .tsv files (or other data files) for the task." ) parser.add_argument("--ernie_model", default=None, type=str, required=True, help="Ernie pre-trained model") parser.add_argument( "--output_dir", default=None, type=str, required=True, help= "The output directory where the model predictions and checkpoints will be written." ) ## Other parameters parser.add_argument( "--max_seq_length", default=128, type=int, help= "The maximum total input sequence length after WordPiece tokenization. \n" "Sequences longer than this will be truncated, and sequences shorter \n" "than this will be padded.") parser.add_argument("--do_train", default=False, action='store_true', help="Whether to run training.") parser.add_argument("--do_eval", default=False, action='store_true', help="Whether to run eval on the dev set.") parser.add_argument( "--do_lower_case", default=False, action='store_true', help="Set this flag if you are using an uncased model.") parser.add_argument("--train_batch_size", default=32, type=int, help="Total batch size for training.") parser.add_argument("--eval_batch_size", default=8, type=int, help="Total batch size for eval.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.") parser.add_argument( "--warmup_proportion", default=0.1, type=float, help= "Proportion of training to perform linear learning rate warmup for. " "E.g., 0.1 = 10%% of training.") parser.add_argument("--no_cuda", default=False, action='store_true', help="Whether not to use CUDA when available") parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument( '--gradient_accumulation_steps', type=int, default=1, help= "Number of updates steps to accumulate before performing a backward/update pass." ) parser.add_argument( '--fp16', default=False, action='store_true', help="Whether to use 16-bit float precision instead of 32-bit") parser.add_argument( '--loss_scale', type=float, default=0, help= "Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n" "0 (default value): dynamic loss scaling.\n" "Positive power of 2: static loss scaling value.\n") parser.add_argument('--threshold', type=float, default=.3) parser.add_argument("--vec_file", default=None, type=str, required=True, help="File with embeddings") parser.add_argument("--qid_file", default=None, type=str, required=True, help="File with qid mapping") parser.add_argument("--use_lim_ents", default=None, type=str, required=True, help="Whether to use limited entities") args = parser.parse_args() processors = FewrelProcessor num_labels_task = 80 if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") n_gpu = torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) n_gpu = 1 # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.distributed.init_process_group(backend='nccl') logger.info( "device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}". 
format(device, n_gpu, bool(args.local_rank != -1), args.fp16)) if args.gradient_accumulation_steps < 1: raise ValueError( "Invalid gradient_accumulation_steps parameter: {}, should be >= 1" .format(args.gradient_accumulation_steps)) random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if n_gpu > 0: torch.cuda.manual_seed_all(args.seed) if not args.do_train and not args.do_eval: raise ValueError( "At least one of `do_train` or `do_eval` must be True.") processor = processors() num_labels = num_labels_task label_list = None tokenizer = BertTokenizer.from_pretrained(args.ernie_model, do_lower_case=args.do_lower_case) train_examples = None num_train_steps = None train_examples, label_list = processor.get_train_examples(args.data_dir) # check for limited ents lim_ents = [] lim_check = (args.use_lim_ents == "y") if lim_check: lim_ents = lim_ent_map(0, "kg_embeddings/dbp_eid_2_wd_eid.txt") logger.info( "Limited entities flag is on. Count of unique entities considered: " + str(len(lim_ents))) vecs = [] vecs.append([0] * 100) # CLS lineindex = 1 uid_map = {} logger.info("Reading embeddings file.") with open(args.vec_file, 'r') as fin: for line in fin: vec = line.strip().split('\t') # first element is unique id uniqid = int(vec[0]) # map line index to unique id uid_map[uniqid] = lineindex # increment line index lineindex = lineindex + 1 if (lim_check and (uniqid in lim_ents)) or not lim_check: vec = [float(x) for x in vec[1:101]] else: vec = vecs[0] vecs.append(vec) embed = torch.FloatTensor(vecs) embed = torch.nn.Embedding.from_pretrained(embed) #embed = torch.nn.Embedding(5041175, 100) logger.info("Shape of entity embedding: " + str(embed.weight.size())) del vecs filenames = os.listdir(args.output_dir) filenames = [x for x in filenames if "pytorch_model.bin_" in x] file_mark = [] for x in filenames: file_mark.append([x, True]) file_mark.append([x, False]) eval_examples = processor.get_dev_examples(args.data_dir) dev = convert_examples_to_features(eval_examples, label_list, args.max_seq_length, tokenizer, args.threshold, args.qid_file) eval_examples = processor.get_test_examples(args.data_dir) test = convert_examples_to_features(eval_examples, label_list, args.max_seq_length, tokenizer, args.threshold, args.qid_file) for x, mark in file_mark: print(x, mark) output_model_file = os.path.join(args.output_dir, x) model_state_dict = torch.load(output_model_file) model, _ = BertForSequenceClassification.from_pretrained( args.ernie_model, state_dict=model_state_dict, num_labels=len(label_list)) model.to(device) if mark: eval_features = dev else: eval_features = test logger.info("***** Running evaluation *****") logger.info(" Num examples = %d", len(eval_examples)) logger.info(" Batch size = %d", args.eval_batch_size) # zeros = [0 for _ in range(args.max_seq_length)] # zeros_ent = [0 for _ in range(100)] # zeros_ent = [zeros_ent for _ in range(args.max_seq_length)] all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long) all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long) all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long) all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long) all_ent = torch.tensor([f.input_ent for f in eval_features], dtype=torch.long) all_ent_masks = torch.tensor([f.ent_mask for f in eval_features], dtype=torch.long) eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_ent, all_ent_masks, all_label_ids) # Run 
prediction for full data eval_sampler = SequentialSampler(eval_data) eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size) if mark: output_eval_file = os.path.join( args.output_dir, "eval_results_{}.txt".format(x.split("_")[-1])) output_file_pred = os.path.join( args.output_dir, "eval_pred_{}.txt".format(x.split("_")[-1])) output_file_glod = os.path.join( args.output_dir, "eval_gold_{}.txt".format(x.split("_")[-1])) else: output_eval_file = os.path.join( args.output_dir, "test_results_{}.txt".format(x.split("_")[-1])) output_file_pred = os.path.join( args.output_dir, "test_pred_{}.txt".format(x.split("_")[-1])) output_file_glod = os.path.join( args.output_dir, "test_gold_{}.txt".format(x.split("_")[-1])) fpred = open(output_file_pred, "w") fgold = open(output_file_glod, "w") model.eval() eval_loss, eval_accuracy = 0, 0 nb_eval_steps, nb_eval_examples = 0, 0 for input_ids, input_mask, segment_ids, input_ent, ent_mask, label_ids in eval_dataloader: input_ent = embed(input_ent + 1) # -1 -> 0 input_ids = input_ids.to(device) input_mask = input_mask.to(device) segment_ids = segment_ids.to(device) input_ent = input_ent.to(device) ent_mask = ent_mask.to(device) label_ids = label_ids.to(device) with torch.no_grad(): tmp_eval_loss = model(input_ids, segment_ids, input_mask, input_ent, ent_mask, label_ids) logits = model(input_ids, segment_ids, input_mask, input_ent, ent_mask) logits = logits.detach().cpu().numpy() label_ids = label_ids.to('cpu').numpy() tmp_eval_accuracy, pred = accuracy(logits, label_ids) for a, b in zip(pred, label_ids): fgold.write("{}\n".format(b)) fpred.write("{}\n".format(a)) eval_loss += tmp_eval_loss.mean().item() eval_accuracy += tmp_eval_accuracy nb_eval_examples += input_ids.size(0) nb_eval_steps += 1 eval_loss = eval_loss / nb_eval_steps eval_accuracy = eval_accuracy / nb_eval_examples result = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy} with open(output_eval_file, "w") as writer: logger.info("***** Eval results *****") for key in sorted(result.keys()): logger.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key])))
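
# Hedged sketch of the `accuracy` helper called above; its definition sits outside this
# excerpt, so this is an assumption based on how its return values are used (a correct
# count that is later divided by nb_eval_examples, plus per-example predictions written
# to the *_pred_* / *_gold_* files).
def _accuracy_sketch(logits, label_ids):
    import numpy as np
    preds = np.argmax(logits, axis=1)             # (batch,) predicted class ids
    n_correct = int((preds == label_ids).sum())   # summed here, normalised by the caller
    return n_correct, preds
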
def main(): parser = argparse.ArgumentParser() ## Required parameters parser.add_argument( "--data_dir", default=None, type=str, required=True, help= "The input data dir. Should contain the .tsv files (or other data files) for the task." ) parser.add_argument("--ernie_model", default=None, type=str, required=True, help="Ernie pre-trained model") parser.add_argument( "--output_dir", default=None, type=str, required=True, help= "The output directory where the model predictions and checkpoints will be written." ) ## Other parameters parser.add_argument( "--max_seq_length", default=128, type=int, help= "The maximum total input sequence length after WordPiece tokenization. \n" "Sequences longer than this will be truncated, and sequences shorter \n" "than this will be padded.") parser.add_argument("--do_train", default=False, action='store_true', help="Whether to run training.") parser.add_argument("--do_eval", default=False, action='store_true', help="Whether to run eval on the dev set.") parser.add_argument( "--do_lower_case", default=False, action='store_true', help="Set this flag if you are using an uncased model.") parser.add_argument("--train_batch_size", default=32, type=int, help="Total batch size for training.") parser.add_argument("--eval_batch_size", default=8, type=int, help="Total batch size for eval.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.") parser.add_argument( "--warmup_proportion", default=0.1, type=float, help= "Proportion of training to perform linear learning rate warmup for. " "E.g., 0.1 = 10%% of training.") parser.add_argument("--no_cuda", default=False, action='store_true', help="Whether not to use CUDA when available") parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument( '--gradient_accumulation_steps', type=int, default=1, help= "Number of updates steps to accumulate before performing a backward/update pass." ) parser.add_argument( '--fp16', default=False, action='store_true', help="Whether to use 16-bit float precision instead of 32-bit") parser.add_argument( '--loss_scale', type=float, default=0, help= "Loss scaling to improve fp16 numeric stability. 
Only used when fp16 set to True.\n" "0 (default value): dynamic loss scaling.\n" "Positive power of 2: static loss scaling value.\n") parser.add_argument('--threshold', type=float, default=.3) ##########ADD## parser.add_argument("--K_V_dim", type=int, default=100, help="Key and Value dim == KG representation dim") parser.add_argument( "--Q_dim", type=int, default=768, help="Query dim == Bert six output layer representation dim") parser.add_argument( '--graphsage', default=False, action='store_true', help="Whether to use Attention GraphSage instead of GAT") parser.add_argument('--self_att', default=True, action='store_true', help="Whether to use GAT") ############### args = parser.parse_args() processors = TacredProcessor num_labels_task = 42 if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") n_gpu = torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) n_gpu = 1 # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.distributed.init_process_group(backend='nccl') logger.info( "device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}". format(device, n_gpu, bool(args.local_rank != -1), args.fp16)) if args.gradient_accumulation_steps < 1: raise ValueError( "Invalid gradient_accumulation_steps parameter: {}, should be >= 1" .format(args.gradient_accumulation_steps)) random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if n_gpu > 0: torch.cuda.manual_seed_all(args.seed) if not args.do_train and not args.do_eval: raise ValueError( "At least one of `do_train` or `do_eval` must be True.") processor = processors() num_labels = num_labels_task label_list = None tokenizer = BertTokenizer.from_pretrained(args.ernie_model, do_lower_case=args.do_lower_case) train_examples = None num_train_steps = None train_examples, label_list = processor.get_train_examples(args.data_dir) label_list = sorted(label_list) ''' vecs = [] vecs.append([0]*100) with open("kg_embed/entity2vec.vec", 'r') as fin: for line in fin: vec = line.strip().split('\t') vec = [float(x) for x in vec] vecs.append(vec) embed = torch.FloatTensor(vecs) embed = torch.nn.Embedding.from_pretrained(embed) logger.info("Shape of entity embedding: "+str(embed.weight.size())) del vecs ''' filenames = os.listdir(args.output_dir) filenames = [x for x in filenames if "pytorch_model.bin_" in x] file_mark = [] for x in filenames: file_mark.append([x, True]) file_mark.append([x, False]) eval_examples = processor.get_dev_examples(args.data_dir) dev = convert_examples_to_features(eval_examples, label_list, args.max_seq_length, tokenizer, args.threshold) eval_examples = processor.get_test_examples(args.data_dir) test = convert_examples_to_features(eval_examples, label_list, args.max_seq_length, tokenizer, args.threshold) for x, mark in file_mark: print(x, mark) output_model_file = os.path.join(args.output_dir, x) model_state_dict = torch.load(output_model_file) model, _ = BertForSequenceClassification.from_pretrained( args.ernie_model, state_dict=model_state_dict, num_labels=len(label_list), args=args) if args.fp16: model.half() model.to(device) if mark: eval_features = dev output_file = os.path.join( args.output_dir, "eval_pred_{}.txt".format(x.split("_")[-1])) output_file_ = os.path.join( args.output_dir, "eval_gold_{}.txt".format(x.split("_")[-1])) else: eval_features = test output_file = os.path.join( args.output_dir, 
"test_pred_{}.txt".format(x.split("_")[-1])) output_file_ = os.path.join( args.output_dir, "test_gold_{}.txt".format(x.split("_")[-1])) fpred = open(output_file, "w") fgold = open(output_file_, "w") logger.info("***** Running evaluation *****") logger.info(" Num examples = %d", len(eval_examples)) logger.info(" Batch size = %d", args.eval_batch_size) # zeros = [0 for _ in range(args.max_seq_length)] # zeros_ent = [0 for _ in range(100)] # zeros_ent = [zeros_ent for _ in range(args.max_seq_length)] all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long) all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long) all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long) all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long) all_ent = torch.tensor([f.input_ent for f in eval_features], dtype=torch.long) all_ent_masks = torch.tensor([f.ent_mask for f in eval_features], dtype=torch.long) eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_ent, all_ent_masks, all_label_ids) # Run prediction for full data eval_sampler = SequentialSampler(eval_data) eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size) model.eval() eval_loss, eval_accuracy = 0, 0 nb_eval_steps, nb_eval_examples = 0, 0 for input_ids, input_mask, segment_ids, input_ent, ent_mask, label_ids in eval_dataloader: input_ent = input_ent + 1 #input_ent = embed(input_ent+1) # -1 -> 0 input_ent = input_ent.to(device) # -1 -> 0 input_ids = input_ids.to(device) input_mask = input_mask.to(device) segment_ids = segment_ids.to(device) ent_mask = ent_mask.to(device) label_ids = label_ids.to(device) #k,v = load_k_v_queryR_small(input_ent) k_1, v_1, k_2, v_2 = load_k_v_queryR_small(input_ent) ####test if int(input_ent[input_ent != 0].shape[0]) == 0: ###open or not? 
print("None ent") #print(input_ent) print(input_ent.shape) #print("------------") #k,v = load_k_v_queryR_small(input_ent) #print(k) #print(k.shape) #print("------------") #print(v) #print(v.shape) #continue k_1 = torch.zeros(input_ent.shape[0], 1, 1, 100).cuda().half() v_1 = torch.zeros(input_ent.shape[0], 1, 1, 100).cuda().half() k_2 = torch.zeros(input_ent.shape[0], 1, 1, 1, 100).cuda().half() v_2 = torch.zeros(input_ent.shape[0], 1, 1, 1, 100).cuda().half() ''' else: #print(k_2) print(k_1.shape) print(k_2.shape) exit() ''' with torch.no_grad(): #tmp_eval_loss = model(input_ids, segment_ids, input_mask, input_ent, ent_mask, label_ids, k.half(), v.half()) tmp_eval_loss = model(input_ids, segment_ids, input_mask, input_ent, ent_mask, label_ids, k_1.half(), v_1.half(), k_2.half(), v_2.half()) #logits = model(input_ids, segment_ids, input_mask, input_ent, ent_mask, None, k.half(), v.half()) logits = model(input_ids, segment_ids, input_mask, input_ent, ent_mask, None, k_1.half(), v_1.half(), k_2.half(), v_2.half()) logits = logits.detach().cpu().numpy() label_ids = label_ids.to('cpu').numpy() tmp_eval_accuracy, pred = accuracy(logits, label_ids) for a, b in zip(pred, label_ids): fgold.write("{}\n".format(label_list[b])) fpred.write("{}\n".format(label_list[a])) eval_loss += tmp_eval_loss.mean().item() eval_accuracy += tmp_eval_accuracy nb_eval_examples += input_ids.size(0) nb_eval_steps += 1 eval_loss = eval_loss / nb_eval_steps eval_accuracy = eval_accuracy / nb_eval_examples result = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy} if mark: output_eval_file = os.path.join( args.output_dir, "eval_results_{}.txt".format(x.split("_")[-1])) else: output_eval_file = os.path.join( args.output_dir, "test_results_{}.txt".format(x.split("_")[-1])) with open(output_eval_file, "w") as writer: logger.info("***** Eval results *****") for key in sorted(result.keys()): logger.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key])))
def main(): parser = argparse.ArgumentParser() ## Required parameters parser.add_argument( "--data_dir", default=None, type=str, required=True, help= "The input data dir. Should contain the .tsv files (or other data files) for the task." ) parser.add_argument("--ernie_model", default=None, type=str, required=True, help="Ernie pre-trained model") parser.add_argument( "--output_dir", default=None, type=str, required=True, help= "The output directory where the model predictions and checkpoints will be written." ) ## Other parameters parser.add_argument( "--max_seq_length", default=128, type=int, help= "The maximum total input sequence length after WordPiece tokenization. \n" "Sequences longer than this will be truncated, and sequences shorter \n" "than this will be padded.") parser.add_argument("--do_train", default=False, action='store_true', help="Whether to run training.") parser.add_argument("--do_eval", default=False, action='store_true', help="Whether to run eval on the dev set.") parser.add_argument( "--do_lower_case", default=False, action='store_true', help="Set this flag if you are using an uncased model.") parser.add_argument("--train_batch_size", default=32, type=int, help="Total batch size for training.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.") parser.add_argument( "--warmup_proportion", default=0.1, type=float, help= "Proportion of training to perform linear learning rate warmup for. " "E.g., 0.1 = 10%% of training.") parser.add_argument("--no_cuda", default=False, action='store_true', help="Whether not to use CUDA when available") parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument( '--gradient_accumulation_steps', type=int, default=1, help= "Number of updates steps to accumulate before performing a backward/update pass." ) parser.add_argument( '--fp16', default=False, action='store_true', help="Whether to use 16-bit float precision instead of 32-bit") parser.add_argument( '--loss_scale', type=float, default=0, help= "Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n" "0 (default value): dynamic loss scaling.\n" "Positive power of 2: static loss scaling value.\n") parser.add_argument('--threshold', type=float, default=.3) ##########ADD## parser.add_argument("--K_V_dim", type=int, default=100, help="Key and Value dim == KG representation dim") parser.add_argument( "--Q_dim", type=int, default=768, help="Query dim == Bert six output layer representation dim") parser.add_argument( '--graphsage', default=False, action='store_true', help="Whether to use Attention GraphSage instead of GAT") parser.add_argument('--self_att', default=True, action='store_true', help="Whether to use GAT") parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument( '--fp16_opt_level', type=str, default='O1', help= "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." 
"See details at https://nvidia.github.io/apex/amp.html") ############### args = parser.parse_args() processors = TacredProcessor num_labels_task = 80 if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") n_gpu = torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) n_gpu = 1 # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.distributed.init_process_group(backend='nccl') logger.info( "device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}". format(device, n_gpu, bool(args.local_rank != -1), args.fp16)) if args.gradient_accumulation_steps < 1: raise ValueError( "Invalid gradient_accumulation_steps parameter: {}, should be >= 1" .format(args.gradient_accumulation_steps)) args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps) random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if n_gpu > 0: torch.cuda.manual_seed_all(args.seed) if not args.do_train and not args.do_eval: raise ValueError( "At least one of `do_train` or `do_eval` must be True.") if os.path.exists(args.output_dir) and os.listdir( args.output_dir) and args.do_train: raise ValueError( "Output directory ({}) already exists and is not empty.".format( args.output_dir)) os.makedirs(args.output_dir, exist_ok=True) processor = processors() label_list = None tokenizer = BertTokenizer.from_pretrained(args.ernie_model, do_lower_case=args.do_lower_case) train_examples = None num_train_steps = None train_examples, label_list = processor.get_train_examples(args.data_dir) num_labels = len(label_list) num_train_steps = int( len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps * args.num_train_epochs) # Prepare model model, _ = BertForSequenceClassification.from_pretrained( args.ernie_model, cache_dir=PYTORCH_PRETRAINED_BERT_CACHE / 'distributed_{}'.format(args.local_rank), num_labels=num_labels, args=args) ### ''' if args.fp16: model.half() model.to(device) if args.local_rank != -1: try: from apex.parallel import DistributedDataParallel as DDP except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.") model = DDP(model) elif n_gpu > 1: model = torch.nn.DataParallel(model) # Prepare optimizer param_optimizer = list(model.named_parameters()) no_grad = ['bert.encoder.layer.11.output.dense_ent', 'bert.encoder.layer.11.output.LayerNorm_ent'] param_optimizer = [(n, p) for n, p in param_optimizer if not any(nd in n for nd in no_grad)] no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01}, {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} ] t_total = num_train_steps if args.local_rank != -1: t_total = t_total // torch.distributed.get_world_size() if args.fp16: try: from apex.optimizers import FP16_Optimizer from apex.optimizers import FusedAdam except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.") optimizer = FusedAdam(optimizer_grouped_parameters, lr=args.learning_rate, bias_correction=False, max_grad_norm=1.0) if args.loss_scale == 0: optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True) else: optimizer = 
FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale) else: optimizer = BertAdam(optimizer_grouped_parameters, lr=args.learning_rate, warmup=args.warmup_proportion, t_total=t_total) ''' # Prepare optimizer t_total = num_train_steps if args.local_rank != -1: t_total = t_total // torch.distributed.get_world_size() ### #if args.fp16: # model.half() ### model.to(device) param_optimizer = list(model.named_parameters()) no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] #no_decay = ['bias', 'LayerNorm.weight'] no_grad = [ 'bert.encoder.layer.11.output.dense_ent', 'bert.encoder.layer.11.output.LayerNorm_ent' ] param_optimizer = [(n, p) for n, p in param_optimizer if not any(nd in n for nd in no_grad)] optimizer_grouped_parameters = [{ 'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay }, { 'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0 }] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=int(t_total * 0.1), num_training_steps=t_total) if args.fp16: try: from apex import amp except ImportError: raise ImportError( "Please install apex from https://www.github.com/nvidia/apex to use fp16 training." ) model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) if n_gpu > 1: model = torch.nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True) ### global_step = 0 if args.do_train: train_features = convert_examples_to_features(train_examples, label_list, args.max_seq_length, tokenizer, args.threshold) logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_examples)) logger.info(" Batch size = %d", args.train_batch_size) logger.info(" Num steps = %d", num_train_steps) # zeros = [0 for _ in range(args.max_seq_length)] # zeros_ent = [0 for _ in range(100)] # zeros_ent = [zeros_ent for _ in range(args.max_seq_length)] all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long) all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long) all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long) all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long) all_ent = torch.tensor([f.input_ent for f in train_features], dtype=torch.long) all_ent_masks = torch.tensor([f.ent_mask for f in train_features], dtype=torch.long) train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_ent, all_ent_masks, all_label_ids) if args.local_rank == -1: train_sampler = RandomSampler(train_data) else: train_sampler = DistributedSampler(train_data) train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size) output_loss_file = os.path.join(args.output_dir, "loss") loss_fout = open(output_loss_file, 'w') model.train() for _ in trange(int(args.num_train_epochs), desc="Epoch"): tr_loss = 0 nb_tr_examples, nb_tr_steps = 0, 0 for step, batch in enumerate( tqdm(train_dataloader, desc="Iteration")): batch = tuple( t.to(device) if i != 3 else t for i, t in enumerate(batch)) input_ids, input_mask, segment_ids, 
input_ent, ent_mask, label_ids = batch #input_ent = embed(input_ent+1).to(device) # -1 -> 0 input_ent = input_ent + 1 # -1 -> 0 #k,v = load_k_v_queryR_small(input_ent) k_1, v_1, k_2, v_2 = load_k_v_queryR_small(input_ent) #loss = model(input_ids, segment_ids, input_mask, input_ent.half(), ent_mask, label_ids, k.half(), v.half()) loss = model(input_ids, segment_ids, input_mask, input_ent.float(), ent_mask, label_ids, k_1.half(), v_1.half(), k_2.half(), v_2.half()) if n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu. if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: ### #optimizer.backward(loss) with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() ### else: loss.backward() loss_fout.write("{}\n".format(loss.item())) tr_loss += loss.item() nb_tr_examples += input_ids.size(0) nb_tr_steps += 1 if (step + 1) % args.gradient_accumulation_steps == 0: # modify learning rate with special warm up BERT uses ### ''' lr_this_step = args.learning_rate * warmup_linear(global_step/t_total, args.warmup_proportion) for param_group in optimizer.param_groups: param_group['lr'] = lr_this_step optimizer.step() optimizer.zero_grad() global_step += 1 ''' if args.fp16: torch.nn.utils.clip_grad_norm_( amp.master_params(optimizer), args.max_grad_norm) else: torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) optimizer.step() scheduler.step() model.zero_grad() global_step += 1 ### model_to_save = model.module if hasattr(model, 'module') else model output_model_file = os.path.join( args.output_dir, "pytorch_model.bin_{}".format(global_step)) torch.save(model_to_save.state_dict(), output_model_file) # Save a trained model model_to_save = model.module if hasattr( model, 'module') else model # Only save the model it-self output_model_file = os.path.join(args.output_dir, "pytorch_model.bin") torch.save(model_to_save.state_dict(), output_model_file)
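
# Added sketch of the learning-rate multiplier that get_linear_schedule_with_warmup
# applies above (num_warmup_steps = int(t_total * 0.1)): a linear ramp from 0 to 1 over
# the warmup steps, then a linear decay back to 0 at t_total.
def _lr_multiplier(step, t_total, warmup_proportion=0.1):
    warmup_steps = int(t_total * warmup_proportion)
    if step < warmup_steps:
        return step / max(1, warmup_steps)
    return max(0.0, (t_total - step) / max(1, t_total - warmup_steps))

# e.g. with t_total = 1000: 0.5 at step 50, 1.0 at step 100 (end of warmup),
# 0.5 again at step 550, and 0.0 at step 1000.
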
def main(): parser = argparse.ArgumentParser() ## Required parameters parser.add_argument( "--data_dir", default=None, type=str, required=True, help= "The input data dir. Should contain the .tsv files (or other data files) for the task." ) parser.add_argument("--ernie_model", default=None, type=str, required=True, help="Ernie pre-trained model") parser.add_argument( "--output_dir", default=None, type=str, required=True, help= "The output directory where the model predictions and checkpoints will be written." ) ## Other parameters parser.add_argument( "--max_seq_length", default=128, type=int, help= "The maximum total input sequence length after WordPiece tokenization. \n" "Sequences longer than this will be truncated, and sequences shorter \n" "than this will be padded.") parser.add_argument("--do_train", default=False, action='store_true', help="Whether to run training.") parser.add_argument("--do_eval", default=False, action='store_true', help="Whether to run eval on the dev set.") parser.add_argument( "--do_lower_case", default=False, action='store_true', help="Set this flag if you are using an uncased model.") parser.add_argument("--train_batch_size", default=32, type=int, help="Total batch size for training.") parser.add_argument("--eval_batch_size", default=8, type=int, help="Total batch size for eval.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.") parser.add_argument( "--warmup_proportion", default=0.1, type=float, help= "Proportion of training to perform linear learning rate warmup for. " "E.g., 0.1 = 10%% of training.") parser.add_argument("--no_cuda", default=False, action='store_true', help="Whether not to use CUDA when available") parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument( '--gradient_accumulation_steps', type=int, default=1, help= "Number of updates steps to accumulate before performing a backward/update pass." ) parser.add_argument( '--fp16', default=False, action='store_true', help="Whether to use 16-bit float precision instead of 32-bit") parser.add_argument( '--loss_scale', type=float, default=0, help= "Loss scaling to improve fp16 numeric stability. 
Only used when fp16 set to True.\n" "0 (default value): dynamic loss scaling.\n" "Positive power of 2: static loss scaling value.\n") parser.add_argument('--threshold', type=float, default=.3) ##########ADD## parser.add_argument("--K_V_dim", type=int, default=100, help="Key and Value dim == KG representation dim") parser.add_argument( "--Q_dim", type=int, default=768, help="Query dim == Bert six output layer representation dim") parser.add_argument( '--graphsage', default=False, action='store_true', help="Whether to use Attention GraphSage instead of GAT") parser.add_argument('--self_att', default=True, action='store_true', help="Whether to use GAT") ############### args = parser.parse_args() processors = FewrelProcessor num_labels_task = 80 if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") n_gpu = torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) n_gpu = 1 # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.distributed.init_process_group(backend='nccl') logger.info( "device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}". format(device, n_gpu, bool(args.local_rank != -1), args.fp16)) if args.gradient_accumulation_steps < 1: raise ValueError( "Invalid gradient_accumulation_steps parameter: {}, should be >= 1" .format(args.gradient_accumulation_steps)) random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if n_gpu > 0: torch.cuda.manual_seed_all(args.seed) if not args.do_train and not args.do_eval: raise ValueError( "At least one of `do_train` or `do_eval` must be True.") processor = processors() num_labels = num_labels_task label_list = None tokenizer = BertTokenizer.from_pretrained(args.ernie_model, do_lower_case=args.do_lower_case) train_examples = None num_train_steps = None #train_examples, label_list = processor.get_train_examples(args.data_dir) filenames = os.listdir(args.output_dir) #filenames = [x for x in filenames if "pytorch_model.bin_" in x] ### #filenames = [x for x in filenames if x in ["pytorch_model.bin_1750", "pytorch_model.bin_2000", "pytorch_model.bin_2250", "pytorch_model.bin_2500", "pytorch_model.bin_2750", "pytorch_model.bin_3000", "pytorch_model.bin_3250", "pytorch_model.bin_3500", "pytorch_model.bin_3750", "pytorch_model.bin_4000", "pytorch_model.bin_4250", "pytorch_model.bin_4500", "pytorch_model.bin_4750", "pytorch_model.bin_5000"] ] #filenames = [x for x in filenames if x in ["pytorch_model.bin_1750", "pytorch_model.bin_2000", "pytorch_model.bin_2250", "pytorch_model.bin_2500", "pytorch_model.bin_2750", "pytorch_model.bin_3000", "pytorch_model.bin_3250", "pytorch_model.bin_3500", "pytorch_model.bin_3750", "pytorch_model.bin_4000"] ] filenames = ["pytorch_model.bin"] ### file_mark = [] for x in filenames: #file_mark.append([x, True]) file_mark.append([x, False]) ### ''' eval_examples = processor.get_dev_examples(args.data_dir) dev = convert_examples_to_features( eval_examples, label_list, args.max_seq_length, tokenizer, args.threshold) ''' ### eval_examples = processor.get_test_examples(args.data_dir) test = convert_examples_to_features(eval_examples, label_list, args.max_seq_length, tokenizer, args.threshold) for x, mark in file_mark: print(x, mark) output_model_file = os.path.join(args.output_dir, x) model_state_dict = torch.load(output_model_file) #model, _ = BertForSequenceClassification.from_pretrained(args.ernie_model, 
        #     state_dict=model_state_dict, num_labels=len(label_list), args=args)
        model, _ = BertForSequenceClassification.from_pretrained(
            args.ernie_model,
            state_dict=model_state_dict,
            num_labels=num_labels_task,
            args=args)
        if args.fp16:
            model.half()
        model.to(device)
        if mark:
            eval_features = dev
        else:
            eval_features = test
        logger.info("***** Running evaluation *****")
        logger.info(" Num examples = %d", len(eval_examples))
        logger.info(" Batch size = %d", args.eval_batch_size)
        all_input_ids = torch.tensor([f.input_ids for f in eval_features],
                                     dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in eval_features],
                                      dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in eval_features],
                                       dtype=torch.long)
        all_label_ids = torch.tensor([1 for f in eval_features], dtype=torch.long)
        all_ent = torch.tensor([f.input_ent for f in eval_features],
                               dtype=torch.long)
        all_ent_masks = torch.tensor([f.ent_mask for f in eval_features],
                                     dtype=torch.long)
        # Keep the raw label / text / entity / answer objects in index-keyed maps so
        # they can be recovered from the integer ids carried through the DataLoader.
        output_label_map = dict()
        output_text_map = dict()
        output_ent_map = dict()
        output_ans_map = dict()
        for i, f in enumerate(eval_features):
            output_label_map[i] = f.label
            output_text_map[i] = f.text
            output_ent_map[i] = f.ent
            output_ans_map[i] = f.ans
        # Each id tensor simply holds the example index used to query the maps above.
        output_label_id = torch.tensor([i for i, _ in enumerate(eval_features)],
                                       dtype=torch.long)
        output_text_id = torch.tensor([i for i, _ in enumerate(eval_features)],
                                      dtype=torch.long)
        output_ent_id = torch.tensor([i for i, _ in enumerate(eval_features)],
                                     dtype=torch.long)
        output_ans_id = torch.tensor([i for i, _ in enumerate(eval_features)],
                                     dtype=torch.long)
        eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids,
                                  all_ent, all_ent_masks, all_label_ids,
                                  output_label_id, output_text_id, output_ent_id,
                                  output_ans_id)
        # Run prediction for full data
        eval_sampler = SequentialSampler(eval_data)
        eval_dataloader = DataLoader(eval_data,
                                     sampler=eval_sampler,
                                     batch_size=args.eval_batch_size)
        if mark:
            output_file_pred = os.path.join(
args.output_dir, "eval_pred_{}.txt".format(x.split("_")[-1])) #output_file_glod = os.path.join(args.output_dir, "eval_gold_{}.txt".format(x.split("_")[-1])) else: #output_eval_file = os.path.join(args.output_dir, "test_results_{}.txt".format(x.split("_")[-1])) output_file_pred = os.path.join( args.output_dir, "test_pred_{}.txt".format(x.split("_")[-1])) #output_file_glod = os.path.join(args.output_dir, "test_gold_{}.txt".format(x.split("_")[-1])) fpred = open(output_file_pred, "w") #fgold = open(output_file_glod, "w") model.eval() eval_loss, eval_accuracy = 0, 0 nb_eval_steps, nb_eval_examples = 0, 0 save_data_list = list() counter = 0 re_all = 0 pre_all = 0 f1_all = 0 tp_all = 0 fp_all = 0 fn_all = 0 tn_all = 0 #ccc=0 for input_ids, input_mask, segment_ids, input_ent, ent_mask, label_ids, output_label_id, output_text_id, output_ent_id, output_ans_id in eval_dataloader: input_ent = input_ent + 1 output_ans = output_ans_map[int(output_ans_id)] ''' if output_ans == None: #ccc+=1 #print(ccc) print("==1==") continue elif len(input_ent[input_ent!=0]) != len(output_ans): print(len(input_ent[input_ent!=0]),len(output_ans)) #ccc+=1 #print(ccc) #exit() print("==2==") continue ''' input_ids = input_ids.to(device) input_mask = input_mask.to(device) segment_ids = segment_ids.to(device) input_ent = input_ent.to(device) ent_mask = ent_mask.to(device) label_ids = label_ids.to(device) #k, v = load_k_v_queryR(input_ent) k_1, v_1, new_input_ent, input_ent_nb, input_ent_r = load_k_v_queryR_small( input_ent) with torch.no_grad(): output_gen_ids = model(input_ids, segment_ids, input_mask, input_ent, ent_mask, None, k_1.half(), v_1.half(), input_ent_nb) #output_ans = output_ans_map[int(output_ans_id)] #if output_gen_ids == None: # print("None") # continue #examples_n = 0 if len(output_gen_ids) != len(output_ans): #print(len(output_gen_ids),len(output_ans)) #print("++++++") #print("skip") #print("++++++") continue for i, ids_list_pre in enumerate(output_gen_ids): ids_list_ans = output_ans[i] if len(ids_list_pre) != len(ids_list_ans): print("========") print(ids_list_pre) print(ids_list_ans) print("========") continue #print("{}/{}".format(counter,329)) #ids_list_ans #ids_list_pre tp = 0 fp = 0 fn = 0 tn = 0 re = 0 pre = 0 f1 = 0 for idx, id in enumerate(ids_list_ans): if id == -1: if tp == 0 and fp == 0: pre = 0 else: pre = tp / (tp + fp) if tp == 0 and fn == 0: re = 0 else: re = tp / (tp + fn) if pre == 0 and re == 0: f1 = 0 else: f1 = float(2.0 * pre * re / (re + pre)) else: if ids_list_ans[idx] == 1 and ids_list_pre[ idx] == 1: tp += 1 elif ids_list_ans[idx] == 0 and ids_list_pre[ idx] == 1: fp += 1 elif ids_list_ans[idx] == 0 and ids_list_pre[ idx] == 0: tn += 1 elif ids_list_ans[idx] == 1 and ids_list_pre[ idx] == 0: fn += 1 counter += 1 tp_all += tp fp_all += fp fn_all += fn tn_all += tn re_all += re pre_all += pre f1_all += f1 pp = tp_all / (tp_all + fp_all) rr = tp_all / (tp_all + fn_all) ff = float(2.0 * pp * rr / (pp + rr)) print("==============================") print("P:", pp) print("R:", rr) print("F1-micro:", ff) #print("---") #print("P:",re_all/counter) #print("R:",pre_all/counter) #print("F1-macro:",f1_all/counter) #print("F1-micro:",float(2.0*(pre_all/counter)*(re_all/counter)/(pre_all/counter+re_all/counter))) print("==============================") with open(output_file_pred, "w") as writer: logger.info("***** Results*****") fpred.write("P: {}\n".format(pp)) fpred.write("R: {}\n".format(rr)) fpred.write("F1-micro: {}\n".format(ff))
def main(): parser = argparse.ArgumentParser() ## Required parameters parser.add_argument( "--data_dir", default=None, type=str, required=True, help= "The input data dir. Should contain the .tsv files (or other data files) for the task." ) parser.add_argument("--ernie_model", default=None, type=str, required=True, help="Ernie pre-trained model") parser.add_argument( "--output_dir", default=None, type=str, required=True, help= "The output directory where the model predictions and checkpoints will be written." ) ## Other parameters parser.add_argument( "--max_seq_length", default=128, type=int, help= "The maximum total input sequence length after WordPiece tokenization. \n" "Sequences longer than this will be truncated, and sequences shorter \n" "than this will be padded.") parser.add_argument("--do_train", default=False, action='store_true', help="Whether to run training.") parser.add_argument("--do_eval", default=False, action='store_true', help="Whether to run eval on the dev set.") parser.add_argument( "--do_lower_case", default=False, action='store_true', help="Set this flag if you are using an uncased model.") parser.add_argument("--train_batch_size", default=32, type=int, help="Total batch size for training.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.") parser.add_argument( "--warmup_proportion", default=0.1, type=float, help= "Proportion of training to perform linear learning rate warmup for. " "E.g., 0.1 = 10%% of training.") parser.add_argument("--no_cuda", default=False, action='store_true', help="Whether not to use CUDA when available") parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument( '--gradient_accumulation_steps', type=int, default=1, help= "Number of updates steps to accumulate before performing a backward/update pass." ) parser.add_argument( '--fp16', default=False, action='store_true', help="Whether to use 16-bit float precision instead of 32-bit") parser.add_argument( '--loss_scale', type=float, default=0, help= "Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n" "0 (default value): dynamic loss scaling.\n" "Positive power of 2: static loss scaling value.\n") parser.add_argument('--threshold', type=float, default=.1) args = parser.parse_args() processors = SemevalProcessor num_labels_task = 3 if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") n_gpu = torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) n_gpu = 1 # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.distributed.init_process_group(backend='nccl') logger.info( "device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}". 
format(device, n_gpu, bool(args.local_rank != -1), args.fp16)) if args.gradient_accumulation_steps < 1: raise ValueError( "Invalid gradient_accumulation_steps parameter: {}, should be >= 1" .format(args.gradient_accumulation_steps)) args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps) random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if n_gpu > 0: torch.cuda.manual_seed_all(args.seed) if not args.do_train and not args.do_eval: raise ValueError( "At least one of `do_train` or `do_eval` must be True.") if os.path.exists(args.output_dir) and os.listdir( args.output_dir) and args.do_train: raise ValueError( "Output directory ({}) already exists and is not empty.".format( args.output_dir)) os.makedirs(args.output_dir, exist_ok=True) processor = processors() num_labels = num_labels_task label_list = None tokenizer = BertTokenizer.from_pretrained(args.ernie_model, do_lower_case=args.do_lower_case) train_examples = None num_train_steps = None train_examples, label_list = processor.get_train_examples(args.data_dir) num_train_steps = int( len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps * args.num_train_epochs) # Prepare model model, _ = BertForSequenceClassification.from_pretrained( args.ernie_model, cache_dir=PYTORCH_PRETRAINED_BERT_CACHE / 'distributed_{}'.format(args.local_rank), num_labels=num_labels) if args.fp16: model.half() model.to(device) if args.local_rank != -1: try: from apex.parallel import DistributedDataParallel as DDP except ImportError: raise ImportError( "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training." ) model = DDP(model) elif n_gpu > 1: model = torch.nn.DataParallel(model) # Prepare optimizer param_optimizer = list(model.named_parameters()) no_grad = [ 'bert.encoder.layer.11.output.dense_ent', 'bert.encoder.layer.11.output.LayerNorm_ent' ] param_optimizer = [(n, p) for n, p in param_optimizer if not any(nd in n for nd in no_grad)] no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [{ 'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01 }, { 'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0 }] t_total = num_train_steps if args.local_rank != -1: t_total = t_total // torch.distributed.get_world_size() if args.fp16: try: from apex.optimizers import FP16_Optimizer from apex.optimizers import FusedAdam except ImportError: raise ImportError( "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training." 
) optimizer = FusedAdam(optimizer_grouped_parameters, lr=args.learning_rate, bias_correction=False, max_grad_norm=1.0) if args.loss_scale == 0: optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True) else: optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale) else: optimizer = BertAdam(optimizer_grouped_parameters, lr=args.learning_rate, warmup=args.warmup_proportion, t_total=t_total) global_step = 0 if args.do_train: train_features = convert_examples_to_features(train_examples, label_list, args.max_seq_length, tokenizer, args.threshold) vecs = [] vecs.append([0] * 100) logger.info("Loading entity embedding.") with open("kg_embed/entity2vec.vec", 'r') as fin: for line in fin: vec = line.strip().split('\t') vec = [float(x) for x in vec] vecs.append(vec) embed = torch.FloatTensor(vecs) embed = torch.nn.Embedding.from_pretrained(embed) # embed = torch.nn.Embedding(5041175, 100) logger.info("Shape of entity embedding: " + str(embed.weight.size())) del vecs if args.do_eval: eval_examples = processor.get_dev_examples(args.data_dir) dev = convert_examples_to_features(eval_examples, label_list, args.max_seq_length, tokenizer, args.threshold) eval_features = dev logger.info("Eval Num examples = %d", len(eval_examples)) logger.info("Eval Batch size = %d", args.train_batch_size) all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long) all_input_mask = torch.tensor( [f.input_mask for f in eval_features], dtype=torch.long) all_segment_ids = torch.tensor( [f.segment_ids for f in eval_features], dtype=torch.long) all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long) all_ent = torch.tensor([f.input_ent for f in eval_features], dtype=torch.long) all_ent_masks = torch.tensor([f.ent_mask for f in eval_features], dtype=torch.long) eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_ent, all_ent_masks, all_label_ids) # Run prediction for full data eval_sampler = SequentialSampler(eval_data) eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.train_batch_size) logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_examples)) logger.info(" Batch size = %d", args.train_batch_size) logger.info(" Num steps = %d", num_train_steps) all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long) all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long) all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long) all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long) all_ent = torch.tensor([f.input_ent for f in train_features], dtype=torch.long) all_ent_masks = torch.tensor([f.ent_mask for f in train_features], dtype=torch.long) train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_ent, all_ent_masks, all_label_ids) if args.local_rank == -1: train_sampler = RandomSampler(train_data) else: train_sampler = DistributedSampler(train_data) train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size) output_loss_file = os.path.join(args.output_dir, "loss") loss_fout = open(output_loss_file, 'w') model.train() max_acc = 0 for _ in trange(int(args.num_train_epochs), desc="Epoch"): model.train() tr_loss = 0 nb_tr_examples, nb_tr_steps = 0, 0 for step, batch in enumerate(train_dataloader): batch = tuple( t.to(device) if i != 3 else t for i, t in enumerate(batch)) input_ids, input_mask, 
segment_ids, input_ent, ent_mask, label_ids = batch input_ent = embed(input_ent + 1).to(device) # -1 -> 0 loss = model(input_ids, segment_ids, input_mask, input_ent.half(), ent_mask, label_ids) if n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu. if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: optimizer.backward(loss) else: loss.backward() loss_fout.write("{}\n".format(loss.item())) tr_loss += loss.item() nb_tr_examples += input_ids.size(0) nb_tr_steps += 1 if (step + 1) % args.gradient_accumulation_steps == 0: # modify learning rate with special warm up BERT uses lr_this_step = args.learning_rate * warmup_linear( global_step / t_total, args.warmup_proportion) for param_group in optimizer.param_groups: param_group['lr'] = lr_this_step optimizer.step() optimizer.zero_grad() global_step += 1 if args.do_eval: logger.info("***** Running evaluation *****") output_eval_file = os.path.join( args.output_dir, "eval_results_{}.txt".format(global_step)) model.eval() eval_loss, eval_accuracy = 0, 0 nb_eval_steps, nb_eval_examples = 0, 0 for input_ids, input_mask, segment_ids, input_ent, ent_mask, label_ids in eval_dataloader: input_ent = embed(input_ent + 1) # -1 -> 0 input_ids = input_ids.to(device) input_mask = input_mask.to(device) segment_ids = segment_ids.to(device) input_ent = input_ent.to(device) ent_mask = ent_mask.to(device) label_ids = label_ids.to(device) with torch.no_grad(): tmp_eval_loss = model(input_ids, segment_ids, input_mask, input_ent, ent_mask, label_ids) logits = model(input_ids, segment_ids, input_mask, input_ent, ent_mask) logits = logits.detach().cpu().numpy() label_ids = label_ids.to('cpu').numpy() tmp_eval_accuracy = accuracy(logits, label_ids) eval_loss += tmp_eval_loss.mean().item() eval_accuracy += tmp_eval_accuracy nb_eval_examples += input_ids.size(0) nb_eval_steps += 1 eval_loss = eval_loss / nb_eval_steps eval_accuracy = eval_accuracy / nb_eval_examples max_acc = max(max_acc, eval_accuracy) result = { 'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'max_accuracy': max_acc } with open(output_eval_file, "w") as writer: logger.info("***** Eval results *****") for key in sorted(result.keys()): logger.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key])))
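# In the Semeval loops above, entity ids arrive from the features with -1 meaning
# "no entity at this position", and the embedding table is built with an all-zero
# row prepended (vecs.append([0] * 100)). The shift `embed(input_ent + 1)` (the
# "# -1 -> 0" comment) therefore maps -1 to the zero padding row and entity id k to
# row k + 1. The sketch below reproduces just that convention in isolation; the
# 100-dim size and the tiny toy table are assumptions for illustration only.
def entity_padding_demo():
    import torch

    toy_vecs = [[0.0] * 100]  # row 0: padding row looked up for "no entity" (-1)
    toy_vecs += [[float(k + 1)] * 100 for k in range(3)]  # rows 1..3: entity ids 0..2
    toy_embed = torch.nn.Embedding.from_pretrained(torch.FloatTensor(toy_vecs))

    input_ent = torch.tensor([[-1, 0, 2, -1]])  # -1 marks positions without an entity
    ent_vectors = toy_embed(input_ent + 1)      # shape (1, 4, 100)
    # Positions that held -1 come back as all-zero vectors.
    assert torch.all(ent_vectors[0, 0] == 0)
    assert torch.all(ent_vectors[0, 3] == 0)
    return ent_vectors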
def main(): parser = argparse.ArgumentParser() ## Required parameters parser.add_argument( "--data_dir", default=None, type=str, required=True, help= "The input data dir. Should contain the .tsv files (or other data files) for the task." ) parser.add_argument("--train_file", default=None, type=str, required=True) parser.add_argument("--ernie_model", default=None, type=str, required=True, help="Ernie pre-trained model") parser.add_argument( "--output_dir", default=None, type=str, required=True, help= "The output directory where the model predictions and checkpoints will be written." ) parser.add_argument("--ckpt", default='None', type=str) ## Other parameters parser.add_argument( "--max_seq_length", default=128, type=int, help= "The maximum total input sequence length after WordPiece tokenization. \n" "Sequences longer than this will be truncated, and sequences shorter \n" "than this will be padded.") parser.add_argument("--do_train", default=False, action='store_true', help="Whether to run training.") parser.add_argument("--do_eval", default=False, action='store_true', help="Whether to run eval on the dev set.") parser.add_argument( "--do_lower_case", default=False, action='store_true', help="Set this flag if you are using an uncased model.") parser.add_argument("--train_batch_size", default=32, type=int, help="Total batch size for training.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.") parser.add_argument("--eval_batch_size", default=8, type=int, help="Total batch size for eval.") parser.add_argument( "--warmup_proportion", default=0.1, type=float, help= "Proportion of training to perform linear learning rate warmup for. " "E.g., 0.1 = 10%% of training.") parser.add_argument("--no_cuda", default=False, action='store_true', help="Whether not to use CUDA when available") parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument( '--gradient_accumulation_steps', type=int, default=1, help= "Number of updates steps to accumulate before performing a backward/update pass." ) parser.add_argument( '--fp16', default=False, action='store_true', help="Whether to use 16-bit float precision instead of 32-bit") parser.add_argument( '--loss_scale', type=float, default=0, help= "Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n" "0 (default value): dynamic loss scaling.\n" "Positive power of 2: static loss scaling value.\n") parser.add_argument('--mean_pool', type=float, default=1) parser.add_argument("--bert_model", type=str, default='bert') args = parser.parse_args() logger.info(args) print(args) if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") n_gpu = torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) n_gpu = 1 # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.distributed.init_process_group(backend='nccl') logger.info( "device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}". 
format(device, n_gpu, bool(args.local_rank != -1), args.fp16)) if args.gradient_accumulation_steps < 1: raise ValueError( "Invalid gradient_accumulation_steps parameter: {}, should be >= 1" .format(args.gradient_accumulation_steps)) args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps) random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if n_gpu > 0: torch.cuda.manual_seed_all(args.seed) if not args.do_train and not args.do_eval: raise ValueError( "At least one of `do_train` or `do_eval` must be True.") if os.path.exists(args.output_dir) and os.listdir( args.output_dir) and args.do_train: raise ValueError( "Output directory ({}) already exists and is not empty.".format( args.output_dir)) os.makedirs(args.output_dir, exist_ok=True) processor = TypingProcessor() tokenizer_label = BertTokenizer_label.from_pretrained( args.ernie_model, do_lower_case=args.do_lower_case) tokenizer = BertTokenizer.from_pretrained(args.ernie_model, do_lower_case=args.do_lower_case) if os.path.exists('***path_to_your_roberta***'): load_path = '***path_to_your_roberta***' else: load_path = '***path_to_your_roberta***' roberta_tokenizer = RobertaTokenizer.from_pretrained(load_path) bert_tokenizer_cased = BertTokenizer_cased.from_pretrained( '***path_to_your_bert_tokenizer_cased***') train_examples = None num_train_steps = None train_examples, label_list, d = processor.get_train_examples( args.data_dir, args.train_file) label_list = sorted(label_list) #class_weight = [min(d[x], 100) for x in label_list] #logger.info(class_weight) S = [] for l in label_list: s = [] for ll in label_list: if ll in l: s.append(1.) else: s.append(0.) S.append(s) num_train_steps = int( len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps * args.num_train_epochs) # Prepare model if args.bert_model == 'bert' and args.do_lower_case: if os.path.exists('***path_to_your_bert_uncased***'): bert_model = BertModel.from_pretrained( '***path_to_your_bert_uncased***') else: bert_model = BertModel.from_pretrained( '***path_to_your_bert_uncased***') if args.ckpt != 'None': if os.path.exists('***path_to_your_bert_uncased***'): load_path = '***path_to_your_trained_checkpoint***' + args.ckpt else: load_path = '***path_to_your_trained_checkpoint***' + args.ckpt ckpt = torch.load(load_path) bert_model.load_state_dict(ckpt["bert-base"]) elif args.bert_model == 'roberta': if os.path.exists('***path_to_your_roberta***'): bert_model = RobertaModel.from_pretrained( '***path_to_your_roberta***') else: bert_model = RobertaModel.from_pretrained( '***path_to_your_roberta***') if args.ckpt != 'None': if os.path.exists('***path_to_your_roberta***'): load_path = '***path_to_your_trained_checkpoint***' + args.ckpt else: load_path = '***path_to_your_trained_checkpoint***' + args.ckpt ckpt = torch.load(load_path) bert_model.load_state_dict(ckpt["bert-base"]) else: bert_model = BertModel.from_pretrained( '***path_to_your_bert_model_cased***') model = BertForEntityTyping(bert_model, len(label_list)) if args.fp16: model.half() model.to(device) if args.local_rank != -1: try: from apex.parallel import DistributedDataParallel as DDP except ImportError: raise ImportError( "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training." 
) model = DDP(model) elif n_gpu > 1: model = torch.nn.DataParallel(model) # Prepare optimizer param_optimizer = list(model.named_parameters()) no_grad = [ 'bert.encoder.layer.11.output.dense_ent', 'bert.encoder.layer.11.output.LayerNorm_ent' ] param_optimizer = [(n, p) for n, p in param_optimizer if not any(nd in n for nd in no_grad)] no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [{ 'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01 }, { 'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0 }] t_total = num_train_steps if args.local_rank != -1: t_total = t_total // torch.distributed.get_world_size() if args.fp16: try: from apex.optimizers import FP16_Optimizer from apex.optimizers import FusedAdam except ImportError: raise ImportError( "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training." ) optimizer = FusedAdam(optimizer_grouped_parameters, lr=args.learning_rate, bias_correction=False, max_grad_norm=1.0) if args.loss_scale == 0: optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True) else: optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale) else: optimizer = BertAdam(optimizer_grouped_parameters, lr=args.learning_rate, warmup=args.warmup_proportion, t_total=t_total) global_step = 0 if args.do_train: if args.do_lower_case: if args.train_file == 'train.json' and os.path.exists( 'train_features_1.0' ) and 'FIGER' in args.data_dir and args.mean_pool == 1: train_features = torch.load('train_features_1.0') elif args.train_file == 'train.json' and os.path.exists( 'train_features_1.0_se' ) and 'FIGER' in args.data_dir and args.mean_pool == 0: train_features = torch.load('train_features_1.0_se') else: train_features = convert_examples_to_features( train_examples, label_list, args.max_seq_length, tokenizer_label, tokenizer, roberta_tokenizer, bert_tokenizer_cased, args.mean_pool, args.bert_model, args.do_lower_case) else: if args.train_file == 'train.json' and os.path.exists( 'train_features_1.0' ) and 'FIGER' in args.data_dir and args.mean_pool == 1: train_features = torch.load('train_features_cased') else: train_features = convert_examples_to_features( train_examples, label_list, args.max_seq_length, tokenizer_label, tokenizer, roberta_tokenizer, bert_tokenizer_cased, args.mean_pool, args.bert_model, args.do_lower_case) logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_examples)) logger.info(" Batch size = %d", args.train_batch_size) logger.info(" Num steps = %d", num_train_steps) all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long) all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long) all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long) all_span_mask = torch.tensor([f.span_mask for f in train_features], dtype=torch.float) all_labels = torch.tensor([f.labels for f in train_features], dtype=torch.float) train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_span_mask, all_labels) if args.local_rank == -1: train_sampler = RandomSampler(train_data) else: train_sampler = DistributedSampler(train_data) train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size) output_loss_file = os.path.join(args.output_dir, "loss") loss_fout = open(output_loss_file, 'w') model.train() for epoch in 
trange(int(args.num_train_epochs), desc="Epoch"): tr_loss = 0 nb_tr_examples, nb_tr_steps = 0, 0 for step, batch in enumerate(train_dataloader): batch = tuple( t.to(device) if i != 3 else t for i, t in enumerate(batch)) input_ids, input_mask, segment_ids, span_mask, labels = batch loss = model(input_ids, args.bert_model, segment_ids, input_mask, span_mask, labels.half()) if n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu. if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: optimizer.backward(loss) else: loss.backward() loss_fout.write("{}\n".format( loss.item() * args.gradient_accumulation_steps)) tr_loss += loss.item() nb_tr_examples += input_ids.size(0) nb_tr_steps += 1 if (step + 1) % args.gradient_accumulation_steps == 0: # modify learning rate with special warm up BERT uses lr_this_step = args.learning_rate * warmup_linear( global_step / t_total, args.warmup_proportion) for param_group in optimizer.param_groups: param_group['lr'] = lr_this_step optimizer.step() optimizer.zero_grad() global_step += 1 if global_step % 150 == 0 and global_step > 0: model_to_save = model.module if hasattr( model, 'module') else model output_model_file = os.path.join( args.output_dir, "pytorch_model.bin_{}".format(global_step)) torch.save(model_to_save.state_dict(), output_model_file) model_to_save = model.module if hasattr(model, 'module') else model output_model_file = os.path.join( args.output_dir, "pytorch_model.bin_{}".format(epoch)) torch.save(model_to_save.state_dict(), output_model_file) x = "pytorch_model.bin_{}".format(epoch) for mark in [True, False]: if mark: eval_examples = processor.get_dev_examples(args.data_dir) else: eval_examples = processor.get_test_examples(args.data_dir) eval_features = convert_examples_to_features( eval_examples, label_list, args.max_seq_length, tokenizer_label, tokenizer, roberta_tokenizer, bert_tokenizer_cased, args.mean_pool, args.bert_model, args.do_lower_case) logger.info("***** Running evaluation *****") logger.info(" Num examples = %d", len(eval_examples)) logger.info(" Batch size = %d", args.eval_batch_size) all_input_ids = torch.tensor( [f.input_ids for f in eval_features], dtype=torch.long) all_input_mask = torch.tensor( [f.input_mask for f in eval_features], dtype=torch.long) all_segment_ids = torch.tensor( [f.segment_ids for f in eval_features], dtype=torch.long) all_span_mask = torch.tensor( [f.span_mask for f in eval_features], dtype=torch.float) all_labels = torch.tensor([f.labels for f in eval_features], dtype=torch.float) eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_span_mask, all_labels) eval_sampler = SequentialSampler(eval_data) eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size) model.eval() eval_loss, eval_accuracy = 0, 0 nb_eval_steps, nb_eval_examples = 0, 0 pred = [] true = [] for input_ids, input_mask, segment_ids, span_mask, labels in eval_dataloader: input_ids = input_ids.to(device) input_mask = input_mask.to(device) segment_ids = segment_ids.to(device) span_mask = span_mask.to(device) labels = labels.to(device) with torch.no_grad(): tmp_eval_loss = model(input_ids, args.bert_model, segment_ids, input_mask, span_mask, labels) logits = model(input_ids, args.bert_model, segment_ids, input_mask, span_mask) logits = logits.detach().cpu().numpy() labels = labels.to('cpu').numpy() tmp_eval_accuracy, tmp_pred, tmp_true = accuracy( logits, labels) pred.extend(tmp_pred) true.extend(tmp_true) eval_loss += 
tmp_eval_loss.mean().item() eval_accuracy += tmp_eval_accuracy nb_eval_examples += input_ids.size(0) nb_eval_steps += 1 eval_loss = eval_loss / nb_eval_steps eval_accuracy = eval_accuracy / nb_eval_examples def f1(p, r): if r == 0.: return 0. return 2 * p * r / float(p + r) def loose_macro(true, pred): num_entities = len(true) p = 0. r = 0. for true_labels, predicted_labels in zip(true, pred): if len(predicted_labels) > 0: p += len( set(predicted_labels).intersection( set(true_labels))) / float( len(predicted_labels)) if len(true_labels): r += len( set(predicted_labels).intersection( set(true_labels))) / float( len(true_labels)) precision = p / num_entities recall = r / num_entities return precision, recall, f1(precision, recall) def loose_micro(true, pred): num_predicted_labels = 0. num_true_labels = 0. num_correct_labels = 0. for true_labels, predicted_labels in zip(true, pred): num_predicted_labels += len(predicted_labels) num_true_labels += len(true_labels) num_correct_labels += len( set(predicted_labels).intersection( set(true_labels))) if num_predicted_labels > 0: precision = num_correct_labels / num_predicted_labels else: precision = 0. recall = num_correct_labels / num_true_labels return precision, recall, f1(precision, recall) if False: result = { 'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'macro': loose_macro(true, pred), 'micro': loose_micro(true, pred) } else: result = { 'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'macro': loose_macro(true, pred), 'micro': loose_micro(true, pred) } if mark: output_eval_file = os.path.join( args.output_dir, "eval_results_{}.txt".format(x.split("_")[-1])) else: output_eval_file = os.path.join( args.output_dir, "test_results_{}.txt".format(x.split("_")[-1])) with open(output_eval_file, "w") as writer: logger.info("***** Eval results *****") for key in sorted(result.keys()): logger.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) exit(0)
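# Both training loops in this file rescale the learning rate by hand at every
# optimizer step via warmup_linear(global_step / t_total, args.warmup_proportion).
# That helper is imported from the package's optimization module; the sketch below
# reproduces the behaviour these loops assume (linear warm-up to the peak LR, then
# linear decay towards zero) and should be read as an assumption about its
# definition, not as this repository's exact implementation.
def warmup_linear_sketch(progress, warmup=0.1):
    """progress = global_step / t_total; warmup = fraction of steps spent warming up."""
    if progress < warmup:
        return progress / warmup        # ramp the multiplier from 0 up to 1
    return max(0.0, 1.0 - progress)     # then decay it from 1 back towards 0


# Example with learning_rate = 5e-5, t_total = 1000, warmup_proportion = 0.1:
#   step 50  -> 5e-5 * warmup_linear_sketch(0.05) = 2.5e-05
#   step 500 -> 5e-5 * warmup_linear_sketch(0.5)  = 2.5e-05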