def main(): parser = argparse.ArgumentParser() ## Required parameters ############### parser.add_argument( "--data_dir", default=None, type=str, required=True, help= "The input data dir. Should contain the .tsv files (or other data files) for the task." ) parser.add_argument( "--output_dir", default=None, type=str, required=True, help= "The output directory where the model predictions and checkpoints will be written." ) parser.add_argument("--pretrain_model", default='bert-case-uncased', type=str, required=True, help="Pre-trained model") parser.add_argument("--num_labels_task", default=None, type=int, required=True, help="num_labels_task") parser.add_argument( "--max_seq_length", default=128, type=int, help= "The maximum total input sequence length after WordPiece tokenization. \n" "Sequences longer than this will be truncated, and sequences shorter \n" "than this will be padded.") parser.add_argument("--do_train", default=False, action='store_true', help="Whether to run training.") parser.add_argument("--do_eval", default=False, action='store_true', help="Whether to run eval on the dev set.") parser.add_argument( "--do_lower_case", default=False, action='store_true', help="Set this flag if you are using an uncased model.") parser.add_argument("--train_batch_size", default=32, type=int, help="Total batch size for training.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.") parser.add_argument( "--warmup_proportion", default=0.1, type=float, help= "Proportion of training to perform linear learning rate warmup for. " "E.g., 0.1 = 10%% of training.") parser.add_argument("--no_cuda", default=False, action='store_true', help="Whether not to use CUDA when available") parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument( '--gradient_accumulation_steps', type=int, default=1, help= "Number of updates steps to accumulate before performing a backward/update pass." ) parser.add_argument( '--fp16', default=False, action='store_true', help="Whether to use 16-bit float precision instead of 32-bit") parser.add_argument( '--loss_scale', type=float, default=0, help= "Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n" "0 (default value): dynamic loss scaling.\n" "Positive power of 2: static loss scaling value.\n") parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument( '--fp16_opt_level', type=str, default='O1', help= "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." 
"See details at https://nvidia.github.io/apex/amp.html") parser.add_argument("--task", default=None, type=int, required=True, help="Choose Task") ############### args = parser.parse_args() processors = Processor_1 num_labels = args.num_labels_task if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") n_gpu = torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) n_gpu = 1 # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.distributed.init_process_group(backend='nccl') logger.info( "device: {}, n_gpu: {}, distributed training: {}, 16-bits training: {}" .format(device, n_gpu, bool(args.local_rank != -1), args.fp16)) if args.gradient_accumulation_steps < 1: raise ValueError( "Invalid gradient_accumulation_steps parameter: {}, should be >= 1" .format(args.gradient_accumulation_steps)) args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps) random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if n_gpu > 0: torch.cuda.manual_seed_all(args.seed) if not args.do_train: raise ValueError( "At least one of `do_train` or `do_eval` must be True.") if os.path.exists(args.output_dir) and os.listdir( args.output_dir) and args.do_train: raise ValueError( "Output directory ({}) already exists and is not empty.".format( args.output_dir)) os.makedirs(args.output_dir, exist_ok=True) tokenizer = RobertaTokenizer.from_pretrained(args.pretrain_model) train_examples = None num_train_steps = None aspect_list = None sentiment_list = None processor = processors() num_labels = num_labels train_examples, aspect_list, sentiment_list = processor.get_train_examples( args.data_dir) if args.task == 1: num_labels = len(aspect_list) elif args.task == 2: num_labels = len(sentiment_list) else: print("What's task?") exit() num_train_steps = int( len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps * args.num_train_epochs) # Prepare model #model = RobertaForSequenceClassification.from_pretrained(args.pretrain_model, num_labels=args.num_labels_task, output_hidden_states=False, output_attentions=False, return_dict=True) model = RobertaForMaskedLMDomainTask.from_pretrained( args.pretrain_model, num_labels=args.num_labels_task, output_hidden_states=False, output_attentions=False, return_dict=True) # Prepare optimizer t_total = num_train_steps if args.local_rank != -1: t_total = t_total // torch.distributed.get_world_size() model.to(device) param_optimizer = list(model.named_parameters()) no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] #no_decay = ['bias', 'LayerNorm.weight'] no_grad = [ 'bert.encoder.layer.11.output.dense_ent', 'bert.encoder.layer.11.output.LayerNorm_ent' ] param_optimizer = [(n, p) for n, p in param_optimizer if not any(nd in n for nd in no_grad)] optimizer_grouped_parameters = [{ 'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay }, { 'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0 }] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=int(t_total * 0.1), num_training_steps=t_total) if args.fp16: try: from apex import amp except ImportError: raise ImportError( "Please install apex from https://www.github.com/nvidia/apex to 
use fp16 training." ) exit() model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) if n_gpu > 1: model = torch.nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True) global_step = 0 if args.do_train: train_features = convert_examples_to_features(train_examples, aspect_list, sentiment_list, args.max_seq_length, tokenizer, args.task) logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_examples)) logger.info(" Batch size = %d", args.train_batch_size) logger.info(" Num steps = %d", num_train_steps) all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long) all_attention_mask = torch.tensor( [f.attention_mask for f in train_features], dtype=torch.long) if args.task == 1: print("Excuting the task 1") elif args.task == 2: all_segment_ids = torch.tensor( [f.segment_ids for f in train_features], dtype=torch.long) else: print("Wrong here2") all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long) if args.task == 1: train_data = TensorDataset(all_input_ids, all_attention_mask, all_label_ids) elif args.task == 2: train_data = TensorDataset(all_input_ids, all_attention_mask, all_segment_ids, all_label_ids) else: print("Wrong here1") ''' print("========") print(train_data) print(type(train_data)) exit() ''' if args.local_rank == -1: train_sampler = RandomSampler(train_data) else: train_sampler = DistributedSampler(train_data) train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size) output_loss_file = os.path.join(args.output_dir, "loss") loss_fout = open(output_loss_file, 'w') model.train() ##########Pre-Pprocess######### ############################### for epoch in trange(int(args.num_train_epochs), desc="Epoch"): tr_loss = 0 nb_tr_examples, nb_tr_steps = 0, 0 for step, batch in enumerate( tqdm(train_dataloader, desc="Iteration")): #batch = tuple(t.to(device) if i != 3 else t for i, t in enumerate(batch)) batch = tuple(t.to(device) for i, t in enumerate(batch)) if args.task == 1: input_ids, attention_mask, label_ids = batch elif args.task == 2: input_ids, attention_mask, segment_ids, label_ids = batch else: print("Wrong here3") if args.task == 1: #loss, logits, hidden_states, attentions #output = model(input_ids=input_ids, token_type_ids=None, attention_mask=attention_mask, labels=label_ids) #loss = output.loss loss, logit = model(input_ids_org=input_ids, token_type_ids=None, attention_mask=attention_mask, sentence_label=label_ids, func="task_class") elif args.task == 2: #loss, logits, hidden_states, attentions #output = model(input_ids=input_ids, token_type_ids=segment_ids, attention_mask=attention_mask, labels=label_ids) #output = model(input_ids=input_ids, token_type_ids=segment_ids, attention_mask=attention_mask, labels=label_ids) #output = model(input_ids=input_ids, token_type_ids=None, attention_mask=attention_mask, labels=label_ids) #loss = output.loss loss, logit = model(input_ids_org=input_ids, token_type_ids=None, attention_mask=attention_mask, sentence_label=label_ids, func="task_class") else: print("Wrong!!") if n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu. 
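                # Gradient handling for the update below: the loss is scaled down by
                # gradient_accumulation_steps, backpropagated through apex AMP when
                # --fp16 is set, and gradients are clipped and the optimizer/scheduler
                # stepped only every gradient_accumulation_steps mini-batches, so each
                # optimizer step covers gradient_accumulation_steps batches.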
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps

                if args.fp16:
                    ###
                    #optimizer.backward(loss)
                    with amp.scale_loss(loss, optimizer) as scaled_loss:
                        scaled_loss.backward()
                    ###
                else:
                    loss.backward()

                loss_fout.write("{}\n".format(loss.item()))
                tr_loss += loss.item()
                nb_tr_examples += input_ids.size(0)
                nb_tr_steps += 1
                if (step + 1) % args.gradient_accumulation_steps == 0:
                    # modify learning rate with special warm up BERT uses
                    ###
                    if args.fp16:
                        torch.nn.utils.clip_grad_norm_(
                            amp.master_params(optimizer), args.max_grad_norm)
                    else:
                        torch.nn.utils.clip_grad_norm_(model.parameters(),
                                                       args.max_grad_norm)
                    optimizer.step()
                    scheduler.step()
                    model.zero_grad()
                    global_step += 1
                    ###

            if epoch < -1:
                continue
            else:
                model_to_save = model.module if hasattr(model,
                                                        'module') else model
                #output_model_file = os.path.join(args.output_dir, "pytorch_model.bin_{}".format(global_step))
                output_model_file = os.path.join(
                    args.output_dir, "pytorch_model.bin_{}".format(epoch))
                torch.save(model_to_save.state_dict(), output_model_file)

        # Save a trained model
        model_to_save = model.module if hasattr(
            model, 'module') else model  # Only save the model itself
        output_model_file = os.path.join(args.output_dir, "pytorch_model.bin")
        torch.save(model_to_save.state_dict(), output_model_file)
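# Illustrative sketch (not part of the original training script): how one of the
# per-epoch checkpoints saved above could be reloaded for later evaluation.
# RobertaForMaskedLMDomainTask and the "pytorch_model.bin_{epoch}" naming come from
# the code above; the function name and arguments here are hypothetical.
def load_epoch_checkpoint_sketch(output_dir, epoch, pretrain_model, num_labels,
                                 device="cpu"):
    model = RobertaForMaskedLMDomainTask.from_pretrained(
        pretrain_model,
        num_labels=num_labels,
        output_hidden_states=False,
        output_attentions=False,
        return_dict=True)
    state_path = os.path.join(output_dir, "pytorch_model.bin_{}".format(epoch))
    # strict=False mirrors the evaluation script below: checkpoint keys that do
    # not match the model are ignored instead of raising an error.
    model.load_state_dict(torch.load(state_path, map_location=device),
                          strict=False)
    return model.to(device)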
device = torch.device("cuda", local_rank) n_gpu = 1 # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.distributed.init_process_group(backend='nccl') random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) if n_gpu > 0: torch.cuda.manual_seed_all(seed) tokenizer = RobertaTokenizer.from_pretrained(model) # Prepare model model = RobertaForMaskedLMDomainTask.from_pretrained(model, output_hidden_states=True, return_dict=True, num_labels=num_labels) model.to(device) ########################## param_optimizer = list(model.named_parameters()) no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] #no_decay = ['bias', 'LayerNorm.weight'] no_grad = [ 'bert.encoder.layer.11.output.dense_ent', 'bert.encoder.layer.11.output.LayerNorm_ent' ] param_optimizer = [(n, p) for n, p in param_optimizer if not any(nd in n for nd in no_grad)] optimizer_grouped_parameters = [{ 'params':
def main(): parser = argparse.ArgumentParser() ## Required parameters ############### parser.add_argument( "--data_dir", default=None, type=str, required=True, help= "The input data dir. Should contain the .tsv files (or other data files) for the task." ) parser.add_argument( "--output_dir", default=None, type=str, required=True, help= "The output directory where the model predictions and checkpoints will be written." ) parser.add_argument("--pretrain_model", default='bert-case-uncased', type=str, required=True, help="Pre-trained model") parser.add_argument("--num_labels_task", default=None, type=int, required=True, help="num_labels_task") parser.add_argument( "--max_seq_length", default=128, type=int, help= "The maximum total input sequence length after WordPiece tokenization. \n" "Sequences longer than this will be truncated, and sequences shorter \n" "than this will be padded.") parser.add_argument("--do_train", default=False, action='store_true', help="Whether to run training.") parser.add_argument("--do_eval", default=False, action='store_true', help="Whether to run eval on the dev set.") parser.add_argument( "--do_lower_case", default=False, action='store_true', help="Set this flag if you are using an uncased model.") parser.add_argument("--eval_batch_size", default=32, type=int, help="Total batch size for training.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.") parser.add_argument( "--warmup_proportion", default=0.1, type=float, help= "Proportion of training to perform linear learning rate warmup for. " "E.g., 0.1 = 10%% of training.") parser.add_argument("--no_cuda", default=False, action='store_true', help="Whether not to use CUDA when available") parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument( '--gradient_accumulation_steps', type=int, default=1, help= "Number of updates steps to accumulate before performing a backward/update pass." ) parser.add_argument( '--fp16', default=False, action='store_true', help="Whether to use 16-bit float precision instead of 32-bit") parser.add_argument( '--loss_scale', type=float, default=0, help= "Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n" "0 (default value): dynamic loss scaling.\n" "Positive power of 2: static loss scaling value.\n") parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument( '--fp16_opt_level', type=str, default='O1', help= "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." 
"See details at https://nvidia.github.io/apex/amp.html") parser.add_argument("--task", default=2, type=int, required=True, help="Choose Task") parser.add_argument("--choose_eval_test_both", default=2, type=int, help="choose test dev both") ############### args = parser.parse_args() #print(args.do_train, args.do_eval) #exit() processors = Processor_1 num_labels = args.num_labels_task if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") n_gpu = torch.cuda.device_count() print(n_gpu) print(device) else: torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) n_gpu = 1 # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.distributed.init_process_group(backend='nccl') logger.info( "device: {}, n_gpu: {}, distributed training: {}, 16-bits training: {}" .format(device, n_gpu, bool(args.local_rank != -1), args.fp16)) if args.gradient_accumulation_steps < 1: raise ValueError( "Invalid gradient_accumulation_steps parameter: {}, should be >= 1" .format(args.gradient_accumulation_steps)) #args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps) random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if n_gpu > 0: torch.cuda.manual_seed_all(args.seed) if not args.do_eval: raise ValueError( "At least one of `do_train` or `do_eval` must be True.") ''' if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train: raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir)) ''' os.makedirs(args.output_dir, exist_ok=True) tokenizer = RobertaTokenizer.from_pretrained(args.pretrain_model) train_examples = None num_train_steps = None aspect_list = None sentiment_list = None processor = processors() num_labels = num_labels #train_examples, aspect_list, sentiment_list = processor.get_train_examples(args.data_dir) filenames = os.listdir(args.output_dir) filenames = [x for x in filenames if "pytorch_model.bin_" in x] print(filenames) file_mark = [] model_performace_dev = dict() model_performace_test = dict() for x in filenames: ### #test if args.choose_eval_test_both == 0: file_mark.append([x, True]) #eval elif args.choose_eval_test_both == 1: file_mark.append([x, False]) else: file_mark.append([x, True]) file_mark.append([x, False]) ### #file_mark.append([x, True]) #file_mark.append([x, False]) #### #### train_examples, aspect_list, sentiment_list = processor.get_test_examples( args.data_dir) test_examples, _, _ = processor.get_test_examples(args.data_dir) eval_examples, _, _ = processor.get_dev_examples(args.data_dir) if args.task == 1: num_labels = len(aspect_list) elif args.task == 2: num_labels = len(sentiment_list) else: print("What's task?") exit() test = convert_examples_to_features(test_examples, aspect_list, sentiment_list, args.max_seq_length, tokenizer, args.task) dev = convert_examples_to_features(eval_examples, aspect_list, sentiment_list, args.max_seq_length, tokenizer, args.task) ### for x, mark in file_mark: #mark: eval-True; test-False #choose_eval_test_both: eval-0, test-1, both-2 print(x, mark) output_model_file = os.path.join(args.output_dir, x) #model = RobertaForSequenceClassification.from_pretrained(args.pretrain_model, num_labels=num_labels, output_hidden_states=False, output_attentions=False, return_dict=True) model = RobertaForMaskedLMDomainTask.from_pretrained( args.pretrain_model, output_hidden_states=False, 
output_attentions=False, return_dict=True, num_labels=args.num_labels_task) model.load_state_dict(torch.load(output_model_file), strict=False) #strict False: ignore non-matching keys model.to(device) ####################################### param_optimizer = list(model.named_parameters()) no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] #no_decay = ['bias', 'LayerNorm.weight'] no_grad = [ 'bert.encoder.layer.11.output.dense_ent', 'bert.encoder.layer.11.output.LayerNorm_ent' ] param_optimizer = [(n, p) for n, p in param_optimizer if not any(nd in n for nd in no_grad)] optimizer_grouped_parameters = [{ 'params': [ p for n, p in param_optimizer if not any(nd in n for nd in no_decay) ], 'weight_decay': args.weight_decay }, { 'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0 }] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) #scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=int(t_total*0.1), num_training_steps=t_total) if args.fp16: try: from apex import amp except ImportError: raise ImportError( "Please install apex from https://www.github.com/nvidia/apex to use fp16 training." ) exit() model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) if n_gpu > 1: model = torch.nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True) ####################################### #param_optimizer = [para[0] for para in model.named_parameters()] #param_optimizer = [para for para in model.named_parameters()][-2] #print(param_optimizer) if mark: eval_features = dev else: eval_features = test logger.info("***** Running evaluation *****") logger.info(" Num examples = %d", len(eval_examples)) logger.info(" Batch size = %d", args.eval_batch_size) all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long) all_attention_mask = torch.tensor( [f.attention_mask for f in eval_features], dtype=torch.long) if args.task == 1: print("Excuting the task 1") elif args.task == 2: all_segment_ids = torch.tensor( [f.segment_ids for f in eval_features], dtype=torch.long) else: print("Wrong here2") all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long) if args.task == 1: eval_data = TensorDataset(all_input_ids, all_attention_mask, all_label_ids) elif args.task == 2: eval_data = TensorDataset(all_input_ids, all_attention_mask, all_segment_ids, all_label_ids) else: print("Wrong here1") if args.local_rank == -1: eval_sampler = RandomSampler(eval_data) else: eval_sampler = DistributedSampler(eval_data) eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size) if mark: output_eval_file = os.path.join( args.output_dir, "eval_results_{}.txt".format(x.split("_")[-1])) output_file_pred = os.path.join( args.output_dir, "eval_pred_{}.txt".format(x.split("_")[-1])) output_file_glod = os.path.join( args.output_dir, "eval_gold_{}.txt".format(x.split("_")[-1])) else: output_eval_file = os.path.join( args.output_dir, "test_results_{}.txt".format(x.split("_")[-1])) output_file_pred = os.path.join( args.output_dir, "test_pred_{}.txt".format(x.split("_")[-1])) output_file_glod = os.path.join( args.output_dir, 
"test_gold_{}.txt".format(x.split("_")[-1])) fpred = open(output_file_pred, "w") fgold = open(output_file_glod, "w") model.eval() eval_loss, eval_accuracy = 0, 0 nb_eval_steps, nb_eval_examples = 0, 0 for step, batch in enumerate(tqdm(eval_dataloader, desc="Iteration")): #batch = tuple(t.to(device) if i != 3 else t for i, t in enumerate(batch)) batch = tuple(t.to(device) for i, t in enumerate(batch)) if args.task == 1: input_ids, attention_mask, label_ids = batch elif args.task == 2: input_ids, attention_mask, segment_ids, label_ids = batch else: print("Wrong here3") if args.task == 1: #loss, logits, hidden_states, attentions ''' output = model(input_ids=input_ids, token_type_ids=None, attention_mask=attention_mask, labels=label_ids) logits = output.logits tmp_eval_loss = output.loss ''' # tmp_eval_loss, logits = model(input_ids_org=input_ids, sentence_label=label_ids, attention_mask=attention_mask, func="task_class_domain") #logits = output.logits #tmp_eval_loss = output.loss elif args.task == 2: #loss, logits, hidden_states, attentions ''' output = model(input_ids=input_ids, token_type_ids=None, attention_mask=attention_mask, labels=label_ids) logits = output.logits tmp_eval_loss = output.loss ''' # tmp_eval_loss, logits = model(input_ids_org=input_ids, sentence_label=label_ids, attention_mask=attention_mask, func="task_class_domain") #exit() #logits = output.logits #tmp_eval_loss = output.loss else: print("Wrong!!") logits = logits.detach().cpu().numpy() label_ids = label_ids.to('cpu').numpy() tmp_eval_accuracy, pred = accuracy(logits, label_ids) for a, b in zip(pred, label_ids): fgold.write("{}\n".format(b)) fpred.write("{}\n".format(a)) eval_loss += tmp_eval_loss.mean().item() eval_accuracy += tmp_eval_accuracy nb_eval_examples += input_ids.size(0) nb_eval_steps += 1 eval_loss = eval_loss / nb_eval_steps eval_accuracy = eval_accuracy / nb_eval_examples result = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy} with open(output_eval_file, "w") as writer: logger.info("***** Eval results *****") for key in sorted(result.keys()): logger.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) #if mark and step > int(math.ceil(len(eval_examples)/args.eval_batch_size)): if mark: model_performace_dev[x] = eval_accuracy else: model_performace_test[x] = eval_accuracy ################# ################# #####dev######### if args.choose_eval_test_both != 1: model_name_best = 0 score_best = 0 for model_name, score in model_performace_dev.items(): if score >= score_best: score_best = score model_name_best = model_name model = RobertaForMaskedLMDomainTask.from_pretrained( args.pretrain_model, output_hidden_states=False, output_attentions=False, return_dict=True, num_labels=args.num_labels_task) model_name_best = os.path.join(args.output_dir, model_name_best) model.load_state_dict(torch.load(model_name_best), strict=False) # Save a trained model logger.info("** ** * Saving fine - tuned model ** ** * ") model_to_save = model.module if hasattr( model, 'module') else model # Only save the model it-self output_model_file = os.path.join(args.output_dir, "pytorch_model.bin_dev_best") torch.save(model_to_save.state_dict(), output_model_file) if args.choose_eval_test_both != 0: model_name_best = 0 score_best = 0 for model_name, score in model_performace_test.items(): if score >= score_best: score_best = score model_name_best = model_name model = RobertaForMaskedLMDomainTask.from_pretrained( args.pretrain_model, output_hidden_states=False, output_attentions=False, 
            return_dict=True,
            num_labels=args.num_labels_task)
        model_name_best = os.path.join(args.output_dir, model_name_best)
        model.load_state_dict(torch.load(model_name_best), strict=False)
        # Save a trained model
        logger.info("** ** * Saving fine-tuned model ** ** * ")
        model_to_save = model.module if hasattr(
            model, 'module') else model  # Only save the model itself
        output_model_file = os.path.join(args.output_dir,
                                         "pytorch_model.bin_test_best")
        torch.save(model_to_save.state_dict(), output_model_file)
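# Sketch of the accuracy() helper that the evaluation loop above relies on; its real
# definition lives elsewhere in this repository. This version only assumes the
# observed contract: it returns (number of correct predictions, argmax predictions),
# since the caller sums the first value and later divides by nb_eval_examples.
def accuracy_sketch(logits, label_ids):
    preds = np.argmax(logits, axis=1)
    return int((preds == label_ids).sum()), preds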
def main(): parser = argparse.ArgumentParser() ## Required parameters ############### parser.add_argument( "--data_dir", default=None, type=str, required=True, help= "The input data dir. Should contain the .tsv files (or other data files) for the task." ) parser.add_argument( "--output_dir", default=None, type=str, required=True, help= "The output directory where the model predictions and checkpoints will be written." ) parser.add_argument("--pretrain_model", default='bert-case-uncased', type=str, required=True, help="Pre-trained model") parser.add_argument("--num_labels_task", default=None, type=int, required=True, help="num_labels_task") parser.add_argument( "--max_seq_length", default=128, type=int, help= "The maximum total input sequence length after WordPiece tokenization. \n" "Sequences longer than this will be truncated, and sequences shorter \n" "than this will be padded.") parser.add_argument("--do_train", default=False, action='store_true', help="Whether to run training.") parser.add_argument("--do_eval", default=False, action='store_true', help="Whether to run eval on the dev set.") parser.add_argument( "--do_lower_case", default=False, action='store_true', help="Set this flag if you are using an uncased model.") parser.add_argument("--eval_batch_size", default=32, type=int, help="Total batch size for training.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.") parser.add_argument( "--warmup_proportion", default=0.1, type=float, help= "Proportion of training to perform linear learning rate warmup for. " "E.g., 0.1 = 10%% of training.") parser.add_argument("--no_cuda", default=False, action='store_true', help="Whether not to use CUDA when available") parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument( '--gradient_accumulation_steps', type=int, default=1, help= "Number of updates steps to accumulate before performing a backward/update pass." ) parser.add_argument( '--fp16', default=False, action='store_true', help="Whether to use 16-bit float precision instead of 32-bit") parser.add_argument( '--loss_scale', type=float, default=0, help= "Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n" "0 (default value): dynamic loss scaling.\n" "Positive power of 2: static loss scaling value.\n") parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument( '--fp16_opt_level', type=str, default='O1', help= "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." 
"See details at https://nvidia.github.io/apex/amp.html") parser.add_argument("--task", default=2, type=int, required=True, help="Choose Task") parser.add_argument("--choose_eval_test_both", default=2, type=int, help="choose test dev both") ############### args = parser.parse_args() #print(args.do_train, args.do_eval) #exit() processors = Processor_1 num_labels = args.num_labels_task if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") n_gpu = torch.cuda.device_count() print(n_gpu) print(device) else: torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) n_gpu = 1 # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.distributed.init_process_group(backend='nccl') logger.info( "device: {}, n_gpu: {}, distributed training: {}, 16-bits training: {}" .format(device, n_gpu, bool(args.local_rank != -1), args.fp16)) if args.gradient_accumulation_steps < 1: raise ValueError( "Invalid gradient_accumulation_steps parameter: {}, should be >= 1" .format(args.gradient_accumulation_steps)) #args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps) random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if n_gpu > 0: torch.cuda.manual_seed_all(args.seed) if not args.do_eval: raise ValueError( "At least one of `do_train` or `do_eval` must be True.") ''' if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train: raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir)) ''' os.makedirs(args.output_dir, exist_ok=True) tokenizer = RobertaTokenizer.from_pretrained(args.pretrain_model) train_examples = None num_train_steps = None aspect_list = None sentiment_list = None processor = processors() num_labels = num_labels #train_examples, aspect_list, sentiment_list = processor.get_train_examples(args.data_dir) filenames = os.listdir(args.output_dir) filenames = [x for x in filenames if "pytorch_model.bin_test_best" in x] print(filenames) file_mark = [] #model_performace_dev = dict() model_performace_test = dict() for x in filenames: ### #eval:0 #test:1 if args.choose_eval_test_both == 0: file_mark.append([x, True]) elif args.choose_eval_test_both == 1: file_mark.append([x, False]) else: file_mark.append([x, True]) file_mark.append([x, False]) #### #### train_examples, aspect_list, sentiment_list = processor.get_test_examples( args.data_dir) test_examples, _, _ = processor.get_test_examples(args.data_dir) #eval_examples, _, _ = processor.get_dev_examples(args.data_dir) if args.task == 1: num_labels = len(aspect_list) elif args.task == 2: num_labels = len(sentiment_list) else: print("What's task?") exit() test = convert_examples_to_features(test_examples, aspect_list, sentiment_list, args.max_seq_length, tokenizer, args.task) #dev = convert_examples_to_features( #eval_examples, aspect_list, sentiment_list, args.max_seq_length, tokenizer, args.task) ### for x, mark in file_mark: #mark: eval-True; test-False #choose_eval_test_both: eval-0, test-1, both-2 if mark == True: #dev continue print(x, mark) output_model_file = os.path.join(args.output_dir, x) #model = RobertaForSequenceClassification.from_pretrained(args.pretrain_model, num_labels=num_labels, output_hidden_states=False, output_attentions=False, return_dict=True) model = RobertaForMaskedLMDomainTask.from_pretrained( args.pretrain_model, output_hidden_states=False, output_attentions=False, 
return_dict=True, num_labels=args.num_labels_task) model.load_state_dict(torch.load(output_model_file), strict=False) #strict False: ignore non-matching keys model.to(device) ####################################### param_optimizer = list(model.named_parameters()) no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] #no_decay = ['bias', 'LayerNorm.weight'] no_grad = [ 'bert.encoder.layer.11.output.dense_ent', 'bert.encoder.layer.11.output.LayerNorm_ent' ] param_optimizer = [(n, p) for n, p in param_optimizer if not any(nd in n for nd in no_grad)] optimizer_grouped_parameters = [{ 'params': [ p for n, p in param_optimizer if not any(nd in n for nd in no_decay) ], 'weight_decay': args.weight_decay }, { 'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0 }] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) #scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=int(t_total*0.1), num_training_steps=t_total) if args.fp16: try: from apex import amp except ImportError: raise ImportError( "Please install apex from https://www.github.com/nvidia/apex to use fp16 training." ) exit() model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) if n_gpu > 1: model = torch.nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True) ####################################### #param_optimizer = [para[0] for para in model.named_parameters()] #param_optimizer = [para for para in model.named_parameters()][-2] #print(param_optimizer) if mark: eval_features = dev print(0) else: eval_features = test print(1) logger.info("***** Running evaluation *****") #logger.info(" Num examples = %d", len(eval_examples)) logger.info(" Num examples = %d", len(eval_features)) logger.info(" Batch size = %d", args.eval_batch_size) all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long) all_attention_mask = torch.tensor( [f.attention_mask for f in eval_features], dtype=torch.long) if args.task == 1: print("Excuting the task 1") elif args.task == 2: all_segment_ids = torch.tensor( [f.segment_ids for f in eval_features], dtype=torch.long) else: print("Wrong here2") all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long) all_aspect_ids = torch.tensor([f.aspect_id for f in eval_features], dtype=torch.long) if args.task == 1: eval_data = TensorDataset(all_input_ids, all_attention_mask, all_label_ids, all_aspect_ids) elif args.task == 2: eval_data = TensorDataset(all_input_ids, all_attention_mask, all_segment_ids, all_label_ids, all_aspect_ids) else: print("Wrong here1") if args.local_rank == -1: eval_sampler = RandomSampler(eval_data) else: eval_sampler = DistributedSampler(eval_data) eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size) if mark: output_eval_file = os.path.join( args.output_dir, "eval_results_{}.txt".format(x.split("_")[-1])) output_file_pred = os.path.join( args.output_dir, "eval_pred_{}.txt".format(x.split("_")[-1])) output_file_glod = os.path.join( args.output_dir, "eval_gold_{}.txt".format(x.split("_")[-1])) else: output_eval_file = os.path.join( args.output_dir, "test_results_{}.txt".format(x.split("_")[-1])) output_file_pred = 
os.path.join( args.output_dir, "test_pred_{}.txt".format(x.split("_")[-1])) output_file_glod = os.path.join( args.output_dir, "test_gold_{}.txt".format(x.split("_")[-1])) fpred = open(output_file_pred, "w") fgold = open(output_file_glod, "w") model.eval() eval_loss, eval_accuracy = 0, 0 nb_eval_steps, nb_eval_examples = 0, 0 sentiment_map = sorted(list(set(sentiment_list))) aspect_map = sorted(list(set(aspect_list))) sentiment_map = {label: i for i, label in enumerate(sentiment_map)} aspect_map = {label: i for i, label in enumerate(aspect_map)} print(sentiment_map) print(aspect_map) #exit() #data_dict = {'laptop':{'negative':[],'neutral':[],'positive':[]},'restaurant':{'negative':[],'neutral':[],'positive':[]}} #aspect, sentiment, tensor all_aspect_list = list() all_sentiment_list = list() all_tensor_list = list() restaurant_aspect_list = list() restaurant_sentiment_list = list() restaurant_tensor_list = list() laptop_aspect_list = list() laptop_sentiment_list = list() laptop_tensor_list = list() for step, batch in enumerate(tqdm(eval_dataloader, desc="Iteration")): #batch = tuple(t.to(device) if i != 3 else t for i, t in enumerate(batch)) batch = tuple(t.to(device) for i, t in enumerate(batch)) if args.task == 1: input_ids, attention_mask, label_ids, aspect_ids = batch elif args.task == 2: input_ids, attention_mask, segment_ids, label_ids, aspect_ids = batch else: print("Wrong here3") if args.task == 1: #loss, logits, hidden_states, attentions ''' output = model(input_ids=input_ids, token_type_ids=None, attention_mask=attention_mask, labels=label_ids) logits = output.logits tmp_eval_loss = output.loss ''' # #tmp_eval_loss, logits = model(input_ids_org=input_ids, sentence_label=label_ids, attention_mask=attention_mask, func="task_class") with torch.no_grad(): rep_domain, rep_task = model(input_ids_org=input_ids, sentence_label=label_ids, attention_mask=attention_mask, func="in_domain_task_rep") #logits = output.logits #tmp_eval_loss = output.loss elif args.task == 2: #loss, logits, hidden_states, attentions ''' output = model(input_ids=input_ids, token_type_ids=None, attention_mask=attention_mask, labels=label_ids) logits = output.logits tmp_eval_loss = output.loss ''' # with torch.no_grad(): rep_domain, rep_task = model(input_ids_org=input_ids, sentence_label=label_ids, attention_mask=attention_mask, func="in_domain_task_rep") else: print("Wrong!!") #print(rep_domain.shape) #print(rep_task.shape) rep = torch.cat([rep_task, rep_domain], -1).to("cpu") #print(rep.shape) #label_ids:{'negative': 0, 'neutral': 1, 'positive': 2} #aspect_ids:{'laptop': 0, 'restaurant': 1} #sentiment_map={"laptop_negative":1,"laptop_neutral":3,"laptop_positive":5, "restaurant_negative":0,"restaurant_neutral":2,"restaurant_positive":4} sentiment_map = { "l_neg": 1, "l_ne": 3, "l_pos": 5, "Negative": 0, "Neutral": 2, "Postive": 4 } #sentiment_map={"laptop_negative":0,"laptop_positive":2, "restaurant_negative":1,"restaurant_positive":3} for index, tensor in enumerate(rep): #aspect, sentiment, tensor #if label_ids[index] == 1: #netural # continue if aspect_ids[index] == 0: if label_ids[index] == 0: #data_dict['laptop']['negative'].append(tensor) laptop_sentiment_list.append(torch.tensor(1)) all_sentiment_list.append(torch.tensor(1)) elif label_ids[index] == 1: #data_dict['laptop']['neutral'].append(tensor) laptop_sentiment_list.append(torch.tensor(3)) all_sentiment_list.append(torch.tensor(3)) elif label_ids[index] == 2: #data_dict['laptop']['positive'].append(tensor) laptop_sentiment_list.append(torch.tensor(5)) 
all_sentiment_list.append(torch.tensor(5)) laptop_aspect_list.append(aspect_ids[index]) #laptop_sentiment_list.append(label_ids[index]) laptop_tensor_list.append(tensor) else: if label_ids[index] == 0: #data_dict['restaurant']['negative'].append(tensor) restaurant_sentiment_list.append(torch.tensor(0)) all_sentiment_list.append(torch.tensor(0)) elif label_ids[index] == 1: #data_dict['restaurant']['neutral'].append(tensor) restaurant_sentiment_list.append(torch.tensor(2)) all_sentiment_list.append(torch.tensor(2)) elif label_ids[index] == 2: #data_dict['restaurant']['positive'].append(tensor) restaurant_sentiment_list.append(torch.tensor(4)) all_sentiment_list.append(torch.tensor(4)) restaurant_aspect_list.append(aspect_ids[index]) #restaurant_sentiment_list.append(label_ids[index]) restaurant_tensor_list.append(tensor) all_aspect_list.append(aspect_ids[index]) #all_sentiment_list.append(label_ids[index]) all_tensor_list.append(tensor) ######### laptop_aspect_list = torch.stack(laptop_aspect_list).to("cpu").numpy() laptop_sentiment_list = torch.stack(laptop_sentiment_list).to( "cpu").numpy() laptop_tensor_list = torch.stack(laptop_tensor_list).to("cpu").numpy() restaurant_aspect_list = torch.stack(restaurant_aspect_list).to( "cpu").numpy() restaurant_sentiment_list = torch.stack(restaurant_sentiment_list).to( "cpu").numpy() restaurant_tensor_list = torch.stack(restaurant_tensor_list).to( "cpu").numpy() all_aspect_list = torch.stack(all_aspect_list).to("cpu").numpy() all_sentiment_list = torch.stack(all_sentiment_list).to("cpu").numpy() all_tensor_list = torch.stack(all_tensor_list).to("cpu").numpy() ######### ######### print(laptop_aspect_list.shape) print(laptop_sentiment_list.shape) print(laptop_tensor_list.shape) print("===") print(restaurant_aspect_list.shape) print(restaurant_sentiment_list.shape) print(restaurant_tensor_list.shape) print("===") print(all_aspect_list.shape) #print(all_sentiment_list) print(all_sentiment_list.shape) print(all_tensor_list.shape) print("===") ######### #with open(args.output_dir+".json", "w") as outfile: # json.dump(data_dict, outfile) #####Start to draw######## #emb = TSNE(n_components=2, perplexity=15, learning_rate=10).fit_transform(all_tensor_list) #print(emb.shape) ''' = TSNE(n_components=2, perplexity=15, learning_rate=10).fit_transform(X) = TSNE(n_components=2, perplexity=15, learning_rate=10).fit_transform(X) = TSNE(n_components=2, perplexity=15, learning_rate=10).fit_transform(X) = TSNE(n_components=2, perplexity=15, learning_rate=10).fit_transform(X) = TSNE(n_components=2, perplexity=15, learning_rate=10).fit_transform(X) = TSNE(n_components=2, perplexity=15, learning_rate=10).fit_transform(X) ''' #tsne = TSNE(perplexity=30,metric="euclidean",callbacks=ErrorLogger(),n_jobs=64,random_state=42) ''' tsne = TSNE( perplexity=30, n_iter=50, metric="euclidean", callbacks=ErrorLogger(), n_jobs=64, random_state=42, ) embedding_train = tsne.fit(all_tensor_list) ''' #plot(all_tensor_list, all_sentiment_list) #cosine #perplexity #400-->1200 #64 tsne = TSNE( perplexity=64, n_iter=1200, metric="euclidean", callbacks=ErrorLogger(), n_jobs=64, random_state=42, learning_rate='auto', initialization='pca', n_components=2, ) ### #embedding_train = tsne.fit(all_tensor_list) #utils_.plot(x=embedding_train, y=all_aspect_list, colors=utils_.MOUSE_10X_COLORS, label_map=aspect_map) #utils_.plot(x=embedding_train, y=all_sentiment_list, colors=utils_.MOUSE_10X_COLORS, label_map=sentiment_map) ### ### embedding_train = tsne.fit(restaurant_tensor_list) 
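        # The block above appears to use openTSNE: tsne.fit() returns a 2-D embedding
        # aligned row-for-row with restaurant_tensor_list, and utils_.plot() below
        # colors each point by its entry in restaurant_sentiment_list, with
        # sentiment_map turning the combined domain/sentiment ids (0-5) into legend
        # labels.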
        utils_.plot(x=embedding_train,
                    y=restaurant_sentiment_list,
                    colors=utils_.MOUSE_10X_COLORS,
                    label_map=sentiment_map)
        ###
        ###
        #embedding_train = tsne.fit(laptop_tensor_list)
        #utils_.plot(x=embedding_train, y=laptop_sentiment_list, colors=utils_.MOUSE_10X_COLORS, label_map=sentiment_map)
        ###
        #plt.savefig(args.output_dir+'.pdf')
        plt.title("Semi-supervised contrastive learning")
        #plt.title("Fine-tune (Standard)")
        #plt.title("Fine-tune (Few-shot)")
        #plt.title("Supervised contrastive learning")
        #plt.title("Common fine-tuning")
        plt.savefig('output.pdf')
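# Example invocation of this visualization/evaluation entry point (illustrative only;
# the script filename and paths are placeholders, the flags match the argparse
# definitions above):
#
#   python run_tsne_eval.py \
#       --data_dir ./data/absa \
#       --output_dir ./outputs/absa_task2 \
#       --pretrain_model roberta-base \
#       --num_labels_task 3 \
#       --task 2 \
#       --choose_eval_test_both 1 \
#       --eval_batch_size 32 \
#       --do_eval --do_lower_case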