def freeze_BERT_parameters(model: BertForSequenceClassification, verbose: bool = True) -> None:
    # https://github.com/huggingface/transformers/issues/400
    if not isinstance(model, BertForSequenceClassification):
        raise TypeError("model must be a BertForSequenceClassification instance")

    # Table 3 in https://arxiv.org/pdf/1911.03090.pdf
    params_to_freeze = [
        "bert.embeddings.",
        "bert.encoder.layer.0.",
        "bert.encoder.layer.1.",
        "bert.encoder.layer.2.",
        "bert.encoder.layer.3.",
        "bert.encoder.layer.4.",
        "bert.encoder.layer.5.",
        "bert.encoder.layer.6.",
        "bert.encoder.layer.7.",
        "bert.encoder.layer.8.",
        "bert.encoder.layer.9.",
    ]

    for name, param in model.named_parameters():
        # if "classifier" not in name:  # freeze everything except the classifier layer
        #     param.requires_grad = False
        if any(pfreeze in name for pfreeze in params_to_freeze):
            param.requires_grad = False

    if verbose:
        num_trainable_params = sum(
            p.numel() for n, p in model.named_parameters() if p.requires_grad)
        trainable_param_names = [
            n for n, p in model.named_parameters() if p.requires_grad
        ]
        print(f"Params Trainable: {num_trainable_params}\n\t" +
              "\n\t".join(trainable_param_names))
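# Usage sketch (not from the original source): load a stock bert-base-uncased classifier and
# freeze the embeddings plus encoder layers 0-9, leaving layers 10-11, the pooler and the
# classification head trainable. `num_labels=2` is an assumed example value.
from transformers import BertForSequenceClassification

model = BertForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=2)
freeze_BERT_parameters(model, verbose=True)
# Hand only the still-trainable parameters to the optimizer.
trainable_params = [p for p in model.parameters() if p.requires_grad]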
def get_model():
    if args.model == 'trans':
        transformer_config = BertConfig.from_pretrained('bert-base-uncased',
                                                        num_labels=args.labels)
        if args.init_only:
            model = BertForSequenceClassification(config=transformer_config).to(device)
        else:
            model = BertForSequenceClassification.from_pretrained(
                'bert-base-uncased', config=transformer_config).to(device)

        param_optimizer = list(model.named_parameters())
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {
                'params': [p for n, p in param_optimizer
                           if not any(nd in n for nd in no_decay)],
                'weight_decay': 0.01
            },
            {
                'params': [p for n, p in param_optimizer
                           if any(nd in n for nd in no_decay)],
                'weight_decay': 0.0
            },
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.lr)
        es = EarlyStopping(patience=args.patience, percentage=False, mode='max',
                           min_delta=0.0)
        # Note: get_constant_schedule_with_warmup expects an integer number of warm-up
        # steps; 0.05 was presumably intended as a warm-up proportion.
        scheduler = get_constant_schedule_with_warmup(optimizer, num_warmup_steps=0.05)
    else:
        if args.model == 'cnn':
            model = CNN_MODEL(tokenizer, args, n_labels=args.labels).to(device)
        elif args.model == 'lstm':
            model = LSTM_MODEL(tokenizer, args, n_labels=args.labels).to(device)

        optimizer = AdamW(model.parameters(), lr=args.lr)
        scheduler = ReduceLROnPlateau(optimizer, verbose=True)
        es = EarlyStopping(patience=args.patience, percentage=False, mode='max',
                           min_delta=0.0)

    return model, optimizer, scheduler, es
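# `EarlyStopping` is referenced above but not defined in this snippet. A minimal sketch,
# assuming the common patience-based implementation with the same constructor signature
# (patience / percentage / mode / min_delta); the original class may differ.
class EarlyStopping:
    def __init__(self, patience=5, percentage=False, mode='max', min_delta=0.0):
        self.patience = patience
        self.percentage = percentage
        self.mode = mode
        self.min_delta = min_delta
        self.best = None
        self.num_bad_epochs = 0

    def _is_better(self, metric):
        # First observed value is always an improvement.
        if self.best is None:
            return True
        delta = self.min_delta if not self.percentage else self.best * self.min_delta / 100
        if self.mode == 'max':
            return metric > self.best + delta
        return metric < self.best - delta

    def step(self, metric):
        """Update with the latest validation metric; returns True when training should stop."""
        if self._is_better(metric):
            self.best = metric
            self.num_bad_epochs = 0
        else:
            self.num_bad_epochs += 1
        return self.num_bad_epochs >= self.patience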
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    transformer_config = BertConfig.from_pretrained('bert-base-uncased',
                                                    num_labels=args.labels)
    model = BertForSequenceClassification(transformer_config).to(device)
else:
    tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
    transformer_config = RobertaConfig.from_pretrained('roberta-base',
                                                       num_labels=args.labels)  # , use_bfloat16=True
    model = RobertaForSequenceClassification.from_pretrained(
        'roberta-base', config=transformer_config).to(device)

collate_fn = partial(collate_fever, tokenizer=tokenizer, device=device)
print(args, flush=True)

param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
    {
        'params': [p for n, p in param_optimizer
                   if not any(nd in n for nd in no_decay)],
        'weight_decay': 0.01
    },
    {
        'params': [p for n, p in param_optimizer
                   if any(nd in n for nd in no_decay)],
        'weight_decay': 0.0
    },
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.lr)

if args.mode == 'test':
from transformers import BertTokenizer

tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
model.resize_token_embeddings(len(tokenizer))
model.config.pad_token_id = model.config.eos_token_id
model.config.max_position_embeddings = 1024
model.to(device)
model.train()

from transformers import AdamW

no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
    {
        'params': [p for n, p in model.named_parameters()
                   if not any(nd in n for nd in no_decay)],
        'weight_decay': 0.01
    },
    {
        'params': [p for n, p in model.named_parameters()
                   if any(nd in n for nd in no_decay)],
        'weight_decay': 0.0
    },
]
epochs = 400
optimizer = AdamW(optimizer_grouped_parameters, lr=1e-5)

from transformers import get_linear_schedule_with_warmup
def train_process(config, train_load, train_sampler, model_name):
    # load source bert weights
    model_config = BertConfig.from_pretrained(
        pretrained_model_name_or_path="../user_data/bert_source/{}_config.json".format(model_name))
    # model_config = BertConfig()
    model_config.vocab_size = len(pd.read_csv('../user_data/vocab', names=["score"]))

    model = BertForSequenceClassification(config=model_config)
    checkpoint = torch.load(
        '../user_data/save_bert/{}_checkpoint.pth.tar'.format(model_name),
        map_location=torch.device('cpu'))
    model.load_state_dict(checkpoint['status'], strict=False)
    print('***********load pretrained mlm {} weight*************'.format(model_name))

    for param in model.parameters():
        param.requires_grad = True

    # 4) Move the model to its GPU before wrapping it
    model = model.to(config.device)

    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [p for n, p in model.named_parameters()
                       if not any(nd in n for nd in no_decay)],
            "weight_decay": config.weight_decay,
        },
        {
            "params": [p for n, p in model.named_parameters()
                       if any(nd in n for nd in no_decay)],
            "weight_decay": 0.0
        },
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=config.learning_rate)
    # t_total = len(train_load) * config.num_train_epochs
    # scheduler = get_linear_schedule_with_warmup(
    #     optimizer, num_warmup_steps=t_total * config.warmup_proportion, num_training_steps=t_total
    # )
    cudnn.benchmark = True

    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        # 5) Wrap the model for distributed training
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[config.local_rank])

    model.train()
    if config.fgm:
        fgm = FGM(model)

    for epoch in range(config.num_train_epochs):
        train_sampler.set_epoch(epoch)
        torch.cuda.empty_cache()

        for batch, (input_ids, token_type_ids, attention_mask, label) in enumerate(train_load):
            input_ids = input_ids.cuda(config.local_rank, non_blocking=True)
            attention_mask = attention_mask.cuda(config.local_rank, non_blocking=True)
            token_type_ids = token_type_ids.cuda(config.local_rank, non_blocking=True)
            label = label.cuda(config.local_rank, non_blocking=True)

            outputs = model(input_ids=input_ids,
                            attention_mask=attention_mask,
                            token_type_ids=token_type_ids,
                            labels=label)
            loss = outputs.loss

            model.zero_grad()
            loss.backward()
            # torch.nn.utils.clip_grad_norm_(model.parameters(), config.max_grad_norm)

            if config.fgm:
                fgm.attack()  # add adversarial perturbation to the embeddings
                loss_adv = model(input_ids=input_ids,
                                 attention_mask=attention_mask,
                                 token_type_ids=token_type_ids,
                                 labels=label).loss
                loss_adv.backward()  # backprop, accumulating the adversarial gradients on top of the normal ones
                fgm.restore()  # restore the original embedding parameters

            optimizer.step()
            # scheduler.step()

        # dev_auc = model_evaluate(config, model, valid_load)

        # synchronize the processes and compute the distributed loss
        torch.distributed.barrier()

        # reduce_dev_auc = reduce_auc(dev_auc, config.nprocs).item()
        # if reduce_dev_auc > best_dev_auc:
        #     best_dev_auc = reduce_dev_auc
        #     is_best = True

        now = strftime("%Y-%m-%d %H:%M:%S", localtime())
        msg = 'model_name:{},time:{},epoch:{}/{}'

        if config.local_rank in [0, -1]:
            print(msg.format(model_name, now, epoch + 1, config.num_train_epochs))
            checkpoint = {"status": model.module.state_dict()}
            torch.save(
                checkpoint,
                '../user_data/save_model' + os.sep + '{}_checkpoint.pth.tar'.format(model_name))
            del checkpoint

        torch.distributed.barrier()
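# `FGM` is used in train_process above but not defined in this snippet. A minimal sketch of
# the usual Fast Gradient Method adversarial-training helper with the same attack()/restore()
# interface; the epsilon value and embedding parameter name are assumptions and may differ
# from the original implementation.
import torch

class FGM:
    def __init__(self, model, epsilon=1.0, emb_name='word_embeddings'):
        self.model = model
        self.epsilon = epsilon
        self.emb_name = emb_name
        self.backup = {}

    def attack(self):
        # Perturb the embedding weights along the gradient direction (after loss.backward()).
        for name, param in self.model.named_parameters():
            if param.requires_grad and self.emb_name in name:
                if param.grad is None:
                    continue
                self.backup[name] = param.data.clone()
                norm = torch.norm(param.grad)
                if norm != 0 and not torch.isnan(norm):
                    param.data.add_(self.epsilon * param.grad / norm)

    def restore(self):
        # Put the original embedding weights back after the adversarial backward pass.
        for name, param in self.model.named_parameters():
            if param.requires_grad and self.emb_name in name and name in self.backup:
                param.data = self.backup[name]
        self.backup = {}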
class Classifier:
    """The Classifier"""

    #############################################
    def __init__(self, train_batch_size=16, eval_batch_size=8, max_length=128,
                 lr=2e-5, eps=1e-6, n_epochs=11):
        """
        :param train_batch_size: (int) Training batch size
        :param eval_batch_size: (int) Batch size while using the `predict` method.
        :param max_length: (int) Maximum length for padding
        :param lr: (float) Learning rate
        :param eps: (float) Adam optimizer epsilon parameter
        :param n_epochs: (int) Number of epochs to train
        """
        # model parameters
        self.train_batch_size = train_batch_size
        self.eval_batch_size = eval_batch_size
        self.max_length = max_length
        self.lr = lr
        self.eps = eps
        self.n_epochs = n_epochs

        # Information to be set or updated later
        self.trainset = None
        self.categories = None
        self.labels = None
        self.model = None

        # Tokenizer
        self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')

        # The model
        #
        # We first need to specify some configurations to the model
        configs = BertConfig.from_pretrained(
            'bert-base-uncased', num_labels=3, type_vocab_size=8)  # BERT configuration
        self.model = BertForSequenceClassification(configs)

        # We replace the classification head of the model (initially a simple fully connected layer)
        clf = Net()
        self.model.classifier = clf
        self.model.to(device)  # putting the model on GPU if available, otherwise device is CPU

    def preprocess(self, sentences):
        """
        The preprocessing function
        :param sentences: List of all sentences to be given at once.
        :return: List of preprocessed sentences.
        """
        preprocessed = []
        for sentence in tqdm(sentences):
            assert isinstance(sentence, str)
            doc = nlp(str(sentence))
            tokens = []
            for token in doc:
                if (not token.is_punct) or (token.text not in [',', '-', '.', "'", '!']):
                    # Some punctuation can be informative for BERT
                    tokens.append(token.text)
            tokens = (' '.join(tokens)).lower().replace(" '", "'")
            preprocessed.append(tokens)
        return preprocessed

    def question(self, category):
        """
        Computes the question corresponding to each category
        :param category: (str) The category/aspect
        :return: (str) computed question using the QA-M task
        """
        assert category in self.categories
        if category == 'AMBIENCE#GENERAL':
            return "what do you think of the ambience of it ?"
        elif category in ('DRINKS#PRICES', 'FOOD#PRICES', 'RESTAURANT#PRICES'):
            return "what do you think of the price of it ?"
        elif category in ('DRINKS#QUALITY', 'FOOD#QUALITY'):
            return "what do you think of the quality of it ?"
        elif category == 'DRINKS#STYLE_OPTIONS':
            return "what do you think of drinks ?"
        elif category == 'FOOD#STYLE_OPTIONS':
            return "what do you think of the food ?"
        elif category == 'LOCATION#GENERAL':
            return "what do you think of the location of it ?"
        elif category in ('RESTAURANT#GENERAL', 'RESTAURANT#MISCELLANEOUS'):
            return "what do you think of the restaurant ?"
        elif category == 'SERVICE#GENERAL':
            return "what do you think of the service of it ?"
    def train(self, trainfile):
        """Trains the classifier model on the training set stored in file trainfile"""
        # Loading the data and splitting up its information into lists
        print("\n Loading training data...")
        trainset = np.genfromtxt(trainfile, delimiter='\t', dtype=str, comments=None)
        self.trainset = trainset
        n = len(trainset)
        targets = trainset[:, 0]
        categories = trainset[:, 1]
        self.labels = list(Counter(targets).keys())  # label names
        self.categories = list(Counter(categories).keys())  # category names
        start_end = [[int(x) for x in w.split(':')] for w in trainset[:, 3]]
        # target words
        words_of_interest = [
            trainset[:, 4][i][start_end[i][0]:start_end[i][1]] for i in range(n)
        ]
        # sentences to be classified
        sentences = [str(s) for s in trainset[:, 4]]

        # Preprocessing the text data
        print(" Preprocessing the text data...")
        sentences = self.preprocess(sentences)

        # Computing question sequences
        print(" Computing questions...")
        questions = [self.question(categories[i]) for i in tqdm(range(n))]

        # Tokenization
        attention_masks = []
        input_ids = []
        token_type_ids = []
        labels = []
        for word, question, answer in zip(words_of_interest, questions, sentences):
            encoded_dict = self.tokenizer.encode_plus(
                answer,
                question + ' ' + word.lower(),
                add_special_tokens=True,  # Add '[CLS]' and '[SEP]' tokens
                max_length=self.max_length,  # Pad & truncate all sequences
                pad_to_max_length=True,
                return_attention_mask=True,  # Construct attention masks
                return_tensors='pt',  # Return pytorch tensors.
            )
            attention_masks.append(encoded_dict['attention_mask'])
            input_ids.append(encoded_dict['input_ids'])
            token_type_ids.append(encoded_dict['token_type_ids'])
        attention_masks = torch.cat(attention_masks, dim=0)
        input_ids = torch.cat(input_ids, dim=0)
        token_type_ids = torch.cat(token_type_ids, dim=0)

        # Converting polarities into integers (0: positive, 1: negative, 2: neutral)
        for target in targets:
            if target == 'positive':
                labels.append(0)
            elif target == 'negative':
                labels.append(1)
            elif target == 'neutral':
                labels.append(2)
        labels = torch.tensor(labels)

        # Pytorch data iterators
        train_data = TensorDataset(input_ids, attention_masks, token_type_ids, labels)
        train_sampler = RandomSampler(train_data)
        train_dataloader = DataLoader(train_data,
                                      batch_size=self.train_batch_size,
                                      sampler=train_sampler)

        # Optimizer and scheduler (we are using a linear scheduler without warm-up)
        no_decay = ['bias', 'gamma', 'beta']  # These parameters are excluded from weight decay
        optimizer_parameters = [
            {
                'params': [p for n, p in self.model.named_parameters()
                           if not any(nd in n for nd in no_decay)],
                'weight_decay': 0.01
            },
            {
                'params': [p for n, p in self.model.named_parameters()
                           if any(nd in n for nd in no_decay)],
                'weight_decay': 0.0
            },
        ]
        optimizer = AdamW(optimizer_parameters, lr=self.lr, eps=self.eps)
        total_steps = len(train_dataloader) * self.n_epochs
        scheduler = get_linear_schedule_with_warmup(optimizer,
                                                    num_warmup_steps=0,
                                                    num_training_steps=total_steps)

        # Training
        initial_t0 = time.time()
        for epoch in range(self.n_epochs):
            print('\n ======== Epoch %d / %d ========' % (epoch + 1, self.n_epochs))
            print(' Training...\n')
            t0 = time.time()
            total_train_loss = 0
            self.model.train()
            for step, batch in enumerate(train_dataloader):
                batch = tuple(t.to(device) for t in batch)
                input_ids_, input_mask_, segment_ids_, label_ids_ = batch
                self.model.zero_grad()
                loss, _ = self.model(input_ids_,
                                     token_type_ids=segment_ids_,
                                     attention_mask=input_mask_,
                                     labels=label_ids_)
                total_train_loss += loss.item()
                loss.backward()
                # clip gradient norm
                torch.nn.utils.clip_grad_norm_(self.model.parameters(), 1.0)
                optimizer.step()
                scheduler.step()

            avg_train_loss = total_train_loss / len(train_dataloader)
            training_time = format_time(time.time() - t0)
            # print(" Average training loss: {0:.2f}".format(avg_train_loss))
            print(" Training epoch duration: {:}".format(training_time))
            print(" Total training time: {:}".format(format_time(time.time() - initial_t0)))

    def predict(self, datafile):
        """Predicts class labels for the input instances in file 'datafile'
        Returns the list of predicted labels
        """
        # Loading the data and splitting up its information in lists
        evalset = np.genfromtxt(datafile, delimiter='\t', dtype=str, comments=None)
        m = len(evalset)
        categories = evalset[:, 1]
        start_end = [[int(x) for x in w.split(':')] for w in evalset[:, 3]]
        # target words
        words_of_interest = [
            evalset[:, 4][i][start_end[i][0]:start_end[i][1]] for i in range(m)
        ]
        # sentences to be classified
        sentences = [str(s) for s in evalset[:, 4]]

        # Preprocessing the text data
        print("\n Preprocessing the text data...")
        sentences = self.preprocess(sentences)

        # Computing question sequences
        print(" Computing questions...")
        questions = [self.question(categories[i]) for i in tqdm(range(m))]

        # Tokenization
        attention_masks = []
        input_ids = []
        token_type_ids = []
        for word, question, answer in zip(words_of_interest, questions, sentences):
            encoded_dict = self.tokenizer.encode_plus(
                answer,
                question + ' ' + word.lower(),
                add_special_tokens=True,  # Add '[CLS]' and '[SEP]'
                max_length=self.max_length,  # Pad & truncate all sequences
                pad_to_max_length=True,
                return_attention_mask=True,  # Construct attention masks
                return_tensors='pt',  # Return pytorch tensors.
            )
            attention_masks.append(encoded_dict['attention_mask'])
            input_ids.append(encoded_dict['input_ids'])
            token_type_ids.append(encoded_dict['token_type_ids'])
        attention_masks = torch.cat(attention_masks, dim=0)
        input_ids = torch.cat(input_ids, dim=0)
        token_type_ids = torch.cat(token_type_ids, dim=0)

        # Pytorch data iterators
        eval_data = TensorDataset(input_ids, attention_masks, token_type_ids)
        eval_sampler = SequentialSampler(eval_data)
        eval_dataloader = DataLoader(eval_data,
                                     batch_size=self.eval_batch_size,
                                     sampler=eval_sampler)

        # Prediction
        named_labels = []
        self.model.eval()
        for batch in eval_dataloader:
            batch = tuple(t.to(device) for t in batch)
            input_ids, input_mask, segment_ids = batch
            with torch.no_grad():
                logits = self.model(input_ids,
                                    token_type_ids=segment_ids,
                                    attention_mask=input_mask)[0]
            logits = softmax(logits, dim=-1)
            logits = logits.detach().cpu().numpy()
            outputs = np.argmax(logits, axis=1)
            # converting integer labels into named labels
            for label in outputs:
                if label == 0:
                    named_labels.append('positive')
                elif label == 1:
                    named_labels.append('negative')
                elif label == 2:
                    named_labels.append('neutral')
        return np.array(named_labels)
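# `format_time` is called in the training loop above but not defined in this snippet.
# A minimal sketch, assuming the usual helper that rounds elapsed seconds and formats them
# as hh:mm:ss; the original implementation may differ.
import datetime

def format_time(elapsed):
    """Format a duration given in seconds as a string hh:mm:ss."""
    elapsed_rounded = int(round(elapsed))
    return str(datetime.timedelta(seconds=elapsed_rounded))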
class bert_classifier(object):
    def __init__(self):
        self.config = Config()
        self.device_setup()
        self.model_setup()

    def device_setup(self):
        """
        Configure the device and load the BERT model.
        :return:
        """
        # Use the GPU (if available) via model.to(device)
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

        model_save_path = self.config.get("result", "model_save_path")
        config_save_path = self.config.get("result", "config_save_path")
        vocab_save_path = self.config.get("result", "vocab_save_path")

        self.model_config = BertConfig.from_json_file(config_save_path)
        self.model = BertForSequenceClassification(self.model_config)
        self.state_dict = torch.load(model_save_path)
        self.model.load_state_dict(self.state_dict)
        self.tokenizer = transformers.BertTokenizer(vocab_save_path)
        self.model.to(self.device)
        self.model.eval()

    def model_setup(self):
        weight_decay = self.config.get("training_rule", "weight_decay")
        learning_rate = self.config.get("training_rule", "learning_rate")

        # Define the optimizer and loss function
        # Prepare optimizer and schedule (linear warmup and decay)
        no_decay = ['bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {
                'params': [p for n, p in self.model.named_parameters()
                           if not any(nd in n for nd in no_decay)],
                'weight_decay': weight_decay
            },
            {
                'params': [p for n, p in self.model.named_parameters()
                           if any(nd in n for nd in no_decay)],
                'weight_decay': 0.0
            },
        ]
        self.optimizer = AdamW(optimizer_grouped_parameters, lr=learning_rate)
        self.criterion = nn.CrossEntropyLoss()

    def predict(self, sentence):
        input_ids, token_type_ids = convert_text_to_ids(self.tokenizer, sentence)
        input_ids = seq_padding(self.tokenizer, [input_ids])
        token_type_ids = seq_padding(self.tokenizer, [token_type_ids])
        # LongTensor is required
        input_ids, token_type_ids = input_ids.long(), token_type_ids.long()
        # Zero the gradients
        self.optimizer.zero_grad()
        # Move to the GPU
        input_ids, token_type_ids = input_ids.to(self.device), token_type_ids.to(self.device)
        output = self.model(input_ids=input_ids, token_type_ids=token_type_ids)
        y_pred_prob = output[0]
        y_pred_label = y_pred_prob.argmax(dim=1)
        print(y_pred_label)
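# `convert_text_to_ids` and `seq_padding` are used by predict() above but not defined in this
# snippet. Minimal sketches, assuming convert_text_to_ids wraps tokenizer.encode_plus and
# seq_padding pads a batch to its longest sequence with the pad token; the original helpers
# may behave differently.
import torch

def convert_text_to_ids(tokenizer, text, max_len=512):
    # Encode a single sentence into input ids and segment ids.
    encoded = tokenizer.encode_plus(text, max_length=max_len, truncation=True)
    return encoded["input_ids"], encoded["token_type_ids"]

def seq_padding(tokenizer, batch):
    # Right-pad every sequence in the batch to the length of the longest one.
    pad_id = tokenizer.pad_token_id
    max_len = max(len(seq) for seq in batch)
    padded = [seq + [pad_id] * (max_len - len(seq)) for seq in batch]
    return torch.tensor(padded)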
def main():
    parser = argparse.ArgumentParser()

    ## Required parameters
    parser.add_argument("--data_dir",
                        default='/hdd/lujunyu/dataset/multi_turn_corpus/ubuntu/',
                        type=str,
                        required=False,
                        help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
    parser.add_argument("--task_name",
                        default='ubuntu',
                        type=str,
                        required=False,
                        help="The name of the task to train.")
    parser.add_argument("--output_dir",
                        default='/hdd/lujunyu/model/chatbert/ubuntu_without_pretraining/',
                        type=str,
                        required=False,
                        help="The output directory where the model checkpoints will be written.")

    ## Other parameters
    parser.add_argument("--init_model_name",
                        default='bert-base-uncased',
                        type=str,
                        help="Initial checkpoint (usually from a pre-trained BERT model).")
    parser.add_argument("--do_lower_case",
                        default=True,
                        action='store_true',
                        help="Whether to lower case the input text. True for uncased models, False for cased models.")
    parser.add_argument("--data_augmentation",
                        default=False,
                        action='store_true',
                        help="Whether to use augmentation")
    parser.add_argument("--max_seq_length",
                        default=256,
                        type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. \n"
                             "Sequences longer than this will be truncated, and sequences shorter \n"
                             "than this will be padded.")
    parser.add_argument("--do_train",
                        default=True,
                        action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--do_test",
                        default=True,
                        action='store_true',
                        help="Whether to run eval on the test set.")
    parser.add_argument("--train_batch_size",
                        default=500,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument("--eval_batch_size",
                        default=500,
                        type=int,
                        help="Total batch size for eval.")
    parser.add_argument("--learning_rate",
                        default=3e-3,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--num_train_epochs",
                        default=10.0,
                        type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--warmup_steps",
                        default=0.0,
                        type=float,
                        help="Proportion of training to perform linear learning rate warmup for. "
                             "E.g., 0.1 = 10%% of training.")
" "E.g., 0.1 = 10%% of training.") parser.add_argument("--weight_decay", default=1e-3, type=float, help="weight_decay") parser.add_argument("--save_checkpoints_steps", default=8000, type=int, help="How often to save the model checkpoint.") parser.add_argument("--no_cuda", default=False, action='store_true', help="Whether not to use CUDA when available") parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument('--gradient_accumulation_steps', type=int, default=20, help="Number of updates steps to accumualte before performing a backward/update pass.") args = parser.parse_args() if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") n_gpu = torch.cuda.device_count() else: device = torch.device("cuda", args.local_rank) n_gpu = 1 # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.distributed.init_process_group(backend='nccl') logger.info("device %s n_gpu %d distributed training %r", device, n_gpu, bool(args.local_rank != -1)) if args.gradient_accumulation_steps < 1: raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format( args.gradient_accumulation_steps)) args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps) random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if n_gpu > 0: torch.cuda.manual_seed_all(args.seed) if not args.do_train and not args.do_eval: raise ValueError("At least one of `do_train` or `do_eval` must be True.") bert_config = BertConfig.from_pretrained(args.init_model_name, num_labels=2) if args.max_seq_length > bert_config.max_position_embeddings: raise ValueError( "Cannot use sequence length {} because the BERT model was only trained up to sequence length {}".format( args.max_seq_length, bert_config.max_position_embeddings)) if os.path.exists(args.output_dir) and os.listdir(args.output_dir): if args.do_train: raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir)) else: os.makedirs(args.output_dir, exist_ok=True) tokenizer = BertTokenizer.from_pretrained(args.init_model_name, do_lower_case=args.do_lower_case) if args.data_augmentation: train_dataset = UbuntuDatasetForSP( file_path=os.path.join(args.data_dir, "train_augment_3.txt"), max_seq_length=args.max_seq_length, tokenizer=tokenizer ) else: train_dataset = UbuntuDatasetForSP( file_path=os.path.join(args.data_dir, "train.txt"), max_seq_length=args.max_seq_length, tokenizer=tokenizer ) eval_dataset = UbuntuDatasetForSP( file_path=os.path.join(args.data_dir, "valid.txt"), max_seq_length=args.max_seq_length, tokenizer=tokenizer ) train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=args.train_batch_size, sampler=RandomSampler(train_dataset), num_workers=4) eval_dataloader = torch.utils.data.DataLoader(eval_dataset, batch_size=args.eval_batch_size, sampler=SequentialSampler(eval_dataset), num_workers=4) model = BertForSequenceClassification(config=bert_config) model.to(device) num_train_steps = None if args.do_train: num_train_steps = int( len(train_dataset) / args.train_batch_size / args.gradient_accumulation_steps * args.num_train_epochs) # Prepare optimizer param_optimizer = list(model.named_parameters()) # remove pooler, which is not used thus it produce None grad that break apex 
        param_optimizer = [n for n in param_optimizer]
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
             'weight_decay': args.weight_decay},
            {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
             'weight_decay': 0.0}
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate)
        scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=num_train_steps)
    else:
        optimizer = None
        scheduler = None

    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
                                                          output_device=args.local_rank)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)

    global_step = 0
    best_metric = 0.0
    if args.do_train:
        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", len(train_dataset))
        logger.info("  Batch size = %d", args.train_batch_size)
        logger.info("  Num steps = %d", num_train_steps)

        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_examples, nb_tr_steps = 0, 0
            for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
                batch = tuple(t.to(device) for t in batch)
                input_ids, input_mask, segment_ids, label_ids = batch
                loss, _ = model(input_ids, token_type_ids=segment_ids, attention_mask=input_mask, labels=label_ids)
                if n_gpu > 1:
                    loss = loss.mean()  # mean() to average on multi-gpu.
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps
                loss.backward()
                tr_loss += loss.item()
                nb_tr_examples += input_ids.size(0)
                nb_tr_steps += 1
                if (step + 1) % args.gradient_accumulation_steps == 0:
                    optimizer.step()  # We have accumulated enough gradients
                    scheduler.step()
                    model.zero_grad()
                    global_step += 1

                if step % args.save_checkpoints_steps == 0:
                    model.eval()
                    f = open(os.path.join(args.output_dir, 'logits_dev.txt'), 'w')
                    eval_loss = 0
                    nb_eval_steps, nb_eval_examples = 0, 0
                    logits_all = []
                    for input_ids, input_mask, segment_ids, label_ids in eval_dataloader:
                        input_ids = input_ids.to(device)
                        input_mask = input_mask.to(device)
                        segment_ids = segment_ids.to(device)
                        label_ids = label_ids.to(device)

                        with torch.no_grad():
                            tmp_eval_loss, logits = model(input_ids, token_type_ids=segment_ids,
                                                          attention_mask=input_mask, labels=label_ids)

                        logits = logits.detach().cpu().numpy()
                        logits_all.append(logits)
                        label_ids = label_ids.cpu().numpy()

                        for logit, label in zip(logits, label_ids):
                            logit = '{},{}'.format(logit[0], logit[1])
                            f.write('_\t{}\t{}\n'.format(logit, label))
                        eval_loss += tmp_eval_loss.mean().item()
                        nb_eval_examples += input_ids.size(0)
                        nb_eval_steps += 1
                    f.close()

                    logits_all = np.concatenate(logits_all, axis=0)
                    eval_loss = eval_loss / nb_eval_steps

                    result = evaluate(os.path.join(args.output_dir, 'logits_dev.txt'))
                    result.update({'eval_loss': eval_loss})

                    output_eval_file = os.path.join(args.output_dir, "eval_results_dev.txt")
                    with open(output_eval_file, "a") as writer:
                        logger.info("***** Eval results *****")
                        for key in sorted(result.keys()):
                            logger.info("  %s = %s", key, str(result[key]))
                            writer.write("%s = %s\n" % (key, str(result[key])))

                    ### Save the best checkpoint
                    if best_metric < result['R10@1'] + result['R10@2']:
                        try:
                            ### Remove 'module' prefix when using DataParallel
                            state_dict = model.module.state_dict()
                        except AttributeError:
                            state_dict = model.state_dict()
                        torch.save(state_dict, os.path.join(args.output_dir, "model.pt"))
                        best_metric = result['R10@1'] + result['R10@2']
                        logger.info('Saving the best model in {}'.format(os.path.join(args.output_dir, "model.pt")))
                        ### visualize bad cases of the best model
                        # logger.info('Saving Bad cases...')
                        # visualize_bad_cases(
                        #     logits=logits_all,
                        #     input_file_path=os.path.join(args.data_dir, 'valid.txt'),
                        #     output_file_path=os.path.join(args.output_dir, 'valid_bad_cases.txt')
                        # )
                    model.train()
    sampler=SequentialSampler(test_dataset),  # Pull out batches sequentially.
    batch_size=batch_size  # Evaluate with this batch size.
)

if torch.cuda.is_available():
    device = torch.device("cuda")
    print('There are %d GPU(s) available.' % torch.cuda.device_count())
    print('We will use the GPU:', torch.cuda.get_device_name(0))
else:
    print('No GPU available, using the CPU instead.')
    device = torch.device("cpu")

model = BertForSequenceClassification(config=rob_config)
model.to(device)

params = list(model.named_parameters())

print('The model has {:} different named parameters.\n'.format(len(params)))

print('==== Embedding Layer ====\n')
for p in params[0:5]:
    print("{:<55} {:>12}".format(p[0], str(tuple(p[1].size()))))

print('\n==== First Transformer ====\n')
for p in params[5:21]:
    print("{:<55} {:>12}".format(p[0], str(tuple(p[1].size()))))

print('\n==== Output Layer ====\n')
for p in params[-4:]:
    print("{:<55} {:>12}".format(p[0], str(tuple(p[1].size()))))

optimizer = AdamW(model.parameters(), lr=2e-5, eps=1e-8)