def train(model: BertForSequenceClassification, num_epochs: int,
          num_training_steps: int, train_dataloader: DataLoader,
          device: torch.device) -> List[torch.Tensor]:
    optimizer = AdamW(model.parameters(), lr=5e-5)
    lr_scheduler = get_scheduler('linear',
                                 optimizer=optimizer,
                                 num_warmup_steps=0,
                                 num_training_steps=num_training_steps)
    model.train()
    losses = []
    for _ in range(num_epochs):
        for batch in train_dataloader:
            batch = {k: v.to(device) for k, v in batch.items()}
            outputs = model(**batch)
            loss = outputs.loss
            loss.backward()
            # Detach before storing: keeping the live loss tensor would
            # retain the computation graph of every training step.
            losses.append(loss.detach())
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            if 'lazy' in str(model.device):
                print("Calling Mark Step")
                torch._lazy.mark_step()
    return losses
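# A minimal usage sketch for `train` above, assuming a tokenized
# `train_dataset` whose items are dicts of tensors (input_ids,
# attention_mask, labels); the dataset, batch size, and epoch count here
# are illustrative.
num_epochs = 3
train_dataloader = DataLoader(train_dataset, batch_size=16, shuffle=True)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = BertForSequenceClassification.from_pretrained(
    'bert-base-uncased').to(device)
losses = train(model, num_epochs, num_epochs * len(train_dataloader),
               train_dataloader, device)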
def get_model():
    if args.model == 'trans':
        transformer_config = BertConfig.from_pretrained(
            'bert-base-uncased', num_labels=args.labels)
        if args.init_only:
            model = BertForSequenceClassification(
                config=transformer_config).to(device)
        else:
            model = BertForSequenceClassification.from_pretrained(
                'bert-base-uncased', config=transformer_config).to(device)
        param_optimizer = list(model.named_parameters())
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [{
            'params': [
                p for n, p in param_optimizer
                if not any(nd in n for nd in no_decay)
            ],
            'weight_decay': 0.01
        }, {
            'params': [
                p for n, p in param_optimizer
                if any(nd in n for nd in no_decay)
            ],
            'weight_decay': 0.0
        }]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.lr)
        es = EarlyStopping(patience=args.patience, percentage=False,
                           mode='max', min_delta=0.0)
        # Note: get_constant_schedule_with_warmup expects an integer number
        # of warmup *steps*; 0.05 looks like a warmup proportion left over
        # from another API and should be converted to a step count.
        scheduler = get_constant_schedule_with_warmup(optimizer,
                                                      num_warmup_steps=0.05)
    else:
        if args.model == 'cnn':
            model = CNN_MODEL(tokenizer, args,
                              n_labels=args.labels).to(device)
        elif args.model == 'lstm':
            model = LSTM_MODEL(tokenizer, args,
                               n_labels=args.labels).to(device)
        optimizer = AdamW(model.parameters(), lr=args.lr)
        scheduler = ReduceLROnPlateau(optimizer, verbose=True)
        es = EarlyStopping(patience=args.patience, percentage=False,
                           mode='max', min_delta=0.0)
    return model, optimizer, scheduler, es
test_dataloader = DataLoader(test_dataset, batch_size=11, shuffle=True)

model_config = BertConfig.from_pretrained('bert-base-chinese')
model_config.num_hidden_layers = 3
model = BertForSequenceClassification(model_config)

from transformers import BertTokenizer
tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
model.resize_token_embeddings(len(tokenizer))
model.to(device)
model.train()

from transformers import AdamW
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [{
    'params': [
        p for n, p in model.named_parameters()
        if not any(nd in n for nd in no_decay)
    ],
    'weight_decay': 0.01
}, {
    'params': [
        p for n, p in model.named_parameters()
        if any(nd in n for nd in no_decay)
    ],
    'weight_decay': 0.0
}]
optimizer = AdamW(optimizer_grouped_parameters, lr=1e-5)
def train_model(device: torch.device, model: BertForSequenceClassification,
                settings: Settings, train_dataloader: DataLoader):
    settings.write_debug('Starting train hate speech model')
    optimizer = get_optimizer(settings.get_optimizer_name(),
                              model.parameters(), settings)

    # Number of training epochs (the BERT authors recommend between 2 and 4).
    epochs = settings.get_num_training_epochs()

    # Total number of training steps is number of batches * number of epochs.
    total_steps = len(train_dataloader) * epochs

    # Create the learning rate scheduler.
    scheduler = get_linear_schedule_with_warmup(
        optimizer,
        num_warmup_steps=settings.get_num_warmup_steps(),  # Default value in run_glue.py
        num_training_steps=total_steps)

    # This training code is based on the `run_glue.py` script here:
    # https://github.com/huggingface/transformers/blob/5bfcd0485ece086ebcbed2d008813037968a9e58/examples/run_glue.py#L128

    # Set the seed value everywhere to make this reproducible.
    random_seed = settings.get_random_seed()
    random.seed(random_seed)
    np.random.seed(random_seed)
    torch.manual_seed(random_seed)
    torch.cuda.manual_seed_all(random_seed)

    # Store the average loss after each epoch so we can plot them.
    loss_values = []

    # For each epoch...
    for epoch_i in range(0, epochs):
        # ========================================
        #               Training
        # ========================================
        # Perform one full pass over the training set.
        settings.write_debug("")
        settings.write_debug('======== Epoch {:} / {:} ========'.format(
            epoch_i + 1, epochs))
        settings.write_debug('Training...')

        # Measure how long the training epoch takes.
        t0 = time.time()

        # Reset the total loss for this epoch.
        total_loss = 0

        # Put the model into training mode. Don't be misled--the call to
        # `train` just changes the *mode*, it doesn't *perform* the training.
        # `dropout` and `batchnorm` layers behave differently during training
        # vs. test (source: https://stackoverflow.com/questions/51433378/what-does-model-train-do-in-pytorch)
        model.train()

        # For each batch of training data...
        for step, batch in enumerate(train_dataloader):
            # Progress update every 40 batches.
            if step % 40 == 0 and not step == 0:
                # Calculate elapsed time in minutes.
                elapsed = format_time(time.time() - t0)
                # Report progress.
                settings.write_debug(
                    '  Batch {:>5,}  of  {:>5,}.    Elapsed: {:}.'.format(
                        step, len(train_dataloader), elapsed))

            # Unpack this training batch from our dataloader. As we unpack
            # the batch, we also copy each tensor to the GPU using the `to`
            # method.
            #
            # `batch` contains three pytorch tensors:
            #   [0]: input ids
            #   [1]: attention masks
            #   [2]: labels
            b_input_ids = batch[0].to(device)
            b_input_mask = batch[1].to(device)
            b_labels = batch[2].to(device)

            # Always clear any previously calculated gradients before
            # performing a backward pass. PyTorch doesn't do this
            # automatically because accumulating the gradients is
            # "convenient while training RNNs".
            # (source: https://stackoverflow.com/questions/48001598/why-do-we-need-to-call-zero-grad-in-pytorch)
            model.zero_grad()

            # Perform a forward pass (evaluate the model on this training
            # batch). This will return the loss (rather than the model
            # output) because we have provided the `labels`.
            # The documentation for this `model` function is here:
            # https://huggingface.co/transformers/v2.2.0/model_doc/bert.html#transformers.BertForSequenceClassification
            outputs = model(b_input_ids,
                            token_type_ids=None,
                            attention_mask=b_input_mask,
                            labels=b_labels)

            # The call to `model` always returns a tuple, so we need to pull
            # the loss value out of the tuple.
            loss = outputs[0]

            # Accumulate the training loss over all of the batches so that
            # we can calculate the average loss at the end. `loss` is a
            # Tensor containing a single value; the `.item()` function just
            # returns the Python value from the tensor.
            total_loss += loss.item()

            # Perform a backward pass to calculate the gradients.
            loss.backward()

            # Clip the norm of the gradients to 1.0.
            # This is to help prevent the "exploding gradients" problem.
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)

            # Update parameters and take a step using the computed gradient.
            # The optimizer dictates the "update rule"--how the parameters
            # are modified based on their gradients, the learning rate, etc.
            optimizer.step()

            # Update the learning rate.
            scheduler.step()

        # Calculate the average loss over the training data.
        avg_train_loss = total_loss / len(train_dataloader)

        # Store the loss value for plotting the learning curve.
        loss_values.append(avg_train_loss)

        settings.write_debug("")
        settings.write_debug(
            "  Average training loss: {0:.2f}".format(avg_train_loss))
        settings.write_debug("  Training epoch took: {:}".format(
            format_time(time.time() - t0)))

    settings.write_debug("")
    settings.write_debug("Training complete!")
    settings.write_debug('Finished train hate speech model')
def train_process(config, train_load, train_sampler, model_name):
    # Load source BERT weights.
    model_config = BertConfig.from_pretrained(
        pretrained_model_name_or_path="../user_data/bert_source/{}_config.json"
        .format(model_name))
    # model_config = BertConfig()
    model_config.vocab_size = len(
        pd.read_csv('../user_data/vocab', names=["score"]))
    model = BertForSequenceClassification(config=model_config)
    checkpoint = torch.load(
        '../user_data/save_bert/{}_checkpoint.pth.tar'.format(model_name),
        map_location=torch.device('cpu'))
    model.load_state_dict(checkpoint['status'], strict=False)
    print('***********load pretrained mlm {} weight*************'.format(
        model_name))
    for param in model.parameters():
        param.requires_grad = True

    # 4) Move the model to its GPU before wrapping it for distributed training.
    model = model.to(config.device)

    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [
                p for n, p in model.named_parameters()
                if not any(nd in n for nd in no_decay)
            ],
            "weight_decay": config.weight_decay,
        },
        {
            "params": [
                p for n, p in model.named_parameters()
                if any(nd in n for nd in no_decay)
            ],
            "weight_decay": 0.0
        },
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=config.learning_rate)
    # t_total = len(train_load) * config.num_train_epochs
    # scheduler = get_linear_schedule_with_warmup(
    #     optimizer, num_warmup_steps=t_total * config.warmup_proportion,
    #     num_training_steps=t_total)

    cudnn.benchmark = True
    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        # 5) Wrap the model for distributed data parallelism.
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[config.local_rank])

    model.train()
    if config.fgm:
        fgm = FGM(model)

    for epoch in range(config.num_train_epochs):
        train_sampler.set_epoch(epoch)
        torch.cuda.empty_cache()
        for batch, (input_ids, token_type_ids, attention_mask,
                    label) in enumerate(train_load):
            input_ids = input_ids.cuda(config.local_rank, non_blocking=True)
            attention_mask = attention_mask.cuda(config.local_rank,
                                                 non_blocking=True)
            token_type_ids = token_type_ids.cuda(config.local_rank,
                                                 non_blocking=True)
            label = label.cuda(config.local_rank, non_blocking=True)
            outputs = model(input_ids=input_ids,
                            attention_mask=attention_mask,
                            token_type_ids=token_type_ids,
                            labels=label)
            loss = outputs.loss
            model.zero_grad()
            loss.backward()
            # torch.nn.utils.clip_grad_norm_(model.parameters(), config.max_grad_norm)
            if config.fgm:
                fgm.attack()  # Add an adversarial perturbation to the embeddings.
                loss_adv = model(input_ids=input_ids,
                                 attention_mask=attention_mask,
                                 token_type_ids=token_type_ids,
                                 labels=label).loss
                # Backpropagate, accumulating the adversarial gradients on
                # top of the normal ones.
                loss_adv.backward()
                fgm.restore()  # Restore the original embedding parameters.
            optimizer.step()
            # scheduler.step()

        # dev_auc = model_evaluate(config, model, valid_load)
        # Synchronize all processes before computing distributed metrics.
        torch.distributed.barrier()
        # reduce_dev_auc = reduce_auc(dev_auc, config.nprocs).item()
        # if reduce_dev_auc > best_dev_auc:
        #     best_dev_auc = reduce_dev_auc
        #     is_best = True

        now = strftime("%Y-%m-%d %H:%M:%S", localtime())
        msg = 'model_name:{},time:{},epoch:{}/{}'
        if config.local_rank in [0, -1]:
            print(
                msg.format(model_name, now, epoch + 1,
                           config.num_train_epochs))
            checkpoint = {"status": model.module.state_dict()}
            torch.save(
                checkpoint, '../user_data/save_model' + os.sep +
                '{}_checkpoint.pth.tar'.format(model_name))
            del checkpoint
    torch.distributed.barrier()
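# `FGM` above is an external helper that is not shown in the snippet. Below
# is a minimal sketch of a Fast Gradient Method adversarial-training class
# with the attack()/restore() interface it assumes; the eps value and the
# embedding parameter name are illustrative assumptions.
class FGM:
    def __init__(self, model, eps=1.0):
        self.model = model
        self.eps = eps
        self.backup = {}

    def attack(self, emb_name='word_embeddings'):
        # Perturb the word embedding weights along the gradient direction.
        for name, param in self.model.named_parameters():
            if param.requires_grad and emb_name in name:
                self.backup[name] = param.data.clone()
                norm = torch.norm(param.grad)
                if norm != 0 and not torch.isnan(norm):
                    param.data.add_(self.eps * param.grad / norm)

    def restore(self, emb_name='word_embeddings'):
        # Put the original embedding weights back after the adversarial pass.
        for name, param in self.model.named_parameters():
            if param.requires_grad and emb_name in name and name in self.backup:
                param.data = self.backup[name]
        self.backup = {}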
class Classifier:
    """The Classifier"""

    #############################################
    def __init__(self, train_batch_size=16, eval_batch_size=8,
                 max_length=128, lr=2e-5, eps=1e-6, n_epochs=11):
        """
        :param train_batch_size: (int) Training batch size
        :param eval_batch_size: (int) Batch size while using the `predict` method.
        :param max_length: (int) Maximum length for padding
        :param lr: (float) Learning rate
        :param eps: (float) Adam optimizer epsilon parameter
        :param n_epochs: (int) Number of epochs to train
        """
        # Model hyper-parameters
        self.train_batch_size = train_batch_size
        self.eval_batch_size = eval_batch_size
        self.max_length = max_length
        self.lr = lr
        self.eps = eps
        self.n_epochs = n_epochs

        # Information to be set or updated later
        self.trainset = None
        self.categories = None
        self.labels = None
        self.model = None

        # Tokenizer
        self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')

        # The model. We first need to specify some configurations for it.
        configs = BertConfig.from_pretrained(
            'bert-base-uncased', num_labels=3,
            type_vocab_size=8)  # BERT configuration
        self.model = BertForSequenceClassification(configs)

        # We replace the model's classification head (initially a single
        # fully connected layer) with our own network.
        clf = Net()
        self.model.classifier = clf
        # Put the model on the GPU if available, otherwise on the CPU.
        self.model.to(device)

    def preprocess(self, sentences):
        """
        The preprocessing function
        :param sentences: List of all sentences to be given at once.
        :return: List of preprocessed sentences.
        """
        preprocessed = []
        for sentence in tqdm(sentences):
            assert isinstance(sentence, str)
            doc = nlp(str(sentence))
            tokens = []
            for token in doc:
                # Some punctuation can be informative for BERT, so keep it.
                if (not token.is_punct) or (token.text not in [
                        ',', '-', '.', "'", '!'
                ]):
                    tokens.append(token.text)
            tokens = (' '.join(tokens)).lower().replace(" '", "'")
            preprocessed.append(tokens)
        return preprocessed

    def question(self, category):
        """
        Computes the question corresponding to a category
        :param category: (str) The category/aspect
        :return: (str) computed question using the QA-M task
        """
        assert category in self.categories
        if category == 'AMBIENCE#GENERAL':
            return "what do you think of the ambience of it ?"
        elif category in ('DRINKS#PRICES', 'FOOD#PRICES',
                          'RESTAURANT#PRICES'):
            return "what do you think of the price of it ?"
        elif category in ('DRINKS#QUALITY', 'FOOD#QUALITY'):
            return "what do you think of the quality of it ?"
        elif category == 'DRINKS#STYLE_OPTIONS':
            return "what do you think of drinks ?"
        elif category == 'FOOD#STYLE_OPTIONS':
            return "what do you think of the food ?"
        elif category == 'LOCATION#GENERAL':
            return "what do you think of the location of it ?"
        elif category in ('RESTAURANT#GENERAL', 'RESTAURANT#MISCELLANEOUS'):
            return "what do you think of the restaurant ?"
        elif category == 'SERVICE#GENERAL':
            return "what do you think of the service of it ?"
    def train(self, trainfile):
        """Trains the classifier model on the training set stored in file trainfile"""
        # Loading the data and splitting up its information into lists
        print("\n   Loading training data...")
        trainset = np.genfromtxt(trainfile, delimiter='\t', dtype=str,
                                 comments=None)
        self.trainset = trainset
        n = len(trainset)
        targets = trainset[:, 0]
        categories = trainset[:, 1]
        self.labels = list(Counter(targets).keys())  # label names
        self.categories = list(Counter(categories).keys())  # category names
        start_end = [[int(x) for x in w.split(':')] for w in trainset[:, 3]]
        # target words
        words_of_interest = [
            trainset[:, 4][i][start_end[i][0]:start_end[i][1]]
            for i in range(n)
        ]
        # sentences to be classified
        sentences = [str(s) for s in trainset[:, 4]]

        # Preprocessing the text data
        print("   Preprocessing the text data...")
        sentences = self.preprocess(sentences)

        # Computing question sequences
        print("   Computing questions...")
        questions = [self.question(categories[i]) for i in tqdm(range(n))]

        # Tokenization
        attention_masks = []
        input_ids = []
        token_type_ids = []
        labels = []
        for word, question, answer in zip(words_of_interest, questions,
                                          sentences):
            encoded_dict = self.tokenizer.encode_plus(
                answer,
                question + ' ' + word.lower(),
                add_special_tokens=True,  # Add '[CLS]' and '[SEP]' tokens
                max_length=self.max_length,  # Pad & truncate all sequences
                pad_to_max_length=True,
                return_attention_mask=True,  # Construct attention masks
                return_tensors='pt',  # Return pytorch tensors.
            )
            attention_masks.append(encoded_dict['attention_mask'])
            input_ids.append(encoded_dict['input_ids'])
            token_type_ids.append(encoded_dict['token_type_ids'])
        attention_masks = torch.cat(attention_masks, dim=0)
        input_ids = torch.cat(input_ids, dim=0)
        token_type_ids = torch.cat(token_type_ids, dim=0)

        # Converting polarities into integers (0: positive, 1: negative, 2: neutral)
        for target in targets:
            if target == 'positive':
                labels.append(0)
            elif target == 'negative':
                labels.append(1)
            elif target == 'neutral':
                labels.append(2)
        labels = torch.tensor(labels)

        # Pytorch data iterators
        train_data = TensorDataset(input_ids, attention_masks,
                                   token_type_ids, labels)
        train_sampler = RandomSampler(train_data)
        train_dataloader = DataLoader(train_data,
                                      batch_size=self.train_batch_size,
                                      sampler=train_sampler)

        # Optimizer and scheduler (a linear scheduler without warm-up)
        no_decay = ['bias', 'gamma', 'beta']  # Parameters excluded from weight decay
        optimizer_parameters = [{
            'params': [
                p for n, p in self.model.named_parameters()
                if not any(nd in n for nd in no_decay)
            ],
            'weight_decay': 0.01
        }, {
            'params': [
                p for n, p in self.model.named_parameters()
                if any(nd in n for nd in no_decay)
            ],
            'weight_decay': 0.0
        }]
        optimizer = AdamW(optimizer_parameters, lr=self.lr, eps=self.eps)
        total_steps = len(train_dataloader) * self.n_epochs
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=0, num_training_steps=total_steps)

        # Training
        initial_t0 = time.time()
        for epoch in range(self.n_epochs):
            print('\n   ======== Epoch %d / %d ========' %
                  (epoch + 1, self.n_epochs))
            print('   Training...\n')
            t0 = time.time()
            total_train_loss = 0
            self.model.train()
            for step, batch in enumerate(train_dataloader):
                batch = tuple(t.to(device) for t in batch)
                input_ids_, input_mask_, segment_ids_, label_ids_ = batch
                self.model.zero_grad()
                loss, _ = self.model(input_ids_,
                                     token_type_ids=segment_ids_,
                                     attention_mask=input_mask_,
                                     labels=label_ids_)
                total_train_loss += loss.item()
                loss.backward()
                # Clip the gradient norm.
                torch.nn.utils.clip_grad_norm_(self.model.parameters(), 1.0)
                optimizer.step()
                scheduler.step()
            avg_train_loss = total_train_loss / len(train_dataloader)
            training_time = format_time(time.time() - t0)
            # print("   Average training loss: {0:.2f}".format(avg_train_loss))
            print("   Training epoch duration: {:}".format(training_time))
            print("   Total training time: {:}".format(
                format_time(time.time() - initial_t0)))

    def predict(self, datafile):
        """Predicts class labels for the input instances in file 'datafile'
        Returns the list of predicted labels
        """
        # Loading the data and splitting up its information into lists
        evalset = np.genfromtxt(datafile, delimiter='\t', dtype=str,
                                comments=None)
        m = len(evalset)
        categories = evalset[:, 1]
        start_end = [[int(x) for x in w.split(':')] for w in evalset[:, 3]]
        # target words
        words_of_interest = [
            evalset[:, 4][i][start_end[i][0]:start_end[i][1]]
            for i in range(m)
        ]
        # sentences to be classified
        sentences = [str(s) for s in evalset[:, 4]]

        # Preprocessing the text data
        print("\n   Preprocessing the text data...")
        sentences = self.preprocess(sentences)

        # Computing question sequences
        print("   Computing questions...")
        questions = [self.question(categories[i]) for i in tqdm(range(m))]

        # Tokenization
        attention_masks = []
        input_ids = []
        token_type_ids = []
        for word, question, answer in zip(words_of_interest, questions,
                                          sentences):
            encoded_dict = self.tokenizer.encode_plus(
                answer,
                question + ' ' + word.lower(),
                add_special_tokens=True,  # Add '[CLS]' and '[SEP]'
                max_length=self.max_length,  # Pad & truncate all sequences
                pad_to_max_length=True,
                return_attention_mask=True,  # Construct attention masks
                return_tensors='pt',  # Return pytorch tensors.
            )
            attention_masks.append(encoded_dict['attention_mask'])
            input_ids.append(encoded_dict['input_ids'])
            token_type_ids.append(encoded_dict['token_type_ids'])
        attention_masks = torch.cat(attention_masks, dim=0)
        input_ids = torch.cat(input_ids, dim=0)
        token_type_ids = torch.cat(token_type_ids, dim=0)

        # Pytorch data iterators
        eval_data = TensorDataset(input_ids, attention_masks, token_type_ids)
        eval_sampler = SequentialSampler(eval_data)
        eval_dataloader = DataLoader(eval_data,
                                     batch_size=self.eval_batch_size,
                                     sampler=eval_sampler)

        # Prediction
        named_labels = []
        self.model.eval()
        for batch in eval_dataloader:
            batch = tuple(t.to(device) for t in batch)
            input_ids, input_mask, segment_ids = batch
            with torch.no_grad():
                logits = self.model(input_ids,
                                    token_type_ids=segment_ids,
                                    attention_mask=input_mask)[0]
            logits = softmax(logits, dim=-1)
            logits = logits.detach().cpu().numpy()
            outputs = np.argmax(logits, axis=1)
            # Converting integer labels into named labels
            for label in outputs:
                if label == 0:
                    named_labels.append('positive')
                elif label == 1:
                    named_labels.append('negative')
                elif label == 2:
                    named_labels.append('neutral')
        return np.array(named_labels)
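# A brief usage sketch for the Classifier above; the tab-separated file
# names are illustrative and assume the SemEval-style format the class
# expects (polarity, category, term, offsets, sentence).
clf = Classifier(n_epochs=3)
clf.train('traindata.csv')
predicted_labels = clf.predict('devdata.csv')
print(predicted_labels[:10])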
class TorchBertClassifierModel(TorchModel):
    """BERT-based model for text classification on PyTorch.

    It uses the output for the [CLS] token and predicts labels using a
    linear transformation.

    Args:
        n_classes: number of classes
        pretrained_bert: pretrained BERT checkpoint path or key title (e.g. "bert-base-uncased")
        one_hot_labels: set True if one-hot encoding for labels is used
        multilabel: set True if it is multi-label classification
        return_probas: set True to return class probabilities instead of the most probable label
        attention_probs_keep_prob: keep_prob for BERT self-attention layers
        hidden_keep_prob: keep_prob for BERT hidden layers
        optimizer: optimizer name from `torch.optim`
        optimizer_parameters: dictionary with optimizer's parameters,
            e.g. {'lr': 0.1, 'weight_decay': 0.001, 'momentum': 0.9}
        clip_norm: clip gradients by norm coefficient
        bert_config_file: path to BERT configuration file (not used if pretrained_bert is a key title)
    """

    def __init__(self, n_classes, pretrained_bert,
                 one_hot_labels: bool = False,
                 multilabel: bool = False,
                 return_probas: bool = False,
                 attention_probs_keep_prob: Optional[float] = None,
                 hidden_keep_prob: Optional[float] = None,
                 optimizer: str = "AdamW",
                 optimizer_parameters: dict = {
                     "lr": 1e-3,
                     "weight_decay": 0.01,
                     "betas": (0.9, 0.999),
                     "eps": 1e-6
                 },
                 clip_norm: Optional[float] = None,
                 bert_config_file: Optional[str] = None,
                 **kwargs) -> None:
        self.return_probas = return_probas
        self.one_hot_labels = one_hot_labels
        self.multilabel = multilabel
        self.pretrained_bert = pretrained_bert
        self.bert_config_file = bert_config_file
        self.attention_probs_keep_prob = attention_probs_keep_prob
        self.hidden_keep_prob = hidden_keep_prob
        self.n_classes = n_classes
        self.clip_norm = clip_norm

        if self.multilabel and not self.one_hot_labels:
            raise RuntimeError(
                'Use one-hot encoded labels for multilabel classification!')
        if self.multilabel and not self.return_probas:
            raise RuntimeError(
                'Set return_probas to True for multilabel classification!')

        super().__init__(optimizer=optimizer,
                         optimizer_parameters=optimizer_parameters,
                         **kwargs)

    def train_on_batch(self, features: List[InputFeatures],
                       y: Union[List[int], List[List[int]]]) -> Dict:
        """Train the model on a given batch.

        Args:
            features: batch of InputFeatures
            y: batch of labels (class id or one-hot encoding)

        Returns:
            dict with loss and learning_rate values
        """
        input_ids = [f.input_ids for f in features]
        input_masks = [f.attention_mask for f in features]
        b_input_ids = torch.cat(input_ids, dim=0).to(self.device)
        b_input_masks = torch.cat(input_masks, dim=0).to(self.device)
        b_labels = torch.from_numpy(np.array(y)).to(self.device)

        self.optimizer.zero_grad()
        loss, logits = self.model(b_input_ids,
                                  token_type_ids=None,
                                  attention_mask=b_input_masks,
                                  labels=b_labels)
        loss.backward()
        # Clip the norm of the gradients to help prevent the
        # "exploding gradients" problem.
        if self.clip_norm:
            torch.nn.utils.clip_grad_norm_(self.model.parameters(),
                                           self.clip_norm)
        self.optimizer.step()
        if self.lr_scheduler is not None:
            self.lr_scheduler.step()
        return {'loss': loss.item()}

    def __call__(self, features: List[InputFeatures]
                 ) -> Union[List[int], List[List[float]]]:
        """Make predictions for the given features (texts).

        Args:
            features: batch of InputFeatures

        Returns:
            predicted classes or probabilities of each class
        """
        input_ids = [f.input_ids for f in features]
        input_masks = [f.attention_mask for f in features]
        b_input_ids = torch.cat(input_ids, dim=0).to(self.device)
        b_input_masks = torch.cat(input_masks, dim=0).to(self.device)

        with torch.no_grad():
            # Forward pass, calculate logit predictions.
            logits = self.model(b_input_ids,
                                token_type_ids=None,
                                attention_mask=b_input_masks)
            logits = logits[0]

        if self.return_probas:
            if not self.multilabel:
                pred = torch.nn.functional.softmax(logits, dim=-1)
            else:
                pred = torch.sigmoid(logits)
            pred = pred.detach().cpu().numpy()
        else:
            logits = logits.detach().cpu().numpy()
            pred = np.argmax(logits, axis=1)
        return pred

    @overrides
    def load(self, fname=None):
        if fname is not None:
            self.load_path = fname

        if self.pretrained_bert and not Path(self.pretrained_bert).is_file():
            self.model = BertForSequenceClassification.from_pretrained(
                self.pretrained_bert,
                num_labels=self.n_classes,
                output_attentions=False,
                output_hidden_states=False)
        elif self.bert_config_file and Path(self.bert_config_file).is_file():
            self.bert_config = BertConfig.from_json_file(
                str(expand_path(self.bert_config_file)))
            if self.attention_probs_keep_prob is not None:
                self.bert_config.attention_probs_dropout_prob = \
                    1.0 - self.attention_probs_keep_prob
            if self.hidden_keep_prob is not None:
                self.bert_config.hidden_dropout_prob = \
                    1.0 - self.hidden_keep_prob
            self.model = BertForSequenceClassification(
                config=self.bert_config)
        else:
            raise ConfigError("No pre-trained BERT model is given.")

        self.model.to(self.device)

        self.optimizer = getattr(torch.optim, self.optimizer_name)(
            self.model.parameters(), **self.optimizer_parameters)
        if self.lr_scheduler_name is not None:
            self.lr_scheduler = getattr(
                torch.optim.lr_scheduler,
                self.lr_scheduler_name)(self.optimizer,
                                        **self.lr_scheduler_parameters)

        if self.load_path:
            log.info(f"Load path {self.load_path} is given.")
            if isinstance(self.load_path,
                          Path) and not self.load_path.parent.is_dir():
                raise ConfigError("Provided load path is incorrect!")

            weights_path = Path(self.load_path.resolve())
            weights_path = weights_path.with_suffix(".pth.tar")
            if weights_path.exists():
                log.info(f"Load path {weights_path} exists.")
                log.info(
                    f"Initializing `{self.__class__.__name__}` from saved.")
                # Now load the weights and optimizer state from the checkpoint.
                log.info(f"Loading weights from {weights_path}.")
                checkpoint = torch.load(weights_path,
                                        map_location=self.device)
                self.model.load_state_dict(checkpoint["model_state_dict"])
                self.optimizer.load_state_dict(
                    checkpoint["optimizer_state_dict"])
                self.epochs_done = checkpoint.get("epochs_done", 0)
            else:
                log.info(
                    f"Init from scratch. Load path {weights_path} does not exist."
                )
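# A minimal sketch of instantiating the component directly (outside a
# DeepPavlov pipeline config); the checkpoint paths and any keyword names
# not documented in the class docstring are illustrative assumptions.
model = TorchBertClassifierModel(
    n_classes=2,
    pretrained_bert='bert-base-uncased',
    return_probas=True,
    optimizer_parameters={'lr': 2e-5, 'weight_decay': 0.01},
    save_path='./clf_checkpoint',
    load_path='./clf_checkpoint')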
    def __len__(self):
        return self.all_input_ids.size(0)


# Prepare random data
all_input_ids = torch.randint(low=0, high=100,
                              size=(100, 128))  # 100 examples of length 128
all_attention_mask = torch.ones_like(all_input_ids)
all_labels = torch.randint(low=0, high=2, size=(100, ))

dataset = DictDataset(all_input_ids, all_attention_mask, all_labels)
eval_dataset = DictDataset(all_input_ids, all_attention_mask, all_labels)
dataloader = DataLoader(dataset, batch_size=32)

# Optimizer and learning rate scheduler
optimizer = AdamW(student_model.parameters(), lr=1e-4)
scheduler = None

# Display model parameter statistics
print("\nteacher_model's parameters:")
_ = textbrewer.utils.display_parameters(teacher_model, max_level=3)
print("student_model's parameters:")
_ = textbrewer.utils.display_parameters(student_model, max_level=3)


def simple_adaptor(batch, model_outputs):
    # The second element of model_outputs is the logits before softmax;
    # the third element is the hidden states.
    return {
        'logits': model_outputs[1],
        'hidden': model_outputs[2]
    }
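# `DictDataset` is only partially shown above; here is a minimal sketch
# consistent with its `__len__` method and the constructor call, returning
# per-example dicts keyed the way BERT models expect. The field names are
# assumptions based on that usage.
class DictDataset(torch.utils.data.Dataset):
    def __init__(self, all_input_ids, all_attention_mask, all_labels):
        self.all_input_ids = all_input_ids
        self.all_attention_mask = all_attention_mask
        self.all_labels = all_labels

    def __getitem__(self, index):
        # Returning a dict lets the default collate function build batches
        # that can be passed to the model as keyword arguments.
        return {
            'input_ids': self.all_input_ids[index],
            'attention_mask': self.all_attention_mask[index],
            'labels': self.all_labels[index],
        }

    def __len__(self):
        return self.all_input_ids.size(0)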
params = list(model.named_parameters())

print('The model has {:} different named parameters.\n'.format(len(params)))

print('==== Embedding Layer ====\n')
for p in params[0:5]:
    print("{:<55} {:>12}".format(p[0], str(tuple(p[1].size()))))

print('\n==== First Transformer ====\n')
for p in params[5:21]:
    print("{:<55} {:>12}".format(p[0], str(tuple(p[1].size()))))

print('\n==== Output Layer ====\n')
for p in params[-4:]:
    print("{:<55} {:>12}".format(p[0], str(tuple(p[1].size()))))

optimizer = AdamW(model.parameters(), lr=2e-5, eps=1e-8)

epochs = 4
total_steps = len(train_dataloader) * epochs
scheduler = get_linear_schedule_with_warmup(
    optimizer,
    num_warmup_steps=0,  # Default value in run_glue.py
    num_training_steps=total_steps)


def format_time(elapsed):
    # Round to the nearest second and format as hh:mm:ss.
    elapsed_rounded = int(round((elapsed)))
    return str(datetime.timedelta(seconds=elapsed_rounded))


def flat_accuracy(preds, labels):
    # Fraction of argmax predictions that match the flattened labels.
    pred_flat = np.argmax(preds, axis=1).flatten()
    labels_flat = labels.flatten()
    return np.sum(pred_flat == labels_flat) / len(labels_flat)
def train_classifier(model: BertForSequenceClassification,
                     dataset: TensorDataset, validation_ratio: float,
                     batch_size: int, freeze_embeddings_layer: bool,
                     freeze_encoder_layers: int,
                     epochs: int) -> (BertForSequenceClassification, list):
    device = select_device()

    # Note: `validation_ratio` is used here as the *training* fraction of
    # the split.
    train_size = int(validation_ratio * len(dataset))
    val_size = len(dataset) - train_size
    train_dataset, val_dataset = random_split(dataset, [train_size, val_size])

    train_dataloader = DataLoader(train_dataset,
                                  sampler=RandomSampler(train_dataset),
                                  batch_size=batch_size)
    validation_dataloader = DataLoader(val_dataset,
                                       sampler=SequentialSampler(val_dataset),
                                       batch_size=batch_size)

    # Optionally freeze the embedding layer and the first N encoder layers.
    modules = []
    if freeze_embeddings_layer:
        modules.append(model.bert.embeddings)
    for i in range(freeze_encoder_layers):
        modules.append(model.bert.encoder.layer[i])
    for module in modules:
        for param in module.parameters():
            param.requires_grad = False

    model.to(device)

    optimizer = AdamW(filter(lambda p: p.requires_grad, model.parameters()),
                      lr=5e-5)
    total_steps = len(train_dataloader) * epochs
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=0, num_training_steps=total_steps)

    training_stats = []
    total_t0 = time.time()

    for epoch_i in range(0, epochs):
        print("")
        print('======== Epoch {:} / {:} ========'.format(epoch_i + 1, epochs))
        print('Training...')
        t0 = time.time()
        total_train_loss = 0
        model.train()

        for step, batch in enumerate(train_dataloader):
            if step % 40 == 0 and not step == 0:
                elapsed = format_time(time.time() - t0)
                print('  Batch {:>5,}  of  {:>5,}.    Elapsed: {:}.'.format(
                    step, len(train_dataloader), elapsed))

            b_input_ids = batch[0].to(device)
            b_input_mask = batch[1].to(device)
            b_labels = batch[2].to(device)

            model.zero_grad()
            outputs = model(b_input_ids,
                            token_type_ids=None,
                            attention_mask=b_input_mask,
                            labels=b_labels)
            loss = outputs.loss
            logits = outputs.logits
            total_train_loss += loss.item()
            loss.backward()

            # Clip the norm of the gradients to 1.0.
            # This is to help prevent the "exploding gradients" problem.
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)

            optimizer.step()
            scheduler.step()

        avg_train_loss = total_train_loss / len(train_dataloader)
        training_time = format_time(time.time() - t0)

        print("")
        print("  Average training loss: {0:.2f}".format(avg_train_loss))
        print("  Training epoch took: {:}".format(training_time))

        print("")
        print("Running Validation...")
        t0 = time.time()
        model.eval()

        total_eval_accuracy = 0
        total_eval_loss = 0
        nb_eval_steps = 0

        for batch in validation_dataloader:
            b_input_ids = batch[0].to(device)
            b_input_mask = batch[1].to(device)
            b_labels = batch[2].to(device)

            with torch.no_grad():
                outputs = model(b_input_ids,
                                token_type_ids=None,
                                attention_mask=b_input_mask,
                                labels=b_labels)
                loss = outputs.loss
                logits = outputs.logits

            total_eval_loss += loss.item()
            logits = logits.detach().cpu().numpy()
            label_ids = b_labels.cpu().numpy()
            total_eval_accuracy += flat_accuracy(logits, label_ids)

        avg_val_accuracy = total_eval_accuracy / len(validation_dataloader)
        print("  Accuracy: {0:.2f}".format(avg_val_accuracy))

        avg_val_loss = total_eval_loss / len(validation_dataloader)
        validation_time = format_time(time.time() - t0)
        print("  Validation Loss: {0:.2f}".format(avg_val_loss))
        print("  Validation took: {:}".format(validation_time))

        training_stats.append({
            'epoch': epoch_i + 1,
            'Training Loss': avg_train_loss,
            'Valid. Loss': avg_val_loss,
            'Valid. Accur.': avg_val_accuracy,
            'Training Time': training_time,
            'Validation Time': validation_time
        })

    print("")
    print("Training complete!")
    print("Total training took {:} (h:mm:ss)".format(
        format_time(time.time() - total_t0)))

    return model, training_stats
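# A brief usage sketch for `train_classifier`, assuming `dataset` is a
# TensorDataset of (input_ids, attention_mask, labels); the hyper-parameter
# values are illustrative.
model = BertForSequenceClassification.from_pretrained('bert-base-uncased',
                                                      num_labels=2)
model, stats = train_classifier(model, dataset,
                                validation_ratio=0.9, batch_size=32,
                                freeze_embeddings_layer=True,
                                freeze_encoder_layers=4, epochs=4)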
test_dataset = load_dataset('data/Test.csv', max_len=MAX_LEN)  # 7356 * 3
train_cus = Custom_dataset(train_dataset)
train_loader = DataLoader(dataset=train_cus,
                          batch_size=BATCH_SIZE,
                          shuffle=False)

# BERT model and its configuration.
config = BertConfig.from_pretrained('bert-base-chinese')
config.num_labels = 3
model = BertForSequenceClassification.from_pretrained('bert-base-chinese',
                                                      config=config)
model.to(device)

# Note: the scheduler must be bound to the optimizer that is actually
# stepped, so only a single optimizer is created here.
optimizer = AdamW(model.parameters(), lr=LR, correct_bias=False)
scheduler = WarmupLinearSchedule(optimizer,
                                 warmup_steps=WARMUP_STEPS,
                                 t_total=T_TOTAL)

model.train()
print('Starting training...')
for epoch in range(EPOCHS):
    for text, label in train_loader:
        text_list = list(map(json.loads, text))
        label_list = list(map(json.loads, label))
        text_tensor = torch.tensor(text_list).to(device)
        label_tensor = torch.tensor(label_list).to(device)
        # Assumed continuation of the truncated loop: a standard
        # forward/backward step.
        optimizer.zero_grad()
        loss = model(text_tensor, labels=label_tensor)[0]
        loss.backward()
        optimizer.step()
        scheduler.step()
def train_and_test():
    # Prepare data.
    fileNameList = glob.glob(
        'C:/YYQ/PGproject/PreProcessing/processed_features_facenet/*.pkl')

    # Basic features: text list (for tf-idf), labels, visual and audio
    # feature sequences.
    text_list = []
    labels = []
    visual_features = []
    audio_features = []
    for file_name in fileNameList:
        data_point = pkl.load(open(file_name, 'rb'))
        clip_name, label, transcription, smoothed_seq = (
            data_point[0], data_point[1], data_point[2], data_point[3])
        labels.append(label)
        text_list.append(transcription)
        # Per-frame visual features.
        # visual_seq = np.stack([w['landmark_feature'] for w in smoothed_seq], axis=0)
        visual_seq = np.stack(
            [w['facenet_feature'].squeeze() for w in smoothed_seq], axis=0)
        visual_features.append(visual_seq)
        # Per-frame audio features.
        audio_seq = np.stack([w['audio_grp'] for w in smoothed_seq], axis=0)
        audio_features.append(audio_seq)

    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    pg = tokenizer.batch_encode_plus(text_list,
                                     max_length=128,
                                     pad_to_max_length=True)
    x = pg['input_ids']
    token_type_ids = pg['token_type_ids']
    attention_mask = pg['attention_mask']
    x, token_type_ids, attention_mask = (np.array(x),
                                         np.array(token_type_ids),
                                         np.array(attention_mask))
    labels = np.array(labels)

    # 5-fold stratified split; use the first fold.
    skf = StratifiedKFold(n_splits=5)
    cv5_ids = list(skf.split(x, labels))
    sp = cv5_ids[0]
    train_l, train_labels = x[sp[0]], labels[sp[0]]
    test_l, test_labels = x[sp[1]], labels[sp[1]]
    print(train_l.shape)
    train_token_type_ids = token_type_ids[sp[0]]
    test_token_type_ids = token_type_ids[sp[1]]
    train_attention_mask = attention_mask[sp[0]]
    test_attention_mask = attention_mask[sp[1]]

    # Shuffle training data for batch reading.
    n_train = len(train_l)
    n_eval = len(test_l)
    perm = np.random.permutation(n_train)
    train_l = train_l[perm]
    train_labels = np.array(train_labels)[perm]
    train_token_type_ids = train_token_type_ids[perm]
    train_attention_mask = train_attention_mask[perm]

    train_l = torch.LongTensor(train_l)
    test_l = torch.LongTensor(test_l)
    train_labels = torch.LongTensor(train_labels)
    test_labels = torch.LongTensor(test_labels)
    train_token_type_ids = torch.LongTensor(train_token_type_ids)
    test_token_type_ids = torch.LongTensor(test_token_type_ids)
    train_attention_mask = torch.FloatTensor(train_attention_mask)
    test_attention_mask = torch.FloatTensor(test_attention_mask)

    # model = BertForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=3).to('cuda')
    config = BertConfig.from_pretrained('bert-base-uncased', num_labels=3)
    model = BertForSequenceClassification(config).to('cuda')

    eval_every = 5
    batch_size = 32
    test_batch_size = 8
    max_epochs = 500
    t_total = math.ceil(n_train / batch_size) * max_epochs
    lr = 2e-5
    epsilon = 1e-8
    max_grad_norm = 1.0
    weight_decay = 0.0
    optimizer, scheduler = get_optimizers(model,
                                          learning_rate=lr,
                                          adam_epsilon=epsilon,
                                          weight_decay=weight_decay,
                                          num_training_steps=t_total)

    model.train()
    model.zero_grad()
    for ep in range(max_epochs):
        idx = 0
        avg_loss = 0
        n_batch = 0
        model.train()
        while idx < n_train:
            optimizer.zero_grad()
            batch_l = train_l[idx:(idx + batch_size)].to('cuda')
            batch_ty = train_token_type_ids[idx:(idx + batch_size)].to('cuda')
            batch_am = train_attention_mask[idx:(idx + batch_size)].to('cuda')
            ans = train_labels[idx:(idx + batch_size)].to('cuda')
            idx += batch_size
            preds = model(input_ids=batch_l,
                          token_type_ids=batch_ty,
                          attention_mask=batch_am,
                          labels=ans)
            loss = preds[0]
            loss.backward()
            avg_loss += loss.data.cpu().numpy()
            n_batch += 1.
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)
            optimizer.step()
            scheduler.step()
            model.zero_grad()
        avg_loss = avg_loss / n_batch
        print("epoch: %d avg_loss: %f" % (ep + 1, avg_loss))
        del batch_l, batch_ty, batch_am
        torch.cuda.empty_cache()

        if ep % eval_every == 0:
            idx = 0
            model.eval()
            eval_preds = np.array([])
            while idx < n_eval:
                test_batch_l = test_l[idx:(idx + test_batch_size)].to('cuda')
                test_batch_ty = test_token_type_ids[idx:(
                    idx + test_batch_size)].to('cuda')
                test_batch_am = test_attention_mask[idx:(
                    idx + test_batch_size)].to('cuda')
                test_ans = test_labels[idx:(idx + test_batch_size)].to('cuda')
                test_pred = model(input_ids=test_batch_l,
                                  token_type_ids=test_batch_ty,
                                  attention_mask=test_batch_am,
                                  labels=test_ans)
                scores = test_pred[1]
                _, batch_eval_preds = scores.data.cpu().max(1)
                eval_preds = np.concatenate((eval_preds, batch_eval_preds),
                                            axis=-1)
                idx += test_batch_size
            # Metrics
            precision, recall, fscore, support = precision_recall_fscore_support(
                test_labels.cpu().numpy(), eval_preds, labels=[0, 1, 2],
                average=None)
            print(
                float(sum(eval_preds == test_labels.cpu().numpy())) /
                len(eval_preds))
            print(precision, recall, fscore, support)
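# `get_optimizers` is not shown in the snippet; below is a plausible sketch
# based on how it is called above, pairing AdamW with a linear warmup
# schedule. The warmup step count is an illustrative assumption.
from transformers import AdamW, get_linear_schedule_with_warmup


def get_optimizers(model, learning_rate, adam_epsilon, weight_decay,
                   num_training_steps, num_warmup_steps=0):
    optimizer = AdamW(model.parameters(), lr=learning_rate,
                      eps=adam_epsilon, weight_decay=weight_decay)
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=num_warmup_steps,
        num_training_steps=num_training_steps)
    return optimizer, scheduler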
training_set = Intents(train_dataset)
testing_set = Intents(test_dataset)

"""**Dataloaders and Parameters**"""

### Dataloader parameters
params = {'batch_size': 16,
          'shuffle': True,
          'drop_last': True,
          'num_workers': 0}

training_loader = DataLoader(training_set, **params)
testing_loader = DataLoader(testing_set, **params)

loss_function = nn.CrossEntropyLoss()
learning_rate = 2e-05
optimizer = optim.Adam(params=model.parameters(), lr=learning_rate)

if torch.cuda.is_available():
    print("GPU is AVAILABLE!")
    model = model.cuda()

ids, tokens, labels = next(iter(training_loader))  # fetch a single batch
ids.shape, tokens.shape, labels

if model_type == 'bert':
    print(model_type)
    out = model.forward(ids.cuda())[0]
    print(loss_function(out, labels.cuda()))
    print(out.shape)

"""**Training the model**"""
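# The notebook is truncated at the "Training the model" header; below is a
# minimal sketch of the loop that would typically follow, reusing the
# loader, loss function, and optimizer defined above (the epoch count and
# logging interval are illustrative).
max_epochs = 3
model.train()
for epoch in range(max_epochs):
    for i, (ids, tokens, labels) in enumerate(training_loader):
        optimizer.zero_grad()
        out = model.forward(ids.cuda())[0]
        loss = loss_function(out, labels.cuda())
        loss.backward()
        optimizer.step()
        if i % 100 == 0:
            print('Epoch %d, batch %d, loss %.4f' %
                  (epoch + 1, i, loss.item()))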