def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
    x, y = self.to_device(batch, pl_module.device)

    with torch.no_grad():
        representations = self.get_representations(pl_module, x)

    # forward pass
    mlp_preds = pl_module.non_linear_evaluator(representations)
    mlp_loss = F.cross_entropy(mlp_preds, y)

    # update finetune weights
    mlp_loss.backward()
    self.optimizer.step()
    self.optimizer.zero_grad()

    # log metrics
    if trainer.datamodule is not None:
        acc = accuracy(mlp_preds, y, num_classes=trainer.datamodule.num_classes)
    else:
        acc = accuracy(mlp_preds, y)

    metrics = {'ft_callback_mlp_loss': mlp_loss, 'ft_callback_mlp_acc': acc}
    pl_module.logger.log_metrics(metrics, step=trainer.global_step)
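# Hedged usage sketch for the callback above: it is attached to a Trainer so
# the MLP evaluator trains on frozen representations during pretraining.
# `OnlineFineTuner`, `ssl_model`, and `dm` are placeholder names, not taken
# from this codebase (pl_bolts ships a comparable `SSLOnlineEvaluator`).
from pytorch_lightning import Trainer

online_evaluator = OnlineFineTuner()  # the callback defined above (assumed constructor)
trainer = Trainer(max_epochs=100, callbacks=[online_evaluator])
trainer.fit(ssl_model, datamodule=dm)  # representations are probed online during fit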
def test_step(self, batch, batch_idx):
    # Note: this test step assumes the whole test set is passed as a single
    # batch. Use the LearningDataSet located in the simple_dataset module to
    # achieve that.
    x, y = batch
    logits = torch.squeeze(self(x))
    if self.goal == 'binary':
        preds = self.probability_fn(logits)
        acc = accuracy(preds, y)
        f1_score = f1(preds, y, 1)
        # sklearn's roc_auc_score expects CPU arrays
        auc_score = roc_auc_score(y.cpu(), preds.cpu())
        metrics_dict = {'acc': acc.item(), 'f1_score': f1_score.item(), 'auc_score': auc_score}
    elif self.goal == 'multi_class':
        preds = self.probability_fn(logits)
        acc = accuracy(preds, y)
        metrics_dict = {'acc': acc.item()}
    else:  # regression
        r2 = r2score(logits, y)
        # scipy works on CPU data
        rho = stats.spearmanr(logits.cpu(), y.cpu())[0]
        metrics_dict = {'r2': r2.item(), 'correlation': rho}
    self.test_predictions = logits
    self.log_dict(metrics_dict)
def training_step(self, batch, batch_id):
    x, label = batch
    class1_y, class2_y, class3_y = self.forward(x)

    loss_class_1 = self.loss(class1_y, label).view(1)
    loss_class_2 = self.loss(class2_y, label).view(1)
    loss_class_3 = self.loss(class3_y, label).view(1)
    loss = torch.mean(torch.cat([loss_class_1, loss_class_2, loss_class_3]))

    accuracy_class_1 = accuracy(torch.argmax(class1_y, dim=1), label).view(1)
    accuracy_class_2 = accuracy(torch.argmax(class2_y, dim=1), label).view(1)
    accuracy_class_3 = accuracy(torch.argmax(class3_y, dim=1), label).view(1)
    acc = torch.mean(torch.cat([accuracy_class_1, accuracy_class_2, accuracy_class_3]))

    logs = {'train_loss': loss}
    return {
        'loss': loss,
        'loss_c1': loss_class_1,
        'loss_c2': loss_class_2,
        'loss_c3': loss_class_3,
        'acc': acc,
        'acc_C1': accuracy_class_1,
        'acc_C2': accuracy_class_2,
        'acc_C3': accuracy_class_3,
        'log': logs,
    }
def test_topk_accuracy_wrong_input_types(preds, target):
    topk = Accuracy(top_k=1)

    with pytest.raises(ValueError):
        topk(preds[0], target[0])

    with pytest.raises(ValueError):
        accuracy(preds[0], target[0], top_k=1)
def test_wrong_params(top_k, threshold):
    preds, target = _input_mcls_prob.preds, _input_mcls_prob.target

    with pytest.raises(ValueError):
        acc = Accuracy(threshold=threshold, top_k=top_k)
        acc(preds, target)
        acc.compute()

    with pytest.raises(ValueError):
        accuracy(preds, target, threshold=threshold, top_k=top_k)
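# Hedged sketch: tests like the two above normally receive their arguments
# from a pytest.mark.parametrize decorator. The invalid values below are
# illustrative assumptions, not the exact ones used upstream; `accuracy` and
# `_input_mcls_prob` come from the surrounding test module.
import pytest

@pytest.mark.parametrize("top_k, threshold", [(0, 0.5), (None, 1.5), (None, -0.5)])
def test_wrong_params_example(top_k, threshold):
    # accuracy should reject a non-positive top_k and thresholds outside [0, 1]
    with pytest.raises(ValueError):
        accuracy(_input_mcls_prob.preds, _input_mcls_prob.target,
                 threshold=threshold, top_k=top_k)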
def validation_step(self, batch, batch_idx):
    x, y = batch
    y_pred = self(x)
    loss = F.cross_entropy(y_pred.to(self.device), y.to(self.device))
    acc = accuracy(y_pred, y)
    acc_weighted = accuracy(y_pred, y, class_reduction='weighted')
    self.log("val_acc", acc, prog_bar=True, logger=True)
    self.log("val_acc_weighted", acc_weighted, prog_bar=True, logger=True)
    self.log("val_loss", loss, prog_bar=True, logger=True)
    return {'loss': loss, 'y': y, 'y_pred': y_pred}
def acc_disc(self, outputs):
    # TODO: acc = FM.accuracy(y_hat, y)
    # https://pytorch-lightning.readthedocs.io/en/stable/lightning-module.html#lightningmodule-for-production
    out_pos, target_pos = outputs["out_pos"], outputs["target_pos"]
    out_neg, target_neg = outputs["out_neg"], outputs["target_neg"]
    num_classes = self.hparams.disc.out
    acc_pos, acc_neg = (
        FM.accuracy(out_pos.argmax(dim=-1), target_pos, num_classes=num_classes),
        FM.accuracy(out_neg.argmax(dim=-1), target_neg, num_classes=num_classes),
    )
    return (acc_pos + acc_neg) / 2
def validation_step(self, batch, batch_idx):
    x, y = batch
    y_hat = self(x)
    y_hat = torch.argmax(y_hat, dim=1)
    acc = functional.accuracy(y_hat, y)
    self.log("val_acc", acc, on_epoch=True, prog_bar=True)
    return acc
def validation_step(self, batch, batch_idx):
    data, target = batch['waveform'], batch['label']

    # Perform step
    _, output2 = self(data)

    # Calculate loss; must be CrossEntropy or a derivative
    val_loss = self.loss(output2, target)

    # Calculate KL divergence between the approximate posterior and the prior over all Bayesian layers
    kl_clean = kldiv(self.model)

    # Weight the KL divergence so it does not overwhelm the loss term
    kl = self.model.weight_kl(kl_clean, self.val_dataset_size)

    # Apply the KL weighting scheme, which allows balancing the KL term non-uniformly across minibatches
    M = self.val_dataset_size / self.batch_size
    beta = get_beta(batch_idx, M, beta_type=self.kl_weighting_scheme)
    kl_weighted = beta * kl

    # Calculate accuracy
    acc = FM.accuracy(output2.squeeze(), target)

    # Loss is a tensor
    metrics = {'val_loss': val_loss.item(), 'val_acc': acc.item()}
    self.log('val_acc', acc.item())
    self.log('val_loss', val_loss.item())
    self.log('val_kl_weighted', kl_weighted.item())
    return metrics
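# Hedged sketch of what `get_beta` may look like, assuming the "Blundell"
# schedule from Weight Uncertainty in Neural Networks (Blundell et al., 2015),
# beta_i = 2^(M - i) / (2^M - 1) for minibatch i of M, which front-loads the
# KL term onto early minibatches. The helper actually used here may differ.
def get_beta(batch_idx, M, beta_type="Blundell"):
    M = int(M)  # M = dataset_size / batch_size arrives as a float
    if beta_type == "Blundell":
        # exact with Python big ints; decays roughly by half each minibatch
        return 2 ** (M - (batch_idx + 1)) / (2 ** M - 1)
    # uniform weighting: every minibatch carries an equal share of the KL
    return 1.0 / M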
def training_step(self, batch, batch_idx):
    x, y = batch
    y_hat = self(x)
    loss = F.nll_loss(y_hat, y)
    self.log('train_loss', loss)
    # y_hat holds log-probabilities (the input to nll_loss), so exp() recovers probabilities
    self.log('train_acc', accuracy(y_hat.exp(), y), prog_bar=True)
    return loss
def test_step(self, batch, batch_idx):
    txt, segment, mask, img, y = batch

    if self.hparams.model == "bert":
        y_hat = self(txt, mask, segment)
    elif self.hparams.model == "mmbt":
        y_hat = self(txt, mask, segment, img)
    else:
        # was `hparams.model`, which is unbound here
        raise ValueError(f'Specified model ({self.hparams.model}) not implemented')

    loss = self.criterion(y_hat, y)

    if self.hparams.task_type == "multilabel":
        preds = (torch.sigmoid(y_hat) > 0.5).float().detach()
        macro_f1 = f1_score(preds, y, class_reduction='macro')
        micro_f1 = f1_score(preds, y, class_reduction='micro')
        result = pl.EvalResult(checkpoint_on=micro_f1)
        result.log('test_micro_f1', micro_f1, prog_bar=True, on_epoch=True)
        result.log('test_macro_f1', macro_f1, prog_bar=True, on_epoch=True)
    else:
        # keep preds as a tensor: the functional accuracy metric does not accept numpy arrays
        preds = torch.nn.functional.softmax(y_hat, dim=1).argmax(dim=1)
        acc = accuracy(preds, y)
        result = pl.EvalResult(checkpoint_on=loss)  # was missing: `result` was used unbound
        result.log('test_acc', acc, prog_bar=True, on_epoch=True)
        result.log('test_loss', loss, prog_bar=True, on_epoch=True)

    return result
def validation_step(self, batch, batch_idx):
    prediction_individual = torch.empty(batch['label'].shape[0], self.ensemble_size, self.num_classes)
    data, target = batch['waveform'], batch['label']

    # Predict for each model
    for model_idx in range(self.ensemble_size):
        # Make prediction
        _, output2_mean, output2_log_var = self(data, model_idx)

        # Sample from the logits, returning a vector x_i
        x_i = self.models[model_idx].sample_logits(self.n_logit_samples, output2_mean, output2_log_var, average=True)

        prediction_individual[:, model_idx] = x_i

    # Calculate mean over predictions from individual ensemble members
    prediction_ensemble_mean = F.softmax(torch.mean(prediction_individual, dim=1), dim=1).type_as(data)

    val_loss = self.loss(prediction_ensemble_mean, target)
    acc = FM.accuracy(prediction_ensemble_mean, target)

    # Loss is a tensor; the checkpoint callback monitors the logged 'val_loss'
    metrics = {'val_loss': val_loss.item(), 'val_acc': acc.item()}
    self.log('val_acc', acc.item())
    self.log('val_loss', val_loss.item())
    return metrics
def on_validation_batch_end(
    self,
    trainer: pl.Trainer,
    pl_module: pl.LightningModule,
    outputs: Sequence,
    batch: Sequence,
    batch_idx: int,
    dataloader_idx: int,
) -> None:
    x, y = self.extract_online_finetuning_view(batch, pl_module.device)

    with torch.no_grad():
        feats = pl_module(x)

    feats = feats.detach()
    preds = pl_module.online_finetuner(feats)
    loss = F.cross_entropy(preds, y)

    acc = accuracy(F.softmax(preds, dim=1), y)
    pl_module.log('online_val_acc', acc, on_step=False, on_epoch=True, sync_dist=True)
    pl_module.log('online_val_loss', loss, on_step=False, on_epoch=True, sync_dist=True)
def on_validation_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
    x, y = self.to_device(batch, pl_module.device)

    with torch.no_grad():
        representations = self.get_representations(pl_module, x)

    representations = representations.detach()

    # forward pass
    mlp_preds = pl_module.non_linear_evaluator(representations)
    mlp_loss = F.cross_entropy(mlp_preds, y)

    # log metrics
    val_acc = accuracy(mlp_preds, y)
    pl_module.log('online_val_acc', val_acc, on_step=False, on_epoch=True, sync_dist=True, prog_bar=True)
    pl_module.log('online_val_loss', mlp_loss, on_step=False, on_epoch=True, sync_dist=True)
def metrics(self, y_sig, y):
    y_pred = y_sig > 0.5
    return {
        'acc': metricsF.accuracy(y_pred, y),
        'roc': metricsF.auroc(y_sig, y),
        'iou': metricsF.iou(y_pred, y),
    }
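# Hedged usage sketch: the dict returned by `metrics` above plugs directly
# into Lightning's log_dict. This validation_step body is an illustration
# under the assumption that the model emits raw logits, not code from the
# original project.
def validation_step(self, batch, batch_idx):
    x, y = batch
    y_sig = torch.sigmoid(self(x))  # assumed: forward returns logits
    self.log_dict({f'val_{k}': v for k, v in self.metrics(y_sig, y).items()})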
def test_step(self, batch, batch_idx):
    # getting outputs
    data, target = batch
    h = self.CNN(data)
    y_hat = F.log_softmax(self.linear(h.view(h.size(0), -1)), dim=1)

    # storing predictions for confusion matrix
    batch_pred = torch.cat((y_hat.argmax(dim=1).unsqueeze(1), target.unsqueeze(1)), dim=1)
    if self.test_pred is None:
        self.test_pred = batch_pred
    else:
        self.test_pred = torch.cat((self.test_pred, batch_pred), dim=0)

    # metrics
    acc = accuracy(y_hat, target=target)
    loss = F.nll_loss(y_hat, target)

    # logging
    self.log('test_loss', loss, on_step=False, on_epoch=True, prog_bar=True)
    self.log('test_accuracy', acc, on_step=False, on_epoch=True, prog_bar=True)
    return loss
def validation_step(self, batch, batch_idx):
    prediction_individual = torch.empty(batch['label'].shape[0], self.ensemble_size, self.num_classes)
    data, target = batch['waveform'], batch['label']

    # Predict for each model
    for i, model in enumerate(self.models):
        output1, output2 = self(data, i)
        prediction_individual[:, i] = output2.data

    # Calculate mean and variance over predictions from individual ensemble members
    prediction_ensemble_mean = F.softmax(torch.mean(prediction_individual, dim=1), dim=1)
    prediction_ensemble_var = torch.var(prediction_individual, dim=1)

    val_loss = self.loss(prediction_ensemble_mean, target)
    acc = FM.accuracy(prediction_ensemble_mean, target)

    # Loss is a tensor; the checkpoint callback monitors the logged 'val_loss'
    metrics = {'val_loss': val_loss.item(), 'val_acc': acc.item()}
    self.log('val_acc', acc.item())
    self.log('val_loss', val_loss.item())
    return metrics
def test_step(self, batch, batch_idx):
    x, y = batch
    y_hat = self(x)
    y_hat = torch.argmax(y_hat, dim=1)
    acc = functional.accuracy(y_hat, y)
    self.log("test_acc", acc, on_epoch=True)
    return acc
def training_step(self, batch: Tuple[torch.Tensor], batch_nb: int, *args, **kwargs) -> Dict[str, torch.Tensor]:
    """
    Runs one training step. This usually consists of the forward function
    followed by the loss function.

    :param batch: The output of your dataloader.
    :param batch_nb: Integer displaying which batch this is.

    Returns:
        - dictionary containing the loss and the metrics to be added to the lightning logger.
    """
    # input_ids, mc_token_ids, lm_labels, mc_labels, token_type_ids = batch
    output = self.forward(*batch)
    loss_val = (output.loss * self.hparams.lm_coef + output.mc_loss * self.hparams.mc_coef)

    # Language Modeling Negative Log-Likelihood
    nll = output.loss

    # Multiple-Choice Prediction.
    mc_pred, mc_target = torch.topk(output.mc_logits, 1)[1].view(-1), batch[3]
    acc = accuracy(mc_pred, mc_target)

    return {
        "loss": loss_val,
        "log": {"train_loss": loss_val, "train_nll": nll, "train_acc": acc},
    }
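# Hedged aside: torch.topk(logits, 1)[1].view(-1), as used above, is just an
# argmax over the multiple-choice dimension. A self-contained check with toy
# numbers (made up for illustration):
import torch

logits = torch.tensor([[0.1, 2.0, -1.0], [3.0, 0.2, 0.5]])
via_topk = torch.topk(logits, 1)[1].view(-1)   # tensor([1, 0])
via_argmax = logits.argmax(dim=-1)             # tensor([1, 0])
assert torch.equal(via_topk, via_argmax)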
def training_step(self, batch, batch_nb):
    x, y = batch
    y_hat = self(x)
    loss = F.cross_entropy(y_hat, y)
    # accuracy must be computed on the predictions, not on the loss scalar
    acc = accuracy(y_hat, y)
    self.log("train_loss", loss, on_epoch=True)
    self.log("acc", acc, on_epoch=True)
    return loss
def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
    x, y = self.to_device(batch, pl_module.device)

    with torch.no_grad():
        representations = self.get_representations(pl_module, x)

    representations = representations.detach()

    # forward pass
    mlp_preds = pl_module.non_linear_evaluator(representations)
    mlp_loss = F.cross_entropy(mlp_preds, y)

    # update finetune weights
    mlp_loss.backward()
    self.optimizer.step()
    self.optimizer.zero_grad()

    # log metrics
    train_acc = accuracy(mlp_preds, y)
    pl_module.log('online_train_acc', train_acc, on_step=True, on_epoch=False)
    pl_module.log('online_train_loss', mlp_loss, on_step=True, on_epoch=False)
def validation_step(self, batch: Tuple[torch.Tensor], batch_nb: int, *args, **kwargs) -> Dict[str, torch.Tensor]:
    """Similar to the training step but with the model in eval mode.

    :returns: dictionary passed to the validation_end function.
    """
    input_ids, mc_token_ids, lm_labels, mc_labels, token_type_ids = batch
    self.tempdata = {
        'mc_token_ids': mc_token_ids,
        'lm_labels': lm_labels,
        'mc_labels': mc_labels,
    }
    output = self.forward(input_ids, token_type_ids)
    loss_val = (output.loss * self.hparams.lm_coef + output.mc_loss * self.hparams.mc_coef)

    # Language Modeling Negative Log-Likelihood
    nll = output.loss

    # Multiple-Choice Prediction.
    mc_pred, mc_target = torch.topk(output.mc_logits, 1)[1].view(-1), mc_labels
    acc = accuracy(mc_pred, mc_target)

    return {"val_loss": loss_val, "val_nll": nll, "val_acc": acc}
def test_step(self, batch, batch_idx):
    y_hat = self(batch['input_ids'])
    loss = nn.CrossEntropyLoss()(y_hat, batch['label'].flatten())
    acc = FM.accuracy(y_hat.detach().argmax(dim=1), batch['label'].flatten(), num_classes=2)
    result = pl.EvalResult(checkpoint_on=loss)
    # log under test_* keys (the original val_* names look like a copy-paste slip)
    result.log_dict({'test_acc': acc, 'test_loss': loss})
    return result
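# Hedged sketch: pl.EvalResult was deprecated in PyTorch Lightning 1.0 and
# later removed. On recent versions the same step is written with
# self.log_dict, and checkpointing is configured on the ModelCheckpoint
# callback rather than via checkpoint_on.
def test_step(self, batch, batch_idx):
    y_hat = self(batch['input_ids'])
    loss = nn.CrossEntropyLoss()(y_hat, batch['label'].flatten())
    acc = FM.accuracy(y_hat.detach().argmax(dim=1), batch['label'].flatten(), num_classes=2)
    self.log_dict({'test_acc': acc, 'test_loss': loss})
    return loss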
def training_step(self, batch, batch_idx):
    x, y = batch
    y_hat = self(x)
    loss = F.cross_entropy(y_hat, y)
    acc = accuracy(y_hat, y)
    tensorboard_logs = {'train_loss': loss, 'train_acc': acc}
    return {'loss': loss, 'log': tensorboard_logs}
def validation_epoch_end(self, outputs) -> None:
    logits = torch.cat([it[0] for it in outputs], dim=0)
    y = torch.cat([it[1] for it in outputs], dim=0)
    loss = F.nll_loss(logits, y)
    preds = torch.argmax(logits, dim=1)
    acc = accuracy(preds, y)
    self.log_dict({"test_loss": loss, "test_acc": acc}, prog_bar=True)

    # dump metrics to disk
    metric = {
        "test_acc": acc.item(),
        "net": self.net,
        "params": sum(p.numel() for p in self.model.parameters() if p.requires_grad),
        "s": self.s,
    }
    metric["prune_ratio"] = self.prune_ratio if self.is_pruned else 0.0
    with open(os.path.join(self.save_dir, "metric.json"), "w", encoding="utf-8") as f:
        json.dump(metric, f, indent=2, ensure_ascii=False)
def test_step(self, batch, batch_idx):
    x, y = batch
    preds = self(x)
    acc = accuracy(preds, y)
    return {"test_acc": acc}
def training_step(self, batch, batch_idx):
    x, y = batch
    y_hat = self(x)
    loss = F.cross_entropy(y_hat, y)
    self.log('train_loss', loss)
    # log under 'train_acc' (the original 'val_acc' key was a copy-paste slip)
    self.log('train_acc', accuracy(y_hat, y))
    return loss
def on_validation_batch_end(
    self,
    trainer: Trainer,
    pl_module: LightningModule,
    outputs: Sequence,
    batch: Sequence,
    batch_idx: int,
    dataloader_idx: int,
) -> None:
    x, y = self.to_device(batch, pl_module.device)

    with torch.no_grad():
        representations = self.get_representations(pl_module, x)

    representations = representations.detach()

    # forward pass
    mlp_preds = pl_module.non_linear_evaluator(representations)  # type: ignore[operator]
    mlp_loss = F.cross_entropy(mlp_preds, y)

    # log metrics
    val_acc = accuracy(mlp_preds, y)
    pl_module.log('online_val_acc', val_acc, on_step=False, on_epoch=True, sync_dist=True)
    pl_module.log('online_val_loss', mlp_loss, on_step=False, on_epoch=True, sync_dist=True)
def training_step(self, batch, batch_idx):
    # x = images, y = labels, logits = model outputs
    x, y = batch
    logits = self(x)

    # 2. Compute loss & accuracy:
    train_loss = F.cross_entropy(logits, y)
    preds = torch.argmax(logits, dim=1)
    num_correct = torch.eq(preds.view(-1), y.view(-1)).sum()
    acc = accuracy(preds, y)
    self.log('train_loss', train_loss, on_step=True, on_epoch=True, logger=True)
    self.log('train_acc', acc, on_step=True, on_epoch=True, logger=True)

    # 3. Outputs:
    tqdm_dict = {'train_loss': train_loss}
    output = OrderedDict({
        'loss': train_loss,
        'num_correct': num_correct,
        'log': tqdm_dict,
        'progress_bar': tqdm_dict,
    })
    return output
def test_step(self, batch, batch_idx, save_to_csv=False):
    prediction_individual = torch.empty(batch['label'].shape[0], self.ensemble_size, self.num_classes)
    data, target = batch['waveform'], batch['label']

    # Predict for each model
    for i, model in enumerate(self.models):
        output1, output2 = self(data, i)
        prediction_individual[:, i] = output2.data

    # Calculate mean and variance over predictions from individual ensemble members
    prediction_ensemble_mean = F.softmax(torch.mean(prediction_individual, dim=1), dim=1)
    prediction_ensemble_var = torch.var(prediction_individual, dim=1)

    test_loss = self.loss(prediction_ensemble_mean, target)
    acc = FM.accuracy(prediction_ensemble_mean, target)

    # Get the variance of the predicted labels by selecting the variance of
    # the label with the highest average softmax value
    predicted_labels_var = torch.gather(prediction_ensemble_var, 1, prediction_ensemble_mean.argmax(dim=1).unsqueeze_(1))[:, 0].cpu()
    predicted_labels = prediction_ensemble_mean.argmax(dim=1)

    # Log and save metrics
    self.log('test_acc', acc.item())
    self.log('test_loss', test_loss.item())

    self.IDs = torch.cat((self.IDs, batch['id']), 0)
    self.predicted_labels = torch.cat((self.predicted_labels, predicted_labels), 0)
    self.epistemic_uncertainty = torch.cat((self.epistemic_uncertainty, predicted_labels_var), 0)
    self.correct_predictions = torch.cat((self.correct_predictions, torch.eq(predicted_labels, target.data.cpu())), 0)

    # the original return dict listed 'test_loss' twice; one entry suffices
    return {'test_loss': test_loss.item(), 'test_acc': acc.item()}
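# Hedged aside illustrating the torch.gather trick above: for each sample it
# picks, out of the per-class variances, the variance of the class the
# ensemble actually predicted. The toy numbers below are made up.
import torch

var = torch.tensor([[0.10, 0.30], [0.50, 0.20]])   # per-class variance, 2 samples
mean = torch.tensor([[0.9, 0.1], [0.3, 0.7]])      # averaged softmax outputs
idx = mean.argmax(dim=1).unsqueeze(1)              # predicted class per sample
picked = torch.gather(var, 1, idx)[:, 0]           # tensor([0.10, 0.20])
assert torch.allclose(picked, torch.tensor([0.10, 0.20]))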