def epoch_end(self, epoch, num_epochs, loaders_dict, model, train_loss, valid_loss):
    """Record end-of-epoch losses plus accuracy/F1 on the train and valid splits.

    Parameters
    ----------
    epoch : int
        current epoch index
    num_epochs : int
        total number of epochs
    loaders_dict : dict
        maps split keys (TRAIN, VALID) to their DataLoaders
    model : nn.Module
        model being trained
    train_loss : float
        average training loss for this epoch
    valid_loss : float
        average validation loss for this epoch
    """
    super(DetailedMeasurementTrainingHook, self).epoch_end(
        epoch=epoch, num_epochs=num_epochs, loaders_dict=loaders_dict,
        model=model, train_loss=train_loss, valid_loss=valid_loss)
    _LOGGER.info("epoch end metrics")
    self.metrics_dict['train_loss'].append(train_loss)
    self.metrics_dict['valid_loss'].append(valid_loss)

    def _measure(split, key_prefix, header):
        # Evaluate one split, append acc/F1 to the history, and log them.
        conf_mat = evaluate(model=model, data=loaders_dict[split],
                            device=self.device)
        acc = metrics.accuracy(confusion_matrix=conf_mat)
        f1 = metrics.f1(confusion_matrix=conf_mat)
        self.metrics_dict[key_prefix + '_acc'].append(acc)
        self.metrics_dict[key_prefix + '_f1'].append(f1)
        _LOGGER.info(header)
        _LOGGER.info("%s, %s", str(acc), str(f1))

    # Identical measurement on both splits; valid first, matching original order.
    _measure(VALID, 'valid', "eval metrics acc, f1 ")
    _measure(TRAIN, 'train', "train metrics acc, f1 ")
def training_start(self, model, loaders_dict, criterion):
    """Capture baseline accuracy/F1 on both splits before any training step.

    Parameters
    ----------
    model : nn.Module
        model about to be trained
    loaders_dict : dict
        maps split keys (TRAIN, VALID) to their DataLoaders
    criterion : loss
        loss function (forwarded to the parent hook)
    """
    super(DetailedMeasurementTrainingHook, self).training_start(
        model=model, loaders_dict=loaders_dict, criterion=criterion)
    # Keep the loaders so later callbacks (batch_train_end, training_end)
    # can re-evaluate without being handed them again.
    self.loaders_dict = loaders_dict
    _LOGGER.info("start metrics")

    def _measure(split, key_prefix, header):
        # Evaluate one split, append acc/F1 to the history, and log them.
        conf_mat = evaluate(model=model, data=loaders_dict[split],
                            device=self.device)
        acc = metrics.accuracy(confusion_matrix=conf_mat)
        f1 = metrics.f1(confusion_matrix=conf_mat)
        self.metrics_dict[key_prefix + '_acc'].append(acc)
        self.metrics_dict[key_prefix + '_f1'].append(f1)
        _LOGGER.info(header)
        _LOGGER.info("%s, %s", str(acc), str(f1))

    # Identical measurement on both splits; valid first, matching original order.
    _measure(VALID, 'valid', "eval metrics acc, f1")
    _measure(TRAIN, 'train', "train metrics acc, f1")
def training_end(self, best_model):
    """Dump the full metric history, then the best model's metrics on all splits.

    Parameters
    ----------
    best_model : nn.Module
        model carrying the best weights found during training
    """
    super(DetailedMeasurementTrainingHook, self).training_end(
        best_model=best_model)
    # One line with the metric name followed by its tab-separated history.
    for metric_key in self.metrics_dict:
        values_str = "\t".join(str(i) for i in self.metrics_dict[metric_key])
        _LOGGER.info("%s", metric_key)
        _LOGGER.info("%s", values_str)
    _LOGGER.info("Best model metrics: train, valid, test: acc, f1")

    def _report(split):
        # Log acc/F1 of the best model on a single split.
        conf_mat = evaluate(model=best_model, data=self.loaders_dict[split],
                            device=self.device)
        _LOGGER.info("%s, %s",
                     str(metrics.accuracy(confusion_matrix=conf_mat)),
                     str(metrics.f1(confusion_matrix=conf_mat)))

    # Report order matches the header logged above.
    _report(TRAIN)
    _report(VALID)
    _report(TEST)
def batch_train_end(self, batch_num, data, model, batch_loss):
    """Periodically measure mid-epoch metrics during training.

    Validation acc/F1 are collected every 1024 batches; train acc/F1 only
    once, at batch 4096 — presumably because a full train-set evaluation is
    expensive (TODO confirm). This leaves 'train_*' and 'valid_*' histories
    with different lengths by design.

    NOTE(review): the parent hook's batch_train_end is intentionally not
    called here (the original had the call commented out) — confirm that is
    still desired.

    Parameters
    ----------
    batch_num : int
        index of the batch that just finished
    data : unused here
    model : nn.Module
        model being trained
    batch_loss : unused here
    """
    def _measure(split, key_prefix, header):
        # Evaluate one split, append acc/F1 to the history, and log them.
        conf_mat = evaluate(model=model, data=self.loaders_dict[split],
                            device=self.device)
        acc = metrics.accuracy(confusion_matrix=conf_mat)
        f1 = metrics.f1(confusion_matrix=conf_mat)
        self.metrics_dict[key_prefix + '_acc'].append(acc)
        self.metrics_dict[key_prefix + '_f1'].append(f1)
        _LOGGER.info(header, str(batch_num))
        _LOGGER.info("%s, %s", str(acc), str(f1))

    if batch_num % 1024 == 0 and batch_num > 0:
        _measure(VALID, 'valid', "eval metrics, batch: %s acc, f1")
    if batch_num == 4096:
        _measure(TRAIN, 'train', "train metrics, batch: %s acc, f1 ")
if __name__ == "__main__":
    model_name = input("Input model name: ")
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # Restore the trained network and switch it to inference mode.
    net = models.get_resnet(num_outputs=1, pretrained=False)
    net.load_state_dict(torch.load(f=f"models/{model_name}.pth"))
    net.to(device)
    net.eval()

    # Test split, iterated in a fixed order so predictions line up with labels.
    base_data = ds.PCamDatasets(data_transforms=ds.PCAM_DATA_TRANSFORM)
    test_loader = DataLoader(dataset=base_data.test, batch_size=1024,
                             shuffle=False)

    predictions, labels = train.predict_data(
        model=net, data=test_loader, device=device, return_labels=True)

    print(model_name)
    # Accuracy and F1 metric
    conf_mat = train.evaluate(net, test_loader, device)
    print("Accuracy:", hmetrics.accuracy(confusion_matrix=conf_mat))
    print("F1:", hmetrics.f1(confusion_matrix=conf_mat))

    # ROC curve plot plus scalar AUC from the raw scores.
    hmetrics.plot_roc_curve(
        experiment_name=model_name, y_true=labels, y_score=predictions)
    print("AUC:", metrics.roc_auc_score(y_true=labels, y_score=predictions))
def train(model, loaders_dict, num_epochs, optimizer, criterion, device, hook=None):
    """Run the full training loop and return the model with its best weights.

    After every epoch the model is scored on the validation split; the
    weights achieving the highest validation F1 seen so far are snapshotted
    and loaded back into the model before returning.

    Parameters
    ----------
    model : nn.Module
        PyTorch model that needs to be trained
    loaders_dict : dict(str, torch.utils.data.DataLoader)
        dictionary with training and validation dataloaders
    num_epochs : int
        total number of epochs
    optimizer : torch.optim.Optimizer
        model optimizer, None if validation phase
    criterion : loss
        pytorch loss function
    device : torch.device
        device on which to perform operations
    hook : TrainingHook
        used for adding functionalities to epoch run

    Returns
    -------
    model : nn.Module
        trained model
    """
    if hook is not None:
        hook.training_start(model=model, loaders_dict=loaders_dict,
                            criterion=criterion)

    best_weights = copy.deepcopy(model.state_dict())
    best_f1 = 0.0

    for epoch in range(num_epochs):
        if hook is not None:
            hook.epoch_start(epoch=epoch, num_epochs=num_epochs,
                             loaders_dict=loaders_dict)

        # One optimization pass over TRAIN, then one scoring pass over VALID.
        train_loss = run_epoch(model=model, data=loaders_dict[TRAIN],
                               optimizer=optimizer, criterion=criterion,
                               phase=TRAIN, device=device, hook=hook)
        valid_loss = run_epoch(model=model, data=loaders_dict[VALID],
                               optimizer=optimizer, criterion=criterion,
                               phase=VALID, device=device, hook=hook)

        # Snapshot the weights whenever validation F1 improves.
        conf_mat = evaluate(model=model, data=loaders_dict[VALID],
                            device=device)
        epoch_f1 = metrics.f1(confusion_matrix=conf_mat)
        if epoch_f1 > best_f1:
            best_f1 = epoch_f1
            best_weights = copy.deepcopy(model.state_dict())

        if hook is not None:
            hook.epoch_end(epoch=epoch, num_epochs=num_epochs,
                           loaders_dict=loaders_dict, model=model,
                           train_loss=train_loss, valid_loss=valid_loss)

    # load best model weights
    model.load_state_dict(best_weights)
    if hook is not None:
        hook.training_end(best_model=model)
    return model