def get_ROC_mnist_ensemble(ensemble):
    _, _, _, fashionmnist_test_dataset = get_FashionMNIST()
    _, _, _, mnist_test_dataset = get_MNIST()

    dataloader, anomaly_targets = prepare_ood_datasets(
        fashionmnist_test_dataset, mnist_test_dataset
    )

    scores = []
    accuracies = []

    with torch.no_grad():
        for data, target in dataloader:
            data = data.cuda()
            target = target.cuda()

            # Collect the output of every ensemble member
            # (assumed to be log-probabilities, hence the .exp() below).
            output_l = []
            for model in ensemble:
                model.eval()
                output_l.append(model(data))
            output_l = torch.stack(output_l)

            # Average the members' predictive distributions.
            mean_probs = output_l.exp().mean(0)
            pred = mean_probs.argmax(1)

            # Predictive entropy of the averaged distribution is the OOD score.
            entropy = -(mean_probs * torch.log(mean_probs)).sum(1)

            accuracies.append(pred.eq(target).cpu().numpy())
            scores.append(entropy.cpu().numpy())

    scores = np.concatenate(scores)
    accuracies = np.concatenate(accuracies)

    # Accuracy is only meaningful on the in-distribution (FashionMNIST) portion.
    accuracy = np.mean(accuracies[: len(fashionmnist_test_dataset)])

    roc = roc_curve(anomaly_targets, scores)

    return roc
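
# Illustrative usage (not part of the original code): `roc_curve` returns
# (fpr, tpr, thresholds), so the ensemble's OOD-detection AUROC can be
# summarized with sklearn's `auc`. The name `ensemble` is assumed to be an
# iterable of trained models with the same interface as above.
def example_ensemble_auroc(ensemble):
    from sklearn.metrics import auc

    fpr, tpr, _thresholds = get_ROC_mnist_ensemble(ensemble)
    return auc(fpr, tpr)
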
def get_ROC_mnist(model):
    _, _, _, fashionmnist_test_dataset = get_FashionMNIST()
    _, _, _, mnist_test_dataset = get_MNIST()

    dataloader, anomaly_targets = prepare_ood_datasets(
        fashionmnist_test_dataset, mnist_test_dataset
    )

    scores, accuracies = loop_over_dataloader(model, dataloader)

    accuracy = np.mean(accuracies[: len(fashionmnist_test_dataset)])

    roc = roc_curve(anomaly_targets, scores)

    return roc
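
# Sketch of `prepare_ood_datasets` (an assumption; the actual helper is defined
# elsewhere in the repository): it is expected to concatenate the in-distribution
# and OOD test sets, without shuffling, and to return a DataLoader together with
# binary anomaly targets (0 = in-distribution, 1 = OOD) in the same order, so that
# `roc_curve(anomaly_targets, scores)` above lines up sample by sample.
def prepare_ood_datasets_sketch(true_dataset, ood_dataset, batch_size=500):
    from torch.utils.data import ConcatDataset, DataLoader

    # Label every in-distribution sample 0 and every OOD sample 1.
    anomaly_targets = torch.cat(
        (torch.zeros(len(true_dataset)), torch.ones(len(ood_dataset)))
    )

    # shuffle=False keeps the per-sample scores aligned with anomaly_targets.
    dataloader = DataLoader(
        ConcatDataset([true_dataset, ood_dataset]),
        batch_size=batch_size,
        shuffle=False,
    )

    return dataloader, anomaly_targets
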
def get_fashionmnist_notmnist_ood(model):
    _, _, _, fashionmnist_test_dataset = get_FashionMNIST()
    _, _, _, notmnist_test_dataset = get_notMNIST()

    return get_auroc_ood(fashionmnist_test_dataset, notmnist_test_dataset, model)
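
# Sketch of `get_auroc_ood` (an assumption; the actual helper is defined elsewhere):
# presumably it scores the combined in-distribution / OOD loader with the model and
# reduces the ROC to a single number via sklearn's `roc_auc_score`, alongside the
# in-distribution accuracy. `prepare_ood_datasets_sketch` is the hypothetical helper
# sketched above; `loop_over_dataloader` is the same helper used by get_ROC_mnist.
def get_auroc_ood_sketch(true_dataset, ood_dataset, model):
    from sklearn.metrics import roc_auc_score

    dataloader, anomaly_targets = prepare_ood_datasets_sketch(true_dataset, ood_dataset)

    scores, accuracies = loop_over_dataloader(model, dataloader)

    # Accuracy over the in-distribution samples only, AUROC over everything.
    accuracy = np.mean(accuracies[: len(true_dataset)])
    auroc = roc_auc_score(anomaly_targets, scores)

    return accuracy, auroc
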
    # Train
    trainer.run(dl_train, max_epochs=epochs)

    # Validation
    evaluator.run(dl_val)
    val_accuracy = evaluator.state.metrics["accuracy"]

    # Test
    evaluator.run(dl_test)
    test_accuracy = evaluator.state.metrics["accuracy"]

    return model, val_accuracy, test_accuracy


if __name__ == "__main__":
    _, _, _, fashionmnist_test_dataset = get_FashionMNIST()

    l_gradient_penalties = [0.05]
    length_scales = [0.1]

    epochs = 30
    repetition = 3  # Increase for multiple repetitions
    final_model = True  # Set True to train the final model on the full train set
    input_dep_ls = False  # Input-dependent length scale (sigma)
    use_grad_norm = False  # Gradient normalization

    results = {}
    for l_gradient_penalty in l_gradient_penalties:
        for length_scale in length_scales:
            val_accuracies = []
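            # Sketch of how this sweep presumably continues (assumption; the
            # original continuation is not shown in this excerpt): each
            # (l_gradient_penalty, length_scale) pair is trained `repetition`
            # times and the validation accuracies are aggregated into `results`,
            # e.g.:
            #
            #     for _ in range(repetition):
            #         model, val_accuracy, test_accuracy = train_model(...)  # hypothetical name
            #         val_accuracies.append(val_accuracy)
            #     results[(l_gradient_penalty, length_scale)] = np.mean(val_accuracies)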