def test(nnName, dataName, epsilon, temperature):
    """
    :param nnName: in-distribution network name
    :param dataName: out-of-distribution dataset name
    :param epsilon: noise magnitude of the input perturbation
    :param temperature: softmax temperature scaling
    """
    print("--start testing!--")
    net1 = cifar10vgg(train=False).model

    if nnName == "densenet10":
        (x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
        testloaderIn = x_train[:10000]

    if dataName == "Imagenet_crop":
        testloaderOut = load_images_from_folder("./Imagenet/test")
    if dataName == "CIFAR-100":
        (_, _), (testloaderOut, _) = tf.keras.datasets.cifar100.load_data()
    if dataName == "Gaussian":
        testloaderOut = np.random.standard_normal(size=testloaderIn.shape) + 0.5
    if dataName == "Uniform":
        testloaderOut = np.random.uniform(0, 1, size=testloaderIn.shape)

    testloaderIn, testloaderOut = normalize(testloaderIn, testloaderOut)
    testloaderIn = (testloaderIn, y_train[:10000])

    d.testData(net1, testloaderIn, testloaderOut, nnName, dataName, epsilon, temperature)
    m.metric(nnName, dataName, temperature, epsilon)
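# The Keras variant above calls two helpers that are not shown in this snippet:
# `load_images_from_folder` and `normalize`. The sketch below is a minimal,
# assumed implementation (the names match the calls above, but the bodies are
# illustrative, not the repository's actual code): load every image in a folder
# into a float32 array, and standardize both splits with the in-distribution
# mean and standard deviation.
import os

import numpy as np
from PIL import Image


def load_images_from_folder(folder, size=(32, 32)):
    """Assumed helper: load all images in `folder` as an (N, H, W, 3) float32 array."""
    images = []
    for fname in sorted(os.listdir(folder)):
        img = Image.open(os.path.join(folder, fname)).convert("RGB").resize(size)
        images.append(np.asarray(img, dtype=np.float32))
    return np.stack(images)


def normalize(x_in, x_out):
    """Assumed helper: standardize both splits with in-distribution statistics."""
    mean = x_in.mean(axis=(0, 1, 2), keepdims=True)
    std = x_in.std(axis=(0, 1, 2), keepdims=True) + 1e-7
    return (x_in - mean) / std, (x_out - mean) / std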
def test(nnName, dataName, CUDA_DEVICE, epsilon, temperature):
    model = DenseNetBC_50_12()
    model.load_state_dict(torch.load("../models/{}.pth".format(nnName)))
    # checkpoint = torch.load("../checkpoints_healthy/{}.pth.tar".format(nnName))
    # model.load_state_dict(checkpoint['state_dict'])
    # optimizer.load_state_dict(checkpoint['optimizer'])
    optimizer1 = optim.SGD(model.parameters(), lr=0, momentum=0)

    # Patch BatchNorm modules from checkpoints saved with an older PyTorch version.
    for i, (name, module) in enumerate(model._modules.items()):
        module = recursion_change_bn(model)
    model.cuda(CUDA_DEVICE)

    transform_test = transforms.Compose(
        [transforms.Resize((512, 512)), transforms.ToTensor()])

    # Out-of-distribution loader.
    testsetout = torchvision.datasets.ImageFolder(
        "/home/yoon/jyk416/odin-pytorch/data/{}".format(dataName),
        transform=transform_test)
    testloaderOut = torch.utils.data.DataLoader(testsetout, batch_size=1,
                                                shuffle=False, num_workers=2)

    # In-distribution loader.
    train_test_dir = '/home/yoon/jyk416/odin-pytorch/data/train3'
    if nnName == "model104":
        testset = torchvision.datasets.ImageFolder(train_test_dir,
                                                   transform=transform_test)
        testloaderIn = torch.utils.data.DataLoader(testset, batch_size=1,
                                                   shuffle=True, num_workers=2)

    # `criterion` is expected to be defined at module level (e.g. nn.CrossEntropyLoss()).
    d.testData(model, criterion, CUDA_DEVICE, testloaderIn, testloaderOut,
               nnName, epsilon, temperature)
    m.metric(nnName, dataName)
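# `recursion_change_bn`, used in the loading loop above, is not defined in this
# snippet. It is assumed to be the usual compatibility helper for checkpoints
# saved with older PyTorch versions: it walks the module tree and restores the
# `track_running_stats` attribute on BatchNorm layers. A minimal sketch under
# that assumption:
def recursion_change_bn(module):
    """Assumed helper: recursively patch BatchNorm2d modules from old checkpoints."""
    if isinstance(module, torch.nn.BatchNorm2d):
        module.track_running_stats = True
    else:
        for name, child in module._modules.items():
            module._modules[name] = recursion_change_bn(child)
    return module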
def test(nnName, dataName, CUDA_DEVICE, epsilon, temperature):
    net1 = torch.load("../models/{}.pth".format(nnName))
    optimizer1 = optim.SGD(net1.parameters(), lr=0, momentum=0)
    net1.cuda(CUDA_DEVICE)

    if dataName != "Uniform" and dataName != "Gaussian":
        testsetout = torchvision.datasets.ImageFolder("../data/{}".format(dataName),
                                                      transform=transform)
        testloaderOut = torch.utils.data.DataLoader(testsetout, batch_size=1,
                                                    shuffle=False, num_workers=2)

    if nnName == "densenet10" or nnName == "wideresnet10":
        testset = torchvision.datasets.CIFAR10(root='../data', train=False,
                                               download=True, transform=transform)
        testloaderIn = torch.utils.data.DataLoader(testset, batch_size=1,
                                                   shuffle=False, num_workers=2)
    if nnName == "densenet100" or nnName == "wideresnet100":
        testset = torchvision.datasets.CIFAR100(root='../data', train=False,
                                                download=True, transform=transform)
        testloaderIn = torch.utils.data.DataLoader(testset, batch_size=1,
                                                   shuffle=False, num_workers=2)

    if dataName == "Gaussian":
        d.testGaussian(net1, criterion, CUDA_DEVICE, testloaderIn, testloaderIn,
                       nnName, dataName, epsilon, temperature)
        m.metric(nnName, dataName)
    elif dataName == "Uniform":
        d.testUni(net1, criterion, CUDA_DEVICE, testloaderIn, testloaderIn,
                  nnName, dataName, epsilon, temperature)
        m.metric(nnName, dataName)
    else:
        d.testData(net1, criterion, CUDA_DEVICE, testloaderIn, testloaderOut,
                   nnName, dataName, epsilon, temperature)
        m.metric(nnName, dataName)
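# The functions in this file read `transform` and `criterion` from module scope.
# Below is a minimal sketch of plausible definitions, assuming the usual CIFAR
# per-channel normalization from the ODIN reference code; the exact statistics
# used here may differ.
import torch.nn as nn
import torchvision.transforms as transforms

transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((125.3 / 255, 123.0 / 255, 113.9 / 255),
                         (63.0 / 255, 62.1 / 255, 66.7 / 255)),
])
criterion = nn.CrossEntropyLoss()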
def detect_ood_odin(self, gpu, nnName="model", epsilon=0.0014, temperature=1000):
    net1 = self.model
    testloaderIn = torch.utils.data.DataLoader(self.in_distribution, batch_size=1,
                                               shuffle=False, num_workers=2)
    testloadersOut = []
    for dataset in self.out_of_distribution:
        testloadersOut.append(
            torch.utils.data.DataLoader(dataset, batch_size=1,
                                        shuffle=False, num_workers=2))
    net1.cuda(gpu)
    d.testData(net1, torch.nn.CrossEntropyLoss(), gpu, testloaderIn, testloadersOut,
               nnName, self.data_lables[0], epsilon, temperature)
    m.metric(nnName, self.data_lables[0])
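# `d.testData` (calData) is not shown in this snippet. The score it is presumed
# to compute is the standard ODIN confidence: temperature-scale the logits,
# perturb the input by `epsilon` times the sign of the gradient of the scaled
# cross-entropy with respect to the input, then take the maximum softmax
# probability of the perturbed input. The function below is a self-contained
# sketch of that step; the name `odin_score` and the omission of per-channel
# gradient normalization are assumptions, not the repository's code.
import torch


def odin_score(model, images, epsilon, temperature):
    """Assumed sketch: ODIN confidence scores for a batch of images."""
    criterion = torch.nn.CrossEntropyLoss()
    images = images.clone().detach().requires_grad_(True)
    logits = model(images) / temperature
    # Use the model's own predictions as pseudo-labels for the perturbation.
    labels = logits.argmax(dim=1)
    loss = criterion(logits, labels)
    loss.backward()
    # Move the input against the loss gradient to increase the softmax score.
    perturbed = images - epsilon * images.grad.sign()
    with torch.no_grad():
        probs = torch.softmax(model(perturbed) / temperature, dim=1)
    return probs.max(dim=1).values  # higher score => more likely in-distribution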
def test(nnName, dataName, CUDA_DEVICE, epsilon, temperature):
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    net1 = vgg1.VGG('VGG16').to(device)
    # Test set: Average loss: 0.0015, Accuracy: 9337/10000 (93%)
    if use_cuda:
        net1 = torch.nn.DataParallel(net1)
        cudnn.benchmark = True
    checkpoint = torch.load("../../model/{}.pth".format(nnName))
    net1.load_state_dict(checkpoint['net'])
    net1.eval()

    if dataName != "Uniform" and dataName != "Gaussian":
        testsetout = torchvision.datasets.ImageFolder(
            "../../data/{}".format(dataName), transform=transform)
        testloaderOut = torch.utils.data.DataLoader(testsetout, batch_size=1,
                                                    shuffle=False, num_workers=2)

    # if nnName == "densenet10" or nnName == "wideresnet10":
    testset = torchvision.datasets.CIFAR10(root='../../data', train=False,
                                           download=True, transform=transform)
    testloaderIn = torch.utils.data.DataLoader(testset, batch_size=1,
                                               shuffle=False, num_workers=2)
    # if nnName == "densenet100" or nnName == "wideresnet100":
    #     testset = torchvision.datasets.CIFAR100(root='../../data', train=False,
    #                                             download=True, transform=transform)
    #     testloaderIn = torch.utils.data.DataLoader(testset, batch_size=1,
    #                                                shuffle=False, num_workers=2)

    if dataName == "Gaussian":
        d.testGaussian(net1, criterion, CUDA_DEVICE, testloaderIn, testloaderIn,
                       nnName, dataName, epsilon, temperature)
        m.metric(nnName, dataName)
    elif dataName == "Uniform":
        d.testUni(net1, criterion, CUDA_DEVICE, testloaderIn, testloaderIn,
                  nnName, dataName, epsilon, temperature)
        m.metric(nnName, dataName)
    else:
        d.testData(net1, criterion, CUDA_DEVICE, testloaderIn, testloaderOut,
                   nnName, dataName, epsilon, temperature)
        m.metric(nnName, dataName)
def test(nnName, dataName, CUDA_DEVICE, epsilon, temperature):
    net1 = torch.load("../models/{}.pth".format(nnName))
    optimizer1 = optim.SGD(net1.parameters(), lr=0, momentum=0)
    net1.cuda(CUDA_DEVICE)

    if dataName != "Uniform" and dataName != "Gaussian":
        if dataName == "SVHN":
            testsetout = svhn.SVHN("../data/SVHN", split='test',
                                   transform=transform, download=True)
        elif dataName in ["HFlip", "VFlip"]:
            testsetout = torchvision.datasets.CIFAR10('../data', train=False,
                                                      download=True,
                                                      transform=Flip[dataName])
        elif dataName == "CelebA":
            testsetout = torchvision.datasets.ImageFolder(
                "../data/{}".format(dataName),
                transform=transforms.Compose([transforms.CenterCrop(178),
                                              Resize(32), transform]))
        else:
            testsetout = torchvision.datasets.ImageFolder(
                "../data/{}".format(dataName), transform=transform)
        testloaderOut = torch.utils.data.DataLoader(testsetout, batch_size=1,
                                                    shuffle=False, num_workers=2)

    if nnName == "densenet10" or nnName == "wideresnet10":
        testset = torchvision.datasets.CIFAR10(root='../data', train=False,
                                               download=True, transform=transform)
        testloaderIn = torch.utils.data.DataLoader(testset, batch_size=1,
                                                   shuffle=False, num_workers=2)
    if nnName == "densenet100" or nnName == "wideresnet100":
        testset = torchvision.datasets.CIFAR100(root='../data', train=False,
                                                download=True, transform=transform)
        testloaderIn = torch.utils.data.DataLoader(testset, batch_size=1,
                                                   shuffle=False, num_workers=2)

    if dataName == "Gaussian":
        d.testGaussian(net1, criterion, CUDA_DEVICE, testloaderIn, testloaderIn,
                       nnName, dataName, epsilon, temperature)
        m.metric(nnName, dataName)
    elif dataName == "Uniform":
        d.testUni(net1, criterion, CUDA_DEVICE, testloaderIn, testloaderIn,
                  nnName, dataName, epsilon, temperature)
        m.metric(nnName, dataName)
    else:
        d.testData(net1, criterion, CUDA_DEVICE, testloaderIn, testloaderOut,
                   nnName, dataName, epsilon, temperature)
        m.metric(nnName, dataName)
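# The `Flip` lookup used by the "HFlip"/"VFlip" branches above is not defined in
# this snippet. It is assumed to map each name to a CIFAR-10 test transform that
# applies the corresponding deterministic flip before the usual preprocessing
# (the module-level `transform`); a sketch under that assumption:
Flip = {
    "HFlip": transforms.Compose([transforms.RandomHorizontalFlip(p=1.0), transform]),
    "VFlip": transforms.Compose([transforms.RandomVerticalFlip(p=1.0), transform]),
}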
def odin(args):
    # args (defaults):
    #   1) in_dataset : CIFAR10
    #   2) out_dataset : CIFAR100
    #   3) nn : Densenet_BC
    #   4) magnitude : 0.
    #   5) temperature : 1000
    #   6) gpu : 0
    #   7) tuning : True
    #   8) training : False
    criterion = nn.CrossEntropyLoss()

    in_dataset = args.in_dataset
    out_dataset = args.out_dataset
    target_dataset = args.target_dataset
    NNModels = args.nn
    magnitude = args.magnitude
    temperature = args.temperature
    CUDA_DEVICE = args.gpu
    TUNING = args.tuning
    Training = args.training

    ##### Pretrained model setting #####
    model_name = in_dataset + '_' + NNModels
    adapted_model_name = args.train_mode + '_to_' + args.target_dataset + '_final.ckpt'
    model = globals()[model_name]()  # only the model module is imported
    modelpath = './workspace/model_ckpts/' + model_name + '/'

    ##### Datamodule setting #####
    if args.in_num != 1:
        in_dm = globals()[in_dataset + target_dataset + 'DataModule'](
            TUNING=TUNING, Training=Training, batch_size=1)
        out_dm = globals()[out_dataset + 'DataModule'](TUNING=TUNING,
                                                       Training=Training,
                                                       batch_size=1)
    else:
        # Case where a single dataset forms the in-distribution, regardless of train_mode.
        in_dm = globals()[in_dataset + 'DataModule'](TUNING=TUNING,
                                                     Training=Training,
                                                     batch_size=1)
        out_dm = globals()[out_dataset + 'DataModule'](TUNING=TUNING,
                                                       Training=Training,
                                                       batch_size=1)

    os.makedirs(modelpath, exist_ok=True)
    checkpoint_callback = ModelCheckpoint(filepath=modelpath + adapted_model_name)
    trainer = Trainer(checkpoint_callback=checkpoint_callback, gpus=1,
                      num_nodes=1, max_epochs=1)

    if os.path.isfile(modelpath + adapted_model_name):
        DANN_ = DANN(None, model, args.train_mode)
        model = DANN_.load_from_checkpoint(modelpath + adapted_model_name,
                                           model, args.train_mode)
    else:
        print('No pretrained model.', 'Execute train.py first', sep='\n')
        return 0

    ##### Softmax scores path setting #####
    path = './workspace/softmax_scores/'
    os.makedirs(path, exist_ok=True)

    ##### Distribution result path setting #####
    result_path = ('./OOD_method/distribution_result/' + args.train_mode + '/' +
                   args.in_dataset + str(args.in_num) + args.out_dataset + '/')
    os.makedirs(result_path, exist_ok=True)

    T_candidate = [1, 10, 100, 1000]
    e_candidate = [0, 0.0005, 0.001, 0.0014, 0.002, 0.0024, 0.005, 0.01, 0.05, 0.1, 0.2]

    if TUNING:
        temperature = T_candidate
        magnitude = e_candidate
    else:
        # Make the float/int arguments iterable.
        temperature = [args.temperature]
        magnitude = [args.magnitude]

    tnr_best = 0.
    T_temp = 1
    ep_temp = 0

    for T in temperature:
        for ep in magnitude:
            print('T : ', T)
            print('epsilon : ', ep)

            ##### Open files to save confidence scores #####
            f1 = open(path + "confidence_Base_In.txt", 'w')
            f2 = open(path + "confidence_Base_Out.txt", 'w')
            g1 = open(path + "confidence_Odin_In.txt", 'w')
            g2 = open(path + "confidence_Odin_Out.txt", 'w')

            if out_dataset == "Gaussian":
                calMetric.metric(path)
            elif out_dataset == "Uniform":
                calMetric.metric(path)
            else:
                # In-distribution detector
                detector = ODIN(model, criterion, CUDA_DEVICE, ep, T, f1, g1)
                trainer.fit(detector, datamodule=in_dm)
                # Out-of-distribution detector
                detector = ODIN(model, criterion, CUDA_DEVICE, ep, T, f2, g2)
                trainer.fit(detector, datamodule=out_dm)
                # Calculate metrics and keep the best TNR configuration
                results = calMetric.metric(path)
                if tnr_best < results['Odin']['TNR']:
                    tnr_best = results['Odin']['TNR']
                    results_best = results
                    T_temp = T
                    ep_temp = ep

            f1.close()
            f2.close()
            g1.close()
            g2.close()

    if TUNING:
        TUNING = False
        # Tuning ended; run calMetric on the remaining 9,000 samples with the best (T, ep).
        print('\nBest Performance Out-of-Distribution Detection')
        print('T : ', T_temp)
        print('epsilon : ', ep_temp)

        f1 = open(path + "confidence_Base_In.txt", 'w')
        f2 = open(path + "confidence_Base_Out.txt", 'w')
        g1 = open(path + "confidence_Odin_In.txt", 'w')
        g2 = open(path + "confidence_Odin_Out.txt", 'w')

        in_dm = globals()[in_dataset + 'DataModule'](TUNING=TUNING,
                                                     Training=Training,
                                                     batch_size=1)
        out_dm = globals()[out_dataset + 'DataModule'](TUNING=TUNING,
                                                       Training=Training,
                                                       batch_size=1)

        detector = ODIN(model, criterion, CUDA_DEVICE, ep_temp, T_temp, f1, g1)
        trainer.fit(detector, datamodule=in_dm)
        detector = ODIN(model, criterion, CUDA_DEVICE, ep_temp, T_temp, f2, g2)
        trainer.fit(detector, datamodule=out_dm)

        results_best = calMetric.metric(path)

        f1.close()
        f2.close()
        g1.close()
        g2.close()

    save_histogram(path, result_path)

    print('\nBest Performance Out-of-Distribution Detection')
    print('T : ', T_temp)
    print('epsilon : ', ep_temp)
    print("{:31}{:>22}".format("Neural network architecture:", NNModels))
    print("{:31}{:>22}".format("In-distribution dataset:", in_dataset))
    print("{:31}{:>22}".format("Out-of-distribution dataset:", out_dataset))
    print("")
    print("{:>34}{:>19}".format("Baseline", "Odin"))
    print("{:20}{:13.1f}%{:>18.1f}%".format(
        "TNR at TPR 95%:", results_best['Base']['TNR'] * 100,
        results_best['Odin']['TNR'] * 100))
    print("{:20}{:13.1f}%{:>18.1f}%".format(
        "Accuracy:", results_best['Base']['DTACC'] * 100,
        results_best['Odin']['DTACC'] * 100))
    print("{:20}{:13.1f}%{:>18.1f}%".format(
        "AUROC:", results_best['Base']['AUROC'] * 100,
        results_best['Odin']['AUROC'] * 100))
    print("{:20}{:13.1f}%{:>18.1f}%".format(
        "AUPR In:", results_best['Base']['AUIN'] * 100,
        results_best['Odin']['AUIN'] * 100))
    print("{:20}{:13.1f}%{:>18.1f}%".format(
        "AUPR Out:", results_best['Base']['AUOUT'] * 100,
        results_best['Odin']['AUOUT'] * 100))
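# A hedged usage sketch for `odin`: it builds an argument namespace with the
# attributes the function reads (in_dataset, out_dataset, target_dataset, nn,
# magnitude, temperature, gpu, tuning, training, train_mode, in_num). The values
# for train_mode, target_dataset and in_num are illustrative guesses; the others
# follow the defaults documented in the comment at the top of `odin`.
if __name__ == '__main__':
    from argparse import Namespace

    args = Namespace(in_dataset='CIFAR10', out_dataset='CIFAR100',
                     target_dataset='CIFAR100', nn='Densenet_BC',
                     magnitude=0., temperature=1000, gpu=0,
                     tuning=True, training=False,
                     train_mode='source_only', in_num=1)
    odin(args)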