def test(self, data, adversarial, test_noise=args.test_noise):
        """
        Computes Mahalanobis scores for test set in_data, out_data
        Args:
        - data: name of out dataset
        - adversarial: boolean flag for if this is an adversarial input
        - ood boolean flag for if this is not the in-dataset
        - test_noise: constant noise to add to test data to help separation
        """
        adversarial = data in ADVERSARIAL  # an adversarial out-set
        positive = data == self.in_data  # in-distribution, labeled 1 below

        if positive:
            test_loader = self.in_test_loader
        elif not adversarial:
            # a regular torch DataLoader can be re-iterated, so load it once
            test_loader = data_loader.getNonTargetDataSet(
                data, self.batch_size, args.data_path)

        if args.verbose:
            print(">> Testing on dataset ", data)

        Mahalanobis_scores = np.array([])
        for i in range(self.num_layers):
            if args.verbose:
                print(">> Layer  ", i)

            # the adversarial set is returned as a list-backed loader that is
            # exhausted after one pass, so it must be reloaded for every layer
            if adversarial:
                test_loader = data_loader.getAdversarialDataSet(
                    data, args.model, self.in_data, self.batch_size)

            layer_scores = self._get_Mahalanobis_score(data, test_loader, i,
                                                       test_noise)
            layer_scores = np.expand_dims(layer_scores, axis=1)  # (N,) -> (N, 1)
            Mahalanobis_scores = np.hstack(
                (Mahalanobis_scores,
                 layer_scores)) if Mahalanobis_scores.size else layer_scores
            # print(Mahalanobis_scores.shape)

        # save results
        Mahalanobis_labels = np.ones(
            Mahalanobis_scores.shape[0]) if positive else np.zeros(
                Mahalanobis_scores.shape[0])
        Mahalanobis_labels = np.expand_dims(Mahalanobis_labels,
                                            axis=1)  # (N,) -> (N, 1)

        Mahalanobis_data = np.hstack((Mahalanobis_scores, Mahalanobis_labels))

        file_name = os.path.join(
            self.save_path,
            'Mahalanobis_%s_%s_%s.npy' % (str(test_noise), self.in_data, data))

        if args.verbose:
            print(">> Writing cumulative results to ", file_name)

        np.save(file_name, Mahalanobis_data)
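# --- Illustration (not part of the original script): reading back one of the
# score files written by `test` above. The last column is the label
# (1 = in-distribution, per the code above); the remaining columns are
# per-layer Mahalanobis scores. The file name below is a hypothetical example.
import numpy as np
from sklearn.metrics import roc_auc_score

saved = np.load('Mahalanobis_0.01_cifar10_svhn.npy')  # hypothetical path
scores, labels = saved[:, :-1], saved[:, -1]
for layer in range(scores.shape[1]):
    print('layer %d AUROC: %.4f' % (layer, roc_auc_score(labels, scores[:, layer])))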
def main():
    # set the path to pre-trained model and output
    pre_trained_net = './pre_trained/' + args.net_type + '_' + args.dataset + '.pth'
    args.outf = args.outf + args.net_type + '_' + args.dataset + '/'
    if not os.path.isdir(args.outf):
        os.mkdir(args.outf)
    torch.cuda.manual_seed(0)
    torch.cuda.set_device(args.gpu)
    # check the in-distribution dataset
    if args.dataset == 'cifar100':
        args.num_classes = 100
    if args.dataset == 'svhn':
        out_dist_list = ['cifar10', 'imagenet_resize', 'lsun_resize']
    else:
        out_dist_list = ['svhn', 'imagenet_resize', 'lsun_resize']

    # load networks
    if args.net_type == 'densenet':
        if args.dataset == 'svhn':
            model = models.DenseNet3(100, int(args.num_classes))
            model.load_state_dict(
                torch.load(pre_trained_net,
                           map_location="cuda:" + str(args.gpu)))
        else:
            model = torch.load(pre_trained_net,
                               map_location="cuda:" + str(args.gpu))
        in_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((125.3 / 255, 123.0 / 255, 113.9 / 255),
                                 (63.0 / 255, 62.1 / 255.0, 66.7 / 255.0)),
        ])
    elif args.net_type == 'resnet':
        model = models.ResNet34(num_c=args.num_classes)
        model.load_state_dict(
            torch.load(pre_trained_net, map_location="cuda:" + str(args.gpu)))
        in_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465),
                                 (0.2023, 0.1994, 0.2010)),
        ])
    model.cuda()
    print('load model: ' + args.net_type)

    # load dataset
    print('load target data: ', args.dataset)
    train_loader, test_loader = data_loader.getTargetDataSet(
        args.dataset, args.batch_size, in_transform, args.dataroot)

    # set information about feature extraction
    model.eval()
    temp_x = torch.rand(2, 3, 32, 32).cuda()
    temp_x = Variable(temp_x)
    temp_list = model.feature_list(temp_x)[1]
    num_output = len(temp_list)
    feature_list = np.empty(num_output)
    count = 0
    for out in temp_list:
        feature_list[count] = out.size(1)  # channel count of each feature map
        count += 1


################################ edits: optional SVD/PCA input for sample_estimator
    print("Calculate SVD before getting sample mean and variance")
    # svd_result = lib_generation.get_pca(model, args.num_classes, feature_list, train_loader)
    # lib_generation.get_pca_incremental(model, args.num_classes, feature_list, train_loader, args)
    svd_result = None  # both PCA variants above are disabled
    print('get sample mean and covariance')
    sample_mean, precision = lib_generation.sample_estimator(
        model, args.num_classes, feature_list, train_loader, svd_result, args)
    ################################ edits_end_sample_generator
    print('get Mahalanobis scores')
    m_list = [0.0, 0.01, 0.005, 0.002, 0.0014, 0.001, 0.0005]
    for magnitude in m_list:
        print('Noise: ' + str(magnitude))
        for i in range(num_output):
            M_in = lib_generation.get_Mahalanobis_score(model, test_loader, args.num_classes, args.outf, \
                                                        True, args.net_type, sample_mean, precision, i, magnitude, svd_result, args)
            M_in = np.asarray(M_in, dtype=np.float32)
            if i == 0:
                Mahalanobis_in = M_in.reshape((M_in.shape[0], -1))
            else:
                Mahalanobis_in = np.concatenate(
                    (Mahalanobis_in, M_in.reshape((M_in.shape[0], -1))),
                    axis=1)

        for out_dist in out_dist_list:
            out_test_loader = data_loader.getNonTargetDataSet(
                out_dist, args.batch_size, in_transform, args.dataroot)
            print('Out-distribution: ' + out_dist)
            for i in range(num_output):
                M_out = lib_generation.get_Mahalanobis_score(model, out_test_loader, args.num_classes, args.outf, \
                                                             False, args.net_type, sample_mean, precision, i, magnitude, svd_result, args)
                M_out = np.asarray(M_out, dtype=np.float32)
                if i == 0:
                    Mahalanobis_out = M_out.reshape((M_out.shape[0], -1))
                else:
                    Mahalanobis_out = np.concatenate(
                        (Mahalanobis_out, M_out.reshape((M_out.shape[0], -1))),
                        axis=1)

            Mahalanobis_in = np.asarray(Mahalanobis_in, dtype=np.float32)
            Mahalanobis_out = np.asarray(Mahalanobis_out, dtype=np.float32)
            Mahalanobis_data, Mahalanobis_labels = lib_generation.merge_and_generate_labels(
                Mahalanobis_out, Mahalanobis_in)
            file_name = os.path.join(
                args.outf, 'Mahalanobis_%s_%s_%s.npy' %
                (str(magnitude), args.dataset, out_dist))
            Mahalanobis_data = np.concatenate(
                (Mahalanobis_data, Mahalanobis_labels), axis=1)
            np.save(file_name, Mahalanobis_data)
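# --- Illustration (not part of the original script): the usual next stage of
# this pipeline fits a regressor over the per-layer scores saved above. A
# minimal scikit-learn sketch; the file name is hypothetical, the label column
# is whatever merge_and_generate_labels encoded, and the in-sample fit is only
# meant to show the data layout (a real run would hold out a validation split).
import numpy as np
from sklearn.linear_model import LogisticRegressionCV

saved = np.load('Mahalanobis_0.01_cifar10_svhn.npy')  # hypothetical path
X, y = saved[:, :-1], saved[:, -1]
detector = LogisticRegressionCV(n_jobs=-1).fit(X, y)
print('combined-layer detector accuracy: %.4f' % detector.score(X, y))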
def main():
    # set the path to pre-trained model and output
    pre_trained_net = './pre_trained/' + args.net_type + '_' + args.dataset + '.pth'
    args.outf = args.outf + args.net_type + '_' + args.dataset + '/'
    if not os.path.isdir(args.outf):
        os.mkdir(args.outf)
    torch.cuda.manual_seed(0)
    torch.cuda.set_device(args.gpu)
    
    # check the in-distribution dataset
    if args.dataset == 'cifar100':
        args.num_classes = 100
    if args.dataset == 'svhn':
        out_dist_list = ['cifar10', 'imagenet_resize']  # 'lsun_resize' disabled
    else:
        out_dist_list = ['svhn', 'imagenet_resize']  # 'lsun_resize' disabled
        
    # load networks
    if args.net_type == 'densenet':
        if args.dataset == 'svhn':
            model = models.DenseNet3(100, int(args.num_classes))
            model.load_state_dict(torch.load(pre_trained_net, map_location="cuda:" + str(args.gpu)))
        else:
            model = torch.load(pre_trained_net, map_location="cuda:" + str(args.gpu))
        in_transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize([0.5071, 0.4867, 0.4408], [0.2675, 0.2565, 0.2761]),])
    elif args.net_type == 'resnet':
        model = models.ResNet34(num_c=args.num_classes)
        model.load_state_dict(torch.load(pre_trained_net, map_location="cuda:" + str(args.gpu)))
        in_transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),])
    model.cuda()
    print('load model: ' + args.net_type)
    
    # load dataset
    print('load target data: ', args.dataset)
    train_loader, test_loader = data_loader.getTargetDataSet(args.dataset, args.batch_size, in_transform, args.dataroot)

    # measure the performance
    M_list = [0, 0.0005, 0.001, 0.0014, 0.002, 0.0024, 0.005, 0.01, 0.05, 0.1, 0.2]
    T_list = [1, 10, 100, 1000]
    base_line_list = []
    ODIN_best_tnr = [0, 0, 0]
    ODIN_best_results = [0, 0, 0]
    ODIN_best_temperature = [-1, -1, -1]
    ODIN_best_magnitude = [-1, -1, -1]
    for T in T_list:
        for m in M_list:
            magnitude = m
            temperature = T
            lib_generation.get_posterior(model, args.net_type, test_loader, magnitude, temperature, args.outf, True)
            out_count = 0
            print('Temperature: ' + str(temperature) + ' / noise: ' + str(magnitude)) 
            for out_dist in out_dist_list:
                out_test_loader = data_loader.getNonTargetDataSet(out_dist, args.batch_size, in_transform, args.dataroot)
                print('Out-distribution: ' + out_dist) 
                lib_generation.get_posterior(model, args.net_type, out_test_loader, magnitude, temperature, args.outf, False)
                if temperature == 1 and magnitude == 0:
                    test_results = callog.metric(args.outf, ['PoT'])
                    base_line_list.append(test_results)
                else:
                    val_results = callog.metric(args.outf, ['PoV'])
                    if ODIN_best_tnr[out_count] < val_results['PoV']['TNR']:
                        ODIN_best_tnr[out_count] = val_results['PoV']['TNR']
                        ODIN_best_results[out_count] = callog.metric(args.outf, ['PoT'])
                        ODIN_best_temperature[out_count] = temperature
                        ODIN_best_magnitude[out_count] = magnitude
                out_count += 1
    
    # print the results
    mtypes = ['TNR', 'AUROC', 'DTACC', 'AUIN', 'AUOUT']
    print('Baseline method: in_distribution: ' + args.dataset + '==========')
    count_out = 0
    for results in base_line_list:
        print('out_distribution: '+ out_dist_list[count_out])
        for mtype in mtypes:
            print(' {mtype:6s}'.format(mtype=mtype), end='')
        print('\n{val:6.2f}'.format(val=100.*results['PoT']['TNR']), end='')
        print(' {val:6.2f}'.format(val=100.*results['PoT']['AUROC']), end='')
        print(' {val:6.2f}'.format(val=100.*results['PoT']['DTACC']), end='')
        print(' {val:6.2f}'.format(val=100.*results['PoT']['AUIN']), end='')
        print(' {val:6.2f}\n'.format(val=100.*results['PoT']['AUOUT']), end='')
        print('')
        count_out += 1
        
    print('ODIN method: in_distribution: ' + args.dataset + '==========')
    count_out = 0
    for results in ODIN_best_results:
        print('out_distribution: '+ out_dist_list[count_out])
        for mtype in mtypes:
            print(' {mtype:6s}'.format(mtype=mtype), end='')
        print('\n{val:6.2f}'.format(val=100.*results['PoT']['TNR']), end='')
        print(' {val:6.2f}'.format(val=100.*results['PoT']['AUROC']), end='')
        print(' {val:6.2f}'.format(val=100.*results['PoT']['DTACC']), end='')
        print(' {val:6.2f}'.format(val=100.*results['PoT']['AUIN']), end='')
        print(' {val:6.2f}\n'.format(val=100.*results['PoT']['AUOUT']), end='')
        print('temperature: ' + str(ODIN_best_temperature[count_out]))
        print('magnitude: '+ str(ODIN_best_magnitude[count_out]))
        print('')
        count_out += 1
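# --- Illustration (not part of the original script): lib_generation.get_posterior
# is not shown in this listing. As a rough, self-contained sketch, ODIN scores
# an input with a temperature-scaled softmax after a small confidence-raising
# input perturbation; `model` is any classifier returning raw logits, and all
# names below are illustrative.
import torch
import torch.nn.functional as F

def odin_score(model, x, temperature=1000.0, magnitude=0.0014):
    x = x.detach().clone().requires_grad_(True)
    log_probs = F.log_softmax(model(x) / temperature, dim=1)
    # nudge the input toward higher confidence in the predicted class
    log_probs.max(dim=1).values.sum().backward()
    x_hat = x + magnitude * x.grad.sign()
    with torch.no_grad():
        probs = F.softmax(model(x_hat) / temperature, dim=1)
    return probs.max(dim=1).values  # ODIN confidence per input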
Example #4
def main():
    # set the path to pre-trained model and output
    pre_trained_net = "./pre_trained/" + args.net_type + "_" + args.dataset + ".pth"
    args.outf = args.outf + args.net_type + "_" + args.dataset + "/"
    if not os.path.isdir(args.outf):
        os.mkdir(args.outf)
    torch.cuda.manual_seed(0)
    torch.cuda.set_device(args.gpu)
    # check the in-distribution dataset
    if args.dataset == "cifar100":
        args.num_classes = 100
    if args.dataset == "svhn":
        out_dist_list = ["cifar10", "imagenet_resize", "lsun_resize"]
    else:
        out_dist_list = ["svhn", "imagenet_resize", "lsun_resize"]

    # load networks
    if args.net_type == "densenet":
        if args.dataset == "svhn":
            model = models.DenseNet3(100, int(args.num_classes))
            model.load_state_dict(
                torch.load(pre_trained_net,
                           map_location="cuda:" + str(args.gpu)))
        else:
            model = torch.load(pre_trained_net,
                               map_location="cuda:" + str(args.gpu))
        in_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(
                (125.3 / 255, 123.0 / 255, 113.9 / 255),
                (63.0 / 255, 62.1 / 255.0, 66.7 / 255.0),
            ),
        ])
    elif args.net_type == "resnet":
        model = models.ResNet34(num_c=args.num_classes)
        model.load_state_dict(
            torch.load(pre_trained_net, map_location="cuda:" + str(args.gpu)))
        in_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465),
                                 (0.2023, 0.1994, 0.2010)),
        ])
    model.cuda()
    print("load model: " + args.net_type)

    # load dataset
    print("load target data: ", args.dataset)
    train_loader, test_loader = data_loader.getTargetDataSet(
        args.dataset, args.batch_size, in_transform, args.dataroot)

    # set information about feature extraction
    model.eval()
    temp_x = torch.rand(2, 3, 32, 32).cuda()
    temp_x = Variable(temp_x)
    temp_list = model.feature_list(temp_x)[1]
    num_output = len(temp_list)
    feature_list = np.empty(num_output)
    count = 0
    for out in temp_list:
        feature_list[count] = out.size(1)
        count += 1

    print("get sample mean and covariance")
    sample_mean, precision = lib_generation.sample_estimator(
        model, args.num_classes, feature_list, train_loader)

    print("get Mahalanobis scores")
    m_list = [0.0, 0.01, 0.005, 0.002, 0.0014, 0.001, 0.0005]
    for magnitude in m_list:
        print("Noise: " + str(magnitude))
        for i in range(num_output):
            M_in = lib_generation.get_Mahalanobis_score(
                model,
                test_loader,
                args.num_classes,
                args.outf,
                True,
                args.net_type,
                sample_mean,
                precision,
                i,
                magnitude,
            )
            M_in = np.asarray(M_in, dtype=np.float32)
            if i == 0:
                Mahalanobis_in = M_in.reshape((M_in.shape[0], -1))
            else:
                Mahalanobis_in = np.concatenate(
                    (Mahalanobis_in, M_in.reshape((M_in.shape[0], -1))),
                    axis=1)

        for out_dist in out_dist_list:
            out_test_loader = data_loader.getNonTargetDataSet(
                out_dist, args.batch_size, in_transform, args.dataroot)
            print("Out-distribution: " + out_dist)
            for i in range(num_output):
                M_out = lib_generation.get_Mahalanobis_score(
                    model,
                    out_test_loader,
                    args.num_classes,
                    args.outf,
                    False,
                    args.net_type,
                    sample_mean,
                    precision,
                    i,
                    magnitude,
                )
                M_out = np.asarray(M_out, dtype=np.float32)
                if i == 0:
                    Mahalanobis_out = M_out.reshape((M_out.shape[0], -1))
                else:
                    Mahalanobis_out = np.concatenate(
                        (Mahalanobis_out, M_out.reshape((M_out.shape[0], -1))),
                        axis=1)

            Mahalanobis_in = np.asarray(Mahalanobis_in, dtype=np.float32)
            Mahalanobis_out = np.asarray(Mahalanobis_out, dtype=np.float32)
            (
                Mahalanobis_data,
                Mahalanobis_labels,
            ) = lib_generation.merge_and_generate_labels(
                Mahalanobis_out, Mahalanobis_in)
            file_name = os.path.join(
                args.outf,
                "Mahalanobis_%s_%s_%s.npy" %
                (str(magnitude), args.dataset, out_dist),
            )
            Mahalanobis_data = np.concatenate(
                (Mahalanobis_data, Mahalanobis_labels), axis=1)
            np.save(file_name, Mahalanobis_data)
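# --- Illustration (not part of the original script): the core computation
# behind get_Mahalanobis_score, sketched for one feature layer. class_means
# and precision play the roles of sample_estimator's outputs; the input
# perturbation step of the full method is omitted here for brevity.
import torch

def mahalanobis_confidence(features, class_means, precision):
    # features: (N, d); class_means: (C, d); precision: (d, d)
    scores = []
    for mu in class_means:
        diff = features - mu  # (N, d)
        scores.append(-torch.einsum('nd,de,ne->n', diff, precision, diff))
    return torch.stack(scores, dim=1).max(dim=1).values  # closest-class score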
Example #5
def main():

    if not os.path.isdir('feature_lists'):
        os.mkdir('feature_lists')

    if args.dataset == 'cifar100':
        args.num_classes = 100
    else:
        args.num_classes = 10

    # load networks
    pre_trained_net = args.net_type + '_' + args.dataset + '.pth'
    pre_trained_net = os.path.join('pre_trained', pre_trained_net)
    if args.net_type == 'densenet':
        if args.dataset == 'svhn':
            model = models.DenseNet3(100, int(args.num_classes))
            model.load_state_dict(
                torch.load(pre_trained_net, map_location="cuda:" + str(0)))
        else:
            model = torch.load(pre_trained_net, map_location="cpu")
            for i, (name, module) in enumerate(model._modules.items()):
                module = recursion_change_bn(model)
            for m in model.modules():
                if 'Conv' in str(type(m)):
                    setattr(m, 'padding_mode', 'zeros')
        in_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((125.3 / 255, 123.0 / 255, 113.9 / 255),
                                 (63.0 / 255, 62.1 / 255.0, 66.7 / 255.0)),
        ])
    elif args.net_type == 'resnet':

        model = models.ResNet34(num_c=args.num_classes)
        model.load_state_dict(
            torch.load(pre_trained_net, map_location="cuda:" + str(0)))
        in_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465),
                                 (0.2023, 0.1994, 0.2010)),
        ])

    if args.validation_src == 'FGSM':
        if args.dataset == 'svhn':
            out_dist_list = [
                'cifar10', 'imagenet_resize', 'lsun_resize', 'FGSM'
            ]
        else:
            out_dist_list = ['svhn', 'imagenet_resize', 'lsun_resize', 'FGSM']

    else:
        if args.dataset == 'svhn':
            out_dist_list = ['cifar10', 'imagenet_resize', 'lsun_resize']
        else:
            out_dist_list = ['svhn', 'imagenet_resize', 'lsun_resize']

    print('load model: ' + args.net_type)
    model.to(device)
    model.eval()

    # load dataset
    print('load target data: ', args.dataset)
    train_loader, test_loader = data_loader.getTargetDataSet(
        args.dataset, args.batch_size, in_transform, args.dataroot)

    # set information about feature extraction
    temp_x = torch.rand(2, 3, 32, 32).cuda()
    temp_x = Variable(temp_x)
    temp_list = model.feature_list(temp_x)[1]
    num_output = len(temp_list)
    feature_list = np.empty(num_output)
    count = 0
    for out in temp_list:
        feature_list[count] = out.size(1)
        count += 1

    correct, total = 0, 0
    num_output = len(feature_list)
    num_sample_per_class = np.empty(args.num_classes)
    num_sample_per_class.fill(0)
    list_features = []
    list_features_test = []
    list_features_out = []
    for i in range(num_output):
        temp_list = []
        list_features_test.append(0)
        list_features_out.append(0)
        for j in range(args.num_classes):
            temp_list.append(0)
        list_features.append(temp_list)

    for data, target in train_loader:
        total += data.size(0)
        data = data.cuda()
        # volatile=True is the pre-0.4 PyTorch way of disabling autograd here
        data = Variable(data, volatile=True)
        output, out_features = model.feature_list(data)

        # get hidden features
        for i in range(num_output):
            out_features[i] = out_features[i].view(out_features[i].size(0),
                                                   out_features[i].size(1), -1)
            out_features[i] = torch.mean(out_features[i].data, 2)

        # compute the accuracy
        pred = output.data.max(1)[1]
        equal_flag = pred.eq(target.cuda()).cpu()
        correct += equal_flag.sum()

        # construct the sample matrix
        for i in range(data.size(0)):
            label = target[i]
            if num_sample_per_class[label] == 0:
                out_count = 0
                for out in out_features:
                    list_features[out_count][label] = out[i].view(1, -1)
                    out_count += 1
            else:
                out_count = 0
                for out in out_features:
                    list_features[out_count][label] = torch.cat(
                        (list_features[out_count][label], out[i].view(1, -1)),
                        0)
                    out_count += 1
            num_sample_per_class[label] += 1

    sample_class_mean = []
    out_count = 0
    for num_feature in feature_list:
        temp_list = torch.Tensor(args.num_classes, int(num_feature)).cuda()
        for j in range(args.num_classes):
            temp_list[j] = torch.mean(list_features[out_count][j], 0)
        sample_class_mean.append(temp_list)
        out_count += 1

    A = []
    A_inv = []
    log_abs_det_A_inv = []
    for k in range(num_output):
        X = 0
        for i in range(args.num_classes):
            if i == 0:
                X = list_features[k][i] - sample_class_mean[k][i]
            else:
                X = torch.cat(
                    (X, list_features[k][i] - sample_class_mean[k][i]), 0)

        # find inverse
        u, s, vh = np.linalg.svd((X.cpu().numpy()) / np.sqrt(X.shape[0]),
                                 full_matrices=False)
        covariance_real = np.cov(X.cpu().numpy().T)  # dense estimate, kept for comparison
        valid_indx = s > 1e-5
        # keep an even number of components, presumably so the RealNVP
        # coupling masks can split the features exactly in half
        if (valid_indx.sum() % 2 > 0):
            valid_indx[valid_indx.sum() - 1] = False
        covariance_cal = np.matmul(
            np.matmul(vh[valid_indx, :].transpose(),
                      np.diag(s[valid_indx]**2)), vh[valid_indx, :])
        A_temp = np.matmul(vh[valid_indx, :].transpose(),
                           np.diag(s[valid_indx]))
        A.append(A_temp)
        covariance_cal2 = np.matmul(A_temp, A_temp.transpose())  # equals covariance_cal
        s_inv = 1 / s[valid_indx]
        A_inv_temp = np.matmul(np.diag(s_inv), vh[valid_indx, :])
        A_inv.append(A_inv_temp)
        log_abs_det_A_inv_temp = np.sum(np.log(np.abs(s_inv)))
        log_abs_det_A_inv.append(log_abs_det_A_inv_temp)

    print('\nTraining Accuracy: ({:.2f}%)\n'.format(100.0 * int(correct) /
                                                    int(total)))

    num_sample_per_output = np.empty(num_output)
    num_sample_per_output.fill(0)
    for data, target in test_loader:

        data = data.cuda()
        data = Variable(data, volatile=True)
        output, out_features = model.feature_list(data)

        # get hidden features
        for i in range(num_output):
            out_features[i] = out_features[i].view(out_features[i].size(0),
                                                   out_features[i].size(1), -1)
            out_features[i] = torch.mean(out_features[i].data, 2)

            if num_sample_per_output[i] == 0:
                list_features_test[i] = out_features[i]
            else:
                list_features_test[i] = torch.cat(
                    (list_features_test[i], out_features[i]), 0)
            num_sample_per_output[i] += 1

    for out_dist in out_dist_list:

        if out_dist == 'FGSM':
            test_loader, out_test_loader = data_loader.getFGSM(
                args.batch_size, args.dataset, args.net_type)
            num_sample_per_output.fill(0)

            for data in test_loader:

                data = data.cuda()
                data = Variable(data, volatile=True)
                output, out_features = model.feature_list(data)

                # get hidden features
                for i in range(num_output):
                    out_features[i] = out_features[i].view(
                        out_features[i].size(0), out_features[i].size(1), -1)
                    out_features[i] = torch.mean(out_features[i].data, 2)

                    if num_sample_per_output[i] == 0:
                        list_features_test[i] = out_features[i]
                    else:
                        list_features_test[i] = torch.cat(
                            (list_features_test[i], out_features[i]), 0)
                    num_sample_per_output[i] += 1

            num_sample_per_output = np.empty(num_output)
            num_sample_per_output.fill(0)

            for data in out_test_loader:
                data = data.cuda()
                data = Variable(data, requires_grad=True)
                output, out_features = model.feature_list(data)

                # get hidden features
                for i in range(num_output):
                    out_features[i] = out_features[i].view(
                        out_features[i].size(0), out_features[i].size(1), -1)
                    out_features[i] = torch.mean(out_features[i].data, 2)

                    if num_sample_per_output[i] == 0:
                        list_features_out[i] = out_features[i]
                    else:
                        list_features_out[i] = torch.cat(
                            (list_features_out[i], out_features[i]), 0)
                    num_sample_per_output[i] += 1

            for i in range(num_output):
                sample_class_mean[i] = sample_class_mean[i].cpu()
                list_features_test[i] = list_features_test[i].cpu()
                list_features_out[i] = list_features_out[i].cpu()
                for j in range(args.num_classes):
                    list_features[i][j] = list_features[i][j].cpu()

        else:
            out_test_loader = data_loader.getNonTargetDataSet(
                out_dist, args.batch_size, in_transform, args.dataroot)
            num_sample_per_output.fill(0)

            for data, target in out_test_loader:

                data, target = data.cuda(), target.cuda()
                data, target = Variable(data,
                                        requires_grad=True), Variable(target)
                output, out_features = model.feature_list(data)

                # get hidden features
                for i in range(num_output):
                    out_features[i] = out_features[i].view(
                        out_features[i].size(0), out_features[i].size(1), -1)
                    out_features[i] = torch.mean(out_features[i].data, 2)

                    if num_sample_per_output[i] == 0:
                        list_features_out[i] = out_features[i]
                    else:
                        list_features_out[i] = torch.cat(
                            (list_features_out[i], out_features[i]), 0)
                    num_sample_per_output[i] += 1

            for i in range(num_output):
                sample_class_mean[i] = sample_class_mean[i].cpu()
                list_features_test[i] = list_features_test[i].cpu()
                list_features_out[i] = list_features_out[i].cpu()
                for j in range(args.num_classes):
                    list_features[i][j] = list_features[i][j].cpu()

        file_name = os.path.join(
            'feature_lists', 'feature_lists_{}_{}_{}_Wlinear.pickle'.format(
                args.net_type, out_dist, args.dataset))
        with open(file_name, 'wb') as f:
            pickle.dump([
                sample_class_mean, list_features, list_features_test,
                list_features_out, A, A_inv, log_abs_det_A_inv
            ], f)
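# --- Illustration (not part of the original script): a small numeric check of
# the factorization built above. With A = V diag(s) taken from the SVD of
# X / sqrt(N), A @ A.T reproduces X^T X / N, i.e. the biased covariance of the
# centered features.
import numpy as np

rng = np.random.default_rng(0)
X_demo = rng.standard_normal((1000, 8))
X_demo -= X_demo.mean(axis=0)  # center, as subtracting class means does above
_, s_d, vh_d = np.linalg.svd(X_demo / np.sqrt(X_demo.shape[0]),
                             full_matrices=False)
A_demo = vh_d.T @ np.diag(s_d)  # same construction as A_temp above
print(np.allclose(A_demo @ A_demo.T,
                  np.cov(X_demo.T, bias=True)))  # True: V s^2 V^T == X^T X / N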
Example #6
    torch.cuda.manual_seed(args.seed)

kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}

print('Load model')
model = models.vgg13()
model.load_state_dict(torch.load(args.pre_trained_net))
print(model)

print('load target data: ', args.dataset)
_, test_loader = data_loader.getTargetDataSet(args.dataset, args.batch_size,
                                              args.imageSize, args.dataroot)

print('load non target data: ', args.out_dataset)
nt_test_loader = data_loader.getNonTargetDataSet(args.out_dataset,
                                                 args.batch_size,
                                                 args.imageSize, args.dataroot)

if args.cuda:
    model.cuda()


def generate_target():
    model.eval()
    correct = 0
    total = 0
    f1 = open('%s/confidence_Base_In.txt' % args.outf, 'w')

    for data, target in test_loader:
        total += data.size(0)
        #vutils.save_image(data, '%s/target_samples.png'%args.outf, normalize=True)
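# --- Illustration (not part of the original script): the loop above is cut off
# in this listing; judging from the file name confidence_Base_In.txt, it
# presumably records the baseline maximum-softmax confidence per input. A
# minimal sketch of that score:
import torch.nn.functional as F

def baseline_confidence(model, data):
    probs = F.softmax(model(data), dim=1)
    return probs.max(dim=1).values  # maximum softmax probability (MSP)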
Example #7
def main():
    # set the path to pre-trained model and output
    args.outf = args.outf + args.net_type + '_' + args.dataset + '/'
    if not os.path.isdir(args.outf):
        os.mkdir(args.outf)
    torch.cuda.manual_seed(0)
    torch.cuda.set_device(args.gpu)

    out_dist_list = [
        'skin_cli', 'skin_derm', 'corrupted', 'corrupted_70', 'imgnet', 'nct'
    ]

    # load networks
    if args.net_type == 'densenet_121':
        model = densenet_121.Net(models.densenet121(pretrained=False), 8)
        ckpt = torch.load("../checkpoints/densenet-121/checkpoint.pth")
        model.load_state_dict(ckpt['model_state_dict'])
        model.eval()
        model.cuda()
    elif args.net_type == 'mobilenet':
        model = mobilenet.Net(models.mobilenet_v2(pretrained=False), 8)
        ckpt = torch.load("../checkpoints/mobilenet/checkpoint.pth")
        model.load_state_dict(ckpt['model_state_dict'])
        model.eval()
        model.cuda()
        print("Done!")
    elif args.net_type == 'resnet_50':
        model = resnet_50.Net(models.resnet50(pretrained=False), 8)
        ckpt = torch.load("../checkpoints/resnet-50/checkpoint.pth")
        model.load_state_dict(ckpt['model_state_dict'])
        model.eval()
        model.cuda()
        print("Done!")
    elif args.net_type == 'vgg_16':
        model = vgg_16.Net(models.vgg16_bn(pretrained=False), 8)
        ckpt = torch.load("../checkpoints/vgg-16/checkpoint.pth")
        model.load_state_dict(ckpt['model_state_dict'])
        model.eval()
        model.cuda()
        print("Done!")
    else:
        raise Exception(f"There is no net_type={args.net_type} available.")

    in_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])
    print('load model: ' + args.net_type)

    # load dataset
    print('load target data: ', args.dataset)
    train_loader, test_loader = data_loader.getTargetDataSet(
        args.dataset, args.batch_size, in_transform, args.dataroot)

    # measure the performance
    M_list = [
        0, 0.0005, 0.001, 0.0014, 0.002, 0.0024, 0.005, 0.01, 0.05, 0.1, 0.2
    ]
    T_list = [1, 10, 100, 1000]
    base_line_list = []
    ODIN_best_tnr = [0, 0, 0] * 2  # one slot per out-distribution (6 in total)
    ODIN_best_results = [0, 0, 0] * 2
    ODIN_best_temperature = [-1, -1, -1] * 2
    ODIN_best_magnitude = [-1, -1, -1] * 2
    for T in T_list:
        for m in M_list:
            magnitude = m
            temperature = T
            lib_generation.get_posterior(model, args.net_type, test_loader,
                                         magnitude, temperature, args.outf,
                                         True)
            out_count = 0
            print('Temperature: ' + str(temperature) + ' / noise: ' +
                  str(magnitude))
            for out_dist in out_dist_list:
                out_test_loader = data_loader.getNonTargetDataSet(
                    out_dist, args.batch_size, in_transform, args.dataroot)
                print('Out-distribution: ' + out_dist)
                lib_generation.get_posterior(model, args.net_type,
                                             out_test_loader, magnitude,
                                             temperature, args.outf, False)
                if temperature == 1 and magnitude == 0:
                    test_results = callog.metric(args.outf, ['PoT'])
                    base_line_list.append(test_results)
                else:
                    val_results = callog.metric(args.outf, ['PoV'])
                    if ODIN_best_tnr[out_count] < val_results['PoV']['TNR']:
                        ODIN_best_tnr[out_count] = val_results['PoV']['TNR']
                        ODIN_best_results[out_count] = callog.metric(
                            args.outf, ['PoT'])
                        ODIN_best_temperature[out_count] = temperature
                        ODIN_best_magnitude[out_count] = magnitude
                out_count += 1

    # print the results
    mtypes = ['TNR', 'AUROC', 'DTACC', 'AUIN', 'AUOUT']
    print('Baseline method: in_distribution: ' + args.dataset + '==========')
    count_out = 0
    for results in base_line_list:
        print('out_distribution: ' + out_dist_list[count_out])
        for mtype in mtypes:
            print(' {mtype:6s}'.format(mtype=mtype), end='')
        print('\n{val:6.2f}'.format(val=100. * results['PoT']['TNR']), end='')
        print(' {val:6.2f}'.format(val=100. * results['PoT']['AUROC']), end='')
        print(' {val:6.2f}'.format(val=100. * results['PoT']['DTACC']), end='')
        print(' {val:6.2f}'.format(val=100. * results['PoT']['AUIN']), end='')
        print(' {val:6.2f}\n'.format(val=100. * results['PoT']['AUOUT']),
              end='')
        print('')
        count_out += 1

    print('ODIN method: in_distribution: ' + args.dataset + '==========')
    count_out = 0
    for results in ODIN_best_results:
        print('out_distribution: ' + out_dist_list[count_out])
        for mtype in mtypes:
            print(' {mtype:6s}'.format(mtype=mtype), end='')
        print('\n{val:6.2f}'.format(val=100. * results['PoT']['TNR']), end='')
        print(' {val:6.2f}'.format(val=100. * results['PoT']['AUROC']), end='')
        print(' {val:6.2f}'.format(val=100. * results['PoT']['DTACC']), end='')
        print(' {val:6.2f}'.format(val=100. * results['PoT']['AUIN']), end='')
        print(' {val:6.2f}\n'.format(val=100. * results['PoT']['AUOUT']),
              end='')
        print('temperature: ' + str(ODIN_best_temperature[count_out]))
        print('magnitude: ' + str(ODIN_best_magnitude[count_out]))
        print('')
        count_out += 1
Example #8
def main():
    # set the path to pre-trained model and output
    pre_trained_net = "./pre_trained/" + args.net_type + "_" + args.dataset + ".pth"
    args.outf = args.outf + args.net_type + "_" + args.dataset + "/"
    if not os.path.isdir(args.outf):
        os.mkdir(args.outf)
    torch.cuda.manual_seed(0)
    torch.cuda.set_device(args.gpu)

    # check the in-distribution dataset
    if args.dataset == "cifar100":
        args.num_classes = 100
    if args.dataset == "svhn":
        out_dist_list = ["cifar10", "imagenet_resize", "lsun_resize"]
    else:
        out_dist_list = ["svhn", "imagenet_resize", "lsun_resize"]

    # load networks
    if args.net_type == "densenet":
        if args.dataset == "svhn":
            model = models.DenseNet3(100, int(args.num_classes))
            model.load_state_dict(
                torch.load(pre_trained_net, map_location="cuda:" + str(args.gpu))
            )
        else:
            model = torch.load(pre_trained_net, map_location="cuda:" + str(args.gpu))
        in_transform = transforms.Compose(
            [
                transforms.ToTensor(),
                transforms.Normalize(
                    (125.3 / 255, 123.0 / 255, 113.9 / 255),
                    (63.0 / 255, 62.1 / 255.0, 66.7 / 255.0),
                ),
            ]
        )
    elif args.net_type == "resnet":
        model = models.ResNet34(num_c=args.num_classes)
        model.load_state_dict(
            torch.load(pre_trained_net, map_location="cuda:" + str(args.gpu))
        )
        in_transform = transforms.Compose(
            [
                transforms.ToTensor(),
                transforms.Normalize(
                    (0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)
                ),
            ]
        )
    model.cuda()
    print("load model: " + args.net_type)

    # load dataset
    print("load target data: ", args.dataset)
    train_loader, test_loader = data_loader.getTargetDataSet(
        args.dataset, args.batch_size, in_transform, args.dataroot
    )

    # measure the performance
    M_list = [0, 0.0005, 0.001, 0.0014, 0.002, 0.0024, 0.005, 0.01, 0.05, 0.1, 0.2]
    T_list = [1, 10, 100, 1000]
    base_line_list = []
    ODIN_best_tnr = [0, 0, 0]
    ODIN_best_results = [0, 0, 0]
    ODIN_best_temperature = [-1, -1, -1]
    ODIN_best_magnitude = [-1, -1, -1]
    for T in T_list:
        for m in M_list:
            magnitude = m
            temperature = T
            lib_generation.get_posterior(
                model,
                args.net_type,
                test_loader,
                magnitude,
                temperature,
                args.outf,
                True,
            )
            out_count = 0
            print("Temperature: " + str(temperature) + " / noise: " + str(magnitude))
            for out_dist in out_dist_list:
                out_test_loader = data_loader.getNonTargetDataSet(
                    out_dist, args.batch_size, in_transform, args.dataroot
                )
                print("Out-distribution: " + out_dist)
                lib_generation.get_posterior(
                    model,
                    args.net_type,
                    out_test_loader,
                    magnitude,
                    temperature,
                    args.outf,
                    False,
                )
                if temperature == 1 and magnitude == 0:
                    test_results = callog.metric(args.outf, ["PoT"])
                    base_line_list.append(test_results)
                else:
                    val_results = callog.metric(args.outf, ["PoV"])
                    if ODIN_best_tnr[out_count] < val_results["PoV"]["TNR"]:
                        ODIN_best_tnr[out_count] = val_results["PoV"]["TNR"]
                        ODIN_best_results[out_count] = callog.metric(args.outf, ["PoT"])
                        ODIN_best_temperature[out_count] = temperature
                        ODIN_best_magnitude[out_count] = magnitude
                out_count += 1

    # print the results
    mtypes = ["TNR", "AUROC", "DTACC", "AUIN", "AUOUT"]
    print("Baseline method: in_distribution: " + args.dataset + "==========")
    count_out = 0
    for results in base_line_list:
        print("out_distribution: " + out_dist_list[count_out])
        for mtype in mtypes:
            print(" {mtype:6s}".format(mtype=mtype), end="")
        print("\n{val:6.2f}".format(val=100.0 * results["PoT"]["TNR"]), end="")
        print(" {val:6.2f}".format(val=100.0 * results["PoT"]["AUROC"]), end="")
        print(" {val:6.2f}".format(val=100.0 * results["PoT"]["DTACC"]), end="")
        print(" {val:6.2f}".format(val=100.0 * results["PoT"]["AUIN"]), end="")
        print(" {val:6.2f}\n".format(val=100.0 * results["PoT"]["AUOUT"]), end="")
        print("")
        count_out += 1

    print("ODIN method: in_distribution: " + args.dataset + "==========")
    count_out = 0
    for results in ODIN_best_results:
        print("out_distribution: " + out_dist_list[count_out])
        for mtype in mtypes:
            print(" {mtype:6s}".format(mtype=mtype), end="")
        print("\n{val:6.2f}".format(val=100.0 * results["PoT"]["TNR"]), end="")
        print(" {val:6.2f}".format(val=100.0 * results["PoT"]["AUROC"]), end="")
        print(" {val:6.2f}".format(val=100.0 * results["PoT"]["DTACC"]), end="")
        print(" {val:6.2f}".format(val=100.0 * results["PoT"]["AUIN"]), end="")
        print(" {val:6.2f}\n".format(val=100.0 * results["PoT"]["AUOUT"]), end="")
        print("temperature: " + str(ODIN_best_temperature[count_out]))
        print("magnitude: " + str(ODIN_best_magnitude[count_out]))
        print("")
        count_out += 1
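# --- Illustration (not part of the original script): callog.metric is not
# shown in this listing; the TNR it reports is conventionally TNR at 95% TPR.
# A minimal sketch of that metric, assuming higher score = more in-distribution:
import numpy as np

def tnr_at_tpr95(in_scores, out_scores):
    threshold = np.percentile(in_scores, 5)        # keeps 95% of in-data above
    return float(np.mean(out_scores < threshold))  # fraction of OOD rejected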
Example #9
def main():

    if args.validation_src == 'FGSM':
        if args.dataset == 'svhn':
            out_dist_list = [
                'cifar10', 'imagenet_resize', 'lsun_resize', 'FGSM'
            ]
        else:
            out_dist_list = ['svhn', 'imagenet_resize', 'lsun_resize', 'FGSM']
    else:
        if args.dataset == 'svhn':
            out_dist_list = ['cifar10', 'imagenet_resize', 'lsun_resize']
        else:
            out_dist_list = ['svhn', 'imagenet_resize', 'lsun_resize']

    outf_load = os.path.join(args.outf,
                             args.net_type + '_' + args.dataset + 'RealNVP')
    outf = os.path.join(
        args.outf, args.net_type + '_' + args.dataset + 'RealNVP_magnitude')
    if not os.path.isdir(outf):
        os.mkdir(outf)

    # torch.cuda.manual_seed(0)
    torch.cuda.set_device(args.cuda_index)

    if args.dataset == 'cifar100':
        args.num_classes = 100
    else:
        args.num_classes = 10

    with open(
            'feature_lists/feature_lists_{}_imagenet_resize_{}_Wlinear.pickle'.
            format(args.net_type, args.dataset), 'rb') as f:
        [
            sample_class_mean, list_features, list_features_test,
            list_features_out, A, A_inv, log_abs_det_A_inv
        ] = pickle.load(f)

    pre_trained_net = args.net_type + '_' + args.dataset + '.pth'
    pre_trained_net = os.path.join('pre_trained', pre_trained_net)
    if args.net_type == 'densenet':

        if args.dataset == 'svhn':
            model = models.DenseNet3(100, int(args.num_classes))
            model.load_state_dict(
                torch.load(pre_trained_net,
                           map_location="cuda:" + str(args.cuda_index)))
        else:
            model = torch.load(pre_trained_net, map_location="cpu")
            for i, (name, module) in enumerate(model._modules.items()):
                module = recursion_change_bn(model)
            for m in model.modules():
                if 'Conv' in str(type(m)):
                    setattr(m, 'padding_mode', 'zeros')
        in_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((125.3 / 255, 123.0 / 255, 113.9 / 255),
                                 (63.0 / 255, 62.1 / 255.0, 66.7 / 255.0)),
        ])
    else:
        model = models.ResNet34(num_c=args.num_classes)
        model.load_state_dict(
            torch.load(pre_trained_net,
                       map_location="cuda:" + str(args.cuda_index)))
        in_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465),
                                 (0.2023, 0.1994, 0.2010)),
        ])

    print('load model: ' + args.net_type)
    model.to(device)
    model.eval()

    # load dataset
    print('load in-data: ', args.dataset)

    num_layers = len(sample_class_mean)

    for layer in range(num_layers):

        num_features = A_inv[layer].shape[0]
        half_features = int(num_features / 2)
        zeros = np.zeros(half_features)
        ones = np.ones(half_features)
        right = np.concatenate((zeros, ones), axis=None)
        left = np.concatenate((ones, zeros), axis=None)

        masks = torch.from_numpy(
            np.array([
                right, left, right, left, right, left, right, left, right, left
            ]).astype(np.float32)).cuda()
        flow = []

        # We reduce the number of neurons in the hidden layers due to GPU memory limitations (11 GB in GTX 2080Ti) - comment out this line for larger GPU memory
        length_hidden = reture_length_hidden(layer)

        A_layer = torch.tensor(A[layer])
        A_inv_layer = torch.tensor(A_inv[layer])
        log_abs_det_A_inv_layer = torch.tensor(log_abs_det_A_inv[layer])

        for i in range(args.num_classes):
            MODEL_FLOW = os.path.join(
                outf_load,
                'model_{}_layer_{}_residual_flow_{}length_hidden'.format(
                    args.dataset, layer, length_hidden), 'flow_{}'.format(i))
            flow.append(
                RealNVP(masks, num_features, length_hidden, A_layer,
                        A_inv_layer, log_abs_det_A_inv_layer))
            flow[i].load_state_dict(torch.load(MODEL_FLOW,
                                               map_location="cuda:{}".format(
                                                   args.cuda_index)),
                                    strict=False)
            flow[i].to(device)
            flow[i].eval()

        sample_class_mean_layer = sample_class_mean[layer]

        m_list = [0.0, 0.01, 0.005, 0.002, 0.0014, 0.001, 0.0005]

        for magnitude in m_list:

            print('Noise: ' + str(magnitude))
            _, test_loader = data_loader.getTargetDataSet(
                args.dataset, args.batch_size, in_transform, args.dataroot)
            score_in = lib_generation.get_resflow_score(
                test_loader, model, layer, args.num_classes, args.net_type,
                sample_class_mean_layer, flow, magnitude)

            for out_dist in out_dist_list:
                print('load out-data: ', out_dist)

                if out_dist == 'FGSM':
                    test_loader, out_loader = data_loader.getFGSM(
                        args.batch_size, args.dataset, args.net_type)
                    score_in = lib_generation.get_resflow_score_FGSM(
                        test_loader, model, layer, args.num_classes,
                        args.net_type, sample_class_mean_layer, flow,
                        magnitude)
                    score_out = lib_generation.get_resflow_score_FGSM(
                        out_loader, model, layer, args.num_classes,
                        args.net_type, sample_class_mean_layer, flow,
                        magnitude)

                else:
                    out_loader = data_loader.getNonTargetDataSet(
                        out_dist, args.batch_size, in_transform, args.dataroot)
                    score_out = lib_generation.get_resflow_score(
                        out_loader, model, layer, args.num_classes,
                        args.net_type, sample_class_mean_layer, flow,
                        magnitude)

                pram = {  # run settings saved alongside the scores
                    'out_dist': out_dist,
                    'Network_type': args.net_type,
                    'Layer': layer,
                    'Batch_size': args.batch_size,
                    'cuda_index': args.cuda_index,
                    'length_hidden': length_hidden,
                    'dropout': False,
                    'weight_decay': 0,
                    'init_zeros': True,
                    'num_flows': int(len(flow[0].t)),
                    'magnitude': magnitude,
                }

                with open(
                        os.path.join(
                            outf,
                            'Residual_flow_%s_%s_layer_%s_%smagnitude.txt' %
                            (args.dataset, out_dist, layer, magnitude)),
                        'w') as file:
                    file.write('date: %s\n' % (datetime.datetime.now()))
                    file.write(json.dumps(pram))

                score_in = np.asarray(score_in, dtype=np.float32)
                score_out = np.asarray(score_out, dtype=np.float32)
                score_data, score_labels = lib_generation.merge_and_generate_labels(
                    score_out, score_in)
                file_name = os.path.join(
                    outf, 'Residual_flow_%s_%s_layer_%s_%smagnitude' %
                    (args.dataset, out_dist, layer, magnitude))
                score_data = np.concatenate((score_data, score_labels), axis=1)
                np.savez(file_name, score_data, pram)
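# --- Illustration (not part of the original script): reading back one of the
# .npz archives saved above. np.savez stores positional arguments as arr_0,
# arr_1, ...; the `pram` dict needs allow_pickle. The file name is hypothetical.
import numpy as np

archive = np.load('Residual_flow_cifar10_svhn_layer_0_0.0magnitude.npz',
                  allow_pickle=True)
score_data, pram = archive['arr_0'], archive['arr_1'].item()
print(score_data.shape, pram['magnitude'])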
Example #10
def main():
    # set the path to pre-trained model and output
    pre_trained_net = './pre_trained/' + args.net_type + '_' + args.dataset + '.pth'
    args.outf = args.outf + args.net_type + '_' + args.dataset + '/'
    if not os.path.isdir(args.outf):
        os.mkdir(args.outf)
    torch.cuda.manual_seed(0)
    torch.cuda.set_device(args.gpu)
    # check the in-distribution dataset
    if args.dataset == 'cifar100':
        args.num_classes = 100
        out_dist_list = ['svhn', 'imagenet_resize', 'lsun_resize']
    elif args.dataset == 'svhn':
        out_dist_list = ['cifar10', 'imagenet_resize', 'lsun_resize']
    elif args.dataset == 'ham10000':
        #out_dist_list = ['cifar10', 'imagenet_resize', 'face', 'face_age', 'isic-2017', 'isic-2016']
        #out_dist_list = ['cifar10', 'face', 'face_age', 'isic-2017', 'isic-2016']
        #out_dist_list = ['cifar10', 'cifar100', 'svhn', 'imagenet_resize', 'lsun_resize', 'face', 'face_age', 'isic-2017', 'isic-2016']
        out_dist_list = [
            'ham10000-avg-smoothing', 'ham10000-brightness',
            'ham10000-contrast', 'ham10000-dilation', 'ham10000-erosion',
            'ham10000-med-smoothing', 'ham10000-rotation', 'ham10000-shift'
        ]

    # load networks
    if args.net_type == 'densenet':
        if args.dataset == 'svhn':
            model = models.DenseNet3(100, int(args.num_classes))
            model.load_state_dict(
                torch.load(pre_trained_net,
                           map_location="cuda:" + str(args.gpu)))
        else:
            model = torch.load(pre_trained_net,
                               map_location="cuda:" + str(args.gpu))
        in_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((125.3 / 255, 123.0 / 255, 113.9 / 255),
                                 (63.0 / 255, 62.1 / 255.0, 66.7 / 255.0)),
        ])
    elif args.net_type == 'resnet':
        model = models.ResNet34(num_c=args.num_classes)
        model.load_state_dict(
            torch.load(pre_trained_net, map_location="cuda:" + str(args.gpu)))
        in_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465),
                                 (0.2023, 0.1994, 0.2010)),
        ])
    elif args.net_type == 'densenet121':
        model = DenseNet121(num_classes=args.num_classes)
        model.load_state_dict(
            torch.load(pre_trained_net,
                       map_location="cuda:" + str(args.gpu)).state_dict())
        in_transform = transforms.Compose([
            transforms.Resize(224),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize((0.7630069, 0.5456578, 0.5700767),
                                 (0.14093237, 0.15263236, 0.17000099))
        ])
    model.cuda()
    print('load model: ' + args.net_type)

    # load dataset
    print('load target data: ', args.dataset)
    train_loader, test_loader = data_loader.getTargetDataSet(
        args.dataset, args.batch_size, in_transform, args.dataroot)

    # set information about feature extraction
    model.eval()
    temp_x = torch.rand(2, 3, 32, 32).cuda()
    temp_x = Variable(temp_x)
    temp_list = model.feature_list(temp_x)[1]
    num_output = len(temp_list)
    feature_list = np.empty(num_output)
    count = 0
    for out in temp_list:
        feature_list[count] = out.size(1)
        count += 1

    print('get sample mean and covariance')
    sample_mean, precision = lib_generation.sample_estimator(
        model, args.num_classes, feature_list, train_loader)

    print('get Mahalanobis scores', num_output)
    m_list = [0.0, 0.01, 0.005, 0.002, 0.0014, 0.001, 0.0005]
    for magnitude in m_list:
        print('Noise: ' + str(magnitude))
        for i in range(num_output):
            print('layer_num', i)
            M_in = lib_generation.get_Mahalanobis_score(model, test_loader, args.num_classes, args.outf, \
                                                        True, args.net_type, sample_mean, precision, i, magnitude)
            M_in = np.asarray(M_in, dtype=np.float32)
            if i == 0:
                Mahalanobis_in = M_in.reshape((M_in.shape[0], -1))
            else:
                Mahalanobis_in = np.concatenate(
                    (Mahalanobis_in, M_in.reshape((M_in.shape[0], -1))),
                    axis=1)

        for out_dist in out_dist_list:
            out_test_loader = data_loader.getNonTargetDataSet(
                out_dist, args.batch_size, in_transform, args.dataroot)
            print('Out-distribution: ' + out_dist)
            for i in range(num_output):
                M_out = lib_generation.get_Mahalanobis_score(model, out_test_loader, args.num_classes, args.outf, \
                                                             False, args.net_type, sample_mean, precision, i, magnitude)
                M_out = np.asarray(M_out, dtype=np.float32)
                if i == 0:
                    Mahalanobis_out = M_out.reshape((M_out.shape[0], -1))
                else:
                    Mahalanobis_out = np.concatenate(
                        (Mahalanobis_out, M_out.reshape((M_out.shape[0], -1))),
                        axis=1)

            Mahalanobis_in = np.asarray(Mahalanobis_in, dtype=np.float32)
            Mahalanobis_out = np.asarray(Mahalanobis_out, dtype=np.float32)
            Mahalanobis_data, Mahalanobis_labels = lib_generation.merge_and_generate_labels(
                Mahalanobis_out, Mahalanobis_in)
            file_name = os.path.join(
                args.outf, 'Mahalanobis_%s_%s_%s.npy' %
                (str(magnitude), args.dataset, out_dist))
            Mahalanobis_data = np.concatenate(
                (Mahalanobis_data, Mahalanobis_labels), axis=1)
            np.save(file_name, Mahalanobis_data)
Example #11
def main():
    # set the path to pre-trained model and output
    args.outf = args.outf + args.net_type + '_' + args.dataset + '/'
    if not os.path.isdir(args.outf):
        os.mkdir(args.outf)
    torch.cuda.manual_seed(0)
    torch.cuda.set_device(args.gpu)

    out_dist_list = [
        'skin_cli', 'skin_derm', 'corrupted', 'corrupted_70', 'imgnet', 'nct',
        'final_test'
    ]

    # load networks
    if args.net_type == 'densenet_121':
        model = densenet_121.Net(models.densenet121(pretrained=False), 8)
        ckpt = torch.load("../checkpoints/densenet-121/checkpoint.pth")
        model.load_state_dict(ckpt['model_state_dict'])
        model.eval()
        model.cuda()
    elif args.net_type == 'mobilenet':
        model = mobilenet.Net(models.mobilenet_v2(pretrained=False), 8)
        ckpt = torch.load("../checkpoints/mobilenet/checkpoint.pth")
        model.load_state_dict(ckpt['model_state_dict'])
        model.eval()
        model.cuda()
        print("Done!")
    elif args.net_type == 'resnet_50':
        model = resnet_50.Net(models.resnet50(pretrained=False), 8)
        ckpt = torch.load("../checkpoints/resnet-50/checkpoint.pth")
        model.load_state_dict(ckpt['model_state_dict'])
        model.eval()
        model.cuda()
        print("Done!")
    elif args.net_type == 'vgg_16':
        model = vgg_16.Net(models.vgg16_bn(pretrained=False), 8)
        ckpt = torch.load("../checkpoints/vgg-16/checkpoint.pth")
        model.load_state_dict(ckpt['model_state_dict'])
        model.eval()
        model.cuda()
        print("Done!")
    else:
        raise Exception(f"There is no net_type={args.net_type} available.")

    in_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])
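    # these are the standard ImageNet normalization statistics, matching the
    # torchvision backbones wrapped above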
    print('load model: ' + args.net_type)

    # load dataset
    print('load target data: ', args.dataset)
    train_loader, test_loader = data_loader.getTargetDataSet(
        args.dataset, args.batch_size, in_transform, args.dataroot)

    # set information about feature extraction
    model.eval()
    temp_x = torch.rand(2, 3, 224, 224).cuda()
    temp_x = Variable(temp_x)
    temp_list = model.feature_list(temp_x)[1]
    num_output = len(temp_list)
    # record each feature layer's channel width from the dummy forward pass
    feature_list = np.empty(num_output)
    for count, out in enumerate(temp_list):
        feature_list[count] = out.size(1)

    print('get sample mean and covariance')
    sample_mean, precision = lib_generation.sample_estimator(
        model, args.num_classes, feature_list, train_loader)
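    # sample_estimator presumably returns, per feature layer, one mean per
    # class and a tied precision matrix (the inverse of a covariance shared
    # across classes), as in the Mahalanobis detector of Lee et al. (2018)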

    print('get Mahalanobis scores')
    m_list = [0.0, 0.01, 0.005, 0.002, 0.0014, 0.001, 0.0005]
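    # each magnitude is the size of the small input perturbation added before
    # scoring to help separate in- from out-of-distribution samples;
    # 0.0 disables the preprocessing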

    for magnitude in m_list:
        print('Noise: ' + str(magnitude))
        for i in range(num_output):
            M_in = lib_generation.get_Mahalanobis_score(model, test_loader, args.num_classes, args.outf, \
                                                        True, args.net_type, sample_mean, precision, i, magnitude)
            M_in = np.asarray(M_in, dtype=np.float32)
            if i == 0:
                Mahalanobis_in = M_in.reshape((M_in.shape[0], -1))
            else:
                Mahalanobis_in = np.concatenate(
                    (Mahalanobis_in, M_in.reshape((M_in.shape[0], -1))),
                    axis=1)

        for out_dist in out_dist_list:
            out_test_loader = data_loader.getNonTargetDataSet(
                out_dist, args.batch_size, in_transform, args.dataroot)
            print('Out-distribution: ' + out_dist)
            for i in range(num_output):
                M_out = lib_generation.get_Mahalanobis_score(model, out_test_loader, args.num_classes, args.outf, \
                                                             False, args.net_type, sample_mean, precision, i, magnitude)
                M_out = np.asarray(M_out, dtype=np.float32)
                if i == 0:
                    Mahalanobis_out = M_out.reshape((M_out.shape[0], -1))
                else:
                    Mahalanobis_out = np.concatenate(
                        (Mahalanobis_out, M_out.reshape((M_out.shape[0], -1))),
                        axis=1)

            Mahalanobis_in = np.asarray(Mahalanobis_in, dtype=np.float32)
            Mahalanobis_out = np.asarray(Mahalanobis_out, dtype=np.float32)
            Mahalanobis_data, Mahalanobis_labels = lib_generation.merge_and_generate_labels(
                Mahalanobis_out, Mahalanobis_in)
            file_name = os.path.join(
                args.outf, 'Mahalanobis_%s_%s_%s.npy' %
                (str(magnitude), args.dataset, out_dist))
            Mahalanobis_data = np.concatenate(
                (Mahalanobis_data, Mahalanobis_labels), axis=1)
            np.save(file_name, Mahalanobis_data)
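
# A minimal numpy sketch of the per-layer confidence that get_Mahalanobis_score
# is built around, assuming class means and a tied precision matrix for one
# layer; the names below are illustrative, not the library's API.
import numpy as np

def mahalanobis_confidence_sketch(f, class_means, precision):
    """max over classes of -(f - mu_c)^T P (f - mu_c), for one feature vector f."""
    best = -np.inf
    for mu in class_means:
        diff = f - mu
        best = max(best, -float(diff @ precision @ diff))
    return best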
def get_conf(config, device, model, in_transform, train_loader, test_loader):

    # get layer list
    layer_list = config['exp_params']['feature_layers']

    # get feature list
    model.eval()
    input_dim = config['exp_params']['input_dim']  # e.g. [3, 32, 32] (C, H, W)

    if config['exp_params']['dataset'] != 'toy_data':
        temp_x = torch.rand(2, input_dim[0], input_dim[1],
                            input_dim[2]).to(device)
    else:
        temp_x = torch.rand(2, 2).to(device)
    _, temp_list = model.feature_list(temp_x, layer_list)

    # A dummy forward pass is the simplest way to discover how many feature
    # layers the model exposes and how wide each one is.
    num_output = len(temp_list)
    feature_list = np.empty(num_output)
    for count, out in enumerate(temp_list):
        feature_list[count] = out.size(1)

    # m_list is the list of input-perturbation (noise) magnitudes
    m_list = config['exp_params']['noise_params']['m_list']

    # calculate confidence components to be sent to regressor
    regressor_features = config['exp_params']['regressor_features']
    # statistics required by each regressor feature type: class-conditional
    # means/precisions, a tied precision, PCA bases, and kNN structures
    (class_mean, class_precision, tied_precision, pca_list, knn_search_list,
     knn_mean, knn_precision) = get_inputs_for_computing_regressor_feature(
         regressor_features, model, config, num_output, feature_list,
         layer_list, train_loader, device)

    print("For in-distribution: {}".format(config['exp_params']['dataset']))
    init_reg_in = True
    for regressor_feature in regressor_features:
        # num_output is the number of layers
        for i in range(num_output):
            in_dist_input = get_features_for_regressor(
                regressor_feature, model, config, test_loader,
                config['exp_params']['dataset'], i, True, device, class_mean,
                class_precision, tied_precision, pca_list, knn_search_list,
                knn_mean, knn_precision)

            print("in_dist_input shape: ", in_dist_input.shape)
            in_dist_input = np.asarray(in_dist_input, dtype=np.float32)

            # log the mean per-layer score as a quick sanity check
            print("Mean score at layer {} for regression type {}: {}".format(
                i, regressor_feature, np.mean(in_dist_input)))

            if init_reg_in:
                regressor_in_dist_input = in_dist_input.reshape(
                    (in_dist_input.shape[0], -1))
                init_reg_in = False
            else:
                regressor_in_dist_input = np.concatenate(
                    (regressor_in_dist_input,
                     in_dist_input.reshape((in_dist_input.shape[0], -1))),
                    axis=1)

    print("Out-distributions to test agains: ",
          config['model_params']['out_dist_list'])
    for out_dist in config['model_params']['out_dist_list']:
        print('Out-distribution: ' + out_dist)
        if out_dist == 'subset_cifar100':
            out_test_loader = data_loader.getNonTargetDataSet(
                out_dist,
                config['trainer_params']['batch_size'],
                in_transform,
                config['exp_params']['dataroot'],
                idx=config['model_params']['out_idx'],
                num_oods=config['model_params']['num_ood_samples'])
        else:
            out_test_loader = data_loader.getNonTargetDataSet(
                out_dist, config['trainer_params']['batch_size'], in_transform,
                config['exp_params']['dataroot'])

        init_reg_in = True
        for regressor_feature in regressor_features:
            # num_output is the number of layers
            for i in range(num_output):
                out_dist_input = get_features_for_regressor(
                    regressor_feature, model, config, out_test_loader,
                    out_dist, i, False, device, class_mean, class_precision,
                    tied_precision, pca_list, knn_search_list, knn_mean,
                    knn_precision)

                print("out_dist_input shape- ", out_dist_input.shape)
                out_dist_input = np.asarray(out_dist_input, dtype=np.float32)
                print(
                    "Mean score at layer {} for regression type {}: {}".format(
                        i, regressor_feature, np.mean(out_dist_input)))
                if init_reg_in:
                    regressor_out_dist_input = out_dist_input.reshape(
                        (out_dist_input.shape[0], -1))
                    init_reg_in = False
                else:
                    regressor_out_dist_input = np.concatenate(
                        (regressor_out_dist_input,
                         out_dist_input.reshape(
                             (out_dist_input.shape[0], -1))),
                        axis=1)

        regressor_in_dist_input = np.asarray(regressor_in_dist_input,
                                             dtype=np.float32)
        regressor_out_dist_input = np.asarray(regressor_out_dist_input,
                                              dtype=np.float32)
        ood_output, Mahalanobis_labels = lib_generation.merge_and_generate_labels(
            regressor_out_dist_input, regressor_in_dist_input)
        file_name = os.path.join(
            config['logging_params']['outf'], 'Mahalanobis_%s_%s_%s.npy' %
            (str(m_list[0]), config['exp_params']['dataset'], out_dist))
        ood_output = np.concatenate((ood_output, Mahalanobis_labels), axis=1)
        np.save(file_name, ood_output)
    return ood_output
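
# A hypothetical minimal config for get_conf, listing only the keys the
# function reads above; every concrete value is an illustrative guess.
example_config = {
    'exp_params': {
        'feature_layers': [0, 1, 2, 3],
        'input_dim': [3, 32, 32],
        'dataset': 'cifar10',
        'dataroot': './data',
        'noise_params': {'m_list': [0.0]},
        'regressor_features': ['mahalanobis'],
    },
    'model_params': {
        'out_dist_list': ['svhn'],
        'out_idx': 0,
        'num_ood_samples': 1000,
    },
    'trainer_params': {'batch_size': 128},
    'logging_params': {'outf': './output/'},
}

# A hedged sketch of how the Mahalanobis_*.npy files saved above (feature
# scores plus a trailing 0/1 label column) could be fed to the usual
# logistic-regression detector; the file name and even/odd split are
# illustrative, not part of the original pipeline.
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score

data = np.load('Mahalanobis_0.0_cifar10_svhn.npy')  # hypothetical file
X, y = data[:, :-1], data[:, -1]
clf = LogisticRegression().fit(X[::2], y[::2])      # even rows: train
print('AUROC:', roc_auc_score(y[1::2], clf.predict_proba(X[1::2])[:, 1]))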
Example #13
def main():
    dir_path = os.path.join("experiments", args.dir, "train_classify", "data~"+args.dataset+"+model~"+args.net_type+"+loss~"+str(args.loss))
    file_path = os.path.join(dir_path, "results_odd.csv")

    with open(file_path, "w") as results_file:
        results_file.write(
            "EXECUTION,MODEL,IN-DATA,OUT-DATA,LOSS,AD-HOC,SCORE,INFER-LEARN,INFER-TRANS,"
            "TNR,AUROC,DTACC,AUIN,AUOUT,CPU_FALSE,CPU_TRUE,GPU_FALSE,GPU_TRUE,TEMPERATURE,MAGNITUDE\n")

    args_outf = os.path.join("temporary", args.dir, args.loss, args.net_type + '+' + args.dataset)
    if not os.path.isdir(args_outf):
        os.makedirs(args_outf)
    
    # define number of classes
    if args.dataset == 'cifar100':
        args.num_classes = 100
    elif args.dataset == 'imagenet32':
        args.num_classes = 1000
    else:
        args.num_classes = 10

    if args.dataset in ('cifar10', 'cifar100'):
        out_dist_list = ['svhn', 'imagenet_resize', 'lsun_resize']
    elif args.dataset == 'svhn':
        out_dist_list = ['cifar10', 'imagenet_resize', 'lsun_resize']

    if args.dataset == 'cifar10':
        in_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.491, 0.482, 0.446), (0.247, 0.243, 0.261))])
    elif args.dataset == 'cifar100':
        in_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.507, 0.486, 0.440), (0.267, 0.256, 0.276))])
    elif args.dataset == 'svhn':
        in_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.437, 0.443, 0.472), (0.198, 0.201, 0.197))])

    for args.execution in range(1, args.executions + 1):    
        print("EXECUTION:", args.execution)
        pre_trained_net = os.path.join(dir_path, "model" + str(args.execution) + ".pth")

        if args.loss.split("_")[0] == "softmax":
            loss_first_part = losses.SoftMaxLossFirstPart
            scores = ["ES"]
        elif args.loss.split("_")[0] == "isomax":
            loss_first_part = losses.IsoMaxLossFirstPart
            scores = ["ES"]
        elif args.loss.split("_")[0] == "isomaxplus":
            loss_first_part = losses.IsoMaxPlusLossFirstPart
            scores = ["MDS"]

        # load networks
        if args.net_type == 'densenetbc100':
            model = models.DenseNet3(100, int(args.num_classes), loss_first_part=loss_first_part)
        elif args.net_type == 'resnet110':
            model = models.ResNet110(num_c=args.num_classes, loss_first_part=loss_first_part)
        else:
            raise Exception(f"There is no net_type={args.net_type} available.")
        model.load_state_dict(torch.load(pre_trained_net, map_location="cuda:" + str(args.gpu)))
        model.cuda()
        print('load model: ' + args.net_type)
        
        # load dataset
        print('load target valid data: ', args.dataset)
        _, test_loader = data_loader.getTargetDataSet(args.dataset, args.batch_size, in_transform, args.dataroot)

        for score in scores:
            print("\n\n\n###############################")
            print("###############################")
            print("SCORE:", score)
            print("###############################")
            print("###############################")
            base_line_list = []
            get_scores(model, test_loader, args_outf, True, score)
            out_count = 0
            for out_dist in out_dist_list:
                out_test_loader = data_loader.getNonTargetDataSet(out_dist, args.batch_size, in_transform, args.dataroot)
                print('Out-distribution: ' + out_dist)
                get_scores(model, out_test_loader, args_outf, False, score)
                test_results = callog.metric(args_outf, ['PoT'])
                base_line_list.append(test_results)
                out_count += 1
            
            # print the results
            mtypes = ['TNR', 'AUROC', 'DTACC', 'AUIN', 'AUOUT']
            print('Baseline method: train in_distribution: ' + args.dataset + '==========')
            count_out = 0
            for results in base_line_list:
                print('out_distribution: '+ out_dist_list[count_out])
                for mtype in mtypes:
                    print(' {mtype:6s}'.format(mtype=mtype), end='')
                print('\n{val:6.2f}'.format(val=100.*results['PoT']['TNR']), end='')
                print(' {val:6.2f}'.format(val=100.*results['PoT']['AUROC']), end='')
                print(' {val:6.2f}'.format(val=100.*results['PoT']['DTACC']), end='')
                print(' {val:6.2f}'.format(val=100.*results['PoT']['AUIN']), end='')
                print(' {val:6.2f}\n'.format(val=100.*results['PoT']['AUOUT']), end='')
                print('')
                # Saving OOD results:
                with open(file_path, "a") as results_file:
                    results_file.write("{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}\n".format(
                        str(args.execution), args.net_type, args.dataset, out_dist_list[count_out],
                        str(args.loss), "NATIVE", score, 'NO', False,
                        '{:.2f}'.format(100.*results['PoT']['TNR']),
                        '{:.2f}'.format(100.*results['PoT']['AUROC']),
                        '{:.2f}'.format(100.*results['PoT']['DTACC']),
                        '{:.2f}'.format(100.*results['PoT']['AUIN']),
                        '{:.2f}'.format(100.*results['PoT']['AUOUT']),
                        0, 0, 0, 0, 1, 0))
                count_out += 1