Code example #1
0
def main():
    """Generate Mahalanobis-distance OOD scores for the configured run.

    Reads settings from the module-level ``args`` namespace: loads a
    pre-trained DenseNet/ResNet, estimates class-conditional feature means
    and a tied precision matrix on the training split (optionally seeded
    with an SVD/PCA result), then computes layer-wise Mahalanobis scores
    for the in-distribution test set and every OOD set at several input
    perturbation magnitudes. Saves one labelled ``.npy`` per
    (magnitude, OOD set) under ``args.outf``.
    """
    # set the path to pre-trained model and output
    pre_trained_net = './pre_trained/' + args.net_type + '_' + args.dataset + '.pth'
    args.outf = args.outf + args.net_type + '_' + args.dataset + '/'
    # FIX: makedirs(exist_ok=True) creates missing parent directories and is
    # race-free, unlike the previous isdir()/mkdir() pair.
    os.makedirs(args.outf, exist_ok=True)
    torch.cuda.manual_seed(0)
    torch.cuda.set_device(args.gpu)
    # check the in-distribution dataset
    if args.dataset == 'cifar100':
        args.num_classes = 100
    if args.dataset == 'svhn':
        out_dist_list = ['cifar10', 'imagenet_resize', 'lsun_resize']
    else:
        out_dist_list = ['svhn', 'imagenet_resize', 'lsun_resize']

    # load networks
    if args.net_type == 'densenet':
        if args.dataset == 'svhn':
            model = models.DenseNet3(100, int(args.num_classes))
            model.load_state_dict(
                torch.load(pre_trained_net,
                           map_location="cuda:" + str(args.gpu)))
        else:
            # non-svhn densenet checkpoints were apparently saved as whole
            # model objects, not state dicts
            model = torch.load(pre_trained_net,
                               map_location="cuda:" + str(args.gpu))
        in_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((125.3 / 255, 123.0 / 255, 113.9 / 255),
                                 (63.0 / 255, 62.1 / 255.0, 66.7 / 255.0)),
        ])
    elif args.net_type == 'resnet':
        model = models.ResNet34(num_c=args.num_classes)
        model.load_state_dict(
            torch.load(pre_trained_net, map_location="cuda:" + str(args.gpu)))
        in_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465),
                                 (0.2023, 0.1994, 0.2010)),
        ])
    model.cuda()
    print('load model: ' + args.net_type)

    # load dataset
    print('load target data: ', args.dataset)
    train_loader, test_loader = data_loader.getTargetDataSet(
        args.dataset, args.batch_size, in_transform, args.dataroot)

    # set information about feature extraction: probe the network once with
    # a dummy 32x32 batch to learn how many intermediate layers it exposes
    # and each layer's channel count
    model.eval()
    temp_x = torch.rand(2, 3, 32, 32).cuda()
    temp_x = Variable(temp_x)
    temp_list = model.feature_list(temp_x)[1]
    num_output = len(temp_list)
    feature_list = np.empty(num_output)
    count = 0
    for out in temp_list:
        feature_list[count] = out.size(1)
        count += 1


################################ edits
    print("Calculate SVD before getting sample mean and variance")
    # NOTE(review): alternative PCA/SVD paths kept for experimentation —
    # confirm before deleting.
    # svd_result= lib_generation.get_pca(model, args.num_classes, feature_list, train_loader)
    # lib_generation.get_pca_incremental(model, args.num_classes, feature_list, train_loader,args)
    svd_result = None
    print('get sample mean and covariance')
    sample_mean, precision = lib_generation.sample_estimator(
        model, args.num_classes, feature_list, train_loader, svd_result, args)
    ################################ edits_end_sample_generator
    print('get Mahalanobis scores')
    m_list = [0.0, 0.01, 0.005, 0.002, 0.0014, 0.001, 0.0005]
    for magnitude in m_list:
        print('Noise: ' + str(magnitude))
        # in-distribution scores: one column block per network layer
        for i in range(num_output):
            M_in = lib_generation.get_Mahalanobis_score(model, test_loader, args.num_classes, args.outf, \
                                                        True, args.net_type, sample_mean, precision, i, magnitude,svd_result,args)
            M_in = np.asarray(M_in, dtype=np.float32)
            if i == 0:
                Mahalanobis_in = M_in.reshape((M_in.shape[0], -1))
            else:
                Mahalanobis_in = np.concatenate(
                    (Mahalanobis_in, M_in.reshape((M_in.shape[0], -1))),
                    axis=1)

        for out_dist in out_dist_list:
            out_test_loader = data_loader.getNonTargetDataSet(
                out_dist, args.batch_size, in_transform, args.dataroot)
            print('Out-distribution: ' + out_dist)
            for i in range(num_output):
                M_out = lib_generation.get_Mahalanobis_score(model, out_test_loader, args.num_classes, args.outf, \
                                                             False, args.net_type, sample_mean, precision, i, magnitude,svd_result,args)
                M_out = np.asarray(M_out, dtype=np.float32)
                if i == 0:
                    Mahalanobis_out = M_out.reshape((M_out.shape[0], -1))
                else:
                    Mahalanobis_out = np.concatenate(
                        (Mahalanobis_out, M_out.reshape((M_out.shape[0], -1))),
                        axis=1)

            Mahalanobis_in = np.asarray(Mahalanobis_in, dtype=np.float32)
            Mahalanobis_out = np.asarray(Mahalanobis_out, dtype=np.float32)
            Mahalanobis_data, Mahalanobis_labels = lib_generation.merge_and_generate_labels(
                Mahalanobis_out, Mahalanobis_in)
            file_name = os.path.join(
                args.outf, 'Mahalanobis_%s_%s_%s.npy' %
                (str(magnitude), args.dataset, out_dist))
            # append the in/out label column before saving
            Mahalanobis_data = np.concatenate(
                (Mahalanobis_data, Mahalanobis_labels), axis=1)
            np.save(file_name, Mahalanobis_data)
Code example #2
0
def extract_features(pre_trained_net, in_distribution, in_dist_name,
                     out_dist_list, out_of_distribution, in_transform, gpu,
                     batch_size, num_classes):
    """Compute Mahalanobis OOD score files for one pre-trained model.

    Splits ``in_distribution`` 80/20 into train/validation with a fixed
    seed, estimates class-conditional feature means and a tied precision
    matrix on the training part, then scores the validation part and every
    out-of-distribution dataset at several perturbation magnitudes.
    One labelled ``.npy`` per (magnitude, OOD set) is written under
    ``/output/model_<in_dist_name>/``.

    Parameters:
        pre_trained_net: path to a whole-model checkpoint (torch.save'd).
        in_distribution: in-distribution torch Dataset.
        in_dist_name: name used for the output directory and file names.
        out_dist_list: names parallel to ``out_of_distribution``.
        out_of_distribution: iterable of OOD torch Datasets.
        in_transform: unused — kept only for interface compatibility (the
            historical if/else on it built byte-identical loaders).
        gpu: CUDA device index.
        batch_size: DataLoader batch size.
        num_classes: number of in-distribution classes.

    NOTE(review): relies on a module-level ``kwargs`` dict of extra
    DataLoader options — confirm it is defined where this is used.
    """
    # set the path to pre-trained model and output
    outf = "/output/"
    outf = outf + "model" + '_' + in_dist_name + '/'
    # FIX: makedirs creates the missing '/output' parent and tolerates an
    # existing directory, unlike the earlier isdir()/mkdir() pair.
    os.makedirs(outf, exist_ok=True)

    torch.cuda.manual_seed(0)
    torch.cuda.set_device(gpu)

    # load networks
    model = torch.load(pre_trained_net, map_location="cuda:" + str(gpu))

    model.cuda()
    print('loaded model')

    # load target dataset
    validation_split = .2
    shuffle_dataset = True
    random_seed = 42

    # Creating data indices for training and validation splits:
    dataset_size = len(in_distribution)
    indices = list(range(dataset_size))
    split = int(np.floor(validation_split * dataset_size))
    if shuffle_dataset:
        np.random.seed(random_seed)
        np.random.shuffle(indices)
    train_indices, val_indices = indices[split:], indices[:split]

    # Creating PT data samplers and loaders:
    train_sampler = SubsetRandomSampler(train_indices)
    valid_sampler = SubsetRandomSampler(val_indices)

    # BUG FIX: DataLoader raises ValueError when shuffle=True is combined
    # with a sampler (they are mutually exclusive); the SubsetRandomSampler
    # already randomizes order. The old if/else on in_transform is also
    # collapsed here because both branches were identical.
    train_loader = torch.utils.data.DataLoader(in_distribution,
                                               batch_size=batch_size,
                                               sampler=train_sampler,
                                               **kwargs)
    test_loader = torch.utils.data.DataLoader(in_distribution,
                                              batch_size=batch_size,
                                              sampler=valid_sampler,
                                              **kwargs)

    print('loaded target data: ', in_dist_name)

    # set information about feature extraction: probe once with a dummy
    # batch shaped like the real data to learn the number of intermediate
    # layers and their channel counts
    model.eval()
    temp_x = torch.rand(*(list(next(iter(train_loader))[0].size()))).cuda()
    temp_x = Variable(temp_x)
    temp_list = model.feature_list(temp_x)[1]
    num_output = len(temp_list)
    feature_list = np.empty(num_output)
    count = 0
    for out in temp_list:
        feature_list[count] = out.size(1)
        count += 1

    print('get sample mean and covariance')
    sample_mean, precision = lib_generation.sample_estimator(
        model, num_classes, feature_list, train_loader, model_name="model")

    print('Generate dataloaders...')

    out_test_loaders = []
    for out_dist in out_of_distribution:
        out_test_loaders.append(
            torch.utils.data.DataLoader(out_dist,
                                        batch_size=batch_size,
                                        shuffle=True,
                                        **kwargs))

    print('get Mahalanobis scores', num_output)
    m_list = [0.0, 0.01, 0.005, 0.002, 0.0014, 0.001, 0.0005]
    for magnitude in m_list:
        print('Noise: ' + str(magnitude))
        # in-distribution scores: one column block per network layer
        for i in range(num_output):
            print('layer_num', i)
            M_in = lib_generation.get_Mahalanobis_score(model, test_loader, num_classes, outf, \
                                                        True, "model", sample_mean, precision, i, magnitude)
            M_in = np.asarray(M_in, dtype=np.float32)
            if i == 0:
                Mahalanobis_in = M_in.reshape((M_in.shape[0], -1))
            else:
                Mahalanobis_in = np.concatenate(
                    (Mahalanobis_in, M_in.reshape((M_in.shape[0], -1))),
                    axis=1)

        for out_test_loader, out_dist in zip(out_test_loaders, out_dist_list):
            print('Out-distribution: ' + out_dist)
            for i in range(num_output):
                M_out = lib_generation.get_Mahalanobis_score(model, out_test_loader, num_classes, outf, \
                                                             False, "model", sample_mean, precision, i, magnitude)
                M_out = np.asarray(M_out, dtype=np.float32)
                if i == 0:
                    Mahalanobis_out = M_out.reshape((M_out.shape[0], -1))
                else:
                    Mahalanobis_out = np.concatenate(
                        (Mahalanobis_out, M_out.reshape((M_out.shape[0], -1))),
                        axis=1)

            Mahalanobis_in = np.asarray(Mahalanobis_in, dtype=np.float32)
            Mahalanobis_out = np.asarray(Mahalanobis_out, dtype=np.float32)
            Mahalanobis_data, Mahalanobis_labels = lib_generation.merge_and_generate_labels(
                Mahalanobis_out, Mahalanobis_in)
            file_name = os.path.join(
                outf, 'Mahalanobis_%s_%s_%s.npy' %
                (str(magnitude), in_dist_name, out_dist))
            # append the in/out label column before saving
            Mahalanobis_data = np.concatenate(
                (Mahalanobis_data, Mahalanobis_labels), axis=1)
            np.save(file_name, Mahalanobis_data)
Code example #3
0
def main():
    """Generate Mahalanobis-distance OOD scores for the configured run.

    Reads settings from the module-level ``args`` namespace: loads a
    pre-trained DenseNet/ResNet, estimates class-conditional feature
    means and a tied precision matrix on the training split, then computes
    layer-wise Mahalanobis scores for the in-distribution test set and
    every OOD set at several input perturbation magnitudes. Saves one
    labelled ``.npy`` per (magnitude, OOD set) under ``args.outf``.
    """
    # set the path to pre-trained model and output
    pre_trained_net = "./pre_trained/" + args.net_type + "_" + args.dataset + ".pth"
    args.outf = args.outf + args.net_type + "_" + args.dataset + "/"
    # NOTE(review): os.mkdir assumes the parent of args.outf already exists
    # and is race-prone versus the isdir check — confirm.
    if os.path.isdir(args.outf) == False:
        os.mkdir(args.outf)
    torch.cuda.manual_seed(0)
    torch.cuda.set_device(args.gpu)
    # check the in-distribution dataset
    if args.dataset == "cifar100":
        args.num_classes = 100
    if args.dataset == "svhn":
        out_dist_list = ["cifar10", "imagenet_resize", "lsun_resize"]
    else:
        out_dist_list = ["svhn", "imagenet_resize", "lsun_resize"]

    # load networks
    if args.net_type == "densenet":
        if args.dataset == "svhn":
            model = models.DenseNet3(100, int(args.num_classes))
            model.load_state_dict(
                torch.load(pre_trained_net,
                           map_location="cuda:" + str(args.gpu)))
        else:
            # non-svhn densenet checkpoints load as whole model objects,
            # not state dicts
            model = torch.load(pre_trained_net,
                               map_location="cuda:" + str(args.gpu))
        in_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(
                (125.3 / 255, 123.0 / 255, 113.9 / 255),
                (63.0 / 255, 62.1 / 255.0, 66.7 / 255.0),
            ),
        ])
    elif args.net_type == "resnet":
        model = models.ResNet34(num_c=args.num_classes)
        model.load_state_dict(
            torch.load(pre_trained_net, map_location="cuda:" + str(args.gpu)))
        in_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465),
                                 (0.2023, 0.1994, 0.2010)),
        ])
    model.cuda()
    print("load model: " + args.net_type)

    # load dataset
    print("load target data: ", args.dataset)
    train_loader, test_loader = data_loader.getTargetDataSet(
        args.dataset, args.batch_size, in_transform, args.dataroot)

    # set information about feature extraction: probe the model once with a
    # dummy 32x32 batch to learn how many intermediate layers it exposes
    # and each layer's channel count
    model.eval()
    temp_x = torch.rand(2, 3, 32, 32).cuda()
    temp_x = Variable(temp_x)
    temp_list = model.feature_list(temp_x)[1]
    num_output = len(temp_list)
    feature_list = np.empty(num_output)
    count = 0
    for out in temp_list:
        feature_list[count] = out.size(1)
        count += 1

    print("get sample mean and covariance")
    sample_mean, precision = lib_generation.sample_estimator(
        model, args.num_classes, feature_list, train_loader)

    print("get Mahalanobis scores")
    m_list = [0.0, 0.01, 0.005, 0.002, 0.0014, 0.001, 0.0005]
    for magnitude in m_list:
        print("Noise: " + str(magnitude))
        # in-distribution scores: one column block per network layer
        for i in range(num_output):
            M_in = lib_generation.get_Mahalanobis_score(
                model,
                test_loader,
                args.num_classes,
                args.outf,
                True,
                args.net_type,
                sample_mean,
                precision,
                i,
                magnitude,
            )
            M_in = np.asarray(M_in, dtype=np.float32)
            if i == 0:
                Mahalanobis_in = M_in.reshape((M_in.shape[0], -1))
            else:
                Mahalanobis_in = np.concatenate(
                    (Mahalanobis_in, M_in.reshape((M_in.shape[0], -1))),
                    axis=1)

        for out_dist in out_dist_list:
            out_test_loader = data_loader.getNonTargetDataSet(
                out_dist, args.batch_size, in_transform, args.dataroot)
            print("Out-distribution: " + out_dist)
            for i in range(num_output):
                M_out = lib_generation.get_Mahalanobis_score(
                    model,
                    out_test_loader,
                    args.num_classes,
                    args.outf,
                    False,
                    args.net_type,
                    sample_mean,
                    precision,
                    i,
                    magnitude,
                )
                M_out = np.asarray(M_out, dtype=np.float32)
                if i == 0:
                    Mahalanobis_out = M_out.reshape((M_out.shape[0], -1))
                else:
                    Mahalanobis_out = np.concatenate(
                        (Mahalanobis_out, M_out.reshape((M_out.shape[0], -1))),
                        axis=1)

            Mahalanobis_in = np.asarray(Mahalanobis_in, dtype=np.float32)
            Mahalanobis_out = np.asarray(Mahalanobis_out, dtype=np.float32)
            (
                Mahalanobis_data,
                Mahalanobis_labels,
            ) = lib_generation.merge_and_generate_labels(
                Mahalanobis_out, Mahalanobis_in)
            file_name = os.path.join(
                args.outf,
                "Mahalanobis_%s_%s_%s.npy" %
                (str(magnitude), args.dataset, out_dist),
            )
            # append the in/out label column before saving
            Mahalanobis_data = np.concatenate(
                (Mahalanobis_data, Mahalanobis_labels), axis=1)
            np.save(file_name, Mahalanobis_data)
Code example #4
0
def main():
    """Generate Mahalanobis-distance OOD scores for the configured run.

    Reads settings from the module-level ``args`` namespace. Supports the
    cifar100/svhn benchmarks plus the ham10000 skin-lesion dataset, whose
    OOD sets are corrupted variants of ham10000 itself. Saves one labelled
    ``.npy`` per (magnitude, OOD set) under ``args.outf``.
    """
    # set the path to pre-trained model and output
    pre_trained_net = './pre_trained/' + args.net_type + '_' + args.dataset + '.pth'
    args.outf = args.outf + args.net_type + '_' + args.dataset + '/'
    # FIX: makedirs(exist_ok=True) creates missing parents and is race-free,
    # unlike the previous isdir()/mkdir() pair.
    os.makedirs(args.outf, exist_ok=True)
    torch.cuda.manual_seed(0)
    torch.cuda.set_device(args.gpu)
    # check the in-distribution dataset
    if args.dataset == 'cifar100':
        args.num_classes = 100
        out_dist_list = ['svhn', 'imagenet_resize', 'lsun_resize']
    elif args.dataset == 'svhn':
        out_dist_list = ['cifar10', 'imagenet_resize', 'lsun_resize']
    elif args.dataset == 'ham10000':
        # NOTE(review): earlier OOD set choices kept for reference —
        # confirm before deleting.
        #out_dist_list = ['cifar10', 'imagenet_resize', 'face', 'face_age', 'isic-2017', 'isic-2016']
        #out_dist_list = ['cifar10', 'face', 'face_age', 'isic-2017', 'isic-2016']
        #out_dist_list = ['cifar10', 'cifar100', 'svhn', 'imagenet_resize', 'lsun_resize', 'face', 'face_age', 'isic-2017', 'isic-2016']
        out_dist_list = [
            'ham10000-avg-smoothing', 'ham10000-brightness',
            'ham10000-contrast', 'ham10000-dilation', 'ham10000-erosion',
            'ham10000-med-smoothing', 'ham10000-rotation', 'ham10000-shift'
        ]
    else:
        # FIX: fail fast — previously an unknown dataset left out_dist_list
        # unbound and crashed later with a NameError.
        raise ValueError(f"unsupported dataset: {args.dataset!r}")

    # load networks
    if args.net_type == 'densenet':
        if args.dataset == 'svhn':
            model = models.DenseNet3(100, int(args.num_classes))
            model.load_state_dict(
                torch.load(pre_trained_net,
                           map_location="cuda:" + str(args.gpu)))
        else:
            # non-svhn densenet checkpoints load as whole model objects
            model = torch.load(pre_trained_net,
                               map_location="cuda:" + str(args.gpu))
        in_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((125.3 / 255, 123.0 / 255, 113.9 / 255),
                                 (63.0 / 255, 62.1 / 255.0, 66.7 / 255.0)),
        ])
    elif args.net_type == 'resnet':
        model = models.ResNet34(num_c=args.num_classes)
        model.load_state_dict(
            torch.load(pre_trained_net, map_location="cuda:" + str(args.gpu)))
        in_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465),
                                 (0.2023, 0.1994, 0.2010)),
        ])
    elif args.net_type == 'densenet121':
        model = DenseNet121(num_classes=args.num_classes)
        # checkpoint stores a whole model; copy only its weights
        model.load_state_dict(
            torch.load(pre_trained_net,
                       map_location="cuda:" + str(args.gpu)).state_dict())
        in_transform = transforms.Compose([
            transforms.Resize(224),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize((0.7630069, 0.5456578, 0.5700767),
                                 (0.14093237, 0.15263236, 0.17000099))
        ])
    else:
        # FIX: fail fast — previously an unknown net_type left `model`
        # unbound and crashed later with a NameError.
        raise ValueError(f"unsupported net_type: {args.net_type!r}")
    model.cuda()
    print('load model: ' + args.net_type)

    # load dataset
    print('load target data: ', args.dataset)
    train_loader, test_loader = data_loader.getTargetDataSet(
        args.dataset, args.batch_size, in_transform, args.dataroot)

    # set information about feature extraction: probe once with a dummy
    # 32x32 batch to learn layer count and per-layer channel counts
    model.eval()
    temp_x = torch.rand(2, 3, 32, 32).cuda()
    temp_x = Variable(temp_x)
    temp_list = model.feature_list(temp_x)[1]
    num_output = len(temp_list)
    feature_list = np.empty(num_output)
    count = 0
    for out in temp_list:
        feature_list[count] = out.size(1)
        count += 1

    print('get sample mean and covariance')
    sample_mean, precision = lib_generation.sample_estimator(
        model, args.num_classes, feature_list, train_loader)

    print('get Mahalanobis scores', num_output)
    m_list = [0.0, 0.01, 0.005, 0.002, 0.0014, 0.001, 0.0005]
    for magnitude in m_list:
        print('Noise: ' + str(magnitude))
        # in-distribution scores: one column block per network layer
        for i in range(num_output):
            print('layer_num', i)
            M_in = lib_generation.get_Mahalanobis_score(model, test_loader, args.num_classes, args.outf, \
                                                        True, args.net_type, sample_mean, precision, i, magnitude)
            M_in = np.asarray(M_in, dtype=np.float32)
            if i == 0:
                Mahalanobis_in = M_in.reshape((M_in.shape[0], -1))
            else:
                Mahalanobis_in = np.concatenate(
                    (Mahalanobis_in, M_in.reshape((M_in.shape[0], -1))),
                    axis=1)

        for out_dist in out_dist_list:
            out_test_loader = data_loader.getNonTargetDataSet(
                out_dist, args.batch_size, in_transform, args.dataroot)
            print('Out-distribution: ' + out_dist)
            for i in range(num_output):
                M_out = lib_generation.get_Mahalanobis_score(model, out_test_loader, args.num_classes, args.outf, \
                                                             False, args.net_type, sample_mean, precision, i, magnitude)
                M_out = np.asarray(M_out, dtype=np.float32)
                if i == 0:
                    Mahalanobis_out = M_out.reshape((M_out.shape[0], -1))
                else:
                    Mahalanobis_out = np.concatenate(
                        (Mahalanobis_out, M_out.reshape((M_out.shape[0], -1))),
                        axis=1)

            Mahalanobis_in = np.asarray(Mahalanobis_in, dtype=np.float32)
            Mahalanobis_out = np.asarray(Mahalanobis_out, dtype=np.float32)
            Mahalanobis_data, Mahalanobis_labels = lib_generation.merge_and_generate_labels(
                Mahalanobis_out, Mahalanobis_in)
            file_name = os.path.join(
                args.outf, 'Mahalanobis_%s_%s_%s.npy' %
                (str(magnitude), args.dataset, out_dist))
            # append the in/out label column before saving
            Mahalanobis_data = np.concatenate(
                (Mahalanobis_data, Mahalanobis_labels), axis=1)
            np.save(file_name, Mahalanobis_data)
def _generate_Mahalanobis(model,
                          loaders,
                          device,
                          num_classes,
                          model_type='eb0'):
    """Run the Mahalanobis OOD detector end-to-end and print performance.

    For each perturbation magnitude: computes layer-wise Mahalanobis scores
    on validation in-distribution and OOD data, fits a logistic regressor
    (via the module-level ``_score_mahalanobis``) on those scores, then
    evaluates it on the test in-distribution/OOD splits and reports the
    result through ``_ood_detection_performance``.

    Parameters:
        model: network exposing ``extract_features(x, mode='all')``.
        loaders: 5-tuple (train_ind, val_ind, test_ind, val_ood, test_ood).
        device: torch device for all computation.
        num_classes: number of in-distribution classes.
        model_type: only 'eb0' (EfficientNet-B0) is supported.

    Raises:
        ValueError: if ``model_type`` is not 'eb0'.
    """
    model.eval()
    train_ind_loader, val_ind_loader, test_ind_loader, val_ood_loader, test_ood_loader = loaders

    # probe with a dummy 224x224 batch to discover the feature layout
    temp_x = torch.rand(2, 3, 224, 224).to(device)
    temp_x = Variable(temp_x)
    temp_x = temp_x.to(device)
    if model_type == 'eb0':
        # hand-picked intermediate blocks of EfficientNet-B0
        idxs = [0, 2, 4, 7, 10, 14, 15]
        x, features = model.extract_features(temp_x, mode='all')
    else:
        # FIX: fail fast — previously an unsupported model_type fell
        # through and crashed below with a NameError on idxs/features.
        raise ValueError(f"unsupported model_type: {model_type!r}")
    features = [features[idx] for idx in idxs] + [x]
    num_output = len(features)
    feature_list = np.empty(num_output)
    count = 0
    for out in features:
        feature_list[count] = out.size(1)
        count += 1

    sample_mean, precision = lib_generation.sample_estimator(model,
                                                             num_classes,
                                                             feature_list,
                                                             train_ind_loader,
                                                             device=device)

    m_list = [-0.01, -0.0005, 0.0, 0.0005, 0.01]
    for magnitude in tqdm(m_list):

        # validation in-distribution scores: one column block per layer
        for i in range(num_output):
            M_val = lib_generation.get_Mahalanobis_score(model,
                                                         val_ind_loader,
                                                         num_classes,
                                                         sample_mean,
                                                         precision,
                                                         i,
                                                         magnitude,
                                                         device=device)
            M_val = np.asarray(M_val, dtype=np.float32)
            if i == 0:
                Mahalanobis_val_ind = M_val.reshape((M_val.shape[0], -1))
            else:
                Mahalanobis_val_ind = np.concatenate(
                    (Mahalanobis_val_ind, M_val.reshape((M_val.shape[0], -1))),
                    axis=1)

        # validation out-of-distribution scores
        for i in range(num_output):
            M_val_ood = lib_generation.get_Mahalanobis_score(model,
                                                             val_ood_loader,
                                                             num_classes,
                                                             sample_mean,
                                                             precision,
                                                             i,
                                                             magnitude,
                                                             device=device)
            M_val_ood = np.asarray(M_val_ood, dtype=np.float32)
            if i == 0:
                Mahalanobis_val_ood = M_val_ood.reshape(
                    (M_val_ood.shape[0], -1))
            else:
                Mahalanobis_val_ood = np.concatenate(
                    (Mahalanobis_val_ood,
                     M_val_ood.reshape((M_val_ood.shape[0], -1))),
                    axis=1)

        Mahalanobis_val_ind = np.asarray(Mahalanobis_val_ind, dtype=np.float32)
        Mahalanobis_val_ood = np.asarray(Mahalanobis_val_ood, dtype=np.float32)

        # fit the layer-weighting regressor on validation scores
        regressor, _, _ = _score_mahalanobis(Mahalanobis_val_ind,
                                             Mahalanobis_val_ood)

        val_ind = regressor.predict_proba(Mahalanobis_val_ind)[:, 1]
        val_ood = regressor.predict_proba(Mahalanobis_val_ood)[:, 1]

        # test in-distribution scores
        for i in range(num_output):
            M_test = lib_generation.get_Mahalanobis_score(model,
                                                          test_ind_loader,
                                                          num_classes,
                                                          sample_mean,
                                                          precision,
                                                          i,
                                                          magnitude,
                                                          device=device)
            M_test = np.asarray(M_test, dtype=np.float32)
            if i == 0:
                Mahalanobis_test = M_test.reshape((M_test.shape[0], -1))
            else:
                Mahalanobis_test = np.concatenate(
                    (Mahalanobis_test, M_test.reshape((M_test.shape[0], -1))),
                    axis=1)

        # test out-of-distribution scores
        for i in range(num_output):
            M_ood = lib_generation.get_Mahalanobis_score(model,
                                                         test_ood_loader,
                                                         num_classes,
                                                         sample_mean,
                                                         precision,
                                                         i,
                                                         magnitude,
                                                         device=device)
            M_ood = np.asarray(M_ood, dtype=np.float32)
            if i == 0:
                Mahalanobis_ood = M_ood.reshape((M_ood.shape[0], -1))
            else:
                Mahalanobis_ood = np.concatenate(
                    (Mahalanobis_ood, M_ood.reshape((M_ood.shape[0], -1))),
                    axis=1)

        Mahalanobis_test = np.asarray(Mahalanobis_test, dtype=np.float32)
        Mahalanobis_ood = np.asarray(Mahalanobis_ood, dtype=np.float32)

        ind = regressor.predict_proba(Mahalanobis_test)[:, 1]
        ood = regressor.predict_proba(Mahalanobis_ood)[:, 1]

        print(f'########## epsilon: {magnitude} ##########')
        _ood_detection_performance('Mahalanobis', val_ind, val_ood, ind, ood)
Code example #6
0
def main():
    """Generate Mahalanobis-distance OOD scores for a skin-lesion model.

    Reads settings from the module-level ``args`` namespace: loads one of
    four pre-trained 8-class classifiers from fixed checkpoint paths,
    estimates class-conditional feature means and a tied precision matrix
    on the training split, then computes layer-wise Mahalanobis scores for
    the in-distribution test set and a fixed list of dermatology/imagenet
    OOD sets at several perturbation magnitudes. Saves one labelled
    ``.npy`` per (magnitude, OOD set) under ``args.outf``.

    Raises:
        Exception: if ``args.net_type`` is not one of the supported models.
    """
    # set the path to pre-trained model and output
    args.outf = args.outf + args.net_type + '_' + args.dataset + '/'
    # FIX: makedirs(exist_ok=True) creates missing parents and is race-free,
    # unlike the previous isdir()/mkdir() pair.
    os.makedirs(args.outf, exist_ok=True)
    torch.cuda.manual_seed(0)
    torch.cuda.set_device(args.gpu)

    out_dist_list = [
        'skin_cli', 'skin_derm', 'corrupted', 'corrupted_70', 'imgnet', 'nct',
        'final_test'
    ]

    # load networks (all wrap a torchvision backbone with 8 output classes
    # and restore weights from a fixed checkpoint path)
    if args.net_type == 'densenet_121':
        model = densenet_121.Net(models.densenet121(pretrained=False), 8)
        ckpt = torch.load("../checkpoints/densenet-121/checkpoint.pth")
        model.load_state_dict(ckpt['model_state_dict'])
        model.eval()
        model.cuda()
    elif args.net_type == 'mobilenet':
        model = mobilenet.Net(models.mobilenet_v2(pretrained=False), 8)
        ckpt = torch.load("../checkpoints/mobilenet/checkpoint.pth")
        model.load_state_dict(ckpt['model_state_dict'])
        model.eval()
        model.cuda()
        print("Done!")
    elif args.net_type == 'resnet_50':
        model = resnet_50.Net(models.resnet50(pretrained=False), 8)
        ckpt = torch.load("../checkpoints/resnet-50/checkpoint.pth")
        model.load_state_dict(ckpt['model_state_dict'])
        model.eval()
        model.cuda()
        print("Done!")
    elif args.net_type == 'vgg_16':
        model = vgg_16.Net(models.vgg16_bn(pretrained=False), 8)
        ckpt = torch.load("../checkpoints/vgg-16/checkpoint.pth")
        model.load_state_dict(ckpt['model_state_dict'])
        model.eval()
        model.cuda()
        print("Done!")
    else:
        raise Exception(f"There is no net_type={args.net_type} available.")

    # ImageNet normalization statistics
    in_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])
    print('load model: ' + args.net_type)

    # load dataset
    print('load target data: ', args.dataset)
    train_loader, test_loader = data_loader.getTargetDataSet(
        args.dataset, args.batch_size, in_transform, args.dataroot)

    # set information about feature extraction: probe once with a dummy
    # 224x224 batch to learn layer count and per-layer channel counts
    model.eval()
    temp_x = torch.rand(2, 3, 224, 224).cuda()
    temp_x = Variable(temp_x)
    temp_list = model.feature_list(temp_x)[1]
    num_output = len(temp_list)
    feature_list = np.empty(num_output)
    count = 0
    for out in temp_list:
        feature_list[count] = out.size(1)
        count += 1

    print('get sample mean and covariance')
    sample_mean, precision = lib_generation.sample_estimator(
        model, args.num_classes, feature_list, train_loader)

    print('get Mahalanobis scores')
    m_list = [0.0, 0.01, 0.005, 0.002, 0.0014, 0.001, 0.0005]

    for magnitude in m_list:
        print('Noise: ' + str(magnitude))
        # in-distribution scores: one column block per network layer
        for i in range(num_output):
            M_in = lib_generation.get_Mahalanobis_score(model, test_loader, args.num_classes, args.outf, \
                                                        True, args.net_type, sample_mean, precision, i, magnitude)
            M_in = np.asarray(M_in, dtype=np.float32)
            if i == 0:
                Mahalanobis_in = M_in.reshape((M_in.shape[0], -1))
            else:
                Mahalanobis_in = np.concatenate(
                    (Mahalanobis_in, M_in.reshape((M_in.shape[0], -1))),
                    axis=1)

        for out_dist in out_dist_list:
            out_test_loader = data_loader.getNonTargetDataSet(
                out_dist, args.batch_size, in_transform, args.dataroot)
            print('Out-distribution: ' + out_dist)
            for i in range(num_output):
                M_out = lib_generation.get_Mahalanobis_score(model, out_test_loader, args.num_classes, args.outf, \
                                                             False, args.net_type, sample_mean, precision, i, magnitude)
                M_out = np.asarray(M_out, dtype=np.float32)
                if i == 0:
                    Mahalanobis_out = M_out.reshape((M_out.shape[0], -1))
                else:
                    Mahalanobis_out = np.concatenate(
                        (Mahalanobis_out, M_out.reshape((M_out.shape[0], -1))),
                        axis=1)

            Mahalanobis_in = np.asarray(Mahalanobis_in, dtype=np.float32)
            Mahalanobis_out = np.asarray(Mahalanobis_out, dtype=np.float32)
            Mahalanobis_data, Mahalanobis_labels = lib_generation.merge_and_generate_labels(
                Mahalanobis_out, Mahalanobis_in)
            file_name = os.path.join(
                args.outf, 'Mahalanobis_%s_%s_%s.npy' %
                (str(magnitude), args.dataset, out_dist))
            # append the in/out label column before saving
            Mahalanobis_data = np.concatenate(
                (Mahalanobis_data, Mahalanobis_labels), axis=1)
            np.save(file_name, Mahalanobis_data)
def _generate_Mahalanobis(model,
                          loaders,
                          device,
                          num_classes,
                          model_type='eb0'):
    """Run the Mahalanobis OOD-detection pipeline end to end.

    Estimates class-conditional Gaussians on the training split, sweeps
    input-perturbation magnitudes on the validation IND/OOD pair to fit
    logistic regressors, keeps the regressor(s) with the best validation
    AUC, and scores the three test OOD loaders with them.

    Args:
        model: network exposing ``extract_features(x, mode='all')``.
        loaders: 7-tuple ``(train_ind, val_ind, test_ind, val_ood,
            test_ood_1, test_ood_2, test_ood_3)`` of data loaders.
        device: torch device the dummy probe batch is moved to.
        num_classes: number of in-distribution classes.
        model_type: only ``'eb0'`` (EfficientNet-B0) is supported.

    Returns:
        ``(best_val_ind, best_val_ood, test_ind, test_ood_1, test_ood_2,
        test_ood_3)`` — per-sample OOD probabilities, averaged over all
        regressors tied at the best validation AUC.

    Raises:
        NotImplementedError: for any ``model_type`` other than ``'eb0'``.
    """
    model.eval()
    (train_ind_loader, val_ind_loader, test_ind_loader, val_ood_loader,
     test_ood_loader_1, test_ood_loader_2, test_ood_loader_3) = loaders

    # Probe with a dummy batch to discover each tapped layer's channel count.
    # (The original wrapped this in the long-deprecated autograd.Variable,
    # which is a no-op on modern torch.)
    temp_x = torch.rand(2, 3, 224, 224).to(device)
    if model_type == 'eb0':
        # Indices of the intermediate EfficientNet-B0 blocks to tap.
        idxs = [0, 2, 4, 7, 10, 14, 15]
        x, features = model.extract_features(temp_x, mode='all')
    else:
        # The original fell through a bare `pass` and later crashed with a
        # NameError on `features`; fail loudly and descriptively instead.
        raise NotImplementedError(
            "middle-level feature extraction is only defined for model_type='eb0'")
    features = [features[idx] for idx in idxs] + [x]
    num_output = len(features)
    # float dtype kept to match the original np.empty() buffer's dtype.
    feature_list = np.array([out.size(1) for out in features], dtype=float)

    sample_mean, precision = lib_generation.sample_estimator(model,
                                                             num_classes,
                                                             feature_list,
                                                             train_ind_loader,
                                                             device=device)

    def _layer_scores(loader, magnitude):
        # Collect per-layer Mahalanobis scores for one loader and stack
        # them into a single (num_samples, num_output) float32 matrix.
        columns = []
        for layer_idx in range(num_output):
            scores = lib_generation.get_Mahalanobis_score(model,
                                                          loader,
                                                          num_classes,
                                                          sample_mean,
                                                          precision,
                                                          layer_idx,
                                                          magnitude,
                                                          device=device)
            scores = np.asarray(scores, dtype=np.float32)
            columns.append(scores.reshape((scores.shape[0], -1)))
        return np.concatenate(columns, axis=1)

    # Start below any valid AUC so the first regressor always takes the
    # `>` branch. (The original started at 0 and would hit a NameError on
    # `cnt`/`best_val_ind` if the very first AUC happened to be exactly 0.)
    best_auc = -1.0
    m_list = [0.0, 0.01, 0.005, 0.002, 0.0014, 0.001, 0.0005]

    best_magnitudes, regressors, thresholds = [], [], []
    cnt = 0
    for magnitude in m_list:
        Mahalanobis_val_ind = _layer_scores(val_ind_loader, magnitude)
        Mahalanobis_val_ood = _layer_scores(val_ood_loader, magnitude)

        regressor, auc, threshold = _score_mahalanobis(Mahalanobis_val_ind,
                                                       Mahalanobis_val_ood)
        # NOTE(review): ind_dataset / val_dataset are not defined in this
        # function — presumably module-level names; confirm they exist.
        with open(
                f'lr_pickles/logistic_regressor_{ind_dataset}_{val_dataset}_{magnitude}.pickle',
                'wb') as lrp:
            pickle.dump(regressor, lrp, protocol=pickle.HIGHEST_PROTOCOL)

        if auc > best_auc:
            # Strictly better AUC: restart the ensemble with this regressor.
            best_auc = auc
            best_magnitudes = [magnitude]
            regressors = [regressor]
            thresholds = [threshold]
            best_val_ind = regressor.predict_proba(Mahalanobis_val_ind)[:, 1]
            best_val_ood = regressor.predict_proba(Mahalanobis_val_ood)[:, 1]
            cnt = 1
        elif auc == best_auc:
            # Exact tie: average this regressor into the ensemble.
            best_magnitudes.append(magnitude)
            regressors.append(regressor)
            thresholds.append(threshold)
            best_val_ind += regressor.predict_proba(Mahalanobis_val_ind)[:, 1]
            best_val_ood += regressor.predict_proba(Mahalanobis_val_ood)[:, 1]
            cnt += 1

    best_val_ind /= cnt
    best_val_ood /= cnt

    print('###############################################')
    print()
    print(f'Selected magnitudes: {best_magnitudes}')
    print(f'Selected thresholds: {thresholds}')
    print()

    # Score the test loaders with every tied-best regressor and average.
    # BUGFIX: the original reshaped ood_2 / ood_3 with (-2) / (-3), which
    # numpy rejects (only a single -1 unknown dimension is allowed); every
    # other loader used (-1), which _layer_scores now applies uniformly.
    idx = 0
    for (best_magnitude, regressor, threshold) in zip(best_magnitudes,
                                                      regressors, thresholds):
        Mahalanobis_test = _layer_scores(test_ind_loader, best_magnitude)
        Mahalanobis_ood_1 = _layer_scores(test_ood_loader_1, best_magnitude)
        Mahalanobis_ood_2 = _layer_scores(test_ood_loader_2, best_magnitude)
        Mahalanobis_ood_3 = _layer_scores(test_ood_loader_3, best_magnitude)

        if idx == 0:
            test_ind = regressor.predict_proba(Mahalanobis_test)[:, 1]
            test_ood_1 = regressor.predict_proba(Mahalanobis_ood_1)[:, 1]
            test_ood_2 = regressor.predict_proba(Mahalanobis_ood_2)[:, 1]
            test_ood_3 = regressor.predict_proba(Mahalanobis_ood_3)[:, 1]
        else:
            test_ind += regressor.predict_proba(Mahalanobis_test)[:, 1]
            test_ood_1 += regressor.predict_proba(Mahalanobis_ood_1)[:, 1]
            test_ood_2 += regressor.predict_proba(Mahalanobis_ood_2)[:, 1]
            test_ood_3 += regressor.predict_proba(Mahalanobis_ood_3)[:, 1]
        idx += 1

    test_ind /= idx
    test_ood_1 /= idx
    test_ood_2 /= idx
    test_ood_3 /= idx

    return best_val_ind, best_val_ood, test_ind, test_ood_1, test_ood_2, test_ood_3
コード例 #8
0
def get_features_for_regressor(regressor_feature, model, config, test_loader,
                               dataset, i, out_flag, device, class_mean,
                               class_precision, tied_precision, pca_list,
                               knn_search_list, knn_mean, knn_precision):
    """Build the per-sample feature matrix for the OOD logistic regressor.

    Dispatches on ``regressor_feature`` to one of the supported scoring
    methods (four Mahalanobis variants, PCA, ODIN, LID) and returns one
    column per hyper-parameter setting, stacked with ``np.hstack``.

    Args:
        regressor_feature: one of ``'mahalanobis_class_cov'``,
            ``'mahalanobis_tied_cov'``, ``'knn_mahalanobis_class_cov'``,
            ``'knn_mahalanobis_tied_cov'``, ``'pca'``, ``'ODIN'``, ``'LID'``.
        model: the network being scored.
        config: experiment configuration dict (noise magnitudes, ODIN
            settings, LID paths, etc. are read from it).
        test_loader: loader of the samples to score.
        dataset: dataset name, used for the LID dump/score file paths.
        i: index of the tapped layer / feature level.
        out_flag: True when scoring in-distribution data (passed through
            to the scoring helpers).
        device: torch device for the scoring helpers.
        class_mean, class_precision: class-conditional Gaussian parameters.
        tied_precision: shared (tied) precision matrix.
        pca_list: fitted PCA objects, used by the ``'pca'`` branch.
        knn_search_list, knn_mean, knn_precision: K-NN structures and
            Gaussian parameters for the ``knn_*`` variants.

    Returns:
        np.ndarray of shape (num_samples, num_settings).

    Raises:
        ValueError: for an unrecognized ``regressor_feature``.
    """
    # The four Mahalanobis variants differ only in which precision matrix
    # perturbs the input and which mean/precision pair scores the result.
    # variant -> (message, perturb precision, score mean, score precision, knn)
    mahalanobis_variants = {
        'mahalanobis_class_cov':
        ("Getting scores using Mahalanobis class covariance",
         class_precision, class_mean, class_precision, False),
        'mahalanobis_tied_cov':
        ("Getting scores using Mahalanobis tied covariance [Mahalanobis Paper]",
         tied_precision, class_mean, tied_precision, False),
        'knn_mahalanobis_class_cov':
        ("Getting scores using Mahalanobis class covariance on K-NNs",
         class_precision, knn_mean, knn_precision, True),
        'knn_mahalanobis_tied_cov':
        ("Getting scores using Mahalanobis tied covariance on K-NNs",
         tied_precision, knn_mean, knn_precision, True),
    }

    if regressor_feature in mahalanobis_variants:
        (message, perturb_precision, score_mean, score_precision,
         use_knn) = mahalanobis_variants[regressor_feature]
        print(message)
        scores = []
        # One column per input-perturbation magnitude.
        for magnitude in config['exp_params']['noise_params']['m_list']:
            cur_score = lib_generation.get_Mahalanobis_score(
                regressor_feature, model, config, test_loader, out_flag,
                class_mean, perturb_precision, score_mean, score_precision,
                i, magnitude, knn_search_list[i], device, knn=use_knn)
            scores.append(np.array(cur_score).reshape(-1, 1))
        return np.hstack(scores)

    elif regressor_feature == 'pca':
        print("Getting scores using PCA")
        scores = []
        for magnitude in config['exp_params']['noise_params']['m_list']:
            cur_score = lib_generation.get_pca_score(
                model, config, test_loader, out_flag, class_mean,
                class_precision, i, magnitude, pca_list, device)
            scores.append(np.array(cur_score).reshape(-1, 1))
        return np.hstack(scores)

    elif regressor_feature == 'ODIN':
        scores = []
        # NOTE(review): each `params` pair is forwarded as (params[1],
        # params[0]) — presumably (temperature, magnitude) swapped to match
        # get_posterior's signature; confirm against lib_generation.
        for params in config['exp_params']['odin_args']['settings']:
            cur_score = lib_generation.get_posterior(
                model, config['model_params']['net_type'], test_loader,
                params[1], params[0], config['logging_params']['outf'],
                out_flag, device)
            scores.append(cur_score.reshape(-1, 1))
        return np.hstack(scores)

    elif regressor_feature == 'LID':
        # Step 1: dump clean/adversarial/noisy samples via a helper script.
        # NOTE(review): shell command assembled by string formatting from
        # config values — acceptable for trusted configs, but prefer
        # subprocess.run([...], shell=False) if any value can be untrusted.
        # NOTE(review): `--model` receives config['exp_params']['dataset']
        # and `--ood_idx` receives config['model_params']['out_idx'] —
        # looks like a possible argument mix-up; verify against the
        # ADV_Samples.py CLI before relying on it.
        os.system(
            "python ADV_Samples.py --dataset {} --net_type {} --adv_type {} --gpu {} --outf {} --model {} --ood_idx {} --num_oods {}"
            .format(dataset, config['model_params']['net_type'],
                    config['exp_params']['lid_args']['adv_type'],
                    config['trainer_params']['gpu'],
                    config['exp_params']['lid_args']['outf'],
                    config['exp_params']['dataset'],
                    config['model_params']['out_idx'],
                    config['model_params']['num_oods']))
        # Step 2: reload the dumped tensors and compute LID scores.
        base_path = config['exp_params']['lid_args']['outf'] + config[
            'model_params']['net_type'] + '_' + dataset + '/'
        test_clean_data = torch.load(
            base_path + 'clean_data_%s_%s_%s.pth' %
            (config['model_params']['net_type'], dataset,
             config['exp_params']['lid_args']['adv_type']))
        test_adv_data = torch.load(
            base_path + 'adv_data_%s_%s_%s.pth' %
            (config['model_params']['net_type'], dataset,
             config['exp_params']['lid_args']['adv_type']))
        test_noisy_data = torch.load(
            base_path + 'noisy_data_%s_%s_%s.pth' %
            (config['model_params']['net_type'], dataset,
             config['exp_params']['lid_args']['adv_type']))
        test_label = torch.load(base_path + 'label_%s_%s_%s.pth' %
                                (config['model_params']['net_type'], dataset,
                                 config['exp_params']['lid_args']['adv_type']))
        LID, LID_adv, LID_noisy = lib_generation.get_LID(
            model, test_clean_data, test_adv_data, test_noisy_data, test_label,
            i + 1)
        LID_scores = np.hstack([np.vstack(s) for s in LID])
        print("LID_scores_shape:", LID_scores.shape)
        return LID_scores

    else:
        # ValueError (a subclass of Exception, so existing broad handlers
        # still catch it) instead of the original bare Exception.
        raise ValueError("Wrong type of regressor feature")