Example #1
def transformation_cifar10_vs_tinyimagenet():
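    # OOD detection with a double-headed WRN: CIFAR-10 test images are the
    # inliers (label 1), resized Tiny-ImageNet images the outliers (label 0).
    # For each transformation t, the softmax probability that the
    # transformation head assigns to t is recorded; the mean over all
    # transformations is the normality score.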
    _, (x_test, y_test) = load_cifar10()
    x_test_out = load_tinyimagenet('/home/izikgo/Imagenet_resize/Imagenet_resize/')

    transformer = Transformer(8, 8)
    n = 16
    k = 8
    base_mdl = create_wide_residual_network(x_test.shape[1:], 10, n, k)

    transformations_cls_out = Activation('softmax')(
        Dense(transformer.n_transforms)(base_mdl.get_layer(index=-3).output))

    mdl = Model(base_mdl.input, [base_mdl.output, transformations_cls_out])
    mdl.load_weights('cifar10_WRN_doublehead-transformations_{}-{}.h5'.format(n, k))

    scores_mdl = Model(mdl.input, mdl.output[1])
    x_test_all = np.concatenate((x_test, x_test_out))
    preds = np.zeros((len(x_test_all), transformer.n_transforms))
    for t in range(transformer.n_transforms):
        preds[:, t] = scores_mdl.predict(
            transformer.transform_batch(x_test_all, [t] * len(x_test_all)),
            batch_size=128)[:, t]

    labels = np.concatenate((np.ones(len(x_test)), np.zeros(len(x_test_out))))
    scores = preds.mean(axis=-1)

    save_roc_pr_curve_data(scores, labels, 'cifar10-vs-tinyimagenet_transformations.npz')
Example #2
def osoc_test(hyper_para):
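    # Open-set test: a 10-way closed-set classifier predicts a class for each
    # test image, and a per-class 72-way transformation network (loaded by
    # ground-truth class in the loop below) scores the identity-transformed
    # image via its first transformation logit. AUROC is computed for
    # in-class vs. out-of-class, and accuracy over the in-class samples.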
    transformer = Transformer(8, 8)
    C = wrn.WideResNet(28, num_classes=10, dropout_rate=0,
                       widen_factor=10).cuda()
    C.load_state_dict(torch.load(hyper_para.experiment_name + '.pth'))
    single_class_ind = hyper_para.inclass[0]
    (x_train, y_train), (x_test, y_test) = load_cifar10()
    C.eval()
    score = []
    lbl = []
    features = []
    correct = 0
    total = 0
    dset = CustomDataset(x_test, y_test)
    testLoader = DataLoader(dset, batch_size=128)
    for i, (inputs0, labels) in enumerate(testLoader):
        inputs = inputs0.permute(0, 3, 2, 1)  # NHWC -> NCHW (this ordering also transposes H and W)
        if hyper_para.gpu:
            inputs = inputs.cuda()
            labels = labels.cuda()
        act, f = C(inputs)
        features += f.detach().cpu().tolist()
        val, ind = torch.max(act, dim=1)
        #score += val.detach().cpu().tolist()
        # Auxiliary 72-way transformation network; its weights are re-loaded
        # per sample below, keyed by the sample's ground-truth class.
        CC = wrn.WideResNet(28,
                            num_classes=72,
                            dropout_rate=0,
                            widen_factor=10).cuda()
        val, ind, labels = (val.detach().cpu().tolist(),
                            ind.detach().cpu().tolist(),
                            labels.detach().cpu().tolist())
        for idx, (ii, gt) in enumerate(zip(ind, labels)):
            gt = gt[0]
            CC.load_state_dict(
                torch.load('saved/' + hyper_para.experiment_name + '_' +
                           str(gt) + '.pth'))

            x_test0 = transformer.transform_batch(
                np.expand_dims(inputs0[idx, :, :, :].detach().cpu().numpy(),
                               0), [0])
            x_test0 = torch.tensor(x_test0).permute(0, 3, 2, 1).cuda()
            act, f = CC(x_test0)
            act = act.detach().cpu().tolist()
            score += [act[0][0]]

            if gt in hyper_para.inclass:
                total += 1
                if ii == gt:
                    correct += 1
                lbl.append(1)
            else:
                lbl.append(0)
        break  # only the first batch of test images is evaluated
    fpr, tpr, thresholds = metrics.roc_curve(lbl, score)
    AUC = metrics.auc(fpr, tpr)
    ACC = float(correct) / total
    print('AUROC: ' + str(AUC) + '\t Accuracy: ' + str(ACC))
Example #3
class RotateDataset(Dataset):
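    # Self-supervised dataset: each item is an image under one randomly chosen
    # geometric transformation, labeled with the transformation id.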
    def __init__(self, x_tensor):
        self.x = x_tensor
        self.transformer = Transformer(8, 8)

    def __getitem__(self, index):
        trans_id = np.random.randint(self.transformer.n_transforms)
        #return(self.transformer.transform_sample(self.x[index], trans_id), trans_id)
        return (self.transformer.transform_batch(
            np.expand_dims(self.x[index], 0), [trans_id])[0], trans_id)

    def __len__(self):
        return len(self.x)
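A minimal usage sketch for RotateDataset (a sketch only, assuming x_train is an
NHWC numpy array and C is a WideResNet that returns (logits, features), as in
the other examples here):

from torch.utils.data import DataLoader
import torch.nn as nn
import torch.optim as optim

def train_rotnet_epoch(C, x_train):
    # One epoch of self-supervised training: predict which transformation
    # was applied to each image.
    loader = DataLoader(RotateDataset(x_train), batch_size=128, shuffle=True)
    ce_criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(C.parameters(), lr=0.001, betas=(0.9, 0.999))
    for imgs, trans_ids in loader:
        imgs = imgs.permute(0, 3, 2, 1).float().cuda()  # NHWC -> NCHW, as above
        logits, _ = C(imgs)
        loss = ce_criterion(logits, trans_ids.cuda())
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()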
Example #4
class RotateDataset(Dataset):
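    # Variant of RotateDataset that also returns the original class label and
    # walks the data in a fixed random permutation.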
    def __init__(self, x_tensor, lbl):
        self.x = x_tensor
        self.y = lbl
        self.perm = np.random.permutation(len(x_tensor))
        self.transformer = Transformer(8, 8)

    def __getitem__(self, index):
        index = self.perm[index]
        trans_id = np.random.randint(self.transformer.n_transforms)
        #return(self.transformer.transform_sample(self.x[index], trans_id), trans_id)
        return (self.transformer.transform_batch(
            np.expand_dims(self.x[index], 0), [trans_id])[0],
                trans_id, self.y[index])

    def __len__(self):
        return len(self.x)
Example #5
def os_test_ens(testLoader, hyper_para, C, isauc):
    # Note: the testLoader and C arguments are rebuilt below; only hyper_para
    # is actually used.
    C = wrn.WideResNet(28, num_classes=72, dropout_rate=0,
                       widen_factor=10).cuda()
    C.load_state_dict(
        torch.load('saved/' + hyper_para.experiment_name + '_' +
                   str(hyper_para.inclass[0]) + '.pth'))
    single_class_ind = hyper_para.inclass[0]
    (x_train, y_train), (x_test, y_test) = load_cifar10()
    transformer = Transformer(8, 8)
    glabels = y_test.flatten() == single_class_ind
    C.cuda().eval()
    scores = np.array([[]])
    features = []

    correct = 0
    total = 0
    preds = np.zeros((len(x_test), transformer.n_transforms))
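    # For each transformation t considered, apply t to the whole test set and
    # take the classifier's logit for t as the per-image score; scores are
    # summed over transformations before the final AUROC.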
    for t in [0]:  #range(72):
        score = []
        x_test0 = transformer.transform_batch(x_test, [t] * len(x_test))
        dset = CustomDataset(x_test0, [t] * len(x_test))
        testLoader = DataLoader(dset, batch_size=128, shuffle=False)
        for i, (inputs, labels) in enumerate(testLoader):
            inputs = inputs.permute(0, 3, 2, 1)
            inputs = inputs.cuda()
            labels = labels.cuda()
            act, f = C(inputs)
            features += f.detach().cpu().tolist()
            #act = torch.nn.functional.softmax(act, dim=1)
            score += act[:, t].detach().cpu().tolist()
        preds[:, t] = list(score)
        fpr, tpr, thresholds = metrics.roc_curve(glabels, score)
        AUC = metrics.auc(fpr, tpr)
        print('AUROC: ' + str(AUC))

    scores = preds.sum(axis=1)
    fpr, tpr, thresholds = metrics.roc_curve(glabels, scores)
    AUC = metrics.auc(fpr, tpr)
    print('AUROC: ' + str(AUC))
    return [0, 0]
Example #6
def _transformations_experiment(dataset_load_fn, dataset_name, single_class_ind, gpu_q):
    gpu_to_use = gpu_q.get()
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu_to_use

    (x_train, y_train), (x_test, y_test) = dataset_load_fn()

    if dataset_name in ['cats-vs-dogs']:
        transformer = Transformer(16, 16)
        n, k = (16, 8)
    else:
        transformer = Transformer(8, 8)
        n, k = (10, 4)
    mdl = create_wide_residual_network(x_train.shape[1:], transformer.n_transforms, n, k)
    mdl.compile('adam',
                'categorical_crossentropy',
                ['acc'])

    x_train_task = x_train[y_train.flatten() == single_class_ind]
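    # np.tile pairs labels [0..n_transforms-1] with np.repeat below, so every
    # inlier sample appears exactly once under every transformation.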
    transformations_inds = np.tile(np.arange(transformer.n_transforms), len(x_train_task))
    x_train_task_transformed = transformer.transform_batch(np.repeat(x_train_task, transformer.n_transforms, axis=0),
                                                           transformations_inds)
    batch_size = 128

    mdl.fit(x=x_train_task_transformed, y=to_categorical(transformations_inds),
            batch_size=batch_size, epochs=int(np.ceil(200/transformer.n_transforms))
            )

    #################################################################################################
    # simplified normality score
    #################################################################################################
    # preds = np.zeros((len(x_test), transformer.n_transforms))
    # for t in range(transformer.n_transforms):
    #     preds[:, t] = mdl.predict(transformer.transform_batch(x_test, [t] * len(x_test)),
    #                               batch_size=batch_size)[:, t]
    #
    # labels = y_test.flatten() == single_class_ind
    # scores = preds.mean(axis=-1)
    #################################################################################################
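    # Full normality score: fit one Dirichlet distribution per transformation
    # to the softmax outputs of the transformed inlier training data, then
    # score each test image by its log-likelihood under those Dirichlets.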

    def calc_approx_alpha_sum(observations):
        N = len(observations)
        f = np.mean(observations, axis=0)

        return (N * (len(f) - 1) * (-psi(1))) / (
                N * np.sum(f * np.log(f)) - np.sum(f * np.sum(np.log(observations), axis=0)))

    def inv_psi(y, iters=5):
        # initial estimate
        cond = y >= -2.22
        x = cond * (np.exp(y) + 0.5) + (1 - cond) * -1 / (y - psi(1))

        for _ in range(iters):
            x = x - (psi(x) - y) / polygamma(1, x)
        return x

    def fixed_point_dirichlet_mle(alpha_init, log_p_hat, max_iter=1000):
        alpha_new = alpha_old = alpha_init
        for _ in range(max_iter):
            alpha_new = inv_psi(psi(np.sum(alpha_old)) + log_p_hat)
            if np.sqrt(np.sum((alpha_old - alpha_new) ** 2)) < 1e-9:
                break
            alpha_old = alpha_new
        return alpha_new

    def dirichlet_normality_score(alpha, p):
        return np.sum((alpha - 1) * np.log(p), axis=-1)
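    # The score above is the Dirichlet log-likelihood up to an additive
    # constant: log Dir(p | alpha) = sum_j (alpha_j - 1) * log(p_j) + const.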

    scores = np.zeros((len(x_test),))
    observed_data = x_train_task
    for t_ind in range(transformer.n_transforms):
        observed_dirichlet = mdl.predict(transformer.transform_batch(observed_data, [t_ind] * len(observed_data)),
                                         batch_size=1024)
        log_p_hat_train = np.log(observed_dirichlet).mean(axis=0)

        alpha_sum_approx = calc_approx_alpha_sum(observed_dirichlet)
        alpha_0 = observed_dirichlet.mean(axis=0) * alpha_sum_approx

        mle_alpha_t = fixed_point_dirichlet_mle(alpha_0, log_p_hat_train)

        x_test_p = mdl.predict(transformer.transform_batch(x_test, [t_ind] * len(x_test)),
                               batch_size=1024)
        scores += dirichlet_normality_score(mle_alpha_t, x_test_p)

    scores /= transformer.n_transforms
    labels = y_test.flatten() == single_class_ind

    res_file_name = '{}_transformations_{}_{}.npz'.format(
        dataset_name,
        get_class_name_from_index(single_class_ind, dataset_name),
        datetime.now().strftime('%Y-%m-%d-%H%M'))
    res_file_path = os.path.join(RESULTS_DIR, dataset_name, res_file_name)
    save_roc_pr_curve_data(scores, labels, res_file_path)

    mdl_weights_name = '{}_transformations_{}_{}_weights.h5'.format(
        dataset_name,
        get_class_name_from_index(single_class_ind, dataset_name),
        datetime.now().strftime('%Y-%m-%d-%H%M'))
    mdl_weights_path = os.path.join(RESULTS_DIR, dataset_name, mdl_weights_name)
    mdl.save_weights(mdl_weights_path)

    gpu_q.put(gpu_to_use)
Example #7
def _transformations_experiment(dataset_load_fn, dataset_name,
                                single_class_ind, gpu_q):
    (x_train, y_train), (x_test, y_test) = dataset_load_fn()

    if dataset_name in ['cats-vs-dogs']:
        transformer = Transformer(16, 16)
        n, k = (16, 8)
    else:
        transformer = Transformer(8, 8)
        n, k = (10, 4)
    mdl = create_wide_residual_network(x_train.shape[1:],
                                       transformer.n_transforms, n, k)
    mdl.compile('adam',
                'categorical_crossentropy',
                metrics=['acc'])

    mdl.summary()  # summary() prints directly; wrapping it in print() would also print None

    # get inliers of specific class
    x_train_task = x_train[y_train.flatten() == single_class_ind]
    print(x_train_task.shape)
    # [0_i, ..., (N_transforms-1)_i, ..., ..., 0_N_samples, ...,
    # (N_transforms-1)_N_samples] shape: (N_transforms*N_samples,)
    transformations_inds = np.tile(np.arange(transformer.n_transforms),
                                   len(x_train_task))
    print(len(transformations_inds))
    #
    start_time = time.time()
    x_train_task_transformed = transformer.transform_batch(
        np.repeat(x_train_task, transformer.n_transforms, axis=0),
        transformations_inds)
    time_usage = str(
        datetime.timedelta(seconds=int(round(time.time() - start_time))))
    print("Time to perform transforms: " + time_usage)
    print(x_train_task_transformed.shape)
    batch_size = 128

    start_time = time.time()
    mdl.fit(x=x_train_task_transformed,
            y=to_categorical(transformations_inds),
            batch_size=batch_size,
            epochs=int(np.ceil(200 / transformer.n_transforms)))
    time_usage = str(
        datetime.timedelta(seconds=int(round(time.time() - start_time))))
    print("Time to train model: " + time_usage)
Example #8
def _transformations_experiment(dataset_load_fn, dataset_name,
                                single_class_ind, gpu_q):
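    # Same Dirichlet-score experiment as above, but the full matrix of softmax
    # outputs is also stored and three extra scores are derived from it: the
    # trace ("simple"), the entropy, and the xH score.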
    # gpu_to_use = gpu_q.get()
    # os.environ["CUDA_VISIBLE_DEVICES"] = gpu_to_use

    (x_train, y_train), (x_test, y_test) = dataset_load_fn()

    if dataset_name in ['cats-vs-dogs']:
        transformer = Transformer(16, 16)
        n, k = (16, 8)
    else:
        transformer = Transformer(8, 8)
        n, k = (10, 4)
    mdl = create_wide_residual_network(x_train.shape[1:],
                                       transformer.n_transforms, n, k)
    mdl.compile('adam', 'categorical_crossentropy', ['acc'])

    # get inliers of specific class
    x_train_task = x_train[y_train.flatten() == single_class_ind]
    # [0_i, ..., (N_transforms-1)_i, ..., ..., 0_N_samples, ...,
    # (N_transforms-1)_N_samples] shape: (N_transforms*N_samples,)
    transformations_inds = np.tile(np.arange(transformer.n_transforms),
                                   len(x_train_task))
    x_train_task_transformed = transformer.transform_batch(
        np.repeat(x_train_task, transformer.n_transforms, axis=0),
        transformations_inds)
    batch_size = 128

    mdl.fit(x=x_train_task_transformed,
            y=to_categorical(transformations_inds),
            batch_size=batch_size,
            epochs=int(np.ceil(200 / transformer.n_transforms)))

    scores = np.zeros((len(x_test), ))
    matrix_evals = np.zeros(
        (len(x_test), transformer.n_transforms, transformer.n_transforms))
    observed_data = x_train_task
    for t_ind in range(transformer.n_transforms):
        observed_dirichlet = mdl.predict(transformer.transform_batch(
            observed_data, [t_ind] * len(observed_data)),
                                         batch_size=1024)
        log_p_hat_train = np.log(observed_dirichlet).mean(axis=0)

        alpha_sum_approx = calc_approx_alpha_sum(observed_dirichlet)
        alpha_0 = observed_dirichlet.mean(axis=0) * alpha_sum_approx

        mle_alpha_t = fixed_point_dirichlet_mle(alpha_0, log_p_hat_train)

        x_test_p = mdl.predict(transformer.transform_batch(
            x_test, [t_ind] * len(x_test)),
                               batch_size=1024)
        matrix_evals[:, :, t_ind] += x_test_p
        scores += dirichlet_normality_score(mle_alpha_t, x_test_p)

    scores /= transformer.n_transforms
    matrix_evals /= transformer.n_transforms
    scores_simple = np.trace(matrix_evals, axis1=1, axis2=2)
    scores_entropy = get_entropy(matrix_evals)
    scores_xH = get_xH(transformer, matrix_evals)
    labels = y_test.flatten() == single_class_ind

    save_results_file(dataset_name,
                      single_class_ind,
                      scores=scores,
                      labels=labels,
                      experiment_name='transformations')
    save_results_file(dataset_name,
                      single_class_ind,
                      scores=scores_simple,
                      labels=labels,
                      experiment_name='transformations-simple')
    save_results_file(dataset_name,
                      single_class_ind,
                      scores=scores_entropy,
                      labels=labels,
                      experiment_name='transformations-entropy')
    save_results_file(dataset_name,
                      single_class_ind,
                      scores=scores_xH,
                      labels=labels,
                      experiment_name='transformations-xH')

    mdl_weights_name = '{}_transformations_{}_{}_weights.h5'.format(
        dataset_name, get_class_name_from_index(single_class_ind,
                                                dataset_name),
        datetime.datetime.now().strftime('%Y-%m-%d-%H%M'))
    mdl_weights_path = os.path.join(RESULTS_DIR, dataset_name,
                                    mdl_weights_name)
    mdl.save_weights(mdl_weights_path)
Example #9
def octest0(hyper_para):
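    # Scores the source test set, then the concatenated train+test sets of the
    # target dataset, with the same one-class transformation network and a
    # single transformation (t = 0); both AUROCs are appended to AUC.txt.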
    C = wrn.WideResNet(28, num_classes=72, dropout_rate=0,
                       widen_factor=10).cuda()
    C.load_state_dict(
        torch.load(hyper_para.experiment_name + '_' +
                   str(hyper_para.inclass[0]) + '.pth'))
    single_class_ind = hyper_para.inclass[0]
    if hyper_para.source == 'mnist':
        (x_train, y_train), (x_test, y_test) = load_mnist()
    elif hyper_para.source == 'svhn':
        (x_train, y_train), (x_test, y_test) = load_svhn()
    elif hyper_para.source == 'amazon':
        (x_train, y_train), (x_test, y_test) = load_amazon()
    elif hyper_para.source == 'dslr':
        (x_train, y_train), (x_test, y_test) = load_dslr()
    transformer = Transformer(8, 8)

    glabels = y_test.flatten() == single_class_ind

    print(hyper_para.source)
    print(len(x_test))
    print(np.sum(glabels))
    C.cuda().eval()
    scores = np.array([[]])
    features = []

    correct = 0
    total = 0
    preds = np.zeros((len(x_test), transformer.n_transforms))
    for t in range(1):
        score = []

        dset = CustomDataset(x_test, y_test)
        testLoader = DataLoader(dset, batch_size=128, shuffle=False)
        transformer = Transformer(8, 8)

        for i, (inputs, labels) in enumerate(testLoader):
            inputs = torch.tensor(
                transformer.transform_batch(inputs.detach().cpu().numpy(),
                                            [t] * len(inputs)))
            inputs = inputs.permute(0, 3, 2, 1)
            inputs = inputs.cuda()
            labels = labels.cuda()
            act, f = C(inputs)
            features += f.detach().cpu().tolist()
            #act = torch.nn.functional.softmax(act, dim=1)
            score += act[:, t].detach().cpu().tolist()
        preds[:, t] = list(score)
        fpr, tpr, thresholds = metrics.roc_curve(glabels, score)
        AUC = metrics.auc(fpr, tpr)
        #print('AUROC: ' + str(AUC) )

    scores = preds.sum(axis=1)
    fpr, tpr, thresholds = metrics.roc_curve(glabels, scores)
    AUC = metrics.auc(fpr, tpr)
    print('AUROC: ' + str(AUC))
    with open("AUC.txt", "a") as file1:  # append mode
        file1.write(hyper_para.source + "\t" + hyper_para.method + "\t" +
                    str(hyper_para.inclass[0]) + "\t" +
                    hyper_para.experiment_name + "\t" + str(AUC) + "\n")

    C = wrn.WideResNet(28, num_classes=72, dropout_rate=0,
                       widen_factor=10).cuda()
    C.load_state_dict(
        torch.load(hyper_para.experiment_name + '_' +
                   str(hyper_para.inclass[0]) + '.pth'))
    single_class_ind = hyper_para.inclass[0]
    if hyper_para.target == 'mnist':
        (x_train, y_train), (x_test, y_test) = load_mnist()
    elif hyper_para.target == 'svhn':
        (x_train, y_train), (x_test, y_test) = load_svhn()
    elif hyper_para.target == 'amazon':
        (x_train, y_train), (x_test, y_test) = load_amazon()
    elif hyper_para.target == 'dslr':
        (x_train, y_train), (x_test, y_test) = load_dslr()
    transformer = Transformer(8, 8)

    C.cuda().eval()
    scores = np.array([[]])
    features = []

    correct = 0
    total = 0
    preds = np.zeros((len(x_test) + len(x_train), transformer.n_transforms))
    for t in range(1):
        score = []

        dset = CustomDataset(np.concatenate((x_test, x_train), 0),
                             np.concatenate((y_test, y_train), 0))
        testLoader = DataLoader(dset, batch_size=128, shuffle=False)
        transformer = Transformer(8, 8)

        for i, (inputs, labels) in enumerate(testLoader):
            inputs = torch.tensor(
                transformer.transform_batch(inputs.detach().cpu().numpy(),
                                            [t] * len(inputs)))
            inputs = inputs.permute(0, 3, 2, 1)
            inputs = inputs.cuda()
            labels = labels.cuda()
            act, f = C(inputs)
            features += f.detach().cpu().tolist()
            #act = torch.nn.functional.softmax(act, dim=1)
            score += act[:, t].detach().cpu().tolist()
        preds[:, t] = list(score)
        #print('AUROC: ' + str(AUC) )
    glabels = (np.concatenate((y_test, y_train))).flatten() == single_class_ind

    scores = preds.sum(axis=1)
    fpr, tpr, thresholds = metrics.roc_curve(glabels, scores)
    AUC = metrics.auc(fpr, tpr)
    print('AUROC: ' + str(AUC))
    with open("AUC.txt", "a") as file1:  # append mode
        file1.write(hyper_para.target + "\t" + hyper_para.method + "\t" +
                    str(hyper_para.inclass[0]) + "\t" +
                    hyper_para.experiment_name + "\t" + str(AUC) + "\n")
Example #10
def _transformations_experiment(dataset_load_fn, dataset_name,
                                single_class_ind, gpu_q):
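    # Mixed Keras/PyTorch variant: the original Keras training and scoring
    # paths are kept commented out. A PyTorch WideResNet is loaded from
    # 'out.pth' and scored with the logit of transformation 6 on the CIFAR-10
    # test set; the final ROC AUC is appended to guru99.txt.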
    gpu_to_use = gpu_q.get()
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu_to_use

    (x_train, y_train), (x_test, y_test) = dataset_load_fn()

    if dataset_name in ['cats-vs-dogs']:
        transformer = Transformer(16, 16)
        n, k = (16, 8)
    else:
        transformer = Transformer(8, 8)
        n, k = (10, 4)
    #mdl = create_wide_residual_network(x_train.shape[1:], transformer.n_transforms, n, k)
    '''mdl.compile('adam',
                'categorical_crossentropy',
                ['acc'])'''

    x_train_task = x_train[y_train.flatten() == single_class_ind]
    #x_train_task =     x_train_task[0:15][:][:][:]
    transformations_inds = np.tile(np.arange(transformer.n_transforms),
                                   len(x_train_task))
    x_train_task_transformed = transformer.transform_batch(
        np.repeat(x_train_task, transformer.n_transforms, axis=0),
        transformations_inds)
    batch_size = 32
    '''mdl.fit(x=x_train_task_transformed, y=to_categorical(transformations_inds),
                batch_size=batch_size, epochs=int(np.ceil(200/transformer.n_transforms)))
    mdl_weights_name = '{}_transformations_{}_weights.h5'.format(dataset_name,
                                                               get_class_name_from_index(single_class_ind, dataset_name))
    #mdl.load_weights('cifar10/'+mdl_weights_name)

    #################################################################################################
    # simplified normality score
    #################################################################################################
    preds = np.zeros((len(x_test), transformer.n_transforms))
    for t in [1]:#range(1):
         preds[:, t] = mdl.predict(transformer.transform_batch(x_test, [t] * len(x_test)),
                                   batch_size=batch_size)[:, t]
    
    labels = y_test.flatten() == single_class_ind
    scores = preds.mean(axis=-1)
    #print("Accuracy : "+ str(scores))
    #################################################################################################'''
    dset = CustomDataset(x_train_task_transformed, transformations_inds)
    trainLoader = DataLoader(dset, batch_size=32, shuffle=True)
    ce_criterion = nn.CrossEntropyLoss()
    C = wrn.WideResNet(28, num_classes=72, dropout_rate=0,
                       widen_factor=10).cuda()
    C = nn.DataParallel(C)
    optimizer_c = optim.Adam(C.parameters(), lr=0.001, betas=(0.9, 0.999))
    C.train(mode=True)
    '''for epoch in range(int(1)):
        for (inputs, labels) in trainLoader:
            inputs = inputs.permute(0,3,2,1)
            if True:
                inputs = inputs.cuda()
                labels = labels.cuda()
            act,_ = C(inputs)
            loss_cc = ce_criterion(act, labels)
            optimizer_c.zero_grad()
            loss_cc.backward()
            optimizer_c.step()
            #running_cc += loss_cc.data


            running_cc = 0.0
    torch.save(C.cpu().state_dict(),  'out.pth')'''

    C = C.cpu()
    C.load_state_dict(torch.load('out.pth'))
    (x_train, y_train), (x_test, y_test) = load_cifar10()
    transformer = Transformer(8, 8)
    glabels = y_test.flatten() == single_class_ind
    C.cuda().eval()
    scores = np.array([[]])
    features = []

    correct = 0
    total = 0
    preds = np.zeros((len(x_test), transformer.n_transforms))
    for t in [6, 6]:  #range(72):
        score = []
        x_test0 = transformer.transform_batch(x_test, [t] * len(x_test))
        dset = CustomDataset(x_test0, [t] * len(x_test))
        testLoader = DataLoader(dset, batch_size=128, shuffle=False)
        for i, (inputs, labels) in enumerate(testLoader):
            inputs = inputs.permute(0, 3, 2, 1)
            inputs = inputs.cuda()
            labels = labels.cuda()
            act, f = C(inputs)
            features += f.detach().cpu().tolist()
            #act = torch.nn.functional.softmax(act, dim=1)
            score += act[:, t].detach().cpu().tolist()
        preds[:, t] = list(score)
        fpr, tpr, thresholds = metrics.roc_curve(glabels, score)
        AUC = metrics.auc(fpr, tpr)
        print('AUROC: ' + str(AUC))

    scores = preds.sum(axis=1)
    fpr, tpr, thresholds = metrics.roc_curve(glabels, scores)
    AUC = metrics.auc(fpr, tpr)
    print('AUROC: ' + str(AUC))

    def calc_approx_alpha_sum(observations):
        N = len(observations)
        f = np.mean(observations, axis=0)

        return (N * (len(f) - 1) *
                (-psi(1))) / (N * np.sum(f * np.log(f)) -
                              np.sum(f * np.sum(np.log(observations), axis=0)))

    def inv_psi(y, iters=5):
        # initial estimate
        cond = y >= -2.22
        x = cond * (np.exp(y) + 0.5) + (1 - cond) * -1 / (y - psi(1))

        for _ in range(iters):
            x = x - (psi(x) - y) / polygamma(1, x)
        return x

    def fixed_point_dirichlet_mle(alpha_init, log_p_hat, max_iter=1000):
        alpha_new = alpha_old = alpha_init
        for _ in range(max_iter):
            alpha_new = inv_psi(psi(np.sum(alpha_old)) + log_p_hat)
            if np.sqrt(np.sum((alpha_old - alpha_new)**2)) < 1e-9:
                break
            alpha_old = alpha_new
        return alpha_new

    def dirichlet_normality_score(alpha, p):
        return np.sum((alpha - 1) * np.log(p), axis=-1)

    '''scores = np.zeros((len(x_test),))
    observed_data = x_train_task
    for t_ind in range(transformer.n_transforms):
        observed_dirichlet = mdl.predict(transformer.transform_batch(observed_data, [t_ind] * len(observed_data)),
                                         batch_size=64)
        log_p_hat_train = np.log(observed_dirichlet).mean(axis=0)

        alpha_sum_approx = calc_approx_alpha_sum(observed_dirichlet)
        alpha_0 = observed_dirichlet.mean(axis=0) * alpha_sum_approx

        mle_alpha_t = fixed_point_dirichlet_mle(alpha_0, log_p_hat_train)

        x_test_p = mdl.predict(transformer.transform_batch(x_test, [t_ind] * len(x_test)),
                               batch_size=64)
        scores += dirichlet_normality_score(mle_alpha_t, x_test_p)'''

    scores /= transformer.n_transforms
    labels = y_test.flatten() == single_class_ind
    r = roc_auc_score(labels, scores)
    with open("guru99.txt", "a") as f:
        f.write(str(r) + "\n")
    '''res_file_name = '{}_transformations_{}_{}.npz'.format(
        dataset_name,
        get_class_name_from_index(single_class_ind, dataset_name),
        datetime.now().strftime('%Y-%m-%d-%H%M'))
    res_file_path = os.path.join(RESULTS_DIR, dataset_name, res_file_name)
    save_roc_pr_curve_data(scores, labels, res_file_path)'''
Example #11
def octest(hyper_para):
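    # Combined score per test image: the predicted-class transformation
    # network's summed transformation logits and the closed-set classifier's
    # max logit, each z-normalized with statistics estimated on the inlier
    # training data (mu/std and muc/stdc below), added together.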
    CC = wrn.WideResNet(28, num_classes=10, dropout_rate=0,
                        widen_factor=10).cuda()
    CC.load_state_dict(torch.load(hyper_para.experiment_name + '.pth'))
    C = wrn.WideResNet(28, num_classes=72, dropout_rate=0,
                       widen_factor=10).cuda()
    C.cuda().eval()
    CC.cuda().eval()

    muc = {}
    stdc = {}
    mu = {}
    std = {}
    for cname in hyper_para.inclass:
        single_class_ind = cname
        C.load_state_dict(
            torch.load('saved/' + hyper_para.experiment_name + '_' +
                       str(cname) + '.pth'))
        (x_train, y_train), (x_test, y_test) = load_cifar10()
        transformer = Transformer(8, 8)
        x_train_task0 = x_train[[
            i in hyper_para.inclass for i in y_train.flatten()
        ]]
        y_train_task = y_train[[
            i in hyper_para.inclass for i in y_train.flatten()
        ]]

        for t in range(72):
            x_train_task = transformer.transform_batch(x_train_task0,
                                                       [t] * len(y_train_task))
            dset = CustomDataset(x_train_task, y_train_task)
            trainLoader = DataLoader(dset, batch_size=128, shuffle=True)
            features = []
            for inputs0, labels in trainLoader:
                inputs = inputs0.permute(0, 3, 2, 1).cuda()
                act, f = C(inputs)
                features += act[:, t].detach().cpu().tolist()
            if t == 0:
                totfeatures = features
            else:
                totfeatures += features

        mu[str(cname)] = np.mean(totfeatures)
        std[str(cname)] = np.sqrt(np.var(totfeatures))

    features = {}

    if True:
        (x_train, y_train), (x_test, y_test) = load_cifar10()
        x_train_task = x_train[[
            i in hyper_para.inclass for i in y_train.flatten()
        ]]
        y_train_task = y_train[[
            i in hyper_para.inclass for i in y_train.flatten()
        ]]
        dset = CustomDataset(x_train_task, y_train_task)
        trainLoader = DataLoader(dset, batch_size=128, shuffle=True)
        for inputs0, labels0 in trainLoader:
            inputs = inputs0.permute(0, 3, 2, 1).cuda()
            act, f = CC(inputs)
            val, ind = torch.max(act, dim=1)
            val, ind, labels0 = (val.detach().cpu().tolist(),
                                 ind.detach().cpu().tolist(),
                                 labels0.detach().cpu().tolist())
            for idx, (ii, gt) in enumerate(zip(ind, labels0)):
                gt = gt[0]
                if ii == gt and gt in hyper_para.inclass:
                    if str(ii) not in features.keys():
                        features[str(ii)] = [
                            act[idx, ii].detach().cpu().tolist()
                        ]
                    else:
                        features[str(ii)] += [
                            act[idx, ii].detach().cpu().tolist()
                        ]
                    print(np.shape(features[str(ii)]))
        for k in features.keys():
            muc[str(k)] = np.mean(features[str(k)])
            stdc[str(k)] = np.sqrt(np.var(features[str(k)]))

    (x_train, y_train), (x_test, y_test) = load_cifar10()
    transformer = Transformer(8, 8)

    scores = np.array([[]])
    features = []
    lbl = []
    correct = 0
    total = 0
    preds = np.zeros((len(x_test), transformer.n_transforms))
    dset = CustomDataset(x_test, y_test)
    testLoader = DataLoader(dset, batch_size=128)
    score = []
    for i, (inputs0, labels) in enumerate(testLoader):
        inputs = inputs0.permute(0, 3, 2, 1)
        inputs = inputs.cuda()
        labels = labels.cuda()

        act0, f = CC(inputs)
        features += f.detach().cpu().tolist()
        val, ind = torch.max(act0, dim=1)
        val, ind, labels = (val.detach().cpu().tolist(),
                            ind.detach().cpu().tolist(),
                            labels.detach().cpu().tolist())
        #act = torch.nn.functional.softmax(act, dim=1)

        #score += val

        for idx, (ii, gt) in enumerate(zip(ind, labels)):
            C.load_state_dict(
                torch.load('saved/' + hyper_para.experiment_name + '_' +
                           str(ii) + '.pth'))
            gt = gt[0]
            score_temp = []
            for t in range(72):
                x_test0 = transformer.transform_batch(
                    torch.unsqueeze(inputs0[idx, :, :, :],
                                    0).detach().cpu().numpy(), [t])
                inputs = torch.tensor(x_test0).permute(0, 3, 2, 1).cuda()
                act, _ = C(inputs)
                act = act[:, t]
                act = act.detach().cpu().tolist()
                if t == 0:
                    score_temp = act[0]
                else:
                    score_temp += act[0]  # act has a single element here; act[t] would raise IndexError
            score += [(score_temp - mu[str(ii)]) / (std[str(ii)]) +
                      (val[idx] - muc[str(ii)]) / (stdc[str(ii)])]

            if gt in hyper_para.inclass:
                total += 1
                if ii == gt:
                    correct += 1
                lbl.append(1)
            else:
                lbl.append(0)

    fpr, tpr, thresholds = metrics.roc_curve(lbl, score)
    AUC = metrics.auc(fpr, tpr)
    print('AUROC: ' + str(AUC))
    return [0, 0]