def energyevalmix(dataloader, test_dataloader, target_vars, sess):
    """Score out-of-distribution detection via energies and report AUROC.

    For each test batch, computes the model energy of the in-distribution
    images (positives) and of a "mix" batch (negatives) drawn from SVHN /
    CIFAR-100 / textures / uniform noise / pairwise image blends, depending
    on FLAGS. Negated energies are used as scores; positives/negatives are
    saved to pos.npy / neg.npy and the ROC AUC is printed.

    Args:
        dataloader: unused here (kept for interface compatibility).
        test_dataloader: yields (data_corrupt, data, label_gt) batches.
        target_vars: dict with graph tensors 'X', 'Y_GT', 'energy'.
        sess: TensorFlow session used to evaluate the energy.
    """
    X = target_vars['X']
    Y_GT = target_vars['Y_GT']
    energy = target_vars['energy']

    def _make_mix_loader(dataset):
        # Single place for the DataLoader configuration that was
        # previously copy-pasted three times.
        return DataLoader(dataset,
                          batch_size=FLAGS.batch_size,
                          num_workers=FLAGS.data_workers,
                          shuffle=True,
                          drop_last=False)

    test_iter = None
    if FLAGS.svhnmix:
        test_iter = iter(_make_mix_loader(Svhn(train=False)))
    elif FLAGS.cifar100mix:
        test_iter = iter(_make_mix_loader(Cifar100(train=False)))
    elif FLAGS.texturemix:
        test_iter = iter(_make_mix_loader(Textures()))

    def _next_mix_batch():
        # BUGFIX: `.next()` was removed from Python-3 iterators (and from
        # torch DataLoader iterators); use the builtin next(). Also restart
        # the iterator if the mix dataset is shorter than the test set.
        nonlocal test_iter
        try:
            return next(test_iter)
        except StopIteration:
            test_iter = iter(_make_mix_loader(
                Svhn(train=False) if FLAGS.svhnmix
                else Cifar100(train=False) if FLAGS.cifar100mix
                else Textures()))
            return next(test_iter)

    probs = []
    labels = []
    negs = []
    pos = []
    for data_corrupt, data, label_gt in tqdm(test_dataloader):
        data = data.numpy()
        data_corrupt = data_corrupt.numpy()
        if FLAGS.svhnmix or FLAGS.cifar100mix or FLAGS.texturemix:
            _, data_mix, _ = _next_mix_batch()
        elif FLAGS.randommix:
            # Uniform-ish noise centered at 0.5 in image range.
            data_mix = np.random.randn(FLAGS.batch_size, 32, 32, 3) * 0.5 + 0.5
        else:
            # Blend each image with its neighbour (index wraps around).
            data_idx = np.concatenate([np.arange(1, data.shape[0]), [0]])
            data_other = data[data_idx]
            data_mix = (data + data_other) / 2

        # The mix loader may return a full batch even when the test batch
        # is short (last batch); trim so shapes line up.
        data_mix = data_mix[:data.shape[0]]

        if FLAGS.cclass:
            # It's unfair to take a random class
            # Evaluate every image under all 10 class labels.
            label_gt = np.tile(np.eye(10), (data.shape[0], 1, 1))
            label_gt = label_gt.reshape(data.shape[0] * 10, 10)
            data_mix = np.tile(data_mix[:, None, :, :, :], (1, 10, 1, 1, 1))
            data = np.tile(data[:, None, :, :, :], (1, 10, 1, 1, 1))

            data_mix = data_mix.reshape(-1, 32, 32, 3)
            data = data.reshape(-1, 32, 32, 3)

        feed_dict = {X: data, Y_GT: label_gt}
        feed_dict_neg = {X: data_mix, Y_GT: label_gt}

        pos_energy = sess.run([energy], feed_dict)[0]
        neg_energy = sess.run([energy], feed_dict_neg)[0]

        if FLAGS.cclass:
            # Score each image by its best (lowest-energy) class.
            pos_energy = pos_energy.reshape(-1, 10).min(axis=1)
            neg_energy = neg_energy.reshape(-1, 10).min(axis=1)

        # Lower energy = more in-distribution, so negate for scoring.
        probs.extend(list(-1 * pos_energy))
        probs.extend(list(-1 * neg_energy))
        pos.extend(list(-1 * pos_energy))
        negs.extend(list(-1 * neg_energy))
        labels.extend([1] * pos_energy.shape[0])
        labels.extend([0] * neg_energy.shape[0])

    pos, negs = np.array(pos), np.array(negs)
    np.save("pos.npy", pos)
    np.save("neg.npy", negs)
    auroc = sk.roc_auc_score(labels, probs)
    print("Roc score of {}".format(auroc))
# ---- Example #2 ----
    # NOTE(review): fragment — the enclosing function and the argparse
    # `parser` are defined above this view; the Generator(...) call at the
    # bottom is also truncated mid-arguments.
    args = parser.parse_args()

    # Seed all RNGs for reproducibility; this script requires CUDA and
    # exits otherwise.
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if torch.cuda.is_available(): torch.cuda.manual_seed(args.seed)
    else:
        print('[CUDA unavailable]')
        sys.exit()

    # Create the checkpoint directory tree for this experiment.
    if not os.path.exists('asset/checkpoints'):
        os.makedirs('asset/checkpoints')
    if not os.path.exists('asset/checkpoints/' + args.experiment):
        os.makedirs('asset/checkpoints/' + args.experiment)

    dataset = Cifar100()
    # Two PreResNet-32 models with identical architecture — presumably
    # short-term / long-term memory networks; TODO confirm from context.
    stm = PreResNet(depth=32, num_classes=100)
    ltm = PreResNet(depth=32, num_classes=100)

    # For every 3x3 Conv2d whose input-channel count divides evenly by
    # args.units_x, record output_channels // units_x as a segment size.
    seg = []
    for module in ltm.modules():
        if isinstance(module, nn.Conv2d) and module.weight.data.shape[
                1] % args.units_x == 0 and module.weight.data.shape[2] == 3:
            seg.append(module.weight.data.shape[0] // args.units_x)
    print("seg: ", seg)

    dim_output = 64  # output dim of PreResNet
    # Generator output covers all segments times the number of units.
    # NOTE(review): remaining keyword arguments are outside this view.
    gen = Generator(in_features=dim_output + args.hidden_dim,
                    out_features=sum(seg) * args.num_units,
                    total_class=args.total_class,
                    hidden_dim=args.hidden_dim,
# ---- Example #3 ----
    # NOTE(review): fragment — `keys`, `dims`, `weights`, `idx`, `w` and
    # the enclosing def are above this view. Unpacks a flat weight vector
    # into a dict of arrays, one entry per key, each reshaped to dims[k].
    for k in keys:
        d = dims[k]
        size = np.prod(d)
        # Consume the next `size` entries of the flat vector.
        values = weights[idx:idx + size]

        idx += size
        w[k] = values.reshape(d)
    return w


# Session config: grow GPU memory on demand instead of grabbing it all,
# and log which device each op is placed on.
config = ConfigProto()
config.gpu_options.allow_growth = True
tf.debugging.set_log_device_placement(True)

# Training set — CIFAR-100 selected; alternatives left commented out.
#X, Y = Cifar10(train=True)
X, Y = Cifar100(train=True)
#X, Y = mnist(train=True)

xTrain = X
yTrain = Y

# Validation set from the same loader with train=False.
#X, Y = Cifar10(train=False)
X, Y = Cifar100(train=False)
#X, Y = mnist(train=False)

xVal = X
yVal = Y

# NOTE(review): assumes NHWC images and one-hot labels — confirm against
# the data loaders. The print below is truncated in this view.
_, w, h, nChannels = xTrain.shape
nSamples, nClasses = yTrain.shape
print('Number of training samples: ', nSamples, ' Number of classes: ',
# ---- Example #4 ----
# Evaluation script (TF1 graph mode): load a pretrained VGG16 and build
# cross-entropy / accuracy ops over the CIFAR-100 test set.
from data import Cifar10, Cifar100,mnist
from VGG import VGG16

import numpy as np
import tensorflow as tf 

from sklearn.metrics import accuracy_score, log_loss

# Test set — CIFAR-100 selected; alternatives left commented out.
xTest, yTest = Cifar100(train=False)
#xTest, yTest = Cifar10(train=False)
#xTest, yTest = mnist(train=False)
# NOTE(review): assumes NHWC images and one-hot labels — confirm loader.
_,w,h,nChannels = xTest.shape
nSamples, nClasses = yTest.shape

sess = tf.Session()

# Graph inputs: image batch and one-hot labels.
xInput = tf.placeholder(tf.float32, [None, w, h, nChannels])
yInput = tf.placeholder(tf.float32, [None, nClasses])

# train=False presumably disables dropout — TODO confirm against VGG16;
# the meaning of the second positional argument (0) is not visible here.
vgg = VGG16(xInput,0,nClasses, train=False)
logits = vgg.fc8

# Restore pretrained weights from an .npz archive into the session.
parameters = np.load("SGD.npz")
vgg.loadW(parameters, sess)

# Mean cross-entropy over the batch; _v2 expects raw logits.
crossEntropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(
	labels=yInput, logits=logits))
predictions = tf.nn.softmax(logits)
# Top-1 accuracy: fraction of argmax predictions matching the labels.
acc = tf.equal(tf.argmax(predictions,1), tf.argmax(yInput,1))
accuracy = tf.reduce_mean(tf.cast(acc, tf.float32))