def test_battery(model, dataname):
    """Run the evaluation battery for *model* on the given dataset.

    Currently runs the interpolation tests; the adversarial and geometric
    deformation (shear / rotation / zoom) sweeps are kept below behind an
    explicit flag instead of unreachable dead code.

    Args:
        model: trained network to evaluate.
        dataname: dataset identifier, "cifar10" or "cifar100".

    Raises:
        ValueError: if *dataname* is not a supported dataset.
    """
    if dataname == "cifar10":
        data_source_dir = '../data/cifar10/'
        classes = 10
    elif dataname == "cifar100":
        data_source_dir = '../data/cifar100/'
        classes = 100
    else:
        # Previously an unknown name fell through and crashed later with a
        # NameError on data_source_dir; fail fast with a clear message.
        raise ValueError("unsupported dataset: %r" % (dataname,))

    train_loader, valid_loader, unlabelled_loader, test_loader, num_classes = \
        load_data_subset(0, 128, 2, dataname, data_source_dir, 0.0,
                         labels_per_class=5000)

    #run_test_adversarial(True, model, test_loader, num_classes=num_classes,
    #                     attack_type='pgd',
    #                     attack_params={'eps': 0.03, 'eps_iter': 0.01, 'iter': 200})

    interpolation_tests(test_loader, model, classes)
    #adversarial_example_tests(test_loader, model, classes)

    # NOTE(review): the sweeps below were disabled with a bare `return`,
    # leaving them as dead code; gate them on a flag instead so the intent
    # is explicit and the code stays reachable when re-enabled.
    run_deformation_experiments = False
    if not run_deformation_experiments:
        return

    # Shearing robustness sweep.
    res_lst = []
    shear_lst = [0.5, 1.0, 2.0, 2.5, 3.0]
    for shearing in shear_lst:
        deform_transform = [Shear(shearing)]
        acc = deformation_tests(model, dataname, data_source_dir, deform_transform)
        print("shearing", shearing, acc)
        res_lst.append(acc)  # was commented out: summary below printed []

    print("shearing exp")
    print(shear_lst)
    print(res_lst)

    # Rotation robustness sweep.
    res_lst = []
    rot_lst = [20.0, 40.0, 60.0, 80.0]
    for rotation in rot_lst:
        deform_transform = [Rotation(rotation)]
        acc = deformation_tests(model, dataname, data_source_dir, deform_transform)
        print("rot", rotation, acc)
        res_lst.append(acc)

    print("rotation exp")
    print(rot_lst)
    print(res_lst)

    # Zoom robustness sweep.
    res_lst = []
    zoom_lst = [0.2, 0.4, 0.6, 0.8, 1.2, 1.4, 1.6, 1.8]
    for zoom in zoom_lst:
        deform_transform = [Zoom((zoom, zoom))]
        acc = deformation_tests(model, dataname, data_source_dir, deform_transform)
        print("zoom", zoom, acc)
        res_lst.append(acc)

    print("zoom exp")
    print(zoom_lst)
    print(res_lst)
# Exemple #2
# 0
def load_mnist(data_aug, batch_size, test_batch_size, cuda, data_target_dir):
    """Build MNIST train/test DataLoaders, optionally with augmentation.

    When ``data_aug == 1`` the training pipeline random-crops to 24x24 and
    applies a small random rotation and zoom (test images are center-cropped
    to match); otherwise plain 28x28 tensors are used. Both branches
    normalize with the standard MNIST statistics.

    Returns:
        (train_loader, test_loader) tuple of ``torch.utils.data.DataLoader``.
    """
    # Normalize is stateless, so one instance can be shared by all pipelines.
    normalize = transforms.Normalize((0.1307, ), (0.3081, ))

    if data_aug == 1:
        crop_size = 24
        transform_train = transforms.Compose([
            transforms.RandomCrop(crop_size),
            transforms.ToTensor(),
            Rotation(15),
            Zoom((0.85, 1.15)),
            normalize,
        ])
        transform_test = transforms.Compose([
            transforms.CenterCrop(crop_size),
            transforms.ToTensor(),
            normalize,
        ])
    else:
        transform_train = transforms.Compose([transforms.ToTensor(), normalize])
        transform_test = transforms.Compose([transforms.ToTensor(), normalize])

    # Pin host memory only when feeding a GPU.
    loader_kwargs = {'num_workers': 0, 'pin_memory': True} if cuda else {}

    train_set = datasets.MNIST(data_target_dir,
                               train=True,
                               download=True,
                               transform=transform_train)
    test_set = datasets.MNIST(data_target_dir,
                              train=False,
                              transform=transform_test)

    train_loader = torch.utils.data.DataLoader(train_set,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               **loader_kwargs)
    test_loader = torch.utils.data.DataLoader(test_set,
                                              batch_size=test_batch_size,
                                              shuffle=True,
                                              **loader_kwargs)

    return train_loader, test_loader
# Exemple #3
# 0
def load_mnist_subset(data_aug,
                      batch_size,
                      test_batch_size,
                      cuda,
                      data_target_dir,
                      n_labels=10,
                      labels_per_class=100):
    """Build semi-supervised MNIST DataLoaders.

    Returns a ``(labelled, unlabelled, validation)`` triple:
    *labelled* draws up to ``labels_per_class`` examples per class from the
    training split, *unlabelled* samples the whole training split, and
    *validation* samples the test split. All three restrict themselves to
    digits ``0 .. n_labels - 1``.

    Args:
        data_aug: 1 enables random-crop / rotation / zoom augmentation.
        batch_size: batch size for the two training loaders.
        test_batch_size: batch size for the validation loader.
        cuda: if truthy, pin memory for faster host-to-device copies.
        data_target_dir: directory where MNIST is stored / downloaded.
        n_labels: number of digit classes to keep.
        labels_per_class: labelled examples retained per class.
    """
    import numpy as np
    from functools import reduce
    from operator import __or__
    from torch.utils.data.sampler import SubsetRandomSampler
    import torchvision.transforms as transforms
    # (removed unused `from torchvision.datasets import MNIST`; the code
    # below uses the module-level `datasets.MNIST`)

    if data_aug == 1:
        hw_size = 24
        transform_train = transforms.Compose([
            transforms.RandomCrop(hw_size),
            transforms.ToTensor(),
            Rotation(15),
            Zoom((0.85, 1.15)),
            transforms.Normalize((0.1307, ), (0.3081, ))
        ])
        transform_test = transforms.Compose([
            transforms.CenterCrop(hw_size),
            transforms.ToTensor(),
            transforms.Normalize((0.1307, ), (0.3081, ))
        ])
    else:
        hw_size = 28
        transform_train = transforms.Compose([
            transforms.ToTensor(),
            # NOTE(review): this branch normalizes to [-1, 1] rather than
            # with the MNIST statistics used in the augmented branch above —
            # presumably intentional for this experiment; confirm.
            transforms.Normalize((0.5, ), (0.5, ))
        ])
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5, ), (0.5, ))
        ])

    kwargs = {'num_workers': 0, 'pin_memory': True} if cuda else {}

    mnist_train = datasets.MNIST(data_target_dir,
                                 train=True,
                                 download=True,
                                 transform=transform_train)
    mnist_valid = datasets.MNIST(data_target_dir,
                                 train=False,
                                 transform=transform_test)

    def get_sampler(labels, n=None):
        """Sampler over indices with label < n_labels, at most *n* per class."""
        # Keep only examples whose digit is one of the first n_labels classes.
        (indices, ) = np.where(
            reduce(__or__, [labels == i for i in np.arange(n_labels)]))

        # Shuffle before truncating so the [:n] slice below picks a random
        # per-class subset rather than the first n in dataset order.
        np.random.shuffle(indices)
        indices = np.hstack([
            list(filter(lambda idx: labels[idx] == i, indices))[:n]
            for i in range(n_labels)
        ])
        indices = torch.from_numpy(indices)
        return SubsetRandomSampler(indices)

    # NOTE(review): `train_labels` / `test_labels` are the legacy torchvision
    # attribute names; newer releases expose both splits as `.targets`.
    labelled = torch.utils.data.DataLoader(
        mnist_train,
        batch_size=batch_size,
        sampler=get_sampler(mnist_train.train_labels.numpy(),
                            labels_per_class),
        **kwargs)
    unlabelled = torch.utils.data.DataLoader(
        mnist_train,
        batch_size=batch_size,
        sampler=get_sampler(mnist_train.train_labels.numpy()),
        **kwargs)
    validation = torch.utils.data.DataLoader(
        mnist_valid,
        batch_size=test_batch_size,
        sampler=get_sampler(mnist_valid.test_labels.numpy()),
        **kwargs)

    return labelled, unlabelled, validation