Example No. 1
    def testExponentialScheduler(self):
        model = models.LeNet(10, [1, 12, 12], channels=2)

        cuda = True
        if cuda:
            model = model.cuda()

        lr = 0.1
        momentum = 0.9
        optimizer = torch.optim.SGD(model.parameters(),
                                    lr=lr,
                                    momentum=momentum)
        gamma = 0.9
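        # The scheduler steps every batch; after finishing epoch e the learning rate is lr * gamma**(e + 1).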
        scheduler = common.train.get_exponential_scheduler(
            optimizer, batches_per_epoch=len(self.trainset), gamma=gamma)
        writer = common.summary.SummaryDictWriter()
        augmentation = None

        trainer = common.train.NormalTraining(model,
                                              self.trainset,
                                              self.testset,
                                              optimizer,
                                              scheduler,
                                              augmentation=augmentation,
                                              writer=writer,
                                              cuda=cuda)
        trainer.summary_gradients = True

        epochs = 10
        for e in range(epochs):
            trainer.step(e)
            self.assertAlmostEqual(scheduler.get_lr()[0], lr * gamma**(e + 1))
Example No. 2
    def testAdversarialTraining(self):
        model = models.LeNet(10, [1, 28, 28], channels=12)

        cuda = True
        if cuda:
            model = model.cuda()

        optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.1)
        writer = torch.utils.tensorboard.SummaryWriter('./logs/')
        augmentation = None

        epsilon = 0.3
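        # L-infinity PGD-style attack: random initialization inside the epsilon-ball,
        # normalized gradient steps with backtracking, projection onto the ball and the [0, 1] box.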
        attack = attacks.BatchGradientDescent()
        attack.max_iterations = 2
        attack.base_lr = 0.1
        attack.momentum = 0
        attack.c = 0
        attack.lr_factor = 1.5
        attack.normalized = True
        attack.backtrack = True
        attack.initialization = attacks.initializations.LInfUniformInitialization(epsilon)
        attack.norm = attacks.norms.LInfNorm()
        attack.projection = attacks.projections.SequentialProjections([attacks.projections.LInfProjection(epsilon), attacks.projections.BoxProjection()])
        objective = attacks.objectives.UntargetedF0Objective()

        trainer = common.train.AdversarialTraining(model, self.trainset, self.testset, optimizer, scheduler, attack, objective, fraction=0.5, augmentation=augmentation, writer=writer, cuda=cuda)
        trainer.summary_gradients = True

        epochs = 10
        trainer.test(-1)
        for e in range(epochs):
            trainer.step(e)
            writer.flush()
            print(e)
Example No. 3
    def testTest(self):
        model = models.LeNet(10, [1, 28, 28], channels=12)

        if self.cuda:
            model = model.cuda()

        model.eval()
        probabilities = common.test.test(model, self.testset, cuda=self.cuda)
        evaluation = common.eval.CleanEvaluation(probabilities,
                                                 self.testset.dataset.labels)
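        # An untrained 10-class model should sit near chance level, i.e., roughly 90% test error.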
        self.assertGreaterEqual(0.05, abs(0.9 - evaluation.test_error()))
Example No. 4
    def testCyclicScheduler(self):
        model = models.LeNet(10, [1, 12, 12], channels=2)

        cuda = True
        if cuda:
            model = model.cuda()

        lr = 0.1
        momentum = 0.9
        optimizer = torch.optim.SGD(model.parameters(),
                                    lr=lr,
                                    momentum=momentum)
        # should move in triangle between 0.01*lr and lr every two epochs
        scheduler = common.train.get_cyclic_scheduler(optimizer,
                                                      batches_per_epoch=len(
                                                          self.trainset),
                                                      base_lr=0.01 * lr,
                                                      max_lr=lr,
                                                      step_size_factor=2)
        writer = common.summary.SummaryDictWriter()
        augmentation = None

        epoch_lrs = [
            (0.01 * lr + lr) / 2,  # lr AFTER first epoch (i.e., for e = 0)
            lr,
            (0.01 * lr + lr) / 2,
            0.01 * lr,
            (0.01 * lr + lr) / 2,
            lr,
            (0.01 * lr + lr) / 2,
            0.01 * lr,
            (0.01 * lr + lr) / 2,
            lr,
            (0.01 * lr + lr) / 2,
            0.01 * lr,
        ]

        trainer = common.train.NormalTraining(model,
                                              self.trainset,
                                              self.testset,
                                              optimizer,
                                              scheduler,
                                              augmentation=augmentation,
                                              writer=writer,
                                              cuda=cuda)
        trainer.summary_gradients = True

        epochs = 10
        for e in range(epochs):
            trainer.step(e)

            self.assertAlmostEqual(scheduler.get_lr()[0], epoch_lrs[e])
Example No. 5
    def testLeNet(self):
        resolutions = [
            [1, 2, 2],
            [1, 3, 3],
            [1, 4, 4],
            [1, 5, 5],
            [1, 4, 5],
            [1, 5, 4],
            [1, 27, 32],
            [1, 32, 27],
            [1, 32, 32],
            [3, 32, 32],
        ]
        channels = [1, 2]
        activations = [
            torch.nn.ReLU,
            torch.nn.Sigmoid,
            torch.nn.Tanh,
        ]
        normalizations = [
            True,
            False
        ]

        clamps = [
            True,
            False
        ]

        scales_and_whitens = [
            (False, False),
            (True, False),
            (False, True),
        ]

        classes = 10
        for resolution in resolutions:
            for channel in channels:
                for activation in activations:
                    for normalization in normalizations:
                        for clamp in clamps:
                            for scale_and_whiten in scales_and_whitens:
                                original_model = models.LeNet(classes, resolution, clamp=clamp, scale=scale_and_whiten[0], whiten=scale_and_whiten[1], channels=channel, activation=activation, normalization=normalization)
                                for parameters in original_model.parameters():
                                    parameters.data.zero_()

                                common.state.State.checkpoint(self.filepath, original_model)
                                state = common.state.State.load(self.filepath)
                                loaded_model = state.model

                                for parameters in loaded_model.parameters():
                                    self.assertEqual(torch.sum(parameters).item(), 0)
Example No. 6
    def testBatchGradientDescentNormalizedBacktrack(self):
        cuda = True
        model_file = 'mnist_lenet.pth.tar'
        if os.path.exists(model_file):
            state = common.state.State.load(model_file)
            model = state.model

            if cuda:
                model = model.cuda()
        else:
            model = models.LeNet(10, [1, 28, 28], channels=12)
            if cuda:
                model = model.cuda()

            optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 5, 0.1)
            writer = common.summary.SummaryWriter()
            augmentation = None

            trainer = common.train.NormalTraining(model, self.trainset, self.testset, optimizer, scheduler, augmentation=augmentation, writer=writer, cuda=cuda)
            for e in range(10):
                trainer.step(e)
                print(e)

            common.state.State.checkpoint(model_file, model, optimizer, scheduler, e)

        model.eval()
        epsilon = 0.3
        writer = torch.utils.tensorboard.SummaryWriter('./logs/')

        attack = attacks.batch_gradient_descent.BatchGradientDescent()
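        # Same L-infinity configuration as the adversarial-training example: normalized
        # gradient steps with backtracking on the step size.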
        attack.max_iterations = 5
        attack.base_lr = 0.1
        attack.momentum = 0
        attack.c = 0
        attack.lr_factor = 1.5
        attack.normalized = True
        attack.backtrack = True
        attack.initialization = attacks.initializations.LInfUniformInitialization(epsilon)
        attack.projection = attacks.projections.SequentialProjections([attacks.projections.LInfProjection(epsilon), attacks.projections.BoxProjection()])
        attack.norm = attacks.norms.LInfNorm()

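        # Take a single batch of test images and labels.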
        for b, (images, labels) in enumerate(self.testset):
            break

        images = common.torch.as_variable(images, cuda).permute(0, 3, 1, 2)
        labels = common.torch.as_variable(labels, cuda)

        objective = attacks.objectives.UntargetedF0Objective()
        objective.set(labels)
        attack.run(model, images, objective, writer=writer)
        writer.flush()
Example No. 7
    def testModelOptimizer(self):
        original_model = models.LeNet(10, [1, 32, 32])
        original_optimizer = torch.optim.SGD(original_model.parameters(), lr=0.01, momentum=0.9)
        state = common.state.State(original_model, original_optimizer)
        state.save(self.filepath)

        state = common.state.State.load(self.filepath)
        loaded_model = state.model
        loaded_optimizer = torch.optim.SGD(loaded_model.parameters(), lr=0.99, momentum=0.1)
        loaded_optimizer.load_state_dict(state.optimizer)

        for param_group in loaded_optimizer.param_groups:
            self.assertEqual(param_group['lr'], 0.01)
            self.assertEqual(param_group['momentum'], 0.9)
Example No. 8
def create_net(num_classes, dnn='resnet20', **kwargs):
    ext = None
    if dnn in ['resnet20', 'resnet56', 'resnet110']:
        net = models.__dict__[dnn](num_classes=num_classes)
    elif dnn == 'resnet50':
        net = torchvision.models.resnet50(num_classes=num_classes)
    elif dnn == 'resnet101':
        net = torchvision.models.resnet101(num_classes=num_classes)
    elif dnn == 'resnet152':
        net = torchvision.models.resnet152(num_classes=num_classes)
    elif dnn == 'densenet121':
        net = torchvision.models.densenet121(num_classes=num_classes)
    elif dnn == 'densenet161':
        net = torchvision.models.densenet161(num_classes=num_classes)
    elif dnn == 'densenet201':
        net = torchvision.models.densenet201(num_classes=num_classes)
    elif dnn == 'inceptionv4':
        net = models.inceptionv4(num_classes=num_classes)
    elif dnn == 'inceptionv3':
        net = torchvision.models.inception_v3(num_classes=num_classes)
    elif dnn == 'vgg16i':  # vgg16 for imagenet
        net = torchvision.models.vgg16(num_classes=num_classes)
    elif dnn == 'googlenet':
        net = models.googlenet()
    elif dnn == 'mnistnet':
        net = MnistNet()
    elif dnn == 'fcn5net':
        net = models.FCN5Net()
    elif dnn == 'lenet':
        net = models.LeNet()
    elif dnn == 'lr':
        net = models.LinearRegression()
    elif dnn == 'vgg16':
        net = models.VGG(dnn.upper())
    elif dnn == 'alexnet':
        #net = models.AlexNet()
        net = torchvision.models.alexnet()
    elif dnn == 'lstman4':
        net, ext = models.LSTMAN4(datapath=kwargs['datapath'])
    elif dnn == 'lstm':
        # model = lstm(embedding_dim=args.hidden_size, num_steps=args.num_steps, batch_size=args.batch_size,
        #              vocab_size=vocab_size, num_layers=args.num_layers, dp_keep_prob=args.dp_keep_prob)
        net = lstmpy.lstm(vocab_size=kwargs['vocab_size'],
                          batch_size=kwargs['batch_size'])

    else:
        errstr = 'Unsupported neural network %s' % dnn
        logger.error(errstr)
        raise ValueError(errstr)
    return net, ext
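
A minimal usage sketch (hedged: assumes the script's models module, MnistNet, and logger are defined as above):

    net, ext = create_net(num_classes=10, dnn='lenet')  # ext is None except for the lstman4 branch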
Example No. 9
def main(n, max):
    _, raw_testing_data = tf.keras.datasets.mnist.load_data()
    model = models.LeNet()
    activations = [
        'LeNet/conv2d/Relu:0', 'LeNet/conv2d_1/Relu:0', 'LeNet/dense/Relu:0'
    ]
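    # cov_anal.NC measures neuron coverage over the activation tensors listed above.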
    coverage_analyzer = cov_anal.NC(activations)
    checkpoint_path = "./backup/lenet.ckpt"
    test_lenet(checkpoint_path=checkpoint_path,
               model=model,
               n_elements=n,
               max_iterations=max,
               testing_dataset=raw_testing_data,
               coverage_analyzer=coverage_analyzer)
Example No. 10
    def testModelOnly(self):
        original_model = models.LeNet(10, [1, 32, 32])
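        # Zero every weight so the save/load round trip below is easy to verify.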
        for parameters in original_model.parameters():
            parameters.data.zero_()

        state = common.state.State(original_model)
        state.save(self.filepath)

        state = common.state.State.load(self.filepath)
        loaded_model = state.model

        self.assertEqual(loaded_model.__class__.__name__, original_model.__class__.__name__)
        self.assertListEqual(loaded_model.resolution, original_model.resolution)

        for parameters in loaded_model.parameters():
            self.assertEqual(torch.sum(parameters).item(), 0)
Example No. 11
    def testModelOptimizerScheduler(self):
        original_model = models.LeNet(10, [1, 32, 32])
        original_optimizer = torch.optim.SGD(original_model.parameters(), lr=0.01, momentum=0.9)
        original_scheduler = torch.optim.lr_scheduler.StepLR(original_optimizer, step_size=10, gamma=0.9)
        state = common.state.State(original_model, original_optimizer, original_scheduler)
        state.save(self.filepath)

        state = common.state.State.load(self.filepath)
        loaded_model = state.model
        loaded_optimizer = torch.optim.SGD(loaded_model.parameters(), lr=0.99, momentum=0.1)
        loaded_optimizer.load_state_dict(state.optimizer)
        loaded_scheduler = torch.optim.lr_scheduler.StepLR(loaded_optimizer, step_size=10, gamma=0.9)
        loaded_scheduler.load_state_dict(state.scheduler)

        self.assertEqual(original_scheduler.step_size, loaded_scheduler.step_size)
        self.assertEqual(original_scheduler.gamma, loaded_scheduler.gamma)
Example No. 12
def train_lenet(N_epochs, batch_size, checkpoint_path):
    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
    train_data = DataLoaderFromArrays(x_train,
                                      y_train,
                                      one_hot=True,
                                      normalization=True)
    test_data = DataLoaderFromArrays(x_test,
                                     y_test,
                                     one_hot=True,
                                     normalization=True)
    X_test, Y_test = test_data.get_data()
    model = models.LeNet()
    saver = tf.train.Saver()
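    # Checkpoint only when the test accuracy improves.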
    best_test_accuracy = 0.0
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for epoch in range(N_epochs):
            for i in range(train_data.N_instances // batch_size):
                batch_x, batch_y = train_data.next_batch(batch_size)
                sess.run(model.train_op,
                         feed_dict={
                             model.features: batch_x,
                             model.labels: batch_y
                         })
                if i % 50 == 0:
                    loss, accuracy = sess.run([model.loss, model.accuracy],
                                              feed_dict={
                                                  model.features: batch_x,
                                                  model.labels: batch_y
                                              })
                    print('[Epoch {}] i: {} Loss: {} Accuracy: {}'.format(
                        epoch, i, loss, accuracy))
            test_accuracy = sess.run(model.accuracy,
                                     feed_dict={
                                         model.features: X_test,
                                         model.labels: Y_test
                                     })
            print('Test Accuracy: {}'.format(test_accuracy))
            if best_test_accuracy < test_accuracy:
                saver.save(sess, checkpoint_path)
                best_test_accuracy = test_accuracy
            print('Best Test Accuracy: {}'.format(best_test_accuracy))
Example No. 13
    def testNormalTraining(self):
        model = models.LeNet(10, [1, 28, 28], channels=12)

        cuda = True
        if cuda:
            model = model.cuda()

        optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.1)
        writer = torch.utils.tensorboard.SummaryWriter('./logs/')
        augmentation = None

        trainer = common.train.NormalTraining(model, self.trainset, self.testset, optimizer, scheduler, augmentation=augmentation, writer=writer, cuda=cuda)
        trainer.summary_gradients = True

        epochs = 25
        trainer.test(-1)
        for e in range(epochs):
            trainer.step(e)
            writer.flush()
            print(e)
Example No. 14
def get_model(p, X_train=None, Y_train_onehot=None):
    # make sure data types are right
    if 'num_layers' in vars(p).keys():
        p.num_layers = int(p.num_layers)
    if 'hidden_size' in vars(p).keys():
        p.hidden_size = int(p.hidden_size)

    # actually look for data
    if 'mnist' in p.dset:
        if p.use_conv_special:
            model = models.Linear_then_conv()
        elif p.use_conv:
            model = models.LeNet()
        elif p.num_layers > 0:
            model = models.LinearNet(p.num_layers, 28 * 28, p.hidden_size, 10)
        else:
            model = models.LinearNet(3, 28 * 28, 256, 10)
    elif 'cifar10' in p.dset:
        if p.use_conv_special:
            model = models.LinearThenConvCifar()
        elif p.use_conv == 'stacknet':
            import cnns.stacknet
            model = cnns.stacknet.StackNet()
        elif p.use_conv:
            model = models.Cifar10Conv()
        elif p.num_layers > 0:
            model = models.LinearNet(p.num_layers, 32 * 32 * 3, p.hidden_size,
                                     10)
        else:
            model = models.LinearNet(3, 32 * 32 * 3, 256, 10)
    elif p.dset in ['bars', 'noise']:
        model = models.LinearNet(p.num_layers, 8 * 8, p.hidden_size, 16)
    if 'siamese' in vars(p).keys() and p.siamese:
        model = siamese.SiameseNet(model, X_train, Y_train_onehot, p.reps,
                                   p.similarity, p.siamese_init,
                                   p.train_prototypes, p.prototype_dim)
    if 'linear' in p.dset:
        model = models.LinearNet(p.num_layers, p.num_features, p.hidden_size,
                                 1)
    return model
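
A minimal usage sketch (hypothetical attribute values; `p` only needs the attributes the branches above read):

    import argparse
    p = argparse.Namespace(dset='mnist', use_conv=True, use_conv_special=False,
                           num_layers=0, hidden_size=256)
    model = get_model(p)  # resolves to the models.LeNet() branch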
Example No. 15
def create_net(num_classes, dnn='resnet20', **kwargs):
    ext = None
    if dnn in ['resnet20', 'resnet56', 'resnet110']:
        net = models.__dict__[dnn](num_classes=num_classes)
    elif dnn == 'resnet50':
        #net = models.__dict__['resnet50'](num_classes=num_classes)
        net = torchvision.models.resnet50(num_classes=num_classes)
    elif dnn == 'inceptionv4':
        net = models.inceptionv4(num_classes=num_classes)
    elif dnn == 'inceptionv3':
        net = torchvision.models.inception_v3(num_classes=num_classes)
    elif dnn == 'vgg16i':  # vgg16 for imagenet
        net = torchvision.models.vgg16(num_classes=num_classes)
    elif dnn == 'googlenet':
        net = models.googlenet()
    elif dnn == 'mnistnet':
        net = MnistNet()
    elif dnn == 'fcn5net':
        net = models.FCN5Net()
    elif dnn == 'lenet':
        net = models.LeNet()
    elif dnn == 'lr':
        net = models.LinearRegression()
    elif dnn == 'vgg16':
        net = models.VGG(dnn.upper())
    elif dnn == 'alexnet':
        net = torchvision.models.alexnet()
    elif dnn == 'lstman4':
        net, ext = models.LSTMAN4(datapath=kwargs['datapath'])
    elif dnn == 'lstm':
        net = lstmpy.lstm(vocab_size=kwargs['vocab_size'],
                          batch_size=kwargs['batch_size'])

    else:
        errstr = 'Unsupported neural network %s' % dnn
        logger.error(errstr)
        raise ValueError(errstr)
    return net, ext
Example No. 16
    def testLeNet(self):
        resolutions = [
            [1, 2, 2],
            [1, 3, 3],
            [1, 4, 4],
            [1, 5, 5],
            [1, 4, 5],
            [1, 5, 4],
            [1, 27, 32],
            [1, 32, 27],
            [1, 32, 32],
            [3, 32, 32],
        ]
        channels = [1, 2]
        activations = [
            torch.nn.ReLU,
            torch.nn.Sigmoid,
            torch.nn.Tanh,
        ]
        normalizations = [True, False]

        classes = 10
        batch_size = 100
        for resolution in resolutions:
            for channel in channels:
                for activation in activations:
                    for normalization in normalizations:
                        model = models.LeNet(classes,
                                             resolution,
                                             clamp=True,
                                             channels=channel,
                                             activation=activation,
                                             normalization=normalization)
                        output = model(
                            torch.autograd.Variable(
                                torch.zeros([batch_size] + resolution)))
                        self.assertEqual(output.size()[0], batch_size)
                        self.assertEqual(output.size()[1], classes)
Example No. 17
    filenames = ["accuracy_loss", "result", "knn"]
    filenames = add_to_str_ls(results_folder, filenames, before=True)
    filenames.append("../weights/weights")
    for arg in vars(args):
        add_ = "_" + arg + "=" + str(getattr(args, arg))
        filenames = add_to_str_ls(add_, filenames)
    filenames1 = add_to_str_ls(".csv", filenames[:2])
    acc_name, result_name = filenames1
    f_name = filenames[-2] + ".txt"
    weights_name = filenames[-1] + ".pth"

    ## MODEL
    if (args.model == "convnet"):
        network = models.ConvNet(shape=args.shape)
    if (args.model == "lenet"):
        network = models.LeNet(shape=args.shape)
    if (args.model == "gao"):
        network = models.Gao(shape=args.shape)
    if (args.model == "jitaree"):
        network = models.Jitaree(shape=args.shape)
    if (args.model == "vae"):
        network = models.VAE(shape=args.shape, batch_size=args.batch)
    #if (args.model == "waae"):
    #	network = models.WAAE(shape=args.shape)
    if (args.model == "knn"):
        network = models.kNN()
    if (args.model == "xgboost"):
        network = models.XGBoost()
    if (not args.model in ["knn", "xgboost"]):
        if (bool(args.load)):
            network.load_state_dict(torch.load(weights_name))
Example No. 18
from models import *
import models
import os, sys
import time

train_dataset = equation_linear_images_dataset_train(cv_set_file='cv_set_linear.p', right_asnwer_chance=.5)
test_dataset = equation_linear_images_dataset_cv(cv_set_file='cv_set_linear.p', right_asnwer_chance=.5)

train_loader = DataLoader(train_dataset, batch_size=50, shuffle=True, num_workers=16)
test_loader = DataLoader(test_dataset, batch_size=50, shuffle=True, num_workers=16)

# for i_batch, sample_batched in enumerate(dataloader):
#    print(sample_batched['feature_vector'].shape)
#    if i_batch == 3: break

model = models.LeNet()
model.cuda()
summary = repr(model)
# model = torch.load('test.pt')
print(summary)

mod_name = summary.split('\n')[0][:-1]  # model name
out_dir = mod_name + "_" + time.strftime("%m-%d-%H:%M")  # avoid shadowing the dir builtin
os.makedirs(out_dir)

f = open('./' + out_dir + '/' + 'log_' + mod_name + '.log', 'w')
f.write(summary + '\n')

# if torch.cuda.device_count() > 1:
#  print("Let's use", torch.cuda.device_count(), "GPUs!")
#  model = nn.DataParallel(model)
Example No. 19
def main(args):
    # Set GPUs and device
    os.environ['CUDA_VISIBLE_DEVICES'] = args.visible_gpus
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    print('Running on %s' % device)

    # Set environment
    torch.manual_seed(args.seed)

    # Get dataset and define transformations
    dst = datasets.CIFAR100(args.data_dir, download=True)
    tp = transforms.Compose([
        transforms.Resize(32),
        transforms.CenterCrop(32),
        transforms.ToTensor()
    ])
    tt = transforms.ToPILImage()

    # Construct model and initialize weights
    net = models.LeNet().to(device)
    net.apply(utils.weights_init)

    # Define criterion
    criterion = utils.cross_entropy_for_onehot

    # Get attack data and label
    gt_data = tp(dst[args.image_idx][0]).to(device)
    gt_data = gt_data.view(1, *gt_data.size())
    gt_image = tt(gt_data[0].cpu())
    gt_label = torch.Tensor([dst[args.image_idx][1]]).long().to(device)
    gt_label = gt_label.view(1, )
    gt_onehot_label = utils.label_to_onehot(gt_label, num_classes=100)

    # Compute original gradient
    out = net(gt_data)
    y = criterion(out, gt_onehot_label)
    dy_dx = torch.autograd.grad(y, net.parameters())

    # Share the gradients with other clients
    original_dy_dx = list((_.detach().clone() for _ in dy_dx))

    # Generate dummy data and label
    dummy_data = torch.randn(gt_data.size()).to(device).requires_grad_(True)
    dummy_label = torch.randn(
        gt_onehot_label.size()).to(device).requires_grad_(True)

    dummy_init_image = tt(dummy_data[0].cpu())
    dummy_init_label = torch.argmax(dummy_label, dim=-1)

    # Define optimizer
    optimizer = torch.optim.LBFGS([dummy_data, dummy_label])
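    # DLG: jointly optimize the dummy data and label so their gradients match the shared gradients.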

    # Run DLG method
    dummy_grads, dummy_lbfgs_num_iter, history = \
        dlg_method(dummy_data, dummy_label, original_dy_dx,
                   net, criterion, optimizer, tt, max_iters=args.max_iters)

    # Save model
    params_path = os.path.join(args.exp_dir,
                               '%04d_params.pkl' % args.image_idx)
    torch.save(net.state_dict(), params_path)
    print('Save model parameters to %s' % params_path)

    # Index computation functions
    compute_l2norm = lambda x: (x**2).sum().item()**0.5
    compute_min = lambda x: x.min().item()
    compute_max = lambda x: x.max().item()
    compute_mean = lambda x: x.mean().item()
    compute_median = lambda x: x.median().item()

    original_grads_norm = [compute_l2norm(e) for e in original_dy_dx]
    original_grads_min = [compute_min(e) for e in original_dy_dx]
    original_grads_max = [compute_max(e) for e in original_dy_dx]
    original_grads_mean = [compute_mean(e) for e in original_dy_dx]
    original_grads_median = [compute_median(e) for e in original_dy_dx]

    dummy_grads_norm = np.array([[compute_l2norm(e) for e in r]
                                 for r in dummy_grads])
    dummy_grads_min = np.array([[compute_min(e) for e in r]
                                for r in dummy_grads])
    dummy_grads_max = np.array([[compute_max(e) for e in r]
                                for r in dummy_grads])
    dummy_grads_mean = np.array([[compute_mean(e) for e in r]
                                 for r in dummy_grads])
    dummy_grads_median = np.array([[compute_median(e) for e in r]
                                   for r in dummy_grads])

    # Plot and save figures
    fig_dir = os.path.join(args.exp_dir, 'figures')
    if not os.path.exists(fig_dir):
        os.makedirs(fig_dir)

    img_history = [[tt(hd), hl] for hd, hl in history]
    utils.plot_history([gt_image, gt_label],
                       [dummy_init_image, dummy_init_label],
                       img_history,
                       title='Image %04d History' % args.image_idx,
                       fig_path=os.path.join(
                           fig_dir, '%04d_history.png' % args.image_idx))

    utils.plot_convergency_curve(
        dummy_grads_norm,
        original_grads_norm,
        title='Image %04d L2 Norm Convergence' % args.image_idx,
        fig_path=os.path.join(fig_dir, '%04d_l2norm.png' % args.image_idx))

    utils.plot_convergency_curve(
        dummy_grads_min,
        original_grads_min,
        title='Image %04d Min Value Convergence' % args.image_idx,
        fig_path=os.path.join(fig_dir, '%04d_min.png' % args.image_idx))

    utils.plot_convergency_curve(
        dummy_grads_max,
        original_grads_max,
        title='Image %04d Max Value Convergence' % args.image_idx,
        fig_path=os.path.join(fig_dir, '%04d_max.png' % args.image_idx))

    utils.plot_convergency_curve(
        dummy_grads_mean,
        original_grads_mean,
        title='Image %04d Mean Convergence' % args.image_idx,
        fig_path=os.path.join(fig_dir, '%04d_mean.png' % args.image_idx))

    utils.plot_convergency_curve(
        dummy_grads_median,
        original_grads_median,
        title='Image %04d Median Value Convergence' % args.image_idx,
        fig_path=os.path.join(fig_dir, '%04d_median.png' % args.image_idx))

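    # L2 distance between dummy and ground-truth data (despite the "mse" name, no mean is taken).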
    compute_mse = lambda x, y: ((x - y)**2).sum().item()**0.5
    final_mse = compute_mse(dummy_data, gt_data)
    converged = final_mse < args.mse_tol
    print('Converged!! (MSE=%2.6f)' %
          final_mse if converged else 'Diverged!! (MSE=%2.6f)' % final_mse)

    # Save MSEs
    mses = np.array([compute_mse(hd.cuda(), gt_data) for hd, _ in history])
    with open(os.path.join(args.exp_dir, '%04d_mses.npy' % args.image_idx),
              'wb') as opf:
        np.save(opf, mses)
Example No. 20
    @classmethod
    def getModel(cls):
        return models.LeNet(10, [1, 28, 28])
Example No. 21
    def testAttack(self):
        model = models.LeNet(10, [1, 28, 28], channels=12)
        #state = common.state.State.load('mnist_lenet.pth.tar')
        #model = state.model

        if self.cuda:
            model = model.cuda()

        epsilon = 0.3
        attack = attacks.BatchGradientDescent()
        attack.max_iterations = 2
        attack.base_lr = 0.1
        attack.momentum = 0
        attack.c = 0
        attack.lr_factor = 1
        attack.normalized = True
        attack.backtrack = False
        attack.initialization = attacks.initializations.LInfUniformInitialization(
            epsilon)
        attack.norm = attacks.norms.LInfNorm()
        attack.projection = attacks.projections.SequentialProjections([
            attacks.projections.LInfProjection(epsilon),
            attacks.projections.BoxProjection()
        ])
        objective = attacks.objectives.UntargetedF0Objective()

        model.eval()
        attempts = 1
        perturbations, adversarial_probabilities, errors = common.test.attack(
            model,
            self.adversarialset,
            attack,
            objective,
            attempts=attempts,
            writer=common.summary.SummaryWriter(),
            cuda=self.cuda)

        self.assertEqual(perturbations.shape[0], attempts)
        self.assertEqual(perturbations.shape[1],
                         self.adversarialset.dataset.images.shape[0])
        self.assertEqual(perturbations.shape[2],
                         self.adversarialset.dataset.images.shape[3])
        self.assertEqual(perturbations.shape[3],
                         self.adversarialset.dataset.images.shape[1])
        self.assertEqual(perturbations.shape[4],
                         self.adversarialset.dataset.images.shape[2])
        self.assertEqual(adversarial_probabilities.shape[0], attempts)
        self.assertEqual(adversarial_probabilities.shape[1],
                         perturbations.shape[1])
        self.assertEqual(adversarial_probabilities.shape[2],
                         numpy.max(self.adversarialset.dataset.labels) + 1)

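        # The attack returns perturbations in NCHW order; convert to the dataset's NHWC layout.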
        perturbations = numpy.transpose(perturbations, (0, 1, 3, 4, 2))
        adversarialloader = torch.utils.data.DataLoader(
            common.datasets.AdversarialDataset(
                self.adversarialset.dataset.images, perturbations,
                self.adversarialset.dataset.labels),
            batch_size=100,
            shuffle=False)
        self.assertEqual(len(adversarialloader),
                         attempts * len(self.adversarialset))
        clean_probabilities = common.test.test(model,
                                               adversarialloader,
                                               cuda=self.cuda)

        adversarial_probabilities = adversarial_probabilities.reshape(
            adversarial_probabilities.shape[0] *
            adversarial_probabilities.shape[1],
            adversarial_probabilities.shape[2])
        self.assertTrue(
            numpy.all(
                numpy.sum(perturbations.reshape(
                    perturbations.shape[0] * perturbations.shape[1], -1),
                          axis=1) > 0))
        numpy.testing.assert_array_almost_equal(clean_probabilities,
                                                adversarial_probabilities)
Example No. 22
import models

name_to_model = {
    'LeNet': lambda args: models.LeNet(**args),
    'AlexNet': lambda args: models.AlexNet(**args),
    'MLP': lambda args: models.MLP(**args),
    'ResNet18': lambda args: models.ResNet18(**args),
    'PResNet18': lambda args: models.PResNet18(**args),
    'Permutation': lambda args: models.TensorPermutation(32, 32, **args),
    'ResNet20Original': lambda args: models.resnet20original(),
    'MobileNet': lambda args: models.MobileNet(**args),
    'ShuffleNet': lambda args: models.ShuffleNetG2(),
    'WideResNet28': lambda args: models.WideResNet28(**args),
}


def get_model(model_config):
    name = model_config['name']
    return name_to_model[name](model_config.get('args', {}))  # default to {} so **args unpacking works
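
A minimal usage sketch (hypothetical config; the 'args' dict must match the chosen model's constructor):

    config = {'name': 'LeNet', 'args': {}}
    model = get_model(config)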
Example No. 23
    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST('data', train=False, transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,))
        ])),
        batch_size=args.test_batch_size, shuffle=True, **kwargs)

    best_acc = 0.0
    model_ori = None
    model_train = None
    model_test = None

    # generate the model
    if args.arch == 'LeNet':
        model_ori = models.LeNet()
        if args.cuda:
            model_ori.cuda()
        if args.pretrained:
            model_ori.load_state_dict(torch.load(args.pretrained))

    elif args.arch == 'Bin_LeNet':
        model_train = models.Bin_LeNet_train()
        model_test = models.Bin_LeNet_test()
        if args.cuda:
            model_train = model_train.cuda()
            model_test = model_test.cuda()

        if args.pretrained:
            model_test.load_state_dict(torch.load(args.pretrained))
Example No. 24
    @classmethod
    def getModel(cls):
        return models.LeNet(10, [1, 28, 28], channels=64)