# Example #1
def main():
    """Evaluate a trained miniplaces ResNet under an adversarial overlay.

    Loads the best checkpoint from --expdir, rescales the synthetic
    perturbation so its std matches the reference VGG-19 universal
    adversary, runs the perturbed validation set, and writes the
    resulting loss/accuracy to <expdir>/adversarial_test.json.
    """
    parser = argparse.ArgumentParser(description='Adversarial test')
    parser.add_argument('--expdir', type=str, default=None, required=True,
                        help='experiment directory containing model')
    args = parser.parse_args()
    progress = default_progress()
    experiment_dir = args.expdir
    # Match the synthetic perturbation's std to the reference adversary's.
    reference = numpy.load('perturbation/VGG-19.npy')
    perturbation = numpy.load('perturbation/perturb_synth.npy')
    print('Original std %e new std %e' % (numpy.std(reference),
        numpy.std(perturbation)))
    perturbation *= numpy.std(reference) / numpy.std(perturbation)
    # To desaturate, uncomment:
    # perturbation = numpy.repeat(perturbation[:,:,1:2], 3, axis=2)

    val_loader = torch.utils.data.DataLoader(
        CachedImageFolder('dataset/miniplaces/simple/val',
            transform=transforms.Compose([
                        transforms.Resize(128),
                        transforms.CenterCrop(112),
                        # Center 112x112 window of the perturbation,
                        # aligned with the CenterCrop above (assumes a
                        # 224x224 perturbation array -- TODO confirm).
                        AddPerturbation(perturbation[48:160, 48:160]),
                        transforms.ToTensor(),
                        transforms.Normalize(IMAGE_MEAN, IMAGE_STDEV),
                        ])),
        batch_size=32, shuffle=False,
        num_workers=0, pin_memory=True)
    # Create a simplified ResNet with half resolution.
    model = CustomResNet(18, num_classes=100, halfsize=True)
    checkpoint_filename = 'best_miniplaces.pth.tar'
    best_checkpoint = os.path.join(experiment_dir, checkpoint_filename)
    checkpoint = torch.load(best_checkpoint)
    model.load_state_dict(checkpoint['state_dict'])
    model.eval()
    model.cuda()
    criterion = nn.CrossEntropyLoss().cuda()

    val_loss, val_acc = AverageMeter(), AverageMeter()
    for images, target in progress(val_loader):
        # Move the batch to the GPU.
        input_var, target_var = [d.cuda() for d in [images, target]]
        # Evaluate model
        with torch.no_grad():
            output = model(input_var)
            loss = criterion(output, target_var)
            _, pred = output.max(1)
            # .item()/.float() replace the deprecated .data access.
            accuracy = (target_var.eq(pred)
                    ).float().sum().item() / images.size(0)
        val_loss.update(loss.item(), images.size(0))
        val_acc.update(accuracy, images.size(0))
        # Check accuracy
        post_progress(l=val_loss.avg, a=val_acc.avg)
    print_progress('Loss %e, validation accuracy %.4f' %
            (val_loss.avg, val_acc.avg))
    with open(os.path.join(experiment_dir, 'adversarial_test.json'), 'w') as f:
        json.dump(dict(
            adversarial_acc=val_acc.avg,
            adversarial_loss=val_loss.avg), f)
# Example #2
def main():
    """Baseline evaluation: run the trained miniplaces ResNet over the
    unperturbed validation set and report average loss and accuracy.
    """
    progress = default_progress()
    experiment_dir = 'experiment/resnet'
    val_loader = torch.utils.data.DataLoader(
        CachedImageFolder(
            'dataset/miniplaces/simple/val',
            transform=transforms.Compose([
                transforms.Resize(128),
                # transforms.CenterCrop(112),
                transforms.ToTensor(),
                transforms.Normalize(IMAGE_MEAN, IMAGE_STDEV),
            ])),
        batch_size=32,
        shuffle=False,
        num_workers=24,
        pin_memory=True)
    # Create a simplified ResNet with half resolution.
    model = CustomResNet(18, num_classes=100, halfsize=True)
    checkpoint_filename = 'best_miniplaces.pth.tar'
    best_checkpoint = os.path.join(experiment_dir, checkpoint_filename)
    checkpoint = torch.load(best_checkpoint)
    model.load_state_dict(checkpoint['state_dict'])
    model.eval()
    model.cuda()
    criterion = nn.CrossEntropyLoss().cuda()

    val_loss, val_acc = AverageMeter(), AverageMeter()
    for images, target in progress(val_loader):
        # Move the batch to the GPU.
        input_var, target_var = [d.cuda() for d in [images, target]]
        # Evaluate model
        with torch.no_grad():
            output = model(input_var)
            loss = criterion(output, target_var)
            _, pred = output.max(1)
            # .item()/.float() replace the deprecated .data access.
            accuracy = (
                target_var.eq(pred)).float().sum().item() / images.size(0)
        val_loss.update(loss.item(), images.size(0))
        val_acc.update(accuracy, images.size(0))
        # Check accuracy
        post_progress(l=val_loss.avg, a=val_acc.avg)
    print_progress('Loss %e, validation accuracy %.4f' %
                   (val_loss.avg, val_acc.avg))
# Example #3
def main():
    """Compare model behavior under a family of image perturbations.

    Evaluates one checkpoint once per perturbation (index 0 is a zero
    "unattacked" baseline) and, for every retained layer, accumulates
    the mean-squared activation difference, the mean per-sample max
    difference, and the activation sign-flip rate of each perturbed run
    relative to the baseline run, then prints a per-layer report.
    """
    parser = argparse.ArgumentParser(description='Adversarial test')
    parser.add_argument('--expdir',
                        type=str,
                        default='experiment/resnet',
                        help='experiment directory containing model')
    args = parser.parse_args()
    progress = default_progress()
    experiment_dir = args.expdir
    perturbations = [
        numpy.zeros((224, 224, 3), dtype='float32'),
        numpy.load('perturbation/VGG-19.npy'),
        numpy.load('perturbation/perturb_synth.npy'),
        numpy.load('perturbation/perturb_synth_histmatch.npy'),
        numpy.load('perturbation/perturb_synth_newspectrum.npy'),
        numpy.load('perturbation/perturbation_rotated.npy'),
        numpy.load('perturbation/perturbation_rotated_averaged.npy'),
        numpy.load('perturbation/perturbation_noisefree.npy'),
        numpy.load('perturbation/perturbation_noisefree_nodc.npy'),
    ]
    # Human-readable labels, parallel to `perturbations`.
    perturbation_name = [
        "unattacked",
        "universal adversary",
        "synthetic",
        "histmatch",
        "newspectrum",
        "rotated",
        "rotated_averaged",
        "noisefree",
        "noisefree_nodc",
    ]
    print('Original std %e new std %e' %
          (numpy.std(perturbations[1]), numpy.std(perturbations[2])))
    # Match the synthetic perturbation's std (index 2) to the reference
    # universal adversary's (index 1).
    perturbations[2] *= (numpy.std(perturbations[1]) /
                         numpy.std(perturbations[2]))
    # One loader per perturbation; all iterate the val set in the same
    # order (shuffle=False) so batches can be compared elementwise.
    loaders = [
        torch.utils.data.DataLoader(
            CachedImageFolder(
                'dataset/miniplaces/simple/val',
                transform=transforms.Compose([
                    transforms.Resize(128),
                    # transforms.CenterCrop(112),
                    # Central 128x128 window of the perturbation, matching
                    # the Resize(128) output (assumes a 224x224
                    # perturbation array -- TODO confirm).
                    AddPerturbation(perturbation[40:168, 40:168]),
                    transforms.ToTensor(),
                    transforms.Normalize(IMAGE_MEAN, IMAGE_STDEV),
                ])),
            batch_size=32,
            shuffle=False,
            num_workers=0,
            pin_memory=True) for perturbation in perturbations
    ]
    # Create a simplified ResNet with half resolution.
    model = CustomResNet(18, num_classes=100, halfsize=True)
    layernames = ['relu', 'layer1', 'layer2', 'layer3', 'layer4', 'fc']
    retain_layers(model, layernames)
    checkpoint_filename = 'best_miniplaces.pth.tar'
    best_checkpoint = os.path.join(experiment_dir, checkpoint_filename)
    checkpoint = torch.load(best_checkpoint)
    model.load_state_dict(checkpoint['state_dict'])
    model.eval()
    model.cuda()
    criterion = nn.CrossEntropyLoss().cuda()
    val_acc = [AverageMeter() for _ in loaders]
    # Per-perturbation, per-layer meters; index 0 (the baseline against
    # which others are compared) stays unused.
    diffs, maxdiffs, signflips = [
        [defaultdict(AverageMeter) for _ in perturbations] for _ in range(3)
    ]
    for all_batches in progress(zip(*loaders), total=len(loaders[0])):
        # Same images under every perturbation; labels are identical
        # across loaders, so take them from the first.
        indata = [b[0].cuda() for b in all_batches]
        target = all_batches[0][1].cuda()
        # Evaluate model
        retained = defaultdict(list)
        with torch.no_grad():
            for i, inp in enumerate(indata):
                output = model(inp)
                _, pred = output.max(1)
                # .item()/.float() replace the deprecated .data access.
                accuracy = (
                    target.eq(pred)).float().sum().item() / inp.size(0)
                val_acc[i].update(accuracy, inp.size(0))
                for layer, data in model.retained.items():
                    retained[layer].append(data)
        for layer, vals in retained.items():
            for i in range(1, len(indata)):
                # Mean squared activation difference vs. baseline.
                diffs[i][layer].update(
                    (vals[i] - vals[0]).pow(2).mean().item(), len(target))
                # Mean over samples of the per-sample max difference.
                maxdiffs[i][layer].update(
                    (vals[i] - vals[0]).view(len(target),
                                             -1).max(1)[0].mean().item(),
                    len(target))
                # Fraction of units whose activation sign flipped.
                signflips[i][layer].update(
                    ((vals[i] > 0).float() -
                     (vals[0] > 0).float()).abs().mean().item(), len(target))
        # Check accuracy
        post_progress(a=val_acc[0].avg)
    # Report on findings
    for i, acc in enumerate(val_acc):
        print_progress('Test #%d (%s), validation accuracy %.4f' %
                       (i, perturbation_name[i], acc.avg))
        if i > 0:
            for layer in layernames:
                print_progress(
                    'Layer %s RMS diff %.3e maxdiff %.3e signflip %.3e' %
                    (layer, math.sqrt(diffs[i][layer].avg),
                     maxdiffs[i][layer].avg, signflips[i][layer].avg))