def main():
    args = parser.parse_args()

    cfgs = [config_from_string(s) for s in args.ensemble]

    target_model = create_ensemble(cfgs, args.ensemble_weights, args.checkpoint_paths).cuda()
    target_model.eval()

    if args.targeted:
        dataset = Dataset(args.input_dir)
    else:
        dataset = Dataset(args.input_dir, target_file='')

    attack = AttackIterative(
        model=target_model,
        targeted=args.targeted,
        random_start=args.random_start,
        max_epsilon=args.max_epsilon,
        norm=args.norm,
        step_alpha=args.step_alpha,
        num_steps=args.steps,
        debug=args.debug)

    runner = ImageSaveAttackRunner(dataset, args.output_dir)
    runner.run(attack, batch_size=args.batch_size)
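# Every main() in these examples assumes a module-level argparse parser that
# none of the snippets define. Below is a minimal, hypothetical sketch
# covering the attributes the first example reads; flag names and defaults
# are inferred from usage, not taken from the repo itself.

import argparse

parser = argparse.ArgumentParser(description='Iterative adversarial attack')
parser.add_argument('--input_dir', help='directory of input images')
parser.add_argument('--output_dir', help='directory for adversarial outputs')
parser.add_argument('--ensemble', nargs='+', help='model config strings')
parser.add_argument('--ensemble_weights', nargs='+', type=float)
parser.add_argument('--checkpoint_paths', nargs='+')
parser.add_argument('--targeted', action='store_true')
parser.add_argument('--random_start', action='store_true')
parser.add_argument('--max_epsilon', type=float, default=16.0)
parser.add_argument('--norm', default='inf')
parser.add_argument('--step_alpha', type=float, default=0.0)
parser.add_argument('--steps', type=int, default=10)
parser.add_argument('--batch_size', type=int, default=8)
parser.add_argument('--debug', action='store_true')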
Example 2
def main():
    args = parser.parse_args()

    cfgs = [config_from_string(s) for s in args.ensemble]

    ensemble = create_ensemble(cfgs,
                               args.ensemble_weights,
                               checkpoint_paths=args.checkpoint_paths).cuda()
    ensemble.eval()

    tf = transforms.Compose([
        transforms.Scale(args.img_size),
        transforms.CenterCrop(args.img_size),
        transforms.ToTensor()
    ])
    dataset = Dataset(args.input_dir, transform=tf, target_file='')

    defense = Base(args.input_dir,
                   args.output_file,
                   ensemble,
                   dataset,
                   img_size=args.img_size,
                   batch_size=args.batch_size)

    defense.run()
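# Note: transforms.Scale is the old torchvision name; later releases
# deprecate it in favor of transforms.Resize with the same semantics. On a
# current torchvision the pipeline above would read:
#
#     tf = transforms.Compose([
#         transforms.Resize(args.img_size),
#         transforms.CenterCrop(args.img_size),
#         transforms.ToTensor()
#     ])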
Example 3
def main():
    args = parser.parse_args()

    tf = transforms.Compose([
        transforms.ToTensor(),
    ])

    dataset = Dataset(args.input_dir, transform=tf)
    loader = data.DataLoader(dataset, batch_size=8, shuffle=False)

    model_config_strings = ['Resnet18', 'InceptionResnetV2']
    cfgs = [config_from_string(s) for s in model_config_strings]

    ensemble = create_ensemble(cfgs, [1.0 for _ in cfgs])
    augment = augmentations.AugmentationComposer([
        augmentations.RandomCrop(269),
        augmentations.Mirror(0.5),
        augmentations.Blur(0.5, 0.5)
    ])

    for cfg, model in zip(cfgs, ensemble.models):
        checkpoint_path = os.path.join('/checkpoints/', cfg['checkpoint_file'])
        checkpoint = torch.load(checkpoint_path)
        if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
            model.get_core_model().load_state_dict(checkpoint['state_dict'])
        else:
            model.get_core_model().load_state_dict(checkpoint)
    ensemble.cuda()
    ensemble.eval()

    outputs = []
    for batch_idx, (input, _) in enumerate(loader):
        input = input.cuda()
        input_var = autograd.Variable(input, volatile=True)
        labels = ensemble(augment(input_var))
        labels = labels.max(1)[1]
        outputs.append(labels.data.cpu().numpy())
    outputs = np.concatenate(outputs, axis=0)

    with open(args.output_file, 'w') as out_file:
        filenames = dataset.filenames()
        for filename, label in zip(filenames, outputs):
            filename = os.path.basename(filename)
            out_file.write('{0},{1}\n'.format(filename, label))
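# The loop above relies on the pre-0.4 autograd.Variable(volatile=True)
# idiom to disable gradient tracking. On PyTorch >= 0.4 the equivalent is a
# torch.no_grad() block; a minimal sketch (the helper name is hypothetical,
# reusing the ensemble/augment/loader objects built above):

def predict_labels(ensemble, augment, loader):
    # Run the augmented ensemble over the loader with autograd disabled.
    preds = []
    with torch.no_grad():
        for images, _ in loader:
            logits = ensemble(augment(images.cuda()))
            preds.append(logits.max(1)[1].cpu().numpy())
    return np.concatenate(preds, axis=0)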
Example 4
def main():
    args = parser.parse_args()

    dataset = Dataset(args.input_dir, target_file='')

    cfgs = [config_from_string(s) for s in args.ensemble]

    target_model = create_ensemble(cfgs, args.ensemble_weights,
                                   args.checkpoint_paths).cuda()
    target_model.eval()

    attack = SelectiveUniversal(target_model,
                                args.npy_files,
                                max_epsilon=args.max_epsilon,
                                try_mirrors=args.try_mirrors)

    runner = ImageSaveAttackRunner(dataset, args.output_dir)
    # Only supports batch size of 1
    runner.run(attack, 1)
Example 5
def main():
    args = parser.parse_args()

    cfgs = [config_from_string(s) for s in args.ensemble]

    target_model = create_ensemble(cfgs, args.ensemble_weights,
                                   args.checkpoint_paths).cuda()
    target_model.eval()

    if args.targeted:
        dataset = Dataset(args.input_dir)
    else:
        dataset = Dataset(args.input_dir, target_file='')

    attack = AttackCarliniWagnerL2(targeted=args.targeted,
                                   max_steps=args.steps,
                                   search_steps=args.search_steps,
                                   debug=args.debug)

    runner = ImageSaveAttackRunner(dataset, args.output_dir)
    runner.run(attack, batch_size=args.batch_size)
Example 6
def main():
    args = parser.parse_args()

    cfgs = [config_from_string(s) for s in args.ensemble]

    target_model = create_ensemble(cfgs, args.ensemble_weights,
                                   args.checkpoint_paths).cuda()
    target_model.eval()

    if args.no_augmentation:
        augmentation = lambda x: x
    else:
        augmentation = processing.build_anp_augmentation_module(
            saturation=args.saturation,
            brightness_contrast=args.brightness_contrast,
            gaus_blur_prob=args.gaus_blur_prob,
            gaus_blur_size=args.gaus_blur_size,
            gaus_blur_sigma=args.gaus_blur_sigma).cuda()

    if args.targeted:
        dataset = Dataset(args.input_dir)
    else:
        dataset = Dataset(args.input_dir, target_file='')

    attack = CWInspired(target_model,
                        augmentation,
                        max_epsilon=args.max_epsilon,
                        n_iter=args.n_iter,
                        lr=args.lr,
                        targeted=args.targeted,
                        target_nth_highest=args.target_nth_highest,
                        prob_dont_augment=0.0,
                        always_target=args.always_target,
                        random_start=args.random_start,
                        n_restarts=args.n_restarts)

    runner = ImageSaveAttackRunner(dataset,
                                   args.output_dir,
                                   time_limit_per_100=args.time_limit_per_100)
    runner.run(attack, args.batch_size)
Example 7
def main():
    args = parser.parse_args()

    num_gpu = args.num_gpu
    if num_gpu == 1:
        input_devices = [0]
        output_devices = [0]
    elif num_gpu == 2:
        input_devices = [1]
        output_devices = [0]
    elif num_gpu == 3:
        input_devices = [0, 1]
        output_devices = [2]
    elif num_gpu == 4:
        input_devices = [0, 1]
        output_devices = [2, 3]
    else:
        assert False, 'Unsupported number of gpus'

    master_output_device = output_devices[0]

    train_dir = os.path.join(args.data, 'train')
    val_dir = os.path.join(args.data, 'validation')

    train_dataset = datasets.ImageFolder(
        train_dir,
        transforms.Compose([
            transforms.RandomSizedCrop(args.img_size),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor()
        ]))

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True)

    attack_cfgs = [
        {
            'attack_name': 'iterative',
            'targeted': True,
            'num_steps': 10,
            'target_rand': True
        },
        {
            'attack_name': 'iterative',
            'targeted': False,
            'num_steps': 1,
            'random_start': True
        },
        {
            'attack_name': 'cw_inspired',
            'targeted': True,
            'n_iter': 38
        },
        {
            'attack_name': 'cw_inspired',
            'targeted': False,
            'n_iter': 38
        },
    ]

    attack_model_cfgs = [  # FIXME these are currently just test configs, need to setup properly
        {
            'models': ['inception_v3_tf']
        },
        {
            'models': ['inception_resnet_v2', 'resnet34'],
            'weights': [1.0, 1.0]
        },
        {
            'models': ['adv_inception_resnet_v2', 'inception_v3_tf']
        },
    ]

    adv_generator = AdversarialGenerator(
        train_loader,
        model_cfgs=attack_model_cfgs,
        attack_cfgs=attack_cfgs,
        attack_probs=[0.4, 0.4, 0.1, 0.1],
        output_batch_size=args.batch_size,
        input_devices=input_devices,
        master_output_device=master_output_device)

    if args.mp:
        adv_generator = MpFeeder(adv_generator, maxsize=8)

    val_dataset = datasets.ImageFolder(
        val_dir,
        transforms.Compose([
            transforms.Scale(int(math.floor(args.img_size / 0.875))),
            transforms.CenterCrop(args.img_size),
            transforms.ToTensor()
        ]))

    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    with torch.cuda.device(master_output_device):
        defense_ensemble = ['adv_inception_resnet_v2',
                            'dpn68b_extra']  # FIXME: make this an argument
        defense_cfgs = [
            config_from_string(s, output_fn='') for s in defense_ensemble
        ]
        if len(defense_ensemble) > 1:
            defense_model = create_ensemble(defense_cfgs, None)
        else:
            defense_model = create_model_from_cfg(defense_cfgs[0])

        if args.mt:
            if len(defense_ensemble) > 1:
                defense_model = multi_task.MultiTaskEnsemble(
                    defense_model.models, use_features=False)
            else:
                defense_model = multi_task.MultiTask(defense_model)

        if len(output_devices) > 1:
            defense_model = torch.nn.DataParallel(defense_model,
                                                  output_devices).cuda()
        else:
            defense_model.cuda()

        if args.df:
            adv_generator.set_dogfood(defense_model)

        if args.opt == 'sgd':
            optimizer = torch.optim.SGD(get_opt_params(
                defense_model, classifier_only=args.co),
                                        args.lr,
                                        momentum=args.momentum,
                                        weight_decay=args.weight_decay)
        elif args.opt == 'adam':
            optimizer = torch.optim.Adam(get_opt_params(
                defense_model, classifier_only=args.co),
                                         args.lr,
                                         weight_decay=args.weight_decay)
        else:
            assert False, "Invalid optimizer specified"

        if args.resume:
            if os.path.isfile(args.resume):
                print("=> loading checkpoint '{}'".format(args.resume))
                checkpoint = torch.load(args.resume)
                if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
                    args.start_epoch = checkpoint['epoch']
                    #best_prec1 = checkpoint['best_prec1']
                    defense_model.load_state_dict(checkpoint['state_dict'])
                    optimizer.load_state_dict(checkpoint['optimizer'])
                    print("=> loaded checkpoint '{}' (epoch {})".format(
                        args.resume, checkpoint['epoch']))
                else:
                    # load from a non-training state dict only checkpoint
                    defense_model.load_state_dict(checkpoint)
                    print("=> loaded checkpoint '{}'".format(args.resume))
            else:
                print("=> no checkpoint found at '{}'".format(args.resume))
                exit(-1)

        criterion = torch.nn.CrossEntropyLoss().cuda()

        best_prec1 = 0
        for epoch in range(args.start_epoch, args.epochs):
            adjust_learning_rate(args.lr,
                                 optimizer,
                                 epoch,
                                 decay_epochs=args.decay_epochs)

            # train for one epoch
            train(args, adv_generator, defense_model, criterion, optimizer,
                  epoch)

            # evaluate on validation set
            prec1 = validate(args, val_loader, defense_model, criterion)

            #FIXME run another validation on all adversarial examples?

            # remember best prec@1 and save checkpoint
            is_best = prec1 > best_prec1
            best_prec1 = max(prec1, best_prec1)
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'arch': 'FIXME',
                    'state_dict': defense_model.state_dict(),
                    'best_prec1': best_prec1,
                    'optimizer': optimizer.state_dict(),
                },
                is_best,
                filename='checkpoint-%d.pth.tar' % epoch)

    if args.mp:
        adv_generator.shutdown()
        adv_generator.done()
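# Example 7 calls adjust_learning_rate and save_checkpoint without defining
# them in this excerpt. Hedged sketches in the spirit of the standard
# PyTorch ImageNet example follow (the repo's actual versions may differ;
# the 10x step decay and the checkpoint filenames are assumptions):

import shutil

def adjust_learning_rate(base_lr, optimizer, epoch, decay_epochs=30):
    # Step-decay the learning rate by 10x every decay_epochs epochs.
    lr = base_lr * (0.1 ** (epoch // decay_epochs))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr

def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    # Persist the full training state; keep a copy of the best model so far.
    torch.save(state, filename)
    if is_best:
        shutil.copyfile(filename, 'model_best.pth.tar')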
Example 8
def main():
    attack_start = time.time()

    args = parser.parse_args()

    dataset = Dataset(args.input_dir, target_file='')

    batches_of_100 = len(dataset) / 100.0
    print("Batches of 100: {}".format(batches_of_100))
    print("Time limit per 100: {}".format(args.time_limit_per_100))
    # The 1.0 floor helps pass the validation tool, which would otherwise
    # see 0 images and therefore a 0-second budget.
    time_limit = max(batches_of_100 * args.time_limit_per_100, 1.0)
    print("Total time limit: {}".format(time_limit))

    FINAL_DEADLINE = attack_start + time_limit

    sys.stdout.flush()

    cfgs = [config_from_string(s) for s in args.ensemble]

    target_model = create_ensemble(cfgs, args.ensemble_weights,
                                   args.checkpoint_paths).cuda()
    target_model.eval()

    attack = SelectiveUniversal(args.max_epsilon,
                                target_model,
                                args.npy_files,
                                try_mirrors=args.try_mirrors)

    runner = ImageSaveAttackRunner(dataset,
                                   args.output_dir,
                                   time_limit_per_100=args.time_limit_per_100)
    performance = runner.run(attack, 1)

    del attack

    remaining_indices = [
        idx for idx, perf in enumerate(performance) if not perf
    ]

    dataset2 = Subset(dataset, remaining_indices)

    if args.no_augmentation:
        augmentation = lambda x: x
    else:
        augmentation = nn.Sequential(
            processing.RandomMirror(0.5),
            processing.RandomGaussianBlur(0.5, 5, 3),
            processing.RandomCrop(),
        )

    attack = CWInspired(target_model,
                        augmentation,
                        max_epsilon=args.max_epsilon,
                        n_iter=200,
                        lr=args.lr,
                        targeted=False,
                        target_nth_highest=3,
                        prob_dont_augment=0.0,
                        initial_w_matrix=None)

    time_remaining = FINAL_DEADLINE - time.time()

    images_remaining = len(dataset2)
    # Guard against division by zero if the first attack succeeded everywhere
    time_remaining_per_100 = time_remaining / (max(images_remaining, 1) / 100.0)

    print("Images remaining for cw_inspired: {}".format(images_remaining))
    print("Time remaining: {}".format(time_remaining))
    print("Time remaining per 100: {}".format(time_remaining_per_100))
    sys.stdout.flush()

    runner = ImageSaveAttackRunner(dataset2,
                                   args.output_dir,
                                   time_limit_per_100=time_remaining_per_100)
    runner.run(attack, args.batch_size)
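# Subset above narrows the second, slower cw_inspired pass to just the
# images SelectiveUniversal failed on. If the repo's own Subset helper is
# not available, torch.utils.data.Subset (PyTorch >= 0.4.1) takes the same
# (dataset, indices) arguments:
#
#     from torch.utils.data import Subset
#     dataset2 = Subset(dataset, remaining_indices)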
Example 9
ensemble = [
    'adv_inception_resnet_v2', 'inception_v3_tf', 'adv_inception_v3',
    'densenet161', 'dpn68b_extra', 'resnet101'
]
ensemble_weights = [3.0, 2.0, 1.0, 1.0, 1.0, 1.0]
"""

#Just inception v3
from models.model_configs import config_from_string
ensemble = ['adv_inception_v3']
ensemble_weights = [1.0]
"""

checkpoint_paths = [
    os.path.join(CHECKPOINT_DIR,
                 config_from_string(m)['checkpoint_file']) for m in ensemble
]

out_filename = '{}_{}_eps{}'.format(''.join(ensemble), TARGET_CLASS,
                                    max_epsilon)

dataset = Dataset(input_dir)

img_size = 299
cfgs = [config_from_string(s) for s in ensemble]
target_model = create_ensemble(cfgs, ensemble_weights, checkpoint_paths).cuda()
target_model.eval()

eps = max_epsilon / 255.0  # scale the integer pixel budget to the [0, 1] tensor range

loader = data.DataLoader(dataset, batch_size=batch_size, shuffle=True)