Example #1
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--arch',
                        type=str,
                        choices=ARCHS_LIST,
                        default='resnet50')
    parser.add_argument('--dataset',
                        type=str,
                        default='dataset/imagenet-airplanes.pt')
    parser.add_argument('--pretrained', default=False, action='store_true')
    parser.add_argument('--checkpoint_location', type=str, default=None)
    parser.add_argument('--epochs', type=int, default=1)
    parser.add_argument('--learning_rate', type=float, default=1e-2)
    parser.add_argument('--weight_averaging',
                        default=False,
                        action='store_true')
    parser.add_argument('--adversarial', default=False, action='store_true')
    parser.add_argument('--save_file_location',
                        type=str,
                        default='models/' + str(get_current_time()) + '.pt')
    args_dict = vars(parser.parse_args())

    validate_save_file_location(args_dict['save_file_location'])

    if os.path.exists(args_dict['dataset']):
        dataset_properties = torch.load(args_dict['dataset'])

        # Copy the defaults so the shared PGD_DEFAULT_ARGS_DICT is not mutated.
        pgd_args_dict = PGD_DEFAULT_ARGS_DICT.copy()
        pgd_args_dict['arch'] = args_dict['arch']
        pgd_args_dict['dataset'] = dataset_properties['images']
        pgd_args_dict['eps'] = 32 / 255.0
        pgd_args_dict['step_size'] = 32 / 255.0

        images = torch.load(dataset_properties['images'])

        if dataset_properties['labels'] is None:
            # No labels are provided, so pseudo-label each image with the
            # argmax prediction of a pretrained model.
            eval_model = get_model(arch=args_dict['arch'], pretrained=True)
            normalize = Normalizer(mean=[0.485, 0.456, 0.406],
                                   std=[0.229, 0.224, 0.225])
            labels = [
                torch.argmax(eval_model(normalize(x.unsqueeze(0))))
                for x in images
            ]
        else:
            labels = torch.load(dataset_properties['labels'])

        trainer = Trainer(args_dict, pgd_args_dict)
        trainer.fit(images, labels)
        trainer.serialize()
    else:
        raise ValueError('Specified dataset location is incorrect!')
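A minimal sketch of the Normalizer callable used for pseudo-labeling above, assuming it simply applies channel-wise (x - mean) / std normalization as a small torch.nn.Module (the class body is an assumption, not the project's actual implementation):

import torch


class Normalizer(torch.nn.Module):
    """Channel-wise (x - mean) / std normalization for image batches."""

    def __init__(self, mean, std):
        super().__init__()
        # Register the statistics as buffers so they follow the module
        # across .cuda()/.cpu() moves.
        self.register_buffer('mean', torch.tensor(mean).view(1, -1, 1, 1))
        self.register_buffer('std', torch.tensor(std).view(1, -1, 1, 1))

    def forward(self, x):
        return (x - self.mean) / self.std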
Example #2
File: pgd.py Project: RoZvEr/adversarial
def normalize_args_dict(args_dict):
    time = str(get_current_time())
    if args_dict['save_file_location'] is None:
        args_dict['save_file_location'] = 'results/pgd_new_experiments/' + time + '.pt'
    validate_save_file_location(args_dict['save_file_location'])

    # Rescale the pixel-unit values to the [0, 1] image range
    # (eps is only rescaled for the l-inf norm).
    if args_dict['norm'] == 'linf':
        args_dict['eps'] = args_dict['eps'] / 255.0
    args_dict['step_size'] = args_dict['step_size'] / 255.0
    args_dict['sigma'] = args_dict['sigma'] / 255.0

    if args_dict['norm'] == 'linf':
        args_dict['restart_iterations'] = int((args_dict['eps'] / args_dict['step_size']) * 2)
    else:
        args_dict['restart_iterations'] = 10

    return args_dict
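A hypothetical usage sketch of normalize_args_dict (the path and budget values are illustrative, and validate_save_file_location is assumed to accept the given location): for an l-inf attack, the pixel-space budgets are rescaled to the [0, 1] image range and the restart schedule follows from the eps/step-size ratio.

args = {'save_file_location': 'results/pgd_new_experiments/demo.pt',
        'norm': 'linf', 'eps': 8.0, 'step_size': 2.0, 'sigma': 0.0}
args = normalize_args_dict(args)
# args['eps'] == 8 / 255.0 and args['step_size'] == 2 / 255.0
# args['restart_iterations'] == int((8 / 2) * 2) == 8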
Example #3
def main():
    time = get_current_time()

    parser = argparse.ArgumentParser()
    parser.add_argument('--arch',
                        type=str,
                        choices=ARCHS_LIST,
                        default='resnet50')
    parser.add_argument('--pretrained', default=False, action='store_true')
    parser.add_argument('--checkpoint_location', type=str, default=None)
    parser.add_argument('--from_robustness',
                        default=False,
                        action='store_true')
    parser.add_argument('--dataset', type=str, default='dataset/coco')
    parser.add_argument('--normalize_grads',
                        default=False,
                        action='store_true')
    parser.add_argument('--save_file_location',
                        type=str,
                        default='results/gradient/' + time + '.pt')
    args_dict = vars(parser.parse_args())

    validate_save_file_location(args_dict['save_file_location'])

    if args_dict['checkpoint_location'] is not None:
        model = load_model(
            location=args_dict['checkpoint_location'],
            arch=args_dict['arch'],
            from_robustness=args_dict['from_robustness']).cuda().eval()
    else:
        model = get_model(
            args_dict['arch'],
            args_dict['pretrained']).cuda().eval()

    criterion = torch.nn.CrossEntropyLoss(reduction='none')

    averages = get_averages_dict(model, criterion, args_dict)
    torch.save({
        'averages': averages,
        'args': args_dict
    }, args_dict['save_file_location'])
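The saved file bundles the computed averages with the parsed arguments, so both can be reloaded together later; a minimal follow-up sketch (the path is illustrative):

results = torch.load('results/gradient/example.pt')
print(results['args']['arch'])  # architecture the averages were computed for
print(results['averages'])      # output of get_averages_dict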
Example #4
File: patch.py Project: RoZvEr/adversarial
def main():
    time = str(get_current_time())
    parser = argparse.ArgumentParser()
    parser.add_argument('--arch',
                        type=str,
                        choices=ARCHS_LIST,
                        default='resnet50')
    parser.add_argument('--checkpoint_location', type=str, default=None)
    parser.add_argument('--from_robustness',
                        default=False,
                        action='store_true')
    parser.add_argument('--dataset',
                        type=str,
                        default='dataset/imagenet-airplanes-images.pt')
    parser.add_argument('--masks', default=False, action='store_true')
    parser.add_argument('--eps', type=float, default=8)
    parser.add_argument('--norm',
                        type=str,
                        choices=['l2', 'linf'],
                        default='linf')
    parser.add_argument('--step_size', type=float, default=1)
    parser.add_argument('--num_iterations', type=int, default=10)
    parser.add_argument('--unadversarial', default=False, action='store_true')
    parser.add_argument('--targeted', default=False, action='store_true')
    parser.add_argument('--eot', default=False, action='store_true')
    parser.add_argument('--transfer', default=False, action='store_true')
    parser.add_argument('--selective_transfer',
                        default=False,
                        action='store_true')
    parser.add_argument('--num_surrogates',
                        type=int,
                        choices=range(0, len(ARCHS_LIST) - 1),
                        default=5)
    parser.add_argument('--save_file_location',
                        type=str,
                        default='results/pgd_new_experiments/patch-' + time +
                        '.pt')
    args_ns = parser.parse_args()

    args_dict = vars(args_ns)

    validate_save_file_location(args_dict['save_file_location'])

    # Convert the pixel-space budgets to the [0, 1] image range.
    args_dict['eps'] = args_dict['eps'] / 255.0
    args_dict['step_size'] = args_dict['step_size'] / 255.0

    print('Running PGD experiment with the following arguments:')
    print(str(args_dict) + '\n')

    if args_dict['checkpoint_location'] is None:
        model = get_model(arch=args_dict['arch'], parameters='standard').eval()
    else:
        model = load_model(
            location=args_dict['checkpoint_location'],
            arch=args_dict['arch'],
            from_robustness=args_dict['from_robustness']).eval()

    attacker = Attacker(model, args_dict)
    target = torch.zeros(1000)
    target[TARGET_CLASS] = 1

    print('Loading dataset...')
    if args_dict['masks']:
        dataset = torch.load(args_dict['dataset'])
        dataset_length = len(dataset)
    else:
        images = torch.load(args_dict['dataset'])
        masks = [torch.zeros_like(images[0])] * len(images)
        dataset = zip(images, masks)
        dataset_length = len(images)
    print('Finished!\n')

    adversarial_examples_list = []
    predictions_list = []

    print('Starting PGD...')
    for index, (image, mask) in enumerate(dataset):
        print('Image: ' + str(index + 1) + '/' + str(dataset_length))
        original_prediction = model(image.unsqueeze(0))

        if not args_dict['targeted']:
            target = original_prediction

        patch_mask = get_patch_mask(mask)
        patch_mask = torch.cat(3 * [patch_mask]).view(image.size())

        image = image * flip_values(patch_mask)

        adversarial_example = attacker(image.cuda(), patch_mask.cuda(), target,
                                       False)
        adversarial_prediction = model(adversarial_example.unsqueeze(0))

        if args_dict['unadversarial'] or args_dict['targeted']:
            expression = torch.argmax(adversarial_prediction) == torch.argmax(
                target)
        else:
            expression = torch.argmax(adversarial_prediction) != torch.argmax(
                target)

        status = 'Success' if expression else 'Failure'
        print('Attack status: ' + status + '\n')

        adversarial_examples_list.append(adversarial_example)
        predictions_list.append({
            'original': original_prediction,
            'adversarial': adversarial_prediction
        })
    print('Finished!')

    print('Serializing results...')
    torch.save(
        {
            'adversarial_examples': adversarial_examples_list,
            'predictions': predictions_list,
            'args_dict': args_dict
        }, args_dict['save_file_location'])
    print('Finished!\n')
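The flip_values helper used above is not shown in this snippet. Assuming the patch mask is binary, a minimal sketch of such an inversion helper (an assumption, not the project's actual code):

def flip_values(mask):
    # Invert a binary {0, 1} mask so the patch region of the image is
    # zeroed out before the attacker writes the adversarial patch into it.
    return 1.0 - mask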
Example #5
def main():
    time = get_current_time()

    parser = argparse.ArgumentParser()
    parser.add_argument('--model', type=str, choices=ARCHS_LIST, default='resnet50')
    parser.add_argument('--dataset', type=str, default='dataset/imagenet')
    parser.add_argument('--masks', default=False, action='store_true')
    parser.add_argument('--num_samples', type=int, default=50)
    parser.add_argument('--gradient_priors', default=False, action='store_true')
    parser.add_argument('--grad_iterations', type=int, default=32)
    parser.add_argument('--attack_type', type=str, choices=['nes', 'simba'], default='simba')
    parser.add_argument('--conv', default=False, action='store_true')
    parser.add_argument('--substitute_model', type=str, choices=ARCHS_LIST, default='resnet152')
    parser.add_argument('--ensemble_selection', default=False, action='store_true')
    parser.add_argument('--transfer', default=False, action='store_true')
    parser.add_argument('--eps', type=float, default=1)
    parser.add_argument('--step_size', type=float, default=1/255.0)
    parser.add_argument('--num_iterations', type=int, default=1)
    parser.add_argument('--save_file_location', type=str, default='results/blackbox/' + time + '.pt')
    args_dict = vars(parser.parse_args())

    validate_save_file_location(args_dict['save_file_location'])

    model = get_model(args_dict['model'], pretrained=True, freeze=True).cuda().eval()

    if not args_dict['masks']:
        dataset = load_imagenet(args_dict['dataset'])
        loader, _ = dataset.make_loaders(workers=10, batch_size=1)
    else:
        loader = torch.load(args_dict['dataset'])

    adversarial_examples_list = []
    predictions_list = []
    substitute_model, criterion, pgd_attacker = None, None, None

    if args_dict['attack_type'] == 'nes':
        attack = nes
    else:
        attack = simba
        if args_dict['gradient_priors']:
            if args_dict['ensemble_selection']:
                pgd_attacker = Attacker(model.cuda(), PGD_DEFAULT_ARGS_DICT)
                pgd_attacker.args_dict['label_shifts'] = 0
                # Build the surrogate pool from a copy so the global ARCHS_LIST
                # is not mutated, then drop the target architecture.
                pgd_attacker.available_surrogates_list = list(ARCHS_LIST)
                pgd_attacker.available_surrogates_list.remove(args_dict['model'])
            else:
                substitute_model = get_model(args_dict['substitute_model'],
                                             pretrained=True, freeze=True).cuda().eval()

    for index, entry in enumerate(loader):
        if args_dict['masks']:
            image, mask = entry
            image.unsqueeze_(0)
            original_prediction = predict(model, image.cuda())
            label = torch.argmax(original_prediction, dim=1)
        else:
            image, label = entry
            mask = torch.ones_like(image)

            with torch.no_grad():
                original_prediction = predict(model, image.cuda())
                predicted_label = torch.argmax(original_prediction, dim=1)
                if label.item() != predicted_label.item():
                    continue

        criterion = torch.nn.CrossEntropyLoss(reduction='none')

        image.squeeze_(0)
        delta = attack(model, image.cuda(), label.cuda(), mask.cuda(),
                       args_dict, substitute_model, criterion, pgd_attacker)
        adversarial_example = (image.cuda() + delta).clamp(0, 1)

        with torch.no_grad():
            adversarial_prediction = predict(model, adversarial_example.unsqueeze(0))

        adversarial_examples_list.append(adversarial_example.cpu())
        predictions_list.append({'original': original_prediction.cpu(),
                                 'adversarial': adversarial_prediction.cpu()})

        if index == args_dict['num_samples'] - 1:
            break

    torch.save({'adversarial_examples': adversarial_examples_list,
                'predictions': predictions_list,
                'args_dict': args_dict},
               args_dict['save_file_location'])
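A hypothetical post-processing sketch: because the original and adversarial predictions are stored per image, the untargeted success rate can be read back from the serialized results.

results = torch.load(args_dict['save_file_location'])
flipped = sum(
    int(torch.argmax(entry['original']) != torch.argmax(entry['adversarial']))
    for entry in results['predictions'])
print('Success rate:', flipped / max(len(results['predictions']), 1))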