Example 1
def _index_patches():
    print("=" * 30 + "GENERATING QUILTING PATCHES AND INDICES" + "=" * 30)
    args = opts.parse_args(opts.OptType.QUILTING_PATCHES)
    args.num_patches = 10000
    args.quilting_patch_size = 5
    args.index_file = str(os.path.join(args.quilting_index_root,
        "index_{}.faiss".format(args.quilting_patch_size)))
    args.patches_file = str(os.path.join(args.quilting_patch_root,
        "patches_{}.pickle".format(args.quilting_patch_size)))
    create_faiss_patches(args)
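

# For context: a minimal sketch of the artifacts _index_patches() produces,
# assuming only faiss, numpy, and pickle are available (the random patches
# stand in for real image patches; names here are illustrative).
import pickle

import faiss
import numpy as np


def _sketch_build_flat_index():
    patches = np.random.rand(1000, 5 * 5 * 3).astype('float32')  # 5x5 RGB
    index = faiss.IndexFlatL2(patches.shape[1])  # exact L2 index, no training
    index.add(patches)
    faiss.write_index(index, 'index_5.faiss')
    with open('patches_5.pickle', 'wb') as fwrite:
        pickle.dump(patches, fwrite, pickle.HIGHEST_PROTOCOL)

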
def _train_model():
    args = opts.parse_args(opts.OptType.TRAIN)

    # edit default args
    # To train on transformed images (i.e. to build a transformed model),
    #  set defenses to <tvm|quilting>
    args.defenses = None  # defenses: <raw|tvm|quilting|jpeg|quantization>
    args.model = "resnet50"
    args.normalize = True  # apply normalization on input data
    args.resume = True  # Resume training from checkpoint if available

    train_model(args)


def main():
    # parse input arguments:
    args = opts.parse_args(opts.OptType.ADVERSARIAL)

    # Only runs one method at a time
    assert args.operation is not None, \
        "operation to run can't be None"
    assert OperationType.has_value(args.operation), \
        "\"{}\" operation not defined".format(args.operation)

    if args.operation == str(OperationType.GENERATE_ADVERSARIAL):
        generate_adversarial_images(args)
    elif args.operation == str(OperationType.CONCAT_ADVERSARIAL):
        concat_adversarial(args)
    elif args.operation == str(OperationType.COMPUTE_STATS):
        compute_stats(args)
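

# OperationType is not shown in this example; a hypothetical equivalent that
# supports the str() and has_value() usage in main() above:
from enum import Enum


class OperationType(Enum):
    GENERATE_ADVERSARIAL = "generate_adversarial"
    CONCAT_ADVERSARIAL = "concat_adversarial"
    COMPUTE_STATS = "compute_stats"

    def __str__(self):
        return self.value

    @classmethod
    def has_value(cls, value):
        return value in (item.value for item in cls)

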
def _generate_adversarial_images():
    # load default args for adversary functions
    args = opts.parse_args(opts.OptType.ADVERSARIAL)
    # edit default args
    args.operation = "generate_adversarial"
    args.model = "resnet50"
    args.adversary_to_generate = "fgs"
    args.defenses = None
    args.partition_size = 1  # Number of samples to generate
    args.n_samples = 10000  # Total samples in input data
    args.data_type = "val"  # input dataset type
    args.normalize = True  # apply normalization on input data
    args.attack_type = "blackbox"  # For <whitebox> attack, use transformed models
    args.pretrained = True  # Use pretrained model from model-zoo

    generate_adversarial_images(args)
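

# The "fgs" adversary is the fast gradient sign method. A minimal PyTorch
# sketch of the attack itself (fgs_attack and eps are illustrative; a real
# implementation would also handle batching, clipping, and normalization):
import torch.nn.functional as F


def fgs_attack(model, x, y, eps=0.01):
    # take one gradient-sign step in the direction that increases the loss
    x_adv = x.clone().detach().requires_grad_(True)
    loss = F.cross_entropy(model(x_adv), y)
    loss.backward()
    return (x_adv + eps * x_adv.grad.sign()).detach()

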
def _classify_images():
    # classify images without any attack or defense
    args = opts.parse_args(opts.OptType.CLASSIFY)

    # edit default args
    args.n_samples = 1  # Total samples in input data
    args.normalize = True  # apply normalization on input data
    args.pretrained = True  # Use pretrained model from model-zoo
    # To classify transformed images with a transformed model, set defenses
    #  to <tvm|quilting>
    args.defenses = None
    # To classify attacked images, set attack_type to <blackbox|whitebox>
    args.attack_type = None
    # To classify attacked images, set adversary to <fgs|ifgs|cwl2|deepfool>
    args.adversary = None

    classify_images(args)


def _generate_transformed_images():
    # load default args for transformation functions
    args = opts.parse_args(opts.OptType.TRANSFORMATION)
    # edit default args
    # Apply transformations on raw images,
    #  for adversarial images use "transformation_on_adv"
    args.operation = "transformation_on_raw"
    args.adversary = None  # update to adversary for operation "transformation_on_adv"
    # Quilting expects patch data at QUILTING_ROOT (defined in
    #  path_config.json or passed via args)
    args.defenses = ["quilting"]  # <tvm|quilting|jpeg|quantize>
    args.partition_size = 1  # Number of samples to generate
    args.data_type = "val"  # input dataset type

    # args.n_samples = 50000  # Total samples in input data when reading from .pth files
    # args.attack_type = "blackbox"  # Used for file paths for "transformation_on_adv"

    generate_transformed_images(args)
    print('Transformed images saved at {}'.format(
        os.path.join(args.partition_dir, args.defenses[0])))
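

# The transformation defenses themselves are plain image operations. Hedged
# sketches of the two simplest, "jpeg" and "quantize" (Pillow and numpy are
# assumed; function names and parameter values are illustrative):
import io

import numpy as np
from PIL import Image


def jpeg_defense(img, quality=75):
    # re-encode a PIL image at reduced quality to wash out small perturbations
    buf = io.BytesIO()
    img.save(buf, format='JPEG', quality=quality)
    buf.seek(0)
    return Image.open(buf)


def quantize_defense(arr, depth=3):
    # reduce each channel of a float image in [0, 1] to 2**depth levels
    levels = 2 ** depth - 1
    return np.round(arr * levels) / levels
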
Example 7
def main():
    # parse input arguments:
    args = opts.parse_args(opts.OptType.ADVERSARIAL)

    # Only runs one method at a time
    assert args.operation is not None, \
        "operation to run can't be None"
    assert OperationType.has_value(args.operation), \
        "\"{}\" operation not defined".format(args.operation)
    if args.attack_type == str(constants.AttackType.WHITEBOX):
        assert args.defenses is not None, \
            "For whitebox attacks, atleast one defense is required"
    elif args.defenses is not None:
        print("Warning: Defenses will be unused for non whitebox attacks")

    if args.operation == str(OperationType.GENERATE_ADVERSARIAL):
        generate_adversarial_images(args)
    elif args.operation == str(OperationType.CONCAT_ADVERSARIAL):
        concat_adversarial(args)
    elif args.operation == str(OperationType.COMPUTE_STATS):
        compute_stats(args)

# (excerpt from create_faiss_patches(args); the dataset-loading code above
#  this point is elided)
    image_dataset.imgs = image_dataset.imgs[:20000]  # we don't need all images

    # gather image patches:
    print('| gather image patches...')
    patches = gather_patches(
        image_dataset,
        args.num_patches,
        args.patch_size,
        patch_transform=None,
    )

    # build faiss index:
    print('| training faiss index...')
    faiss_index, sub_index = index_patches(patches, pca_dims=args.pca_dims)
    # NOTE: Keep reference to sub_index to prevent it from being GC'ed

    # save faiss index and patches:
    print('| writing faiss index to %s' % args.index_file)
    faiss.write_index(faiss_index, args.index_file)
    with open(args.patches_file, 'wb') as fwrite:
        print('| writing patches to %s' % args.patches_file)
        pickle.dump(patches, fwrite, pickle.HIGHEST_PROTOCOL)
    print('| done.')


# run:
if __name__ == '__main__':
    # parse input arguments:
    args = opts.parse_args(opts.OptType.QUILTING_PATCHES)
    create_faiss_patches(args)
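

# index_patches() is not shown above; given the pca_dims argument and the
# note about keeping sub_index alive, a plausible sketch built on faiss's
# PCA pre-transform (patches is assumed to be an (n, d) float32 array):
import faiss


def index_patches(patches, pca_dims=64):
    pca = faiss.PCAMatrix(patches.shape[1], pca_dims)  # learned projection
    sub_index = faiss.IndexFlatL2(pca_dims)  # exact search in PCA space
    faiss_index = faiss.IndexPreTransform(pca, sub_index)
    faiss_index.train(patches)  # fits the PCA matrix
    faiss_index.add(patches)
    return faiss_index, sub_index  # caller must keep sub_index alive
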
Example 9
# (excerpt from generate_transformed_images(args); the enclosing operation
#  dispatch and loop over defenses are elided)
            adv_params = constants.get_adv_params(args, idx)
            print("| adv_params: ", adv_params)
            if args.data_batches is None:
                concatenate_data(args, defense_name, adv_params)
            else:
                for i in range(args.data_batches):
                    concatenate_data(args,
                                     defense_name,
                                     adv_params,
                                     data_batch_idx=i)

    elif args.operation == str(OperationType.TRANSFORM_RAW):
        start_class_idx = args.partition * args.partition_size
        end_class_idx = (args.partition + 1) * args.partition_size
        class_indices = range(start_class_idx, end_class_idx)
        for defense_name in args.defenses:
            defense = get_defense(defense_name, args)
            data_type = args.data_type if args.data_type == "train" else "valid"
            dataset = load_dataset(args,
                                   data_type,
                                   defense,
                                   class_indices=class_indices)
            transformation_on_raw(args, dataset, defense_name)


# run:
if __name__ == '__main__':
    # parse input arguments:
    args = opts.parse_args(opts.OptType.TRANSFORMATION)
    generate_transformed_images(args)
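

# The quilting defense rebuilds an image entirely from database patches: each
# tile is replaced by its nearest neighbour in the faiss index built earlier.
# A hedged sketch for an HxWxC float numpy array with non-overlapping tiles
# (quilt_image and its arguments are illustrative):
def quilt_image(img, faiss_index, patches, patch_size=5):
    h, w, c = img.shape
    out = img.copy()
    for y in range(0, h - patch_size + 1, patch_size):
        for x in range(0, w - patch_size + 1, patch_size):
            query = img[y:y + patch_size, x:x + patch_size]
            query = query.reshape(1, -1).astype('float32')
            _, nn = faiss_index.search(query, 1)  # id of the nearest patch
            out[y:y + patch_size, x:x + patch_size] = \
                patches[nn[0, 0]].reshape(patch_size, patch_size, c)
    return out
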
Example 10
# (excerpt from classify_images(args); the per-defense evaluation loop that
#  fills all_defense_probs is elided)
        dataset = None
        model = None

    # Ensemble defense probabilities
    if args.ensemble == 'max':
        all_defense_probs = torch.max(all_defense_probs, dim=0)[0]
    elif args.ensemble == 'avg':  # for average ensembling
        all_defense_probs = torch.mean(all_defense_probs, dim=0)
    else:  # for no ensembling
        assert all_defense_probs.size(0) == 1
        all_defense_probs = all_defense_probs[0]
    # Calculate top1 and top5 accuracy
    prec1, prec5 = accuracy(all_defense_probs, targets, topk=(1, 5))
    print('=' * 50)
    print('Results for model={}, attack={}, ensemble_type={}'.format(
        args.model, args.adversary, args.ensemble))
    prec1 = prec1[0]
    prec5 = prec5[0]
    print('| classification accuracy @1: %2.5f' % (prec1))
    print('| classification accuracy @5: %2.5f' % (prec5))
    print('| classification error @1: %2.5f' % (100. - prec1))
    print('| classification error @5: %2.5f' % (100. - prec5))
    print('| done.')


# run:
if __name__ == '__main__':
    # parse input arguments
    args = opts.parse_args(opts.OptType.CLASSIFY)
    classify_images(args)
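

# accuracy() above is the usual top-k precision helper; a common PyTorch
# formulation returning percentages, matching the prints above:
def accuracy(output, target, topk=(1,)):
    maxk = max(topk)
    _, pred = output.topk(maxk, dim=1, largest=True, sorted=True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / target.size(0)))
    return res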


# (excerpt from train_model(args); model/criterion/optimizer setup and the
#  enclosing data_loader_hook(epoch) definition are elided)
            print('| epoch %d, Loading data:' % epoch)
            for key in {'train', 'valid'}:
                loaders[key] = get_data_loader(
                    load_dataset(args, key, defense),
                    batchsize=args.batchsize,
                    device=args.device,
                    shuffle=True,
                )

        return loaders['train']

    # train the model:
    print('| training model...')
    train(model,
          criterion,
          optimizer,
          start_epoch_hook=start_epoch_hook,
          end_epoch_hook=end_epoch_hook,
          data_loader_hook=data_loader_hook,
          start_epoch=args.start_epoch,
          end_epoch=args.end_epoch,
          learning_rate=args.lr)
    print('| done.')


# run all the things:
if __name__ == '__main__':
    # parse input arguments:
    args = opts.parse_args(opts.OptType.TRAIN)
    train_model(args)
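

# train() itself is not shown in this example; a hypothetical driver that is
# consistent with the hook-based call above (the hook signatures here are
# assumptions, not the repository's actual API):
def train(model, criterion, optimizer, start_epoch_hook=None,
          end_epoch_hook=None, data_loader_hook=None,
          start_epoch=0, end_epoch=90, learning_rate=0.1):
    for group in optimizer.param_groups:
        group['lr'] = learning_rate  # a real driver would decay this over time
    for epoch in range(start_epoch, end_epoch):
        data_loader = data_loader_hook(epoch) if data_loader_hook else None
        if start_epoch_hook is not None:
            start_epoch_hook(epoch, model, optimizer)
        model.train()
        for inputs, targets in data_loader:
            optimizer.zero_grad()
            loss = criterion(model(inputs), targets)
            loss.backward()
            optimizer.step()
        if end_epoch_hook is not None:
            end_epoch_hook(epoch, model, optimizer)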