Example #1
def gradient_inversion(gradient, labels, model, data_shape, dm, ds):
    print('Performing a gradient inversion attack.')
    # build the inversefed-specific config for the reconstruction attack
    c = dict(
        signed=True,
        boxed=True,
        cost_fn='sim',
        indices='def',
        weights='equal',
        lr=0.1,
        optim='adam',
        restarts=1,
        max_iterations=7000,
        total_variation=1e-6,
        init='randn',
        filter='none',
        lr_decay=True,
        scoring_choice='loss',
        loss_fn=torch.nn.CrossEntropyLoss(
            weight=None,
            size_average=None,
            ignore_index=-100,
            reduce=None,
            reduction='mean'),  # (loss function the model was trained with)
        early_stopper=EarlyStopping(1000, 0, 'ReconstructionLoss', 'min',
                                    False))
    rec_machine = inversefed.GradientReconstructor(model, (dm, ds),
                                                   c,
                                                   num_images=labels.shape[0])
    output, stats = rec_machine.reconstruct(gradient,
                                            labels,
                                            img_shape=data_shape)

    return output, stats['opt']
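
A minimal usage sketch for the function above (not part of the example): it assumes a small CIFAR-10-style classifier, a random stand-in batch, and that the EarlyStopping helper and the extra loss_fn/early_stopper config keys come from a project-specific variant of the inversefed library. The shared gradient is computed the same way as in Example #4 below.

import torch
import torchvision
import inversefed

device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = torchvision.models.resnet18(num_classes=10).to(device).eval()  # hypothetical victim model

# CIFAR-10 normalization constants, broadcastable over (C, H, W)
dm = torch.as_tensor(inversefed.consts.cifar10_mean, device=device)[:, None, None]
ds = torch.as_tensor(inversefed.consts.cifar10_std, device=device)[:, None, None]

# a single victim batch (random stand-in for real client data)
images = torch.randn(4, 3, 32, 32, device=device)
labels = torch.randint(0, 10, (4,), device=device)

# the gradient an honest client would share for this batch
loss = torch.nn.CrossEntropyLoss()(model(images), labels)
gradient = [g.detach() for g in torch.autograd.grad(loss, list(model.parameters()))]

reconstruction, rec_loss = gradient_inversion(gradient, labels, model,
                                              data_shape=(3, 32, 32), dm=dm, ds=ds)
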
Example #2
                          cost_fn='l2',
                          indices='def',
                          weights='equal',
                          lr=1e-4,
                          optim='LBFGS',
                          restarts=args.restarts,
                          max_iterations=300,
                          total_variation=args.tv,
                          init=args.init,
                          filter='none',
                          lr_decay=False,
                          scoring_choice=args.scoring_choice)

        rec_machine = inversefed.GradientReconstructor(
            model, (dm, ds),
            config,
            num_images=args.num_images,
            ref_img=ground_truth)
        output, stats = rec_machine.reconstruct(input_gradient,
                                                labels,
                                                img_shape=img_shape,
                                                dryrun=args.dryrun)

    else:
        local_gradient_steps = args.accumulation
        local_lr = 1e-4
        input_parameters = inversefed.reconstruction_algorithms.loss_steps(
            model,
            ground_truth,
            labels,
            lr=local_lr,
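
The snippet above breaks off inside a call to inversefed.reconstruction_algorithms.loss_steps, which, judging by the local_lr and local_gradient_steps arguments, emulates a client performing several local update steps. The input_gradient attacked in the first branch is produced earlier in the script; a sketch of how such a gradient is typically obtained, mirroring Example #4 below (model, ground_truth and labels are assumed to be defined):

import torch

model.zero_grad()
target_loss = torch.nn.CrossEntropyLoss()(model(ground_truth), labels)
params = [p for p in model.parameters() if p.requires_grad]
input_gradient = [g.detach() for g in torch.autograd.grad(target_loss, params)]
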
Example #3
                else:
                    raise ValueError(
                        f'Unknown data type argument {args.dtype}.')
                print(
                    f'Model and input parameter moved to {args.dtype}-precision.'
                )
                dm = torch.as_tensor(inversefed.consts.cifar10_mean,
                                     **setup)[:, None, None]
                ds = torch.as_tensor(inversefed.consts.cifar10_std,
                                     **setup)[:, None, None]
                ground_truth = ground_truth.to(**setup)
                input_gradient = [g.to(**setup) for g in input_gradient]
                model.to(**setup)
                model.eval()

            rec_machine = inversefed.GradientReconstructor(
                model, (dm, ds), config, num_images=args.num_images)

            if args.optim == 'zhu' and args.soft_labels:
                rec_machine.iDLG = False
                output, stats = rec_machine.reconstruct(input_gradient,
                                                        None,
                                                        img_shape=img_shape,
                                                        dryrun=args.dryrun)
            else:
                output, stats = rec_machine.reconstruct(input_gradient,
                                                        labels,
                                                        img_shape=img_shape,
                                                        dryrun=args.dryrun)

        else:
            local_gradient_steps = args.accumulation
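
Example #3 sets rec_machine.iDLG = False and passes labels=None when soft labels are used; the iDLG shortcut it disables infers a hard label analytically from the shared gradient instead of optimizing over it. A self-contained sketch of that trick (the toy classifier is an assumption, and inversefed's own implementation relies on a closely related statistic of the last-layer gradient): under cross-entropy with a single sample, the gradient with respect to the final layer's bias is negative only at the ground-truth class.

import torch
import torch.nn as nn

torch.manual_seed(0)
toy_model = nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 32, 10))  # toy classifier (assumption)
x = torch.randn(1, 3, 32, 32)
y = torch.tensor([5])

loss = nn.CrossEntropyLoss()(toy_model(x), y)
grads = torch.autograd.grad(loss, list(toy_model.parameters()))

# dL/db_i = softmax_i - 1[i == y], so only the true class gets a negative entry
bias_grad = grads[-1]
inferred_label = torch.argmin(bias_grad)
print(inferred_label.item() == y.item())  # True: the label is read off the gradient alone
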
Example #4
def reconstruct(idx, model, loss_fn, trainloader, validloader):

    if opt.data == 'cifar100':
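        # note: the CIFAR-10 normalization constants from inversefed.consts are used in this branch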
        dm = torch.as_tensor(inversefed.consts.cifar10_mean, **setup)[:, None, None]
        ds = torch.as_tensor(inversefed.consts.cifar10_std, **setup)[:, None, None]
    elif opt.data == 'FashionMinist':
        dm = torch.Tensor([0.1307]).view(1, 1, 1).cuda()
        ds = torch.Tensor([0.3081]).view(1, 1, 1).cuda()
    else:
        raise NotImplementedError

    # prepare data
    ground_truth, labels = [], []
    while len(labels) < num_images:
        img, label = validloader.dataset[idx]
        idx += 1
        if label not in labels:
            labels.append(torch.as_tensor((label,), device=setup['device']))
            ground_truth.append(img.to(**setup))

    ground_truth = torch.stack(ground_truth)
    labels = torch.cat(labels)
    model.zero_grad()
    target_loss, _, _ = loss_fn(model(ground_truth), labels)
    param_list = [param for param in model.parameters() if param.requires_grad]
    input_gradient = torch.autograd.grad(target_loss, param_list)


    # attack
    print('ground truth label is ', labels)
    rec_machine = inversefed.GradientReconstructor(model, (dm, ds), config, num_images=num_images)
    if opt.data == 'cifar100':
        shape = (3, 32, 32)
    elif opt.data == 'FashionMinist':
        shape = (1, 32, 32)

    if opt.rlabel:
        output, stats = rec_machine.reconstruct(input_gradient, None, img_shape=shape) # reconstruction label
    else:
        output, stats = rec_machine.reconstruct(input_gradient, labels, img_shape=shape) # specify label

    output_denormalized = output * ds + dm
    input_denormalized = ground_truth * ds + dm
    mean_loss = (input_denormalized - output_denormalized).pow(2).mean()
    print("after optimization, the true MSE loss is {}".format(mean_loss))

    save_dir = create_save_dir()
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    torchvision.utils.save_image(output_denormalized.cpu().clone(), '{}/rec_{}.jpg'.format(save_dir, idx))
    torchvision.utils.save_image(input_denormalized.cpu().clone(), '{}/ori_{}.jpg'.format(save_dir, idx))


    test_mse = (output_denormalized.detach() - input_denormalized).pow(2).mean().cpu().detach().numpy()
    feat_mse = (model(output.detach()) - model(ground_truth)).pow(2).mean()
    test_psnr = inversefed.metrics.psnr(output_denormalized, input_denormalized)

    return {'test_mse': test_mse,
            'feat_mse': feat_mse,
            'test_psnr': test_psnr}
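
A hypothetical driver (an assumption, not part of the example) showing how reconstruct might be called over several validation indices and the returned metrics averaged; model, loss_fn, the data loaders, and the module-level opt, config, setup and num_images globals are assumed to be set up elsewhere in the script.

def evaluate_attack(model, loss_fn, trainloader, validloader, start_idx=0, runs=5, stride=10):
    """Run the reconstruction attack several times and average the returned metrics."""
    results = []
    for run in range(runs):
        # stride spaces the runs over the validation set (roughly, since reconstruct itself
        # advances idx until it has collected num_images samples with distinct labels)
        stats = reconstruct(start_idx + run * stride, model, loss_fn, trainloader, validloader)
        results.append(stats)
    # feat_mse is a torch scalar and test_mse a numpy scalar; float() normalizes both
    return {key: sum(float(r[key]) for r in results) / len(results) for key in results[0]}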