def avg_successful_lpips(self, eval_label, attack_out):
    ######################################################################
    #   First set up evaluation result if it doesn't exist:              #
    ######################################################################
    if self.results[eval_label] is None:
        self.results[eval_label] = utils.AverageMeter()
    result = self.results[eval_label]

    # Build the LPIPS model once and cache it in params; reuse thereafter
    if self.params[eval_label] is None:
        dist_model = dm.DistModel(net='alex', manual_gpu=self.use_gpu)
        self.params[eval_label] = {'dist_model': dist_model}
    dist_model = self.params[eval_label]['dist_model']

    ######################################################################
    #   Compute which attacks were successful                            #
    ######################################################################
    successful_pert, successful_orig = self._get_successful_attacks(
                                                               attack_out)

    if successful_pert is None or successful_pert.numel() == 0:
        return
    successful_pert = Variable(successful_pert)
    successful_orig = Variable(successful_orig)
    num_successful = successful_pert.shape[0]

    # LPIPS expects inputs in [-1.0, 1.0], so rescale from [0.0, 1.0]
    xform = lambda im: im * 2.0 - 1.0
    lpips_dist = dist_model.forward_var(xform(successful_pert),
                                        xform(successful_orig))
    avg_lpips_dist = float(torch.mean(lpips_dist))

    result.update(avg_lpips_dist, n=num_successful)
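A hedged usage sketch for the evaluator method above; the `evaluator` object and the shape of `attack_out` are assumptions inferred from the method's use of `self.results` and `self._get_successful_attacks`, and `AverageMeter.avg` exposing the running mean is an assumption about `utils.AverageMeter`:

def record_batch_lpips(evaluator, attack_out,
                       eval_label='avg_successful_lpips'):
    # Fold this batch's mean LPIPS over successful attacks into the
    # evaluator's running AverageMeter for the given label.
    evaluator.avg_successful_lpips(eval_label, attack_out)
    # .avg is assumed to be the running mean, weighted by the number of
    # successful examples per batch (via the n=num_successful updates).
    return evaluator.results[eval_label].avg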
def __init__(self, classifier_net, normalizer, loss_fxn, scale_constant,
             num_bin_search_steps=5, num_optim_steps=1000,
             distance_metric_type='l2', confidence=0.0, use_gpu=False):
    """ Most effective implementation of Carlini/Wagner's L2 attack as
        outlined in their paper: https://arxiv.org/pdf/1608.04644.pdf

        Reference Implementations:
            - https://github.com/rwightman/pytorch-nips2017-attack-example
            - https://github.com/tensorflow/cleverhans/
    ARGS:
        classifier_net: PyTorch NN
        normalizer: DifferentiableNormalize object to prep images for the
                    classifier
        loss_fxn: RegularizedLoss object - partially applied loss fxn that
                  takes [0.0, 1.0] image Variables and labels and outputs a
                  scalar loss Variable. Also has a zero_grad method
                  REFACTOR WITH BETTER SPECS!
        distance_metric_type: string - one of 'l2', 'linf', or 'lpips',
                  selecting the function that takes two [0.0, 1.0] images
                  and returns the 'distance' between them. Defaults to l2
                  distance
    """
    super(CW, self).__init__(classifier_net, normalizer, use_gpu=use_gpu)
    self.loss_fxn = loss_fxn
    self.scale_constant = scale_constant
    self.num_bin_search_steps = num_bin_search_steps
    self.num_optim_steps = num_optim_steps
    self.confidence = confidence
    self.use_gpu = use_gpu

    if distance_metric_type == 'l2':
        # x, y should be in [0.0, 1.0] range
        distance_metric = lambda x, y: torch.norm(x - y, 2)
    elif distance_metric_type == 'linf':
        # x, y should be in [0.0, 1.0] range
        distance_metric = lambda x, y: torch.max(torch.abs(x - y))
    elif distance_metric_type == 'lpips':
        # Perceptual distance is a little more involved... not defined here
        # x, y should be in [0.0, 1.0] range; the model expects [-1, 1]
        dist_model = dm.DistModel(net='alex', use_gpu=self.use_gpu)

        def distance_metric(x, y, dist_model=dist_model):
            xform = lambda im: im * 2.0 - 1.0
            dist = dist_model.forward_var(Variable(xform(x.unsqueeze(0))),
                                          Variable(xform(y.unsqueeze(0))))
            return float(dist)
    else:
        raise ValueError("Invalid distance metric: %s" %
                         distance_metric_type)
    self.distance_metric = distance_metric
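A hedged construction sketch for the attack above; `classifier_net`, `normalizer`, and `loss_fxn` are assumed to be pre-built objects matching the interfaces the docstring describes, and the `scale_constant` value is illustrative, not taken from the paper:

def build_cw_l2_attack(classifier_net, normalizer, loss_fxn, use_gpu=False):
    # scale_constant balances the distance term against the adversarial
    # loss; 0.1 is an illustrative starting point for the binary search.
    return CW(classifier_net, normalizer, loss_fxn,
              scale_constant=0.1,
              num_bin_search_steps=5,
              num_optim_steps=1000,
              distance_metric_type='l2',
              confidence=0.0,
              use_gpu=use_gpu)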
def __init__(self, fix_im, **kwargs):
    super(LpipsRegularization, self).__init__(fix_im)

    manual_gpu = kwargs.get('manual_gpu', None)
    if manual_gpu is not None:
        self.use_gpu = manual_gpu
    else:
        self.use_gpu = utils.use_gpu()

    self.dist_model = dm.DistModel(net='alex', manual_gpu=self.use_gpu)
def __init__(self, fix_im, **kwargs):
    super(LpipsRegularization, self).__init__(fix_im)

    use_gpu = kwargs.get('use_gpu', False)
    self.use_gpu = use_gpu
    self.dist_model = dm.DistModel(net='alex', use_gpu=self.use_gpu)
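For reference, a hedged standalone sketch of the LPIPS distance computation both regularizer variants wrap; `forward_var` and the [0, 1] to [-1, 1] rescaling are taken from the snippets above, while the batch shapes are assumptions:

from torch.autograd import Variable

def lpips_distance(dist_model, x, y):
    # x, y: [0.0, 1.0] image batches of shape (N, C, H, W); the perceptual
    # model expects inputs rescaled to [-1.0, 1.0].
    xform = lambda im: im * 2.0 - 1.0
    return dist_model.forward_var(Variable(xform(x)), Variable(xform(y)))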