    def avg_successful_ssim(self, eval_label, attack_out, ground_examples,
                            labels):
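        """ Computes the average (1 - SSIM) between original and adversarial
            examples, restricted to successful attacks, and updates the
            running AverageMeter stored under eval_label.
        """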
        # We actually compute (1 - ssim) so the value behaves like a distance metric
        ######################################################################
        #  First set up evaluation result if doesn't exist:                  #
        ######################################################################
        if self.results[eval_label] is None:
            self.results[eval_label] = utils.AverageMeter()

        result = self.results[eval_label]

        ######################################################################
        #  Compute which attacks were successful                             #
        ######################################################################
        successful_pert, successful_orig = self._get_successful_attacks(
            attack_out)
        if successful_pert is None or successful_pert.numel() == 0:
            return

        successful_pert = Variable(successful_pert)
        successful_orig = Variable(successful_orig)

        count = 0
        runsum = 0
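        # Each image is assumed to be a C x H x W tensor; transposing dims 0
        # and 2 puts channels last, which the (assumed) skimage SSIM
        # implementation expects when called with multichannel=True.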
        for og, adv in zip(successful_orig, successful_pert):
            count += 1
            runsum += ssim(og.transpose(0, 2).cpu().numpy(),
                           adv.transpose(0, 2).cpu().numpy(),
                           multichannel=True)

        avg_minus_ssim = 1 - (runsum / float(count))
        result.update(avg_minus_ssim, n=count)
    def avg_loss_value(self, eval_label, attack_out):
        """ Computes and keeps track of the average attack loss
        """

        ######################################################################
        #   First set up evaluation result if it doesn't exist               #
        ######################################################################

        if self.results[eval_label] is None:
            self.results[eval_label] = utils.AverageMeter()
        result = self.results[eval_label]

        ######################################################################
        #   Next collect the loss function and compute loss                  #
        ######################################################################
        attack_obj = self.attack_params.adv_attack_obj

        # Structure of loss objects varies based on which attack class used
        if isinstance(attack_obj, (aa.FGSM, aa.PGD)):
            attack_loss = attack_obj.loss_fxn
        elif isinstance(attack_obj, aa.CarliniWagner):
            attack_loss = attack_obj._construct_loss_fxn(1.0, 0.0)
        else:
            raise NotImplementedError("Loss evaluation not supported for "
                                      "attack type %s" % type(attack_obj))

        attack_loss.setup_attack_batch(attack_out[0])

        loss_val = attack_loss.forward(attack_out[0],
                                       attack_out[1],
                                       perturbation=attack_out[4])
        loss_val_sum = float(torch.sum(loss_val))

        count = attack_out[0].shape[0]
        result.update(loss_val_sum, n=count)
    def avg_successful_lpips(self, eval_label, attack_out):
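        """ Computes the average LPIPS distance (AlexNet backbone) between
            original and adversarial examples, restricted to successful
            attacks, and updates the running AverageMeter stored under
            eval_label.
        """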
        ######################################################################
        #  First set up evaluation result if doesn't exist:                  #
        ######################################################################
        if self.results[eval_label] is None:
            self.results[eval_label] = utils.AverageMeter()

        result = self.results[eval_label]

        if self.params[eval_label] is None:
            dist_model = dm.DistModel(net='alex', manual_gpu=self.use_gpu)
            self.params[eval_label] = {'dist_model': dist_model}

        dist_model = self.params[eval_label]['dist_model']

        ######################################################################
        #  Compute which attacks were successful                             #
        ######################################################################
        successful_pert, successful_orig = self._get_successful_attacks(
            attack_out)

        if successful_pert is None or successful_pert.numel() == 0:
            return

        successful_pert = Variable(successful_pert)
        successful_orig = Variable(successful_orig)
        num_successful = successful_pert.shape[0]
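        # Rescale images (assumed to lie in [0, 1]) to [-1, 1], the input
        # range the LPIPS distance model expects.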
        xform = lambda im: im * 2.0 - 1.0

        lpips_dist = dist_model.forward_var(xform(successful_pert),
                                            xform(successful_orig))
        avg_lpips_dist = float(torch.mean(lpips_dist))

        result.update(avg_lpips_dist, n=num_successful)
    def __init__(self,
                 classifier_net,
                 normalizer,
                 manual_gpu=None,
                 loss_fxn=None):
        self.classifier_net = classifier_net
        self.normalizer = normalizer
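        # manual_gpu, when provided, explicitly overrides automatic GPU detection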
        if manual_gpu is not None:
            self.use_gpu = manual_gpu
        else:
            self.use_gpu = utils.use_gpu()

        self.loss_fxn = loss_fxn or nn.CrossEntropyLoss()
        self.results = {
            'top1': utils.AverageMeter(),
            'avg_loss_value': utils.AverageMeter()
        }
    def __init__(self, classifier_net, normalizer, manual_gpu=None):
        self.classifier_net = classifier_net
        self.normalizer = normalizer
        if manual_gpu is not None:
            self.use_gpu = manual_gpu
        else:
            self.use_gpu = utils.use_gpu()

        self.results = {'top1': utils.AverageMeter()}
    def top1_accuracy(self, eval_label, attack_out, ground_examples, labels):
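        """ Computes the classifier's top-1 accuracy on the adversarial
            examples against the pre-attack labels and updates the running
            AverageMeter stored under eval_label.
        """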

        ######################################################################
        #  First set up evaluation result if doesn't exist:                  #
        ######################################################################
        if self.results[eval_label] is None:
            self.results[eval_label] = utils.AverageMeter()

        result = self.results[eval_label]

        ######################################################################
        #  Computes the top 1 accuracy and updates the averageMeter          #
        ######################################################################
        attack_examples = utils.safe_var(attack_out[0])
        pre_adv_labels = utils.safe_var(attack_out[1])
        num_examples = float(attack_examples.shape[0])

        attack_accuracy_int = self.attack_params.eval_attack_only(
            attack_examples, pre_adv_labels, topk=1)
        result.update(attack_accuracy_int / num_examples, n=int(num_examples))
    def __init__(self, classifier_net, normalizer, use_gpu=False):
        self.classifier_net = classifier_net
        self.normalizer = normalizer
        self.use_gpu = use_gpu

        self.results = {'top1': utils.AverageMeter()}
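
    # NOTE: the methods above rely on names imported elsewhere in this module:
    # `torch`, `nn`, `Variable`, `ssim`, `utils` (AverageMeter / safe_var /
    # use_gpu helpers), `aa` (the adversarial attack classes), and `dm` (the
    # LPIPS distance-model wrapper). The exact import paths are not shown here.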