    def eval(self, examples, labels):
        """ Evaluates top-1 accuracy on a minibatch and folds it into the
            running average stored in self.results['top1']
        ARGS:
            examples: Tensor NxCxHxW - minibatch of examples to be classified
            labels: longtensor N - correct labels of the examples
        RETURNS:
            None, but updates self.results['top1'] in place
        """
        assert set(self.results.keys()) == {'top1'}
        ground_avg = self.results['top1']
        ground_output = self.classifier_net(self.normalizer(Variable(examples)))
        minibatch = float(examples.shape[0])

        # Count correct classifications, then update the running average
        # weighted by minibatch size
        ground_accuracy_int = utils.accuracy_int(ground_output,
                                                 Variable(labels), topk=1)
        ground_avg.update(ground_accuracy_int / minibatch,
                          n=int(minibatch))

    def eval_attack_only(self, adversarials, labels, topk=1):
        """ Outputs the number of correctly classified adversarial examples
        ARGS:
            adversarials: Variable NxCxHxW - examples after we did adversarial
                                             perturbation
            labels: Variable (longtensor N) - correct labels of classification
                                              output
            topk: int - criterion for 'correct' classification
        RETURNS:
            (int) number of correctly classified examples
        """

        # Normalize and classify the adversarial examples, then count how
        # many are still classified correctly
        normed_advs = self.normalizer(adversarials)
        adv_output = self.classifier_net(normed_advs)
        return utils.accuracy_int(adv_output, labels, topk=topk)
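    # Illustrative usage sketch (names assumed): with adversarial examples
    # built elsewhere, the attack success rate follows from the raw count of
    # still-correct classifications:
    #
    #     num_correct = result.eval_attack_only(adv_examples, labels, topk=1)
    #     success_rate = 1.0 - num_correct / float(labels.shape[0])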

    def eval(self, examples, labels):
        """ Evaluates top-1 accuracy and average loss on a minibatch and
            folds them into the running averages stored in self.results
        ARGS:
            examples: Tensor NxCxHxW - minibatch of examples to be classified
            labels: longtensor N - correct labels of the examples
        RETURNS:
            None, but updates self.results in place
        """
        assert set(self.results.keys()) == {'top1', 'avg_loss_value'}

        ground_output = self.classifier_net(self.normalizer(
            Variable(examples)))
        minibatch = float(examples.shape[0])

        # Compute accuracy
        ground_avg = self.results['top1']
        minibatch_accuracy_int = utils.accuracy_int(ground_output,
                                                    Variable(labels),
                                                    topk=1)
        ground_avg.update(minibatch_accuracy_int / minibatch, n=int(minibatch))

        # Compute loss and update the running average, weighted by
        # minibatch size
        ground_avg_loss = self.results['avg_loss_value']
        minibatch_loss = float(self.loss_fxn(ground_output, Variable(labels)))
        ground_avg_loss.update(minibatch_loss, n=int(minibatch))
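    # Illustrative usage sketch (names assumed): after looping eval(...) over
    # a validation set, both running averages are available:
    #
    #     print(result.results['top1'].avg)            # top-1 accuracy
    #     print(result.results['avg_loss_value'].avg)  # average loss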