def full_attack(self,
                    data_loader,
                    attack_parameters,
                    output_filename,
                    num_minibatches=None,
                    continue_attack=True,
                    checkpoint_minibatch=10,
                    verbose=True,
                    save_xform=img_utils.nhwc255_xform):
        """ Builds an attack on the data and outputs the resulting attacked
            images into a .numpy file
        ARGS:
            data_loader : torch.utils.data.DataLoader - object that loads the
                          evaluation data.
                          NOTE: for Madry challenge this shouldn't be shuffled
            attack_parameters : AdversarialAttackParameters object - wrapper to
                                contain the attack
            output_filename : string - name of the file we want to output.
                              should just be the base name (extension is .npy)
            num_minibatches : int - if not None, we only build attacks for this
                                    many minibatches of data
            continue_attack : bool - if True, we do the following:
                              1) check if output_filename exists. If it doesn't
                                 exist, build the full attack as usual.
                              2) if output_filename exists, figure out how many
                                 minibatches it covers and skip ahead to the
                                 next minibatch in the data loader.
                              This acts as a simple checkpointing system for
                              attacks.
            checkpoint_minibatch : int - number of minibatches between checkpoints
            verbose : bool - if True, we print which minibatch we are on out of
                             the total number of minibatches
            save_xform : fxn, np.ndarray -> np.ndarray - function that
                         transforms our adv_example.data.numpy() into the form
                         we want to store in the .npy output file
        RETURNS:
            numpy array of attacked examples
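        EXAMPLE:
            A minimal usage sketch; `evaluator`, `val_loader`, and
            `pgd_attack_params` are hypothetical placeholders for an instance
            of this class, an unshuffled DataLoader, and an
            AdversarialAttackParameters object:
                adv_images = evaluator.full_attack(val_loader,
                                                   pgd_attack_params,
                                                   'cifar_pgd_attack',
                                                   num_minibatches=5)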
        """
        ######################################################################
        #   Setup and assert things                                          #
        ######################################################################

        self.classifier_net.eval()

        # Check if the loader is shuffled; print a warning if it uses a
        # random sampler
        assert isinstance(data_loader, torch.utils.data.DataLoader)
        if isinstance(data_loader.batch_sampler.sampler,
                      torch.utils.data.sampler.RandomSampler):
            print "WARNING: data loader is shuffled!"
        total_num_minibatches = int(
            math.ceil(len(data_loader.dataset) / data_loader.batch_size))
        minibatch_digits = len(str(total_num_minibatches))
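        # e.g. (hypothetical numbers): 10000 evaluation examples with a batch
        # size of 128 gives ceil(10000 / 128) = 79 total minibatches, so
        # minibatch indices get zero-padded to 2 digits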

        # Do cuda stuff
        utils.cuda_assert(self.use_gpu)
        attack_parameters.set_gpu(self.use_gpu)
        if self.use_gpu:
            self.classifier_net.cuda()

        # Check attack is attacking everything
        assert attack_parameters.proportion_attacked == 1.0

        # handle output_file + continue_attack stuff
        assert os.path.basename(output_filename) == output_filename, \
               "Provided output_filename was %s, should have been %s" % \
               (output_filename, os.path.basename(output_filename))

        output_file = os.path.join(config.OUTPUT_IMAGE_PATH,
                                   output_filename + '.npy')

        minibatch_attacks = []  # list of 4d numpy arrays
        num_prev_minibatches = 0
        if continue_attack and len(glob.glob(output_file)) != 0:
            # load file and see how many minibatches we went through
            saved_data = np.load(output_file)
            saved_num_examples = saved_data.shape[0]
            loader_batch_size = data_loader.batch_size
            if saved_num_examples % loader_batch_size != 0:
                print "WARNING: incomplete minibatch in previously saved attack"

            minibatch_attacks.append(saved_data)
            num_prev_minibatches = saved_num_examples // loader_batch_size
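            # e.g. (hypothetical numbers): a previously saved array of 512
            # examples with a batch size of 128 means 4 minibatches are already
            # done, so the attack loop below skips them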

        if verbose:

            def printer(num):
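                # prints e.g. "Minibatch 007/123": the minibatch index is
                # zero-padded to minibatch_digits characters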
                print("Minibatch %%0%dd/%s" %
                      (minibatch_digits, total_num_minibatches) % num)
        else:
            printer = lambda num: None

        ######################################################################
        #   Start attacking and saving                                       #
        ######################################################################
        for minibatch_num, data in enumerate(data_loader):

            # Handle skip/early-stop cases
            if minibatch_num < num_prev_minibatches:  # careful: off-by-one risk
                continue

            if num_minibatches is not None and minibatch_num >= num_minibatches:
                break

            printer(minibatch_num)

            # Load data and build minibatch of attacked images
            inputs, labels = data
            if self.use_gpu:
                inputs = inputs.cuda()
                labels = labels.cuda()

            adv_examples = attack_parameters.attack(inputs, labels)[0]

            # Convert to numpy and append to our save buffer
            adv_data = utils.safe_tensor(adv_examples).cpu().numpy()
            minibatch_attacks.append(save_xform(adv_data))

            # Perform checkpoint if necessary
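            # (checkpoint_incremental_array presumably flushes the accumulated
            # minibatch arrays to the .npy file at output_file and returns the
            # running result, so the in-memory buffer stays bounded)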
            if minibatch_num > 0 and minibatch_num % checkpoint_minibatch == 0:
                minibatch_attacks = utils.checkpoint_incremental_array(
                    output_file, minibatch_attacks, return_concat=True)

        return utils.checkpoint_incremental_array(output_file,
                                                  minibatch_attacks,
                                                  return_concat=True)[0]

    def evaluate_ensemble(self,
                          data_loader,
                          attack_ensemble,
                          skip_ground=False,
                          verbose=True,
                          num_minibatches=None):
        """ Runs evaluation against attacks generated by attack ensemble over
            the entire training set
        ARGS:
            data_loader : torch.utils.data.DataLoader - object that loads the
                          evaluation data
            attack_ensemble : dict {string -> EvaluationResult}
                             is a dict of attacks that we want to make.
                             None of the strings can be 'ground'
            skip_ground : bool - if True we don't evaluate the no-attack case
            verbose : bool - if True, we print things
            num_minibatches: int - if not None, we only validate on a fixed
                                   number of minibatches
        RETURNS:
            a dict with the same keys as attack_ensemble, plus the key 'ground'
            (unless skip_ground is True). The values are the EvaluationResult
            objects, whose results are accumulated in utils.AverageMeter objects
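        EXAMPLE:
            A minimal usage sketch; `evaluator`, `val_loader`, and
            `fgsm_result` are hypothetical placeholders, with `fgsm_result`
            assumed to be an EvaluationResult wrapping some attack:
                results = evaluator.evaluate_ensemble(val_loader,
                                                      {'fgsm': fgsm_result},
                                                      num_minibatches=10)
                # results has keys 'fgsm' and 'ground'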
        """

        ######################################################################
        #   Setup input validations                                          #
        ######################################################################

        self.classifier_net.eval()
        assert isinstance(data_loader, torch.utils.data.DataLoader)

        if attack_ensemble is None:
            attack_ensemble = {}

        if not skip_ground:
            assert 'ground' not in attack_ensemble
            # Build ground result
            ground_result = IdentityEvaluation(self.classifier_net,
                                               self.normalizer,
                                               use_gpu=self.use_gpu)
            attack_ensemble['ground'] = ground_result

        # Do GPU checks
        utils.cuda_assert(self.use_gpu)
        if self.use_gpu:
            self.classifier_net.cuda()

        for eval_result in attack_ensemble.values():
            eval_result.set_gpu(self.use_gpu)

        ######################################################################
        #   Loop through the validation set and evaluate attack efficacy     #
        ######################################################################

        for i, data in enumerate(data_loader, 0):
            if num_minibatches is not None and i >= num_minibatches:
                break
            if verbose:
                print "Starting minibatch %s..." % i

            inputs, labels = data
            if self.use_gpu:
                inputs = inputs.cuda()
                labels = labels.cuda()

            for k, result in attack_ensemble.items():
                if verbose:
                    print "\t (mb: %s) evaluating %s..." % (i, k)
                result.eval(inputs, labels)

        return attack_ensemble