    def load_data(self):
        """
        Load data.
        """

        self.test_images = utils.read_hdf5(self.args.test_images_file).astype(numpy.float32)
        if len(self.test_images.shape) < 4:
            self.test_images = numpy.expand_dims(self.test_images, axis=3)
        log('[Testing] read %s' % self.args.test_images_file)

        self.test_codes = utils.read_hdf5(self.args.test_codes_file).astype(int)
        self.test_codes = self.test_codes[:, self.args.label_index]
        log('[Testing] read %s' % self.args.test_codes_file)

        self.perturbations = utils.read_hdf5(self.args.perturbations_file).astype(numpy.float32)
        self.N_attempts = self.perturbations.shape[0]
        self.N_samples = self.perturbations.shape[1]
        self.perturbations = numpy.swapaxes(self.perturbations, 0, 1)
        if len(self.perturbations.shape) <= 4:
            self.perturbations = self.perturbations.reshape((self.perturbations.shape[0] * self.perturbations.shape[1], self.perturbations.shape[2], self.perturbations.shape[3], 1))
        else:
            self.perturbations = self.perturbations.reshape((self.perturbations.shape[0] * self.perturbations.shape[1], self.perturbations.shape[2], self.perturbations.shape[3], self.perturbations.shape[4]))

        log('[Testing] read %s' % self.args.perturbations_file)

        self.original_success = utils.read_hdf5(self.args.original_success_file)
        self.original_success = numpy.swapaxes(self.original_success, 0, 1)
        self.original_success = self.original_success.reshape((self.original_success.shape[0] * self.original_success.shape[1]))
        log('[Testing] read %s' % self.args.original_success_file)

        self.original_accuracy = utils.read_hdf5(self.args.original_accuracy_file)
        log('[Testing] read %s' % self.args.original_accuracy_file)

        self.perturbation_codes = numpy.repeat(self.test_codes[:self.N_samples], self.N_attempts, axis=0)
        self.transfer_success = numpy.copy(self.original_success)
Example #2
    def load_data(self):
        """
        Load data and model.
        """

        self.test_images = utils.read_hdf5(self.args.test_images_file).astype(numpy.float32)
        log('[Testing] read %s' % self.args.test_images_file)

        # For handling both color and gray images.
        if len(self.test_images.shape) < 4:
            self.test_images = numpy.expand_dims(self.test_images, axis=3)
            log('[Testing] no color images, adjusted size')
        self.resolution = self.test_images.shape[2]
        log('[Testing] resolution %d' % self.resolution)

        self.train_images = utils.read_hdf5(self.args.train_images_file).astype(numpy.float32)
        # Flatten training images to one vector per image.
        self.train_images = self.train_images.reshape((self.train_images.shape[0], -1))
        log('[Testing] read %s' % self.args.train_images_file)

        self.test_codes = utils.read_hdf5(self.args.test_codes_file).astype(int)
        self.test_codes = self.test_codes[:, self.args.label_index]
        self.N_class = numpy.max(self.test_codes) + 1
        log('[Testing] read %s' % self.args.test_codes_file)

        self.accuracy = utils.read_hdf5(self.args.accuracy_file)
        log('[Testing] read %s' % self.args.accuracy_file)

        self.perturbations = utils.read_hdf5(self.args.perturbations_file).astype(numpy.float32)
        self.N_attempts = self.perturbations.shape[0]

        # First, repeat relevant data.
        self.test_images = numpy.repeat(self.test_images[:self.perturbations.shape[1]], self.N_attempts, axis=0)
        self.perturbation_codes = numpy.repeat(self.test_codes[:self.perturbations.shape[1]], self.N_attempts, axis=0)
        self.perturbation_codes = numpy.squeeze(self.perturbation_codes)
        self.accuracy = numpy.repeat(self.accuracy[:self.perturbations.shape[1]], self.N_attempts, axis=0)

        # Then, reshape the perturbations!
        self.perturbations = numpy.swapaxes(self.perturbations, 0, 1)
        self.perturbations = self.perturbations.reshape((self.perturbations.shape[0] * self.perturbations.shape[1], -1))
        assert self.perturbations.shape[1] == self.args.N_theta
        log('[Testing] read %s' % self.args.perturbations_file)
        assert not numpy.any(self.perturbations != self.perturbations), 'NaN in perturbations'

        self.success = utils.read_hdf5(self.args.success_file)
        self.success = numpy.swapaxes(self.success, 0, 1)
        self.success = self.success.reshape((self.success.shape[0] * self.success.shape[1]))
        log('[Testing] read %s' % self.args.success_file)

        log('[Testing] using %d input channels' % self.test_images.shape[3])
        assert self.args.N_theta > 0 and self.args.N_theta <= 9
        decoder = models.STNDecoder(self.args.N_theta)
        # decoder.eval()
        log('[Testing] set up STN decoder')

        self.model = decoder
Example #3
    def load_data(self):
        """
        Load data.
        """

        test_codes = utils.read_hdf5(self.args.test_codes_file).astype(int)
        self.test_fonts = test_codes[:, 1]
        self.test_classes = test_codes[:, 2]
        log('[Testing] read %s' % self.args.test_codes_file)

        self.perturbations = utils.read_hdf5(
            self.args.perturbations_file).astype(numpy.float32)
        self.N_attempts = self.perturbations.shape[0]
        self.N_samples = self.perturbations.shape[1]
        self.perturbations = numpy.swapaxes(self.perturbations, 0, 1)
        self.perturbations = self.perturbations.reshape(
            (self.perturbations.shape[0] * self.perturbations.shape[1], -1))
        log('[Testing] read %s' % self.args.perturbations_file)

        self.test_fonts = numpy.repeat(self.test_fonts[:self.N_samples],
                                       self.N_attempts,
                                       axis=0)
        self.test_classes = numpy.repeat(self.test_classes[:self.N_samples],
                                         self.N_attempts,
                                         axis=0)
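Example #4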
    def load_data(self):
        """
        Load data.
        """

        self.test_images = utils.read_hdf5(self.args.test_images_file)
        log('[Testing] read %s' % self.args.test_images_file)

        if len(self.test_images.shape) < 4:
            self.test_images = numpy.expand_dims(self.test_images, axis=3)
        self.resolution = self.test_images.shape[1]
        self.image_channels = self.test_images.shape[3]

        self.test_codes = utils.read_hdf5(self.args.test_codes_file)
        self.test_codes = self.test_codes[:, self.args.label_index]
        log('[Testing] read %s' % self.args.test_codes_file)

        self.perturbations = utils.read_hdf5(
            self.args.perturbations_file).astype(numpy.float32)
        self.N_attempts = self.perturbations.shape[0]
        self.N_samples = self.perturbations.shape[1]
        self.perturbations = numpy.swapaxes(self.perturbations, 0, 1)
        self.perturbations = self.perturbations.reshape(
            (self.perturbations.shape[0] * self.perturbations.shape[1], -1))
        log('[Testing] read %s' % self.args.perturbations_file)

        self.test_codes = numpy.repeat(self.test_codes[:self.N_samples],
                                       self.N_attempts,
                                       axis=0)
        self.test_images = numpy.repeat(self.test_images[:self.N_samples],
                                        self.N_attempts,
                                        axis=0)
Example #5
    def test(self):
        """
        Compute perturbation images using the decoder and write them to file.
        """

        num_batches = int(
            math.ceil(self.perturbations.shape[0] / self.args.batch_size))

        for b in range(num_batches):
            b_start = b * self.args.batch_size
            b_end = min((b + 1) * self.args.batch_size,
                        self.perturbations.shape[0])
            batch_fonts = self.test_fonts[b_start:b_end]
            batch_classes = self.test_classes[b_start:b_end]
            batch_code = numpy.concatenate(
                (common.numpy.one_hot(batch_fonts, self.N_font),
                 common.numpy.one_hot(batch_classes, self.N_class)),
                axis=1).astype(numpy.float32)

            batch_inputs = common.torch.as_variable(
                self.perturbations[b_start:b_end], self.args.use_gpu)
            batch_code = common.torch.as_variable(batch_code,
                                                  self.args.use_gpu)

            # This fixes the font/class code so that only theta is optimized.
            self.model.set_code(batch_code)
            output_images = self.model(batch_inputs)

            output_images = numpy.squeeze(
                numpy.transpose(output_images.cpu().detach().numpy(),
                                (0, 2, 3, 1)))
            self.perturbation_images = common.numpy.concatenate(
                self.perturbation_images, output_images)

            if b % 100 == 0:
                log('[Testing] computing perturbation images %d' % b)

        utils.makedir(os.path.dirname(self.args.perturbation_images_file))
        if len(self.perturbation_images.shape) > 3:
            self.perturbation_images = self.perturbation_images.reshape(
                self.N_samples, self.N_attempts,
                self.perturbation_images.shape[1],
                self.perturbation_images.shape[2],
                self.perturbation_images.shape[3])
        else:
            self.perturbation_images = self.perturbation_images.reshape(
                self.N_samples, self.N_attempts,
                self.perturbation_images.shape[1],
                self.perturbation_images.shape[2])
        self.perturbation_images = numpy.swapaxes(self.perturbation_images, 0,
                                                  1)
        utils.write_hdf5(self.args.perturbation_images_file,
                         self.perturbation_images)
        log('[Testing] wrote %s' % self.args.perturbation_images_file)
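Example #6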
    def test(self):
        """
        Compute perturbation images using the decoder and write them to file.
        """

        num_batches = int(
            math.ceil(self.perturbations.shape[0] / self.args.batch_size))

        for b in range(num_batches):
            b_start = b * self.args.batch_size
            b_end = min((b + 1) * self.args.batch_size,
                        self.perturbations.shape[0])
            batch_images = common.torch.as_variable(
                self.test_images[b_start:b_end], self.args.use_gpu)
            batch_inputs = common.torch.as_variable(
                self.perturbations[b_start:b_end], self.args.use_gpu)

            self.model.set_image(batch_images)
            output_images = self.model(batch_inputs)

            output_images = numpy.squeeze(
                numpy.transpose(output_images.cpu().detach().numpy(),
                                (0, 2, 3, 1)))
            self.perturbation_images = common.numpy.concatenate(
                self.perturbation_images, output_images)

            if b % 100 == 0:
                log('[Testing] computing perturbation images %d' % b)

        utils.makedir(os.path.dirname(self.args.perturbation_images_file))
        if len(self.perturbation_images.shape) > 3:
            self.perturbation_images = self.perturbation_images.reshape(
                self.N_samples, self.N_attempts,
                self.perturbation_images.shape[1],
                self.perturbation_images.shape[2],
                self.perturbation_images.shape[3])
        else:
            self.perturbation_images = self.perturbation_images.reshape(
                self.N_samples, self.N_attempts,
                self.perturbation_images.shape[1],
                self.perturbation_images.shape[2])
        self.perturbation_images = numpy.swapaxes(self.perturbation_images, 0,
                                                  1)
        utils.write_hdf5(self.args.perturbation_images_file,
                         self.perturbation_images)
        log('[Testing] wrote %s' % self.args.perturbation_images_file)
Example #7
    def compute_statistics(self):
        """
        Compute statistics based on distances.
        """

        num_attempts = self.perturbations.shape[0]

        perturbations = numpy.swapaxes(self.perturbations, 0, 1)
        perturbations = perturbations.reshape(
            (perturbations.shape[0] * perturbations.shape[1],
             perturbations.shape[2]))
        success = numpy.swapaxes(self.success, 0, 1)
        success = success.reshape((success.shape[0] * success.shape[1]))

        probabilities = numpy.swapaxes(self.probabilities, 0, 1)
        probabilities = probabilities.reshape(
            (probabilities.shape[0] * probabilities.shape[1], -1))
        confidences = numpy.max(probabilities, 1)

        perturbation_probabilities = self.test_probabilities[:self.success.shape[1]]
        perturbation_probabilities = numpy.repeat(perturbation_probabilities,
                                                  num_attempts,
                                                  axis=0)
        perturbation_confidences = numpy.max(perturbation_probabilities, 1)

        probability_ratios = confidences / perturbation_confidences

        raw_overall_success = success >= 0
        log('[Testing] %d valid attacks' % numpy.sum(raw_overall_success))

        # For off-manifold attacks this should not happen, but better safe than sorry.
        if not numpy.any(raw_overall_success):
            for type in [
                    'raw_success', 'raw_iteration', 'raw_roc',
                    'raw_confidence_weighted_success', 'raw_confidence',
                    'raw_ratios'
            ]:
                self.results[type] = 0
            if self.args.results_file:
                utils.write_pickle(self.args.results_file, self.results)
                log('[Testing] wrote %s' % self.args.results_file)
            log('[Testing] no successful attacks found, no plots')
            return

        #
        # We compute some simple statistics:
        # - raw success rate: fraction of successful attacks without considering epsilon
        # - corrected success rate: fraction of successful attacks within the epsilon-ball
        # - raw average perturbation: average distance to the original samples (for successful attacks)
        # - corrected average perturbation: average distance to the original samples for perturbations
        #   within the epsilon-ball (for successful attacks).
        # These statistics can also be computed per class (see the sketch below)
        # and with respect to three different norms.

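        # A minimal sketch of the per-class statistics mentioned above; this is an
        # assumption about how they could be derived from the arrays already in scope,
        # not part of the original evaluation, and the 'raw_success_class_*' keys are
        # illustrative names.
        perturbation_codes = numpy.repeat(self.test_codes[:self.success.shape[1]], num_attempts, axis=0)
        for c in range(numpy.max(perturbation_codes) + 1):
            class_mask = (perturbation_codes == c)
            if numpy.any(class_mask):
                class_success = numpy.logical_and(raw_overall_success, class_mask)
                self.results['raw_success_class_%d' % c] = numpy.sum(class_success) / float(numpy.sum(class_mask))
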
        if self.args.plot_directory and utils.display():
            iterations = success[raw_overall_success]
            x = numpy.arange(numpy.max(iterations) + 1)
            y = numpy.bincount(iterations)
            plot_file = os.path.join(self.args.plot_directory, 'iterations')
            plot.bar(plot_file,
                     x,
                     y,
                     title='Distribution of Iterations of Successful Attacks',
                     xlabel='Number of Iterations',
                     ylabel='Count')
            log('[Testing] wrote %s' % plot_file)

            plot_file = os.path.join(self.args.plot_directory, 'probabilities')
            plot.histogram(plot_file, confidences[raw_overall_success], 50)
            log('[Testing] wrote %s' % plot_file)

            plot_file = os.path.join(self.args.plot_directory,
                                     'probability_ratios')
            plot.histogram(plot_file, probability_ratios, 50)
            log('[Testing] wrote %s' % plot_file)

            plot_file = os.path.join(self.args.plot_directory,
                                     'test_probabilities')
            plot.histogram(
                plot_file, self.test_probabilities[
                    numpy.arange(self.test_probabilities.shape[0]),
                    self.test_codes], 50)
            log('[Testing] wrote %s' % plot_file)

        y_true = numpy.concatenate(
            (numpy.zeros(confidences.shape[0]),
             numpy.ones(perturbation_confidences.shape[0])))
        y_score = numpy.concatenate((confidences, perturbation_confidences))
        roc_auc_score = sklearn.metrics.roc_auc_score(y_true, y_score)

        self.results['raw_roc'] = roc_auc_score
        self.results['raw_confidence_weighted_success'] = numpy.sum(
            confidences[raw_overall_success]) / numpy.sum(
                perturbation_confidences)
        self.results['raw_confidence'] = numpy.mean(
            probabilities[raw_overall_success])
        self.results['raw_ratios'] = numpy.mean(
            probability_ratios[raw_overall_success])
        self.results['raw_success'] = numpy.sum(
            raw_overall_success) / success.shape[0]
        self.results['raw_iteration'] = numpy.average(
            success[raw_overall_success])

        if self.args.results_file:
            utils.write_pickle(self.args.results_file, self.results)
            log('[Testing] wrote %s' % self.args.results_file)
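Example #8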
    def test(self):
        """
        Test the classifier on perturbations and test images to compute transfer success and accuracy.
        """

        self.model.eval()
        assert self.model.training is False
        assert self.perturbation_codes.shape[0] == self.perturbations.shape[0]
        assert self.test_codes.shape[0] == self.test_images.shape[0]
        assert len(self.perturbations.shape) == 4
        assert len(self.test_images.shape) == 4

        perturbations_accuracy = None
        num_batches = int(math.ceil(self.perturbations.shape[0] / self.args.batch_size))

        for b in range(num_batches):
            b_start = b * self.args.batch_size
            b_end = min((b + 1) * self.args.batch_size, self.perturbations.shape[0])
            batch_perturbations = common.torch.as_variable(self.perturbations[b_start: b_end], self.args.use_gpu)
            batch_classes = common.torch.as_variable(self.perturbation_codes[b_start: b_end], self.args.use_gpu)
            batch_perturbations = batch_perturbations.permute(0, 3, 1, 2)

            output_classes = self.model(batch_perturbations)
            values, indices = torch.max(torch.nn.functional.softmax(output_classes, dim=1), dim=1)
            errors = torch.abs(indices - batch_classes)
            perturbations_accuracy = common.numpy.concatenate(perturbations_accuracy, errors.data.cpu().numpy())

            for n in range(batch_perturbations.size(0)):
                log('[Testing] %d: original success=%d, transfer accuracy=%d' % (n, self.original_success[b_start + n], errors[n].item()))

        self.transfer_success[perturbations_accuracy == 0] = -1
        self.transfer_success = self.transfer_success.reshape((self.N_samples, self.N_attempts))
        self.transfer_success = numpy.swapaxes(self.transfer_success, 0, 1)

        utils.makedir(os.path.dirname(self.args.transfer_success_file))
        utils.write_hdf5(self.args.transfer_success_file, self.transfer_success)
        log('[Testing] wrote %s' % self.args.transfer_success_file)

        num_batches = int(math.ceil(self.test_images.shape[0] / self.args.batch_size))
        for b in range(num_batches):
            b_start = b * self.args.batch_size
            b_end = min((b + 1) * self.args.batch_size, self.test_images.shape[0])
            batch_images = common.torch.as_variable(self.test_images[b_start: b_end], self.args.use_gpu)
            batch_classes = common.torch.as_variable(self.test_codes[b_start: b_end], self.args.use_gpu)
            batch_images = batch_images.permute(0, 3, 1, 2)

            output_classes = self.model(batch_images)
            values, indices = torch.max(torch.nn.functional.softmax(output_classes, dim=1), dim=1)
            errors = torch.abs(indices - batch_classes)

            self.transfer_accuracy = common.numpy.concatenate(self.transfer_accuracy, errors.data.cpu().numpy())

            if b % 100 == 0:
                log('[Testing] computing accuracy %d' % b)

        self.transfer_accuracy = self.transfer_accuracy == 0
        log('[Testing] original accuracy=%g' % (numpy.sum(self.original_accuracy)/float(self.original_accuracy.shape[0])))
        log('[Testing] transfer accuracy=%g' % (numpy.sum(self.transfer_accuracy)/float(self.transfer_accuracy.shape[0])))
        log('[Testing] accuracy difference=%g' % (numpy.sum(self.transfer_accuracy != self.original_accuracy)/float(self.transfer_accuracy.shape[0])))
        log('[Testing] accuracy difference on %d samples=%g' % (self.N_samples, numpy.sum(self.transfer_accuracy[:self.N_samples] != self.original_accuracy[:self.N_samples])/float(self.N_samples)))
        self.transfer_accuracy = numpy.logical_and(self.transfer_accuracy, self.original_accuracy)

        utils.makedir(os.path.dirname(self.args.transfer_accuracy_file))
        utils.write_hdf5(self.args.transfer_accuracy_file, self.transfer_accuracy)
        log('[Testing] wrote %s' % self.args.transfer_accuracy_file)
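Example #9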
    def load_data(self):
        """
        Load data and model.
        """

        with logw('[Detection] read %s' % self.args.train_images_file):
            self.nearest_neighbor_images = utils.read_hdf5(
                self.args.train_images_file)
            assert len(self.nearest_neighbor_images.shape) == 3

        with logw('[Detection] read %s' % self.args.test_images_file):
            self.test_images = utils.read_hdf5(self.args.test_images_file)
            if len(self.test_images.shape) < 4:
                self.test_images = numpy.expand_dims(self.test_images, axis=3)

        with logw('[Detection] read %s' % self.args.train_codes_file):
            self.train_codes = utils.read_hdf5(self.args.train_codes_file)

        with logw('[Detection] read %s' % self.args.test_codes_file):
            self.test_codes = utils.read_hdf5(self.args.test_codes_file)

        with logw('[Detection] read %s' % self.args.test_theta_file):
            self.test_theta = utils.read_hdf5(self.args.test_theta_file)

        with logw('[Detection] read %s' % self.args.perturbations_file):
            self.perturbations = utils.read_hdf5(self.args.perturbations_file)
            assert len(self.perturbations.shape) == 3

        with logw('[Detection] read %s' % self.args.success_file):
            self.success = utils.read_hdf5(self.args.success_file)

        with logw('[Detection] read %s' % self.args.accuracy_file):
            self.accuracy = utils.read_hdf5(self.args.accuracy_file)

        self.perturbations = numpy.swapaxes(self.perturbations, 0, 1)
        num_attempts = self.perturbations.shape[1]
        self.test_images = self.test_images[:self.perturbations.shape[0]]
        self.train_images = self.nearest_neighbor_images[:self.perturbations.shape[0]]
        self.test_codes = self.test_codes[:self.perturbations.shape[0]]
        self.accuracy = self.accuracy[:self.perturbations.shape[0]]
        self.test_theta = self.test_theta[:self.perturbations.shape[0]]

        self.perturbations = self.perturbations.reshape(
            (self.perturbations.shape[0] * self.perturbations.shape[1],
             self.perturbations.shape[2]))
        self.success = numpy.swapaxes(self.success, 0, 1)
        self.success = self.success.reshape(
            (self.success.shape[0] * self.success.shape[1]))

        self.accuracy = numpy.repeat(self.accuracy, num_attempts, axis=0)
        self.test_images = numpy.repeat(self.test_images, num_attempts, axis=0)
        self.train_images = numpy.repeat(self.train_images,
                                         num_attempts,
                                         axis=0)
        self.test_codes = numpy.repeat(self.test_codes, num_attempts, axis=0)
        self.test_theta = numpy.repeat(self.test_theta, num_attempts, axis=0)

        max_samples = self.args.max_samples
        self.success = self.success[:max_samples]
        self.accuracy = self.accuracy[:max_samples]
        self.perturbations = self.perturbations[:max_samples]
        self.test_images = self.test_images[:max_samples]
        self.train_images = self.train_images[:max_samples]
        self.test_codes = self.test_codes[:max_samples]
        self.test_theta = self.test_theta[:max_samples]

        with logw('[Testing] read %s' % self.args.database_file):
            database = utils.read_hdf5(self.args.database_file)

            self.N_font = database.shape[0]
            self.N_class = database.shape[1]
            self.N_theta = self.test_theta.shape[1]

            database = database.reshape((database.shape[0] * database.shape[1],
                                         database.shape[2], database.shape[3]))
            database = torch.from_numpy(database)
            if self.args.use_gpu:
                database = database.cuda()
            database = torch.autograd.Variable(database, False)

            self.model = models.AlternativeOneHotDecoder(
                database, self.N_font, self.N_class, self.N_theta)
            self.model.eval()

        self.compute_images()
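Example #10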
    def load_data_and_model(self):
        """
        Load data and model.
        """

        database = utils.read_hdf5(self.args.database_file).astype(
            numpy.float32)
        log('[Visualization] read %s' % self.args.database_file)

        N_font = database.shape[0]
        N_class = database.shape[1]
        resolution = database.shape[2]

        database = database.reshape((database.shape[0] * database.shape[1],
                                     database.shape[2], database.shape[3]))
        database = torch.from_numpy(database)
        if self.args.use_gpu:
            database = database.cuda()
        database = torch.autograd.Variable(database, False)

        self.test_images = utils.read_hdf5(self.args.test_images_file).astype(
            numpy.float32)
        if len(self.test_images.shape) < 4:
            self.test_images = numpy.expand_dims(self.test_images, axis=3)

        self.perturbations = utils.read_hdf5(
            self.args.perturbations_file).astype(numpy.float32)
        self.perturbations = numpy.swapaxes(self.perturbations, 0, 1)
        log('[Visualization] read %s' % self.args.perturbations_file)

        self.success = utils.read_hdf5(self.args.success_file)
        self.success = numpy.swapaxes(self.success, 0, 1)
        log('[Visualization] read %s' % self.args.success_file)

        self.accuracy = utils.read_hdf5(self.args.accuracy_file)
        log('[Visualization] read %s' % self.args.accuracy_file)

        self.test_theta = utils.read_hdf5(self.args.test_theta_file).astype(
            numpy.float32)
        self.test_theta = self.test_theta[:self.perturbations.shape[0]]
        N_theta = self.test_theta.shape[1]
        log('[Visualization] using %d N_theta' % N_theta)
        log('[Visualization] read %s' % self.args.test_theta_file)

        self.test_codes = utils.read_hdf5(self.args.test_codes_file).astype(int)
        self.test_codes = self.test_codes[:self.perturbations.shape[0]]
        self.test_codes = self.test_codes[:, 1:3]
        self.test_codes = numpy.concatenate(
            (common.numpy.one_hot(self.test_codes[:, 0], N_font),
             common.numpy.one_hot(self.test_codes[:, 1], N_class)),
            axis=1).astype(numpy.float32)
        log('[Visualization] read %s' % self.args.test_codes_file)

        image_channels = 1 if N_theta <= 7 else 3
        network_units = list(map(int, self.args.network_units.split(',')))
        log('[Visualization] using %d input channels' % image_channels)
        self.classifier = models.Classifier(
            N_class,
            resolution=(image_channels, resolution, resolution),
            architecture=self.args.network_architecture,
            activation=self.args.network_activation,
            batch_normalization=not self.args.network_no_batch_normalization,
            start_channels=self.args.network_channels,
            dropout=self.args.network_dropout,
            units=network_units)
        self.decoder = models.AlternativeOneHotDecoder(database, N_font,
                                                       N_class, N_theta)
        self.decoder.eval()

        assert os.path.exists(
            self.args.classifier_file
        ), 'state file %s not found' % self.args.classifier_file
        state = State.load(self.args.classifier_file)
        log('[Visualization] read %s' % self.args.classifier_file)

        self.classifier.load_state_dict(state.model)
        if self.args.use_gpu and not cuda.is_cuda(self.classifier):
            log('[Visualization] classifier is not CUDA')
            self.classifier = self.classifier.cuda()
        log('[Visualization] loaded classifier')

        self.classifier.eval()
        log('[Visualization] set classifier to eval')
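Example #11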
    def load_data_and_model(self):
        """
        Load data and model.
        """

        self.test_images = utils.read_hdf5(self.args.test_images_file).astype(
            numpy.float32)
        if len(self.test_images.shape) < 4:
            self.test_images = numpy.expand_dims(self.test_images, axis=3)
        resolution = self.test_images.shape[2]
        log('[Visualization] read %s' % self.args.test_images_file)

        self.test_codes = utils.read_hdf5(self.args.test_codes_file).astype(int)
        self.test_codes = self.test_codes[:, self.args.label_index]
        N_class = numpy.max(self.test_codes) + 1
        log('[Visualization] read %s' % self.args.test_codes_file)

        self.perturbations = utils.read_hdf5(
            self.args.perturbations_file).astype(numpy.float32)
        if len(self.perturbations.shape) < 5:
            self.perturbations = numpy.expand_dims(self.perturbations, axis=4)

        self.perturbations = numpy.swapaxes(self.perturbations, 0, 1)
        self.test_images = self.test_images[:self.perturbations.shape[0]]
        log('[Visualization] read %s' % self.args.perturbations_file)

        self.success = utils.read_hdf5(self.args.success_file)
        self.success = numpy.swapaxes(self.success, 0, 1)
        self.success = self.success >= 0
        log('[Visualization] read %s' % self.args.success_file)

        if self.args.selection_file:
            selection = utils.read_hdf5(self.args.selection_file)
            log('[Visualization] read %s' % self.args.selection_file)

            selection = numpy.swapaxes(selection, 0, 1)
            selection = selection[:self.success.shape[0]]
            selection = selection >= 0

            assert len(selection.shape) == len(self.success.shape)
            self.success = numpy.logical_and(self.success, selection)
            log('[Visualization] updated selection')

        self.accuracy = utils.read_hdf5(self.args.accuracy_file)
        log('[Visualization] read %s' % self.args.accuracy_file)

        log('[Visualization] using %d input channels' %
            self.test_images.shape[3])
        network_units = list(map(int, self.args.network_units.split(',')))
        self.model = models.Classifier(
            N_class,
            resolution=(self.test_images.shape[3], self.test_images.shape[1],
                        self.test_images.shape[2]),
            architecture=self.args.network_architecture,
            activation=self.args.network_activation,
            batch_normalization=not self.args.network_no_batch_normalization,
            start_channels=self.args.network_channels,
            dropout=self.args.network_dropout,
            units=network_units)

        assert os.path.exists(
            self.args.classifier_file
        ), 'state file %s not found' % self.args.classifier_file
        state = State.load(self.args.classifier_file)
        log('[Visualization] read %s' % self.args.classifier_file)

        self.model.load_state_dict(state.model)
        if self.args.use_gpu and not cuda.is_cuda(self.model):
            log('[Visualization] classifier is not CUDA')
            self.model = self.model.cuda()
        log('[Visualization] loaded classifier')

        self.model.eval()
        log('[Visualization] set model to eval')
Example #12
    def load_data_and_model(self):
        """
        Load data and model.
        """

        self.test_images = utils.read_hdf5(self.args.test_images_file).astype(
            numpy.float32)
        if len(self.test_images.shape) < 4:
            self.test_images = numpy.expand_dims(self.test_images, axis=3)
        resolution = (self.test_images.shape[3], self.test_images.shape[1],
                      self.test_images.shape[2])
        log('[Visualization] read %s' % self.args.test_images_file)

        self.perturbations = utils.read_hdf5(
            self.args.perturbations_file).astype(numpy.float32)
        self.perturbations = numpy.swapaxes(self.perturbations, 0, 1)
        log('[Visualization] read %s' % self.args.perturbations_file)

        self.success = utils.read_hdf5(self.args.success_file)
        self.success = numpy.swapaxes(self.success, 0, 1)
        log('[Visualization] read %s' % self.args.success_file)

        self.accuracy = utils.read_hdf5(self.args.accuracy_file)
        log('[Visualization] read %s' % self.args.accuracy_file)

        self.test_theta = utils.read_hdf5(self.args.test_theta_file).astype(
            numpy.float32)
        self.test_theta = self.test_theta[:self.perturbations.shape[0]]
        log('[Visualization] read %s' % self.args.test_theta_file)

        self.test_codes = utils.read_hdf5(self.args.test_codes_file).astype(int)
        self.test_codes = self.test_codes[:, self.args.label_index]
        self.N_class = numpy.max(self.test_codes) + 1
        self.test_codes = self.test_codes[:self.perturbations.shape[0]]
        log('[Visualization] read %s' % self.args.test_codes_file)

        network_units = list(map(int, self.args.network_units.split(',')))
        self.classifier = models.Classifier(
            self.N_class,
            resolution=resolution,
            architecture=self.args.network_architecture,
            activation=self.args.network_activation,
            batch_normalization=not self.args.network_no_batch_normalization,
            start_channels=self.args.network_channels,
            dropout=self.args.network_dropout,
            units=network_units)

        assert os.path.exists(
            self.args.classifier_file
        ), 'state file %s not found' % self.args.classifier_file
        state = State.load(self.args.classifier_file)
        log('[Visualization] read %s' % self.args.classifier_file)

        self.classifier.load_state_dict(state.model)
        if self.args.use_gpu and not cuda.is_cuda(self.classifier):
            log('[Visualization] classifier is not CUDA')
            self.classifier = self.classifier.cuda()
        log('[Visualization] loaded classifier')

        self.classifier.eval()
        log('[Visualization] set classifier to eval')

        assert self.args.decoder_files
        decoder_files = self.args.decoder_files.split(',')
        for decoder_file in decoder_files:
            assert os.path.exists(
                decoder_file), 'could not find %s' % decoder_file

        log('[Visualization] using %d input channels' %
            self.test_images.shape[3])
        decoder_units = list(map(int, self.args.decoder_units.split(',')))

        if len(decoder_files) > 1:
            log('[Visualization] loading multiple decoders')
            decoders = []
            for i in range(len(decoder_files)):
                decoder = models.LearnedDecoder(
                    self.args.latent_space_size,
                    resolution=resolution,
                    architecture=self.args.decoder_architecture,
                    start_channels=self.args.decoder_channels,
                    activation=self.args.decoder_activation,
                    batch_normalization=not self.args.decoder_no_batch_normalization,
                    units=decoder_units)

                state = State.load(decoder_files[i])
                decoder.load_state_dict(state.model)
                if self.args.use_gpu and not cuda.is_cuda(decoder):
                    decoder = decoder.cuda()
                decoders.append(decoder)

                decoder.eval()
                log('[Visualization] loaded %s' % decoder_files[i])
            self.decoder = models.SelectiveDecoder(decoders,
                                                   resolution=resolution)
        else:
            log('[Visualization] loading one decoder')
            decoder = models.LearnedDecoder(
                self.args.latent_space_size,
                resolution=resolution,
                architecture=self.args.decoder_architecture,
                start_channels=self.args.decoder_channels,
                activation=self.args.decoder_activation,
                batch_normalization=not self.args.decoder_no_batch_normalization,
                units=decoder_units)

            state = State.load(decoder_files[0])
            decoder.load_state_dict(state.model)
            if self.args.use_gpu and not cuda.is_cuda(decoder):
                decoder = decoder.cuda()
            decoder.eval()
            log('[Visualization] read decoder')

            self.decoder = decoder
Example #13
    def load_data(self):
        """
        Load data and model.
        """

        self.test_images = utils.read_hdf5(self.args.test_images_file).astype(
            numpy.float32)
        log('[Testing] read %s' % self.args.test_images_file)

        if len(self.test_images.shape) <= 3:
            self.test_images = numpy.expand_dims(self.test_images, axis=3)
            log('[Testing] no color images, adjusted size')

        self.train_images = utils.read_hdf5(
            self.args.train_images_file).astype(numpy.float32)
        # Flatten training images to one vector per image.
        self.train_images = self.train_images.reshape(
            (self.train_images.shape[0], -1))
        log('[Testing] read %s' % self.args.train_images_file)

        self.test_theta = utils.read_hdf5(self.args.test_theta_file).astype(
            numpy.float32)
        log('[Testing] read %s' % self.args.test_theta_file)

        self.train_theta = utils.read_hdf5(self.args.train_theta_file).astype(
            numpy.float32)
        log('[Testing] read %s' % self.args.train_theta_file)

        self.test_codes = utils.read_hdf5(
            self.args.test_codes_file).astype(int)
        log('[Testing] read %s' % self.args.test_codes_file)

        self.accuracy = utils.read_hdf5(self.args.accuracy_file)
        log('[Testing] read %s' % self.args.accuracy_file)

        self.perturbations = utils.read_hdf5(
            self.args.perturbations_file).astype(numpy.float32)
        self.N_attempts = self.perturbations.shape[0]

        # First, repeat relevant data.
        self.perturbation_theta = numpy.repeat(
            self.test_theta[:self.perturbations.shape[1]],
            self.N_attempts,
            axis=0)
        self.perturbation_codes = numpy.repeat(
            self.test_codes[:self.perturbations.shape[1]],
            self.N_attempts,
            axis=0)
        self.accuracy = numpy.repeat(
            self.accuracy[:self.perturbations.shape[1]],
            self.N_attempts,
            axis=0)

        # Then, reshape the perturbations!
        self.perturbations = numpy.swapaxes(self.perturbations, 0, 1)
        self.perturbations = self.perturbations.reshape(
            (self.perturbations.shape[0] * self.perturbations.shape[1], -1))
        log('[Testing] read %s' % self.args.perturbations_file)

        self.success = utils.read_hdf5(self.args.success_file)
        self.success = numpy.swapaxes(self.success, 0, 1)
        self.success = self.success.reshape(
            (self.success.shape[0] * self.success.shape[1]))
        log('[Testing] read %s' % self.args.success_file)

        database = utils.read_hdf5(self.args.database_file)
        log('[Testing] read %s' % self.args.database_file)

        self.N_font = database.shape[0]
        self.N_class = database.shape[1]
        N_theta = self.test_theta.shape[1]
        log('[Testing] using %d N_theta' % N_theta)

        database = database.reshape((database.shape[0] * database.shape[1],
                                     database.shape[2], database.shape[3]))
        database = torch.from_numpy(database)
        if self.args.use_gpu:
            database = database.cuda()
        database = torch.autograd.Variable(database, False)

        self.model = models.AlternativeOneHotDecoder(database, self.N_font,
                                                     self.N_class, N_theta)
        self.model.eval()
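Example #14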
    def load_data(self):
        """
        Load data and model.
        """

        with logw('[Detection] read %s' % self.args.train_images_file):
            self.nearest_neighbor_images = utils.read_hdf5(self.args.train_images_file)
            assert len(self.nearest_neighbor_images.shape) == 3

        with logw('[Detection] read %s' % self.args.test_images_file):
            self.test_images = utils.read_hdf5(self.args.test_images_file)
            if len(self.test_images.shape) < 4:
                self.test_images = numpy.expand_dims(self.test_images, axis=3)

        with logw('[Detection] read %s' % self.args.perturbations_file):
            self.perturbations = utils.read_hdf5(self.args.perturbations_file)
            assert len(self.perturbations.shape) == 4

        with logw('[Detection] read %s' % self.args.success_file):
            self.success = utils.read_hdf5(self.args.success_file)

        with logw('[Detection] read %s' % self.args.accuracy_file):
            self.accuracy = utils.read_hdf5(self.args.accuracy_file)

        self.perturbations = numpy.swapaxes(self.perturbations, 0, 1)
        num_attempts = self.perturbations.shape[1]
        self.test_images = self.test_images[:self.perturbations.shape[0]]
        self.train_images = self.nearest_neighbor_images[:self.perturbations.shape[0]]
        self.accuracy = self.accuracy[:self.perturbations.shape[0]]

        self.perturbations = self.perturbations.reshape((self.perturbations.shape[0]*self.perturbations.shape[1], self.perturbations.shape[2], self.perturbations.shape[3]))
        self.success = numpy.swapaxes(self.success, 0, 1)
        self.success = self.success.reshape((self.success.shape[0]*self.success.shape[1]))

        self.accuracy = numpy.repeat(self.accuracy, num_attempts, axis=0)
        self.test_images = numpy.repeat(self.test_images, num_attempts, axis=0)
        self.train_images = numpy.repeat(self.train_images, num_attempts, axis=0)

        max_samples = self.args.max_samples
        self.success = self.success[:max_samples]
        self.accuracy = self.accuracy[:max_samples]
        self.perturbations = self.perturbations[:max_samples]
        self.test_images = self.test_images[:max_samples]
        self.train_images = self.train_images[:max_samples]

        if self.args.mode == 'true':
            assert self.args.database_file
            assert self.args.test_codes_file
            assert self.args.test_theta_file

            self.test_codes = utils.read_hdf5(self.args.test_codes_file)
            log('[Detection] read %s' % self.args.test_codes_file)

            self.test_theta = utils.read_hdf5(self.args.test_theta_file)
            log('[Detection] read %s' % self.args.test_theta_file)

            self.test_codes = self.test_codes[:self.perturbations.shape[0]]
            self.test_theta = self.test_theta[:self.perturbations.shape[0]]

            self.test_codes = numpy.repeat(self.test_codes, num_attempts, axis=0)
            self.test_theta = numpy.repeat(self.test_theta, num_attempts, axis=0)

            self.test_codes = self.test_codes[:max_samples]
            self.test_theta = self.test_theta[:max_samples]

            database = utils.read_hdf5(self.args.database_file)
            log('[Detection] read %s' % self.args.database_file)

            self.N_font = database.shape[0]
            self.N_class = database.shape[1]
            self.N_theta = self.test_theta.shape[1]

            database = database.reshape((database.shape[0]*database.shape[1], database.shape[2], database.shape[3]))
            database = torch.from_numpy(database)
            if self.args.use_gpu:
                database = database.cuda()
            database = torch.autograd.Variable(database, False)

            self.model = models.AlternativeOneHotDecoder(database, self.N_font, self.N_class, self.N_theta)
            self.model.eval()
            log('[Detection] initialized decoder')
        elif self.args.mode == 'appr':
            assert self.args.decoder_files
            assert self.args.test_codes_file
            assert self.args.test_theta_file

            self.test_codes = utils.read_hdf5(self.args.test_codes_file)
            log('[Detection] read %s' % self.args.test_codes_file)

            self.test_theta = utils.read_hdf5(self.args.test_theta_file)
            log('[Detection] read %s' % self.args.test_theta_file)

            self.test_codes = self.test_codes[:self.perturbations.shape[0]]
            self.test_theta = self.test_theta[:self.perturbations.shape[0]]

            self.test_codes = numpy.repeat(self.test_codes, num_attempts, axis=0)
            self.test_theta = numpy.repeat(self.test_theta, num_attempts, axis=0)

            self.test_codes = self.test_codes[:max_samples]
            self.test_theta = self.test_theta[:max_samples]

            assert self.args.decoder_files
            decoder_files = self.args.decoder_files.split(',')
            for decoder_file in decoder_files:
                assert os.path.exists(decoder_file), 'could not find %s' % decoder_file

            resolution = [1 if len(self.test_images.shape) <= 3 else self.test_images.shape[3], self.test_images.shape[1], self.test_images.shape[2]]
            decoder_units = list(map(int, self.args.decoder_units.split(',')))

            if len(decoder_files) > 1:
                log('[Detection] loading multiple decoders')
                decoders = []
                for i in range(len(decoder_files)):
                    decoder = models.LearnedDecoder(self.args.latent_space_size,
                                                    resolution=resolution,
                                                    architecture=self.args.decoder_architecture,
                                                    start_channels=self.args.decoder_channels,
                                                    activation=self.args.decoder_activation,
                                                    batch_normalization=not self.args.decoder_no_batch_normalization,
                                                    units=decoder_units)

                    state = State.load(decoder_files[i])
                    decoder.load_state_dict(state.model)
                    if self.args.use_gpu and not cuda.is_cuda(decoder):
                        decoder = decoder.cuda()
                    decoders.append(decoder)

                    decoder.eval()
                    log('[Detection] loaded %s' % decoder_files[i])
                self.model = models.SelectiveDecoder(decoders, resolution=resolution)
            else:
                log('[Detection] loading one decoder')
                decoder = models.LearnedDecoder(self.args.latent_space_size,
                                                resolution=resolution,
                                                architecture=self.args.decoder_architecture,
                                                start_channels=self.args.decoder_channels,
                                                activation=self.args.decoder_activation,
                                                batch_normalization=not self.args.decoder_no_batch_normalization,
                                                units=decoder_units)

                state = State.load(decoder_files[0])
                decoder.load_state_dict(state.model)
                if self.args.use_gpu and not cuda.is_cuda(decoder):
                    decoder = decoder.cuda()
                decoder.eval()
                log('[Detection] read decoder')

                self.model = decoder
Example #15
    def load_data(self):
        """
        Load data and model.
        """

        self.test_images = utils.read_hdf5(self.args.test_images_file).astype(numpy.float32)
        log('[Testing] read %s' % self.args.test_images_file)

        # For handling both color and gray images.
        if len(self.test_images.shape) < 4:
            self.test_images = numpy.expand_dims(self.test_images, axis=3)
            log('[Testing] no color images, adjusted size')
        self.resolution = self.test_images.shape[2]
        log('[Testing] resolution %d' % self.resolution)

        self.train_images = utils.read_hdf5(self.args.train_images_file).astype(numpy.float32)
        # Flatten training images to one vector per image.
        self.train_images = self.train_images.reshape((self.train_images.shape[0], -1))
        log('[Testing] read %s' % self.args.train_images_file)

        self.test_theta = utils.read_hdf5(self.args.test_theta_file).astype(numpy.float32)
        log('[Testing] read %s' % self.args.test_theta_file)

        self.train_theta = utils.read_hdf5(self.args.train_theta_file).astype(numpy.float32)
        log('[Testing] read %s' % self.args.train_theta_file)

        self.test_codes = utils.read_hdf5(self.args.test_codes_file).astype(int)
        self.test_codes = self.test_codes[:, self.args.label_index]
        self.N_class = numpy.max(self.test_codes) + 1
        log('[Testing] read %s' % self.args.test_codes_file)

        self.accuracy = utils.read_hdf5(self.args.accuracy_file)
        log('[Testing] read %s' % self.args.accuracy_file)

        self.perturbations = utils.read_hdf5(self.args.perturbations_file).astype(numpy.float32)
        self.N_attempts = self.perturbations.shape[0]
        assert not numpy.any(self.perturbations != self.perturbations), 'NaN in perturbations'

        # First, repeat relevant data.
        self.perturbation_theta = numpy.repeat(self.test_theta[:self.perturbations.shape[1]], self.N_attempts, axis=0)
        self.perturbation_codes = numpy.repeat(self.test_codes[:self.perturbations.shape[1]], self.N_attempts, axis=0)
        self.perturbation_codes = numpy.squeeze(self.perturbation_codes)
        self.accuracy = numpy.repeat(self.accuracy[:self.perturbations.shape[1]], self.N_attempts, axis=0)

        # Then, reshape the perturbations!
        self.perturbations = numpy.swapaxes(self.perturbations, 0, 1)
        self.perturbations = self.perturbations.reshape((self.perturbations.shape[0] * self.perturbations.shape[1], -1))
        log('[Testing] read %s' % self.args.perturbations_file)

        self.success = utils.read_hdf5(self.args.success_file)
        self.success = numpy.swapaxes(self.success, 0, 1)
        self.success = self.success.reshape((self.success.shape[0] * self.success.shape[1]))
        log('[Testing] read %s' % self.args.success_file)

        assert self.args.decoder_files
        decoder_files = self.args.decoder_files.split(',')
        for decoder_file in decoder_files:
            assert os.path.exists(decoder_file), 'could not find %s' % decoder_file

        log('[Testing] using %d input channels' % self.test_images.shape[3])
        decoder_units = list(map(int, self.args.decoder_units.split(',')))

        if len(decoder_files) > 1:
            log('[Testing] loading multiple decoders')
            decoders = []
            for i in range(len(decoder_files)):
                decoder = models.LearnedDecoder(self.args.latent_space_size, resolution=(self.test_images.shape[3], self.test_images.shape[1], self.test_images.shape[2]),
                                                architecture=self.args.decoder_architecture,
                                                start_channels=self.args.decoder_channels,
                                                activation=self.args.decoder_activation,
                                                batch_normalization=not self.args.decoder_no_batch_normalization,
                                                units=decoder_units)

                state = State.load(decoder_files[i])
                decoder.load_state_dict(state.model)
                if self.args.use_gpu and not cuda.is_cuda(decoder):
                    decoder = decoder.cuda()
                decoders.append(decoder)

                decoder.eval()
                log('[Testing] loaded %s' % decoder_files[i])
            self.model = models.SelectiveDecoder(decoders, resolution=(self.test_images.shape[3], self.test_images.shape[1], self.test_images.shape[2]))
        else:
            log('[Testing] loading one decoder')
            decoder = models.LearnedDecoder(self.args.latent_space_size, resolution=(self.test_images.shape[3], self.test_images.shape[1], self.test_images.shape[2]),
                                            architecture=self.args.decoder_architecture,
                                            start_channels=self.args.decoder_channels,
                                            activation=self.args.decoder_activation,
                                            batch_normalization=not self.args.decoder_no_batch_normalization,
                                            units=decoder_units)

            state = State.load(decoder_files[0])
            decoder.load_state_dict(state.model)
            if self.args.use_gpu and not cuda.is_cuda(decoder):
                decoder = decoder.cuda()
            decoder.eval()
            log('[Testing] read decoder')

            self.model = decoder