Code example #1
    def load_model(self):
        """
        Load model.
        """

        database = utils.read_hdf5(self.args.database_file).astype(
            numpy.float32)
        log('[Testing] read %s' % self.args.database_file)

        self.N_font = database.shape[0]
        self.N_class = database.shape[1]

        database = database.reshape((database.shape[0] * database.shape[1],
                                     database.shape[2], database.shape[3]))
        database = torch.from_numpy(database)
        if self.args.use_gpu:
            database = database.cuda()
        database = torch.autograd.Variable(database, False)

        test_theta = utils.read_hdf5(self.args.test_theta_file)
        N_theta = test_theta.shape[1]
        log('[Testing] read %s' % self.args.test_theta_file)

        log('[Testing] using %d N_theta' % N_theta)
        self.model = models.AlternativeOneHotDecoder(database, self.N_font,
                                                     self.N_class, N_theta)
        self.model.eval()
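
All of these examples rely on utils.read_hdf5 and utils.write_hdf5 helpers whose implementation is not shown. A minimal sketch of what they might look like, assuming each file stores a single dataset under one key (the key name 'tensor' is a guess, not confirmed by the source):

import h5py
import numpy

def read_hdf5(filepath, key='tensor'):
    # Read one dataset from an HDF5 file as a numpy array.
    with h5py.File(filepath, 'r') as f:
        return numpy.array(f[key])

def write_hdf5(filepath, tensor, key='tensor'):
    # Write a numpy array as one dataset into an HDF5 file.
    with h5py.File(filepath, 'w') as f:
        f.create_dataset(key, data=tensor)
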
Code example #2
    def load_data(self):
        """
        Load data.
        """

        test_codes = utils.read_hdf5(self.args.test_codes_file).astype(
            numpy.int)
        self.test_fonts = test_codes[:, 1]
        self.test_classes = test_codes[:, 2]
        log('[Testing] read %s' % self.args.test_codes_file)

        self.perturbations = utils.read_hdf5(
            self.args.perturbations_file).astype(numpy.float32)
        self.N_attempts = self.perturbations.shape[0]
        self.N_samples = self.perturbations.shape[1]
        self.perturbations = numpy.swapaxes(self.perturbations, 0, 1)
        self.perturbations = self.perturbations.reshape(
            (self.perturbations.shape[0] * self.perturbations.shape[1], -1))
        log('[Testing] read %s' % self.args.perturbations_file)

        self.test_fonts = numpy.repeat(self.test_fonts[:self.N_samples],
                                       self.N_attempts,
                                       axis=0)
        self.test_classes = numpy.repeat(self.test_classes[:self.N_samples],
                                         self.N_attempts,
                                         axis=0)
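
The swapaxes/reshape/repeat sequence above flattens perturbations stored as (attempts, samples, ...) into one row per (sample, attempt) pair, and repeats the per-sample labels so both arrays stay aligned. A small self-contained illustration of that alignment:

import numpy

N_attempts, N_samples, D = 3, 5, 2
perturbations = numpy.zeros((N_attempts, N_samples, D))
fonts = numpy.arange(N_samples)

# (attempts, samples, D) -> (samples, attempts, D) -> (samples * attempts, D)
perturbations = numpy.swapaxes(perturbations, 0, 1)
perturbations = perturbations.reshape((N_samples * N_attempts, -1))

# Repeat each label N_attempts times so row i of perturbations still
# corresponds to the label in row i of fonts.
fonts = numpy.repeat(fonts, N_attempts, axis=0)
assert perturbations.shape[0] == fonts.shape[0]
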
Code example #3
    def main(self):
        """
        Main.
        """

        if not os.path.exists(self.args.images_file):
            log('[Visualization] file %s not found' % self.args.images_file)
            exit(1)

        if self.args.output_directory and not os.path.exists(self.args.output_directory):
            log('[Visualization] creating %s' % self.args.output_directory)
            os.makedirs(self.args.output_directory)

        images = utils.read_hdf5(self.args.images_file)
        if len(images.shape) > 3 and images.shape[3] != 3:
            images = images.reshape(-1, images.shape[2], images.shape[3])
        log('[Visualization] read %s' % self.args.images_file)

        if self.args.codes_file:
            codes = utils.read_hdf5(self.args.codes_file)
            log('[Visualization] read %s' % self.args.codes_file)
            codes = codes[:, self.args.label_index]

            if self.args.label >= 0:
                images = images[codes == self.args.label]

        for n in range(min(images.shape[0], self.args.max_images)):
            png_file = os.path.join(self.args.output_directory, '%d.png' % n)
            vis.image(png_file, images[n], 5, 'gray', self.args.vmin,
                      self.args.vmax)
            log('[Visualization] wrote %s' % png_file)
Code example #4
    def main(self):
        """
        Main.
        """

        train_images_file = paths.celeba_train_images_file()
        test_images_file = paths.celeba_test_images_file()

        assert os.path.exists(train_images_file)
        assert os.path.exists(test_images_file)

        train_images = utils.read_hdf5(train_images_file)
        log('read %s' % train_images_file)

        test_images = utils.read_hdf5(test_images_file)
        log('read %s' % test_images_file)

        log('[Data] before train: %g %g' %
            (numpy.min(train_images), numpy.max(train_images)))
        log('[Data] before test: %g %g' %
            (numpy.min(test_images), numpy.max(test_images)))

        train_images *= 255
        test_images *= 255

        log('[Data] after train: %g %g' %
            (numpy.min(train_images), numpy.max(train_images)))
        log('[Data] after test: %g %g' %
            (numpy.min(test_images), numpy.max(test_images)))

        utils.write_hdf5(train_images_file, train_images.astype(numpy.float32))
        log('[Data] wrote %s' % train_images_file)
        utils.write_hdf5(test_images_file, test_images.astype(numpy.float32))
        log('[Data] wrote %s' % test_images_file)
Code example #5
    def main(self):
        """
        Main method.
        """

        database = utils.read_hdf5(self.args.database_file)
        log('[Data] read %s' % self.args.database_file)

        N_font = database.shape[0]
        N_class = database.shape[1]

        assert database.shape[2] == database.shape[3]
        database = database.reshape((database.shape[0] * database.shape[1],
                                     database.shape[2], database.shape[3]))
        database = torch.from_numpy(database).float()
        if self.args.use_gpu:
            database = database.cuda()

        database = torch.autograd.Variable(database)

        codes = utils.read_hdf5(self.args.codes_file)
        codes = codes[:, 0]
        codes = common.numpy.one_hot(codes, N_font * N_class)
        log('[Data] read %s' % self.args.codes_file)

        theta = utils.read_hdf5(self.args.theta_file)
        N = theta.shape[0]
        N_theta = theta.shape[1]
        log('[Data] read %s' % self.args.theta_file)

        model = models.OneHotDecoder(database, N_theta)
        images = []

        num_batches = int(math.ceil(float(N) / self.args.batch_size))
        for b in range(num_batches):
            batch_theta = torch.from_numpy(
                theta[b * self.args.batch_size:min((b + 1) *
                                                   self.args.batch_size, N)])
            batch_codes = torch.from_numpy(
                codes[b * self.args.batch_size:min((b + 1) *
                                                   self.args.batch_size, N)])
            batch_codes, batch_theta = batch_codes.float(), batch_theta.float()

            if self.args.use_gpu:
                batch_codes, batch_theta = batch_codes.cuda(
                ), batch_theta.cuda()

            batch_codes, batch_theta = torch.autograd.Variable(
                batch_codes), torch.autograd.Variable(batch_theta)
            output = model(batch_codes, batch_theta)

            images.append(output.data.cpu().numpy().squeeze())
            if b % 1000 == 0:
                log('[Data] processed %d/%d batches' % (b + 1, num_batches))

        images = numpy.concatenate(images, axis=0)
        if len(images.shape) > 3:
            images = numpy.transpose(images, (0, 2, 3, 1))
        utils.write_hdf5(self.args.images_file, images)
        log('[Data] wrote %s' % self.args.images_file)
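
The batch loop above repeats the same slicing for theta and codes; a generic helper expressing that batching pattern (a sketch, not part of the original code):

import math

def batches(data, batch_size):
    # Yield consecutive row slices of a numpy array.
    N = data.shape[0]
    num_batches = int(math.ceil(float(N) / batch_size))
    for b in range(num_batches):
        yield data[b * batch_size:min((b + 1) * batch_size, N)]
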
Code example #6
    def load_data(self):
        """
        Load data.
        """

        self.test_images = utils.read_hdf5(self.args.test_images_file)
        log('[Testing] read %s' % self.args.test_images_file)

        if len(self.test_images.shape) < 4:
            self.test_images = numpy.expand_dims(self.test_images, axis=3)
        self.resolution = self.test_images.shape[1]
        self.image_channels = self.test_images.shape[3]

        self.test_codes = utils.read_hdf5(self.args.test_codes_file)
        self.test_codes = self.test_codes[:, self.args.label_index]
        log('[Testing] read %s' % self.args.test_codes_file)

        self.perturbations = utils.read_hdf5(
            self.args.perturbations_file).astype(numpy.float32)
        self.N_attempts = self.perturbations.shape[0]
        self.N_samples = self.perturbations.shape[1]
        self.perturbations = numpy.swapaxes(self.perturbations, 0, 1)
        self.perturbations = self.perturbations.reshape(
            (self.perturbations.shape[0] * self.perturbations.shape[1], -1))
        log('[Testing] read %s' % self.args.perturbations_file)

        self.test_codes = numpy.repeat(self.test_codes[:self.N_samples],
                                       self.N_attempts,
                                       axis=0)
        self.test_images = numpy.repeat(self.test_images[:self.N_samples],
                                        self.N_attempts,
                                        axis=0)
Code example #7
    def load_data(self):
        """
        Load data.
        """

        self.train_images = utils.read_hdf5(self.args.train_images_file).astype(numpy.float32)
        log('[Training] read %s' % self.args.train_images_file)

        self.test_images = utils.read_hdf5(self.args.test_images_file).astype(numpy.float32)
        log('[Training] read %s' % self.args.test_images_file)

        # For handling both color and gray images.
        if len(self.train_images.shape) < 4:
            self.train_images = numpy.expand_dims(self.train_images, axis=3)
            self.test_images = numpy.expand_dims(self.test_images, axis=3)
            log('[Training] no color images, adjusted size')
        self.resolution = self.train_images.shape[2]
        log('[Training] resolution %d' % self.resolution)

        self.train_codes = utils.read_hdf5(self.args.train_codes_file).astype(numpy.float32)
        log('[Training] read %s' % self.args.train_codes_file)

        self.test_codes = utils.read_hdf5(self.args.test_codes_file).astype(numpy.float32)
        log('[Training] read %s' % self.args.test_codes_file)

        self.train_codes = self.train_codes[:, self.args.label_index]
        self.test_codes = self.test_codes[:, self.args.label_index]

        if self.args.label >= 0:
            self.train_images = self.train_images[self.train_codes == self.args.label]
            self.test_images = self.test_images[self.test_codes == self.args.label]
Code example #8
    def load_data(self):
        """
        Load data.
        """

        self.test_images = utils.read_hdf5(self.args.test_images_file).astype(
            numpy.float32)
        log('[Attack] read %s' % self.args.test_images_file)

        # For color and gray images.
        if len(self.test_images.shape) < 4:
            self.test_images = numpy.expand_dims(self.test_images, axis=3)

        self.test_codes = utils.read_hdf5(self.args.test_codes_file).astype(
            numpy.int)
        self.test_codes = self.test_codes[:, self.args.label_index]
        log('[Attack] read %s' % self.args.test_codes_file)

        self.test_theta = utils.read_hdf5(self.args.test_theta_file).astype(
            numpy.float32)
        log('[Attack] read %s' % self.args.test_theta_file)

        self.N_class = numpy.max(self.test_codes) + 1
        self.min_bound = numpy.min(self.test_theta, 0)
        self.max_bound = numpy.max(self.test_theta, 0)

        if self.args.max_samples < 0:
            self.args.max_samples = self.test_theta.shape[0]
        else:
            self.args.max_samples = min(self.args.max_samples,
                                        self.test_theta.shape[0])
Code example #9
    def load_data(self):
        """
        Load data.
        """

        self.test_images = utils.read_hdf5(self.args.test_images_file).astype(numpy.float32)
        if len(self.test_images.shape) < 4:
            self.test_images = numpy.expand_dims(self.test_images, axis=3)
        log('[Testing] read %s' % self.args.test_images_file)

        self.test_codes = utils.read_hdf5(self.args.test_codes_file).astype(numpy.int)
        self.test_codes = self.test_codes[:, self.args.label_index]
        log('[Testing] read %s' % self.args.test_codes_file)

        self.perturbations = utils.read_hdf5(self.args.perturbations_file).astype(numpy.float32)
        self.N_attempts = self.perturbations.shape[0]
        self.N_samples = self.perturbations.shape[1]
        self.perturbations = numpy.swapaxes(self.perturbations, 0, 1)
        if len(self.perturbations.shape) <= 4:
            self.perturbations = self.perturbations.reshape((self.perturbations.shape[0] * self.perturbations.shape[1], self.perturbations.shape[2], self.perturbations.shape[3], 1))
        else:
            self.perturbations = self.perturbations.reshape((self.perturbations.shape[0] * self.perturbations.shape[1], self.perturbations.shape[2], self.perturbations.shape[3], self.perturbations.shape[4]))

        log('[Testing] read %s' % self.args.perturbations_file)

        self.original_success = utils.read_hdf5(self.args.original_success_file)
        self.original_success = numpy.swapaxes(self.original_success, 0, 1)
        self.original_success = self.original_success.reshape((self.original_success.shape[0] * self.original_success.shape[1]))
        log('[Testing] read %s' % self.args.original_success_file)

        self.original_accuracy = utils.read_hdf5(self.args.original_accuracy_file)
        log('[Testing] read %s' % self.args.original_accuracy_file)

        self.perturbation_codes = numpy.repeat(self.test_codes[:self.N_samples], self.N_attempts, axis=0)
        self.transfer_success = numpy.copy(self.original_success)
Code example #10
    def load_data(self):
        """
        Load data.
        """

        self.test_images = utils.read_hdf5(self.args.test_images_file).astype(
            numpy.float32)
        log('[Visualization] read %s' % self.args.test_images_file)

        self.test_codes = utils.read_hdf5(self.args.test_codes_file).astype(
            numpy.int)
        self.test_codes = self.test_codes[:, self.args.label_index]
        log('[Visualization] read %s' % self.args.test_codes_file)

        self.N_class = numpy.max(self.test_codes) + 1
        self.resolution = self.test_images.shape[1]
        self.image_channels = self.test_images.shape[3] if len(
            self.test_images.shape) > 3 else 1
        log('[Visualization] resolution %d' % self.resolution)

        if self.args.max_samples < 0:
            self.args.max_samples = self.test_codes.shape[0]
        else:
            self.args.max_samples = min(self.args.max_samples,
                                        self.test_codes.shape[0])
Code example #11
    def load_data(self):
        """
        Load data and model.
        """

        self.test_images = utils.read_hdf5(self.args.test_images_file).astype(
            numpy.float32)
        log('[Testing] read %s' % self.args.test_images_file)

        if len(self.test_images.shape) < 4:
            self.test_images = numpy.expand_dims(self.test_images, axis=3)

        self.test_codes = utils.read_hdf5(self.args.test_codes_file)
        self.test_codes = self.test_codes[:, self.args.label_index]
        log('[Testing] read %s' % self.args.test_codes_file)

        self.perturbations = utils.read_hdf5(self.args.perturbations_file)
        if len(self.perturbations.shape) > 3:
            self.perturbations = self.perturbations.reshape(
                (self.perturbations.shape[0], self.perturbations.shape[1], -1))
        self.perturbation_images = self.test_images[:self.perturbations.shape[1]]
        self.perturbation_images = self.perturbation_images.reshape((self.perturbations.shape[1], -1))
        self.perturbation_codes = self.test_codes[:self.perturbations.shape[1]]
        log('[Testing] read %s' % self.args.perturbations_file)
        assert not numpy.any(
            self.perturbations != self.perturbations), 'NaN in perturbations'

        self.success = utils.read_hdf5(self.args.success_file)
        log('[Testing] read %s' % self.args.success_file)

        self.probabilities = utils.read_hdf5(self.args.probabilities_file)
        log('[Testing] read %s' % self.args.probabilities_file)
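
The assert on the perturbations above uses the x != x idiom, which is true exactly for NaN entries; numpy.isnan is the more explicit equivalent:

import numpy

a = numpy.array([1.0, float('nan')])
assert numpy.any(a != a) == numpy.any(numpy.isnan(a))
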
Code example #12
    def main(self):
        """
        Main method, intended to be overridden.
        """

        self.test_images = utils.read_hdf5(self.args.test_images_file).astype(
            numpy.float32)
        log('[Testing] read %s' % self.args.test_images_file)

        # For handling both color and gray images.
        if len(self.test_images.shape) < 4:
            self.test_images = numpy.expand_dims(self.test_images, axis=3)
            log('[Testing] no color images, adjusted size')
        self.resolution = self.test_images.shape[2]
        log('[Testing] resolution %d' % self.resolution)

        self.test_codes = utils.read_hdf5(self.args.test_codes_file).astype(
            numpy.int)
        self.test_codes = self.test_codes[:, self.args.label_index]
        log('[Testing] read %s' % self.args.test_codes_file)

        N_class = numpy.max(self.test_codes) + 1
        network_units = list(map(int, self.args.network_units.split(',')))
        log('[Testing] using %d input channels' % self.test_images.shape[3])
        self.model = models.Classifier(
            N_class,
            resolution=(self.test_images.shape[3], self.test_images.shape[1],
                        self.test_images.shape[2]),
            architecture=self.args.network_architecture,
            activation=self.args.network_activation,
            batch_normalization=not self.args.network_no_batch_normalization,
            start_channels=self.args.network_channels,
            dropout=self.args.network_dropout,
            units=network_units)

        assert os.path.exists(
            self.args.state_file
        ), 'state file %s not found' % self.args.state_file
        state = State.load(self.args.state_file)
        log('[Testing] read %s' % self.args.state_file)

        self.model.load_state_dict(state.model)
        if self.args.use_gpu and not cuda.is_cuda(self.model):
            log('[Testing] model is not CUDA')
            self.model = self.model.cuda()
        log('[Testing] loaded model')

        self.model.eval()
        log('[Testing] set classifier to eval')

        self.test()
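
Calling self.model.eval() before testing matters because the classifier may use dropout and batch normalization, both of which behave differently at training time. A quick demonstration with a bare dropout module:

import torch

m = torch.nn.Dropout(p=0.5)
m.eval()
x = torch.ones(4)
assert torch.equal(m(x), x)  # dropout is a no-op in eval mode
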
Code example #13
    def main(self):
        """
        Main method.
        """

        with logw('[Data] read %s' % self.args.database_file):
            database = utils.read_hdf5(self.args.database_file)
        with logw('[Data] read %s' % self.args.codes_file):
            codes = utils.read_hdf5(self.args.codes_file)
        with logw('[Data] read %s' % self.args.theta_file):
            theta = utils.read_hdf5(self.args.theta_file)
        with logw('[Data] read %s' % self.args.images_file):
            images = utils.read_hdf5(self.args.images_file)

        with logw('[Data] read %s' % self.args.train_codes_file):
            train_codes = utils.read_hdf5(self.args.train_codes_file)
        with logw('[Data] read %s' % self.args.train_theta_file):
            train_theta = utils.read_hdf5(self.args.train_theta_file)
        with logw('[Data] read %s' % self.args.train_images_file):
            train_images = utils.read_hdf5(self.args.train_images_file)

        with logw('[Data] read %s' % self.args.test_codes_file):
            test_codes = utils.read_hdf5(self.args.test_codes_file)
        with logw('[Data] read %s' % self.args.test_theta_file):
            test_theta = utils.read_hdf5(self.args.test_theta_file)
        with logw('[Data] read %s' % self.args.test_images_file):
            test_images = utils.read_hdf5(self.args.test_images_file)

        log('[Data] database: %s' %
            'x'.join([str(dim) for dim in database.shape]))
        log('[Data] codes: %s' % 'x'.join([str(dim) for dim in codes.shape]))
        log('[Data] theta: %s' % 'x'.join([str(dim) for dim in theta.shape]))
        log('[Data] images: %s' % 'x'.join([str(dim) for dim in images.shape]))

        log('[Data] train_codes: %s' %
            'x'.join([str(dim) for dim in train_codes.shape]))
        log('[Data] train_theta: %s' %
            'x'.join([str(dim) for dim in train_theta.shape]))
        log('[Data] train_images: %s' %
            'x'.join([str(dim) for dim in train_images.shape]))

        log('[Data] test_codes: %s' %
            'x'.join([str(dim) for dim in test_codes.shape]))
        log('[Data] test_theta: %s' %
            'x'.join([str(dim) for dim in test_theta.shape]))
        log('[Data] test_images: %s' %
            'x'.join([str(dim) for dim in test_images.shape]))
Code example #14
    def main(self):
        """
        Main method.
        """

        codes = utils.read_hdf5(self.args.codes_file)
        log('[Data] read %s' % self.args.codes_file)

        theta = utils.read_hdf5(self.args.theta_file)
        log('[Data] read %s' % self.args.theta_file)

        images = utils.read_hdf5(self.args.images_file)
        log('[Data] read %s' % self.args.images_file)

        #
        # The set is not split randomly.
        # This simplifies training set subselection while enforcing balanced datasets:
        # for example, with 10 classes, every subset whose size is a multiple of 10
        # is balanced by construction.
        #

        N = codes.shape[0]
        N_train = self.args.N_train

        train_codes = codes[:N_train]
        test_codes = codes[N_train:]

        train_theta = theta[:N_train]
        test_theta = theta[N_train:]

        train_images = images[:N_train]
        test_images = images[N_train:]

        utils.write_hdf5(self.args.train_codes_file, train_codes)
        log('[Data] wrote %s' % self.args.train_codes_file)
        utils.write_hdf5(self.args.test_codes_file, test_codes)
        log('[Data] wrote %s' % self.args.test_codes_file)

        utils.write_hdf5(self.args.train_theta_file, train_theta)
        log('[Data] wrote %s' % self.args.train_theta_file)
        utils.write_hdf5(self.args.test_theta_file, test_theta)
        log('[Data] wrote %s' % self.args.test_theta_file)

        utils.write_hdf5(self.args.train_images_file, train_images)
        log('[Data] wrote %s' % self.args.train_images_file)
        utils.write_hdf5(self.args.test_images_file, test_images)
        log('[Data] wrote %s' % self.args.test_images_file)
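
The comment about balance by construction can be checked directly: if the labels cycle through the classes, any prefix whose length is a multiple of the class count contains every class equally often.

import numpy

codes = numpy.tile(numpy.arange(10), 100)  # 1000 labels laid out 0..9,0..9,...
N_train = 700                              # a multiple of 10
counts = numpy.bincount(codes[:N_train], minlength=10)
assert numpy.all(counts == N_train // 10)
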
Code example #15
    def load_data(self):
        """
        Load data.
        """

        test_codes = utils.read_hdf5(self.args.test_codes_file).astype(numpy.int)
        self.test_fonts = test_codes[:, 1]
        self.test_classes = test_codes[:, 2]
        log('[Attack] read %s' % self.args.test_codes_file)

        self.test_theta = utils.read_hdf5(self.args.test_theta_file).astype(numpy.float32)
        log('[Attack] read %s' % self.args.test_theta_file)

        self.min_bound = numpy.min(self.test_theta, 0)
        self.max_bound = numpy.max(self.test_theta, 0)

        if self.args.max_samples < 0:
            self.args.max_samples = self.test_theta.shape[0]
        else:
            self.args.max_samples = min(self.args.max_samples, self.test_theta.shape[0])
Code example #16
    def main(self):
        """
        Main method.
        """

        theta = utils.read_hdf5(self.args.theta_file)
        log('[Data] read %s' % self.args.theta_file)

        # Log per-dimension minima and maxima of theta.
        log('[Data] theta min: [%s]' % ', '.join(
            '%f' % numpy.min(theta[:, d]) for d in range(theta.shape[1])))
        log('[Data] theta max: [%s]' % ', '.join(
            '%f' % numpy.max(theta[:, d]) for d in range(theta.shape[1])))

        codes = utils.read_hdf5(self.args.codes_file)
        log('[Data] read %s' % self.args.codes_file)
        log('[Data] codes: %s' % str(codes))
Code example #17
    def load_data(self):
        """
        Load data.
        """

        self.test_images = utils.read_hdf5(self.args.test_images_file).astype(
            numpy.float32)
        log('[Attack] read %s' % self.args.test_images_file)

        if len(self.test_images.shape) < 4:
            self.test_images = numpy.expand_dims(self.test_images, axis=3)

        self.test_codes = utils.read_hdf5(self.args.test_codes_file).astype(
            numpy.int)
        self.test_codes = self.test_codes[:, self.args.label_index]
        log('[Attack] read %s' % self.args.test_codes_file)

        if self.args.max_samples < 0:
            self.args.max_samples = self.test_images.shape[0]
        else:
            self.args.max_samples = min(self.args.max_samples,
                                        self.test_images.shape[0])
Code example #18
    def main(self):
        """
        Main.
        """

        if not os.path.exists(self.args.images_file):
            log('[Visualization] file %s not found' % self.args.images_file)
            exit(1)

        if self.args.output_directory and not os.path.exists(self.args.output_directory):
            log('[Visualization] creating %s' % self.args.output_directory)
            os.makedirs(self.args.output_directory)

        assert self.args.cols > 0, 'number of columns has to be larger than 0'
        assert self.args.rows > 0, 'number of rows has to be larger than 0'

        images = utils.read_hdf5(self.args.images_file)
        if len(images.shape) > 3 and images.shape[3] != 3:
            images = images.reshape(-1, images.shape[2], images.shape[3])
        log('[Visualization] read %s' % self.args.images_file)

        if self.args.codes_file:
            codes = utils.read_hdf5(self.args.codes_file)
            log('[Visualization] read %s' % self.args.codes_file)
            codes = codes[:, self.args.label_index]

            if self.args.label >= 0:
                images = images[codes == self.args.label]

        batch_size = self.args.rows * self.args.cols
        num_batches = math.ceil(images.shape[0] / batch_size)

        for b in range(min(num_batches, self.args.max_images)):
            png_file = os.path.join(self.args.output_directory, '%d.png' % b)
            batch = images[b * batch_size:min((b + 1) *
                                              batch_size, images.shape[0])]
            vis.mosaic(png_file, batch, self.args.cols, 5, 'gray',
                       self.args.vmin, self.args.vmax)
            log('[Visualization] wrote %s' % png_file)
Code example #19
    def load_model(self):
        """
        Load model.
        """

        database = utils.read_hdf5(self.args.database_file).astype(numpy.float32)
        log('[Attack] read %s' % self.args.database_file)

        self.N_font = database.shape[0]
        self.N_class = database.shape[1]
        resolution = database.shape[2]

        database = database.reshape((database.shape[0] * database.shape[1], database.shape[2], database.shape[3]))
        database = torch.from_numpy(database)
        if self.args.use_gpu:
            database = database.cuda()
        database = torch.autograd.Variable(database, False)

        N_theta = self.test_theta.shape[1]
        log('[Attack] using %d N_theta' % N_theta)
        decoder = models.AlternativeOneHotDecoder(database, self.N_font, self.N_class, N_theta)
        decoder.eval()

        image_channels = 1 if N_theta <= 7 else 3
        network_units = list(map(int, self.args.network_units.split(',')))
        log('[Attack] using %d input channels' % image_channels)
        classifier = models.Classifier(self.N_class, resolution=(image_channels, resolution, resolution),
                                       architecture=self.args.network_architecture,
                                       activation=self.args.network_activation,
                                       batch_normalization=not self.args.network_no_batch_normalization,
                                       start_channels=self.args.network_channels,
                                       dropout=self.args.network_dropout,
                                       units=network_units)

        assert os.path.exists(self.args.classifier_file), 'state file %s not found' % self.args.classifier_file
        state = State.load(self.args.classifier_file)
        log('[Attack] read %s' % self.args.classifier_file)

        classifier.load_state_dict(state.model)
        if self.args.use_gpu and not cuda.is_cuda(classifier):
            log('[Attack] classifier is not CUDA')
            classifier = classifier.cuda()
        log('[Attack] loaded classifier')

        # Important: the classifier must stay in evaluation mode.
        classifier.eval()
        log('[Attack] set classifier to eval')

        self.model = models.DecoderClassifier(decoder, classifier)
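
models.DecoderClassifier is not shown; from its use here it composes the decoder and the classifier so that attacks on theta are evaluated end to end. A minimal sketch under that assumption (the forward signature is a guess):

import torch

class DecoderClassifier(torch.nn.Module):
    # Chain a decoder and a classifier into one differentiable model.
    def __init__(self, decoder, classifier):
        super(DecoderClassifier, self).__init__()
        self.decoder = decoder
        self.classifier = classifier

    def forward(self, theta):
        # Decode theta to an image, then classify that image.
        return self.classifier(self.decoder(theta))
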
Code example #20
    def load_data(self):
        """
        Load data and model.
        """

        self.test_images = utils.read_hdf5(self.args.test_images_file).astype(numpy.float32)
        log('[Testing] read %s' % self.args.test_images_file)

        # For handling both color and gray images.
        if len(self.test_images.shape) < 4:
            self.test_images = numpy.expand_dims(self.test_images, axis=3)
            log('[Testing] no color images, adjusted size')
        self.resolution = self.test_images.shape[2]
        log('[Testing] resolution %d' % self.resolution)

        self.train_images = utils.read_hdf5(self.args.train_images_file).astype(numpy.float32)
        # Flatten the training images into vectors.
        self.train_images = self.train_images.reshape((self.train_images.shape[0], -1))
        log('[Testing] read %s' % self.args.train_images_file)

        self.test_codes = utils.read_hdf5(self.args.test_codes_file).astype(numpy.int)
        self.test_codes = self.test_codes[:, self.args.label_index]
        self.N_class = numpy.max(self.test_codes) + 1
        log('[Testing] read %s' % self.args.test_codes_file)

        self.accuracy = utils.read_hdf5(self.args.accuracy_file)
        log('[Testing] read %s' % self.args.accuracy_file)

        self.perturbations = utils.read_hdf5(self.args.perturbations_file).astype(numpy.float32)
        self.N_attempts = self.perturbations.shape[0]

        # First, repeat relevant data.
        self.test_images = numpy.repeat(self.test_images[:self.perturbations.shape[1]], self.N_attempts, axis=0)
        self.perturbation_codes = numpy.repeat(self.test_codes[:self.perturbations.shape[1]], self.N_attempts, axis=0)
        self.perturbation_codes = numpy.squeeze(self.perturbation_codes)
        self.accuracy = numpy.repeat(self.accuracy[:self.perturbations.shape[1]], self.N_attempts, axis=0)

        # Then, reshape the perturbations!
        self.perturbations = numpy.swapaxes(self.perturbations, 0, 1)
        self.perturbations = self.perturbations.reshape((self.perturbations.shape[0] * self.perturbations.shape[1], -1))
        assert self.perturbations.shape[1] == self.args.N_theta
        log('[Testing] read %s' % self.args.perturbations_file)
        assert not numpy.any(self.perturbations != self.perturbations), 'NaN in perturbations'

        self.success = utils.read_hdf5(self.args.success_file)
        self.success = numpy.swapaxes(self.success, 0, 1)
        self.success = self.success.reshape((self.success.shape[0] * self.success.shape[1]))
        log('[Testing] read %s' % self.args.success_file)

        log('[Testing] using %d input channels' % self.test_images.shape[3])
        assert self.args.N_theta > 0 and self.args.N_theta <= 9
        decoder = models.STNDecoder(self.args.N_theta)
        # decoder.eval()
        log('[Testing] set up STN decoder')

        self.model = decoder
Code example #21
    def main(self):
        """
        Main method.
        """

        images = utils.read_hdf5(self.args.images_file)
        log('[Data] read %s' % self.args.images_file)
        log('[Data] #images: %d' % images.shape[0])

        images = images.reshape((images.shape[0], -1))
        l2_norm = numpy.average(numpy.linalg.norm(images, ord=2, axis=1))
        l1_norm = numpy.average(numpy.linalg.norm(images, ord=1, axis=1))
        linf_norm = numpy.average(
            numpy.linalg.norm(images, ord=float('inf'), axis=1))
        log('[Data] average L_2 norm: %g' % l2_norm)
        log('[Data] average L_1 norm: %g' % l1_norm)
        log('[Data] average L_inf norm: %g' % linf_norm)
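
With ord=float('inf') and axis=1, numpy.linalg.norm returns the maximum absolute value per row, so the logged L_inf norm is the average per-image maximum pixel magnitude:

import numpy

x = numpy.array([[1.0, -3.0], [0.5, 0.25]])
assert numpy.allclose(numpy.linalg.norm(x, ord=float('inf'), axis=1),
                      numpy.abs(x).max(axis=1))
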
Code example #22
    def load_data(self):
        """
        Load data and model.
        """

        self.test_images = utils.read_hdf5(self.args.test_images_file)
        self.test_images = self.test_images.reshape(
            (self.test_images.shape[0], -1))
        log('[Testing] read %s' % self.args.test_images_file)

        self.train_images = utils.read_hdf5(self.args.train_images_file)
        self.train_images = self.train_images.reshape(
            (self.train_images.shape[0], -1))
        log('[Testing] read %s' % self.args.train_images_file)

        self.train_theta = utils.read_hdf5(self.args.train_theta_file)
        log('[Testing] read %s' % self.args.train_theta_file)

        self.test_theta = utils.read_hdf5(self.args.test_theta_file)
        log('[Testing] read %s' % self.args.test_theta_file)

        self.test_codes = utils.read_hdf5(self.args.test_codes_file)
        self.test_codes = self.test_codes[:, self.args.label_index]
        log('[Testing] read %s' % self.args.test_codes_file)

        self.perturbations = utils.read_hdf5(self.args.perturbations_file)
        if len(self.perturbations.shape) > 3:
            self.perturbations = self.perturbations.reshape(
                (self.perturbations.shape[0], self.perturbations.shape[1], -1))
        self.perturbation_images = self.test_images[:self.perturbations.shape[1]]
        self.perturbation_codes = self.test_codes[:self.perturbations.shape[1]]
        log('[Testing] read %s' % self.args.perturbations_file)
        assert not numpy.any(
            self.perturbations != self.perturbations), 'NaN in perturbations'

        self.success = utils.read_hdf5(self.args.success_file)
        log('[Testing] read %s' % self.args.success_file)

        self.accuracy = utils.read_hdf5(self.args.accuracy_file)
        self.accuracy = self.accuracy[:self.perturbations.shape[1]]
        log('[Testing] read %s' % self.args.accuracy_file)
Code example #23
    def main(self):
        """
        Main method.
        """

        images = utils.read_hdf5(self.args.images_file)
        log('read %s' % self.args.images_file)

        rows = 10
        cols = 10
        log('images: %s' % 'x'.join([str(dim) for dim in images.shape]))
        for n in range(
                min(self.args.max_images, images.shape[0] // (rows * cols))):
            log('%d/%d' % ((n + 1) * rows * cols, images.shape[0]))
            plot_file = os.path.join(self.args.output_directory,
                                     str(n) + '.png')
            vis.mosaic(plot_file,
                       images[n * rows * cols:(n + 1) * rows * cols, :],
                       cols=10)
            log('wrote %s' % plot_file)
Code example #24
    def main(self):
        """
        Main method, intended to be overridden.
        """

        test_images = utils.read_hdf5(self.args.test_images_file)
        log('[Sampling] read %s' % self.args.test_images_file)

        if len(test_images.shape) < 4:
            test_images = numpy.expand_dims(test_images, axis=3)

        network_units = list(map(int, self.args.network_units.split(',')))
        self.decoder = models.LearnedDecoder(
            self.args.latent_space_size,
            resolution=(test_images.shape[3], test_images.shape[1],
                        test_images.shape[2]),
            architecture=self.args.network_architecture,
            start_channels=self.args.network_channels,
            activation=self.args.network_activation,
            batch_normalization=not self.args.network_no_batch_normalization,
            units=network_units)
        log(self.decoder)

        assert os.path.exists(self.args.decoder_file)
        state = State.load(self.args.decoder_file)
        log('[Sampling] loaded %s' % self.args.decoder_file)

        self.decoder.load_state_dict(state.model)
        log('[Sampling] loaded decoder')

        if self.args.use_gpu and not cuda.is_cuda(self.decoder):
            self.decoder = self.decoder.cuda()

        log('[Sampling] model needs %gMiB' %
            ((cuda.estimate_size(self.decoder)) / (1024 * 1024)))
        self.sample()
Code example #25
    def load_model_and_scheduler(self):
        """
        Load model.
        """

        params = {
            'lr': self.args.lr,
            'lr_decay': self.args.lr_decay,
            'lr_min': 0.0000001,
            'weight_decay': self.args.weight_decay,
        }

        log('[Training] using %d input channels' % self.train_images.shape[3])
        network_units = list(map(int, self.args.network_units.split(',')))
        self.model = models.Classifier(
            self.N_class,
            resolution=(self.train_images.shape[3], self.train_images.shape[1],
                        self.train_images.shape[2]),
            architecture=self.args.network_architecture,
            activation=self.args.network_activation,
            batch_normalization=not self.args.network_no_batch_normalization,
            start_channels=self.args.network_channels,
            dropout=self.args.network_dropout,
            units=network_units)

        self.epoch = 0
        if os.path.exists(self.args.state_file):
            state = State.load(self.args.state_file)
            log('[Training] loaded %s' % self.args.state_file)

            self.model.load_state_dict(state.model)

            # Needs to be done before constructing the optimizer.
            if self.args.use_gpu and not cuda.is_cuda(self.model):
                self.model = self.model.cuda()
                log('[Training] model is not CUDA')
            log('[Training] loaded model')

            optimizer = torch.optim.Adam(self.model.parameters(), params['lr'])
            optimizer.load_state_dict(state.optimizer)
            self.scheduler = ADAMScheduler(optimizer, **params)

            self.epoch = state.epoch + 1
            self.scheduler.update(self.epoch)

            assert os.path.exists(self.args.training_file) and os.path.exists(
                self.args.testing_file)
            self.train_statistics = utils.read_hdf5(self.args.training_file)
            log('[Training] read %s' % self.args.training_file)
            self.test_statistics = utils.read_hdf5(self.args.testing_file)
            log('[Training] read %s' % self.args.testing_file)

            if utils.display():
                self.plot()
        else:
            if self.args.use_gpu and not cuda.is_cuda(self.model):
                self.model = self.model.cuda()
                log('[Training] model is not CUDA')
            log('[Training] did not load model, using new one')

            self.scheduler = ADAMScheduler(self.model.parameters(), **params)
            self.scheduler.initialize()  # Explicit initialization when starting from a fresh model.

        log(self.model)
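
The State object bundles everything needed to resume training: model weights, optimizer state, and the last epoch. Its implementation is not shown; a minimal sketch consistent with how it is used above (attribute names are taken from the calls, the file layout is an assumption):

import torch

class State:
    def __init__(self, model, optimizer, epoch):
        self.model = model          # a model state_dict
        self.optimizer = optimizer  # an optimizer state_dict
        self.epoch = epoch

    @classmethod
    def load(cls, filepath):
        d = torch.load(filepath)
        return cls(d['model'], d['optimizer'], d['epoch'])

    def save(self, filepath):
        torch.save({'model': self.model,
                    'optimizer': self.optimizer,
                    'epoch': self.epoch}, filepath)
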
Code example #26
    def load_data(self):
        """
        Load data.
        """

        assert self.args.batch_size % 4 == 0

        self.train_images = utils.read_hdf5(
            self.args.train_images_file).astype(numpy.float32)
        log('[Training] read %s' % self.args.train_images_file)

        self.test_images = utils.read_hdf5(self.args.test_images_file).astype(
            numpy.float32)
        log('[Training] read %s' % self.args.test_images_file)

        # For handling both color and gray images.
        if len(self.train_images.shape) < 4:
            self.train_images = numpy.expand_dims(self.train_images, axis=3)
            self.test_images = numpy.expand_dims(self.test_images, axis=3)
            log('[Training] no color images, adjusted size')
        self.resolution = self.train_images.shape[2]
        log('[Training] resolution %d' % self.resolution)

        self.train_codes = utils.read_hdf5(self.args.train_codes_file).astype(
            numpy.int)
        assert self.train_codes.shape[1] >= self.args.label_index + 1
        self.train_codes = self.train_codes[:, self.args.label_index]
        log('[Training] read %s' % self.args.train_codes_file)

        self.test_codes = utils.read_hdf5(self.args.test_codes_file).astype(
            numpy.int)
        assert self.test_codes.shape[1] >= self.args.label_index + 1
        self.test_codes = self.test_codes[:, self.args.label_index]
        log('[Training] read %s' % self.args.test_codes_file)

        assert self.train_codes.shape[0] == self.train_images.shape[0]
        assert self.test_codes.shape[0] == self.test_images.shape[0]

        # Select subset of samples
        if self.args.training_samples < 0:
            self.args.training_samples = self.train_images.shape[0]
        else:
            self.args.training_samples = min(self.args.training_samples,
                                             self.train_images.shape[0])
        self.N_class = numpy.max(self.train_codes) + 1
        log('[Training] using %d training samples' %
            self.args.training_samples)

        if self.args.test_samples < 0:
            self.args.test_samples = self.test_images.shape[0]
        else:
            self.args.test_samples = min(self.args.test_samples,
                                         self.test_images.shape[0])

        if self.args.early_stopping:
            assert self.args.validation_samples > 0
            assert self.args.training_samples + self.args.validation_samples <= self.train_images.shape[0]
            self.val_images = self.train_images[-self.args.validation_samples:]
            self.val_codes = self.train_codes[-self.args.validation_samples:]
            self.train_images = self.train_images[:-self.args.validation_samples]
            self.train_codes = self.train_codes[:-self.args.validation_samples]
            assert self.val_images.shape[0] == self.args.validation_samples
            assert self.val_codes.shape[0] == self.args.validation_samples

        log('[Training] found %d classes' % self.N_class)
        if self.args.random_samples:
            perm = numpy.random.permutation(self.train_images.shape[0] // 10)
            perm = perm[:self.args.training_samples // 10]
            perm = numpy.repeat(perm, self.N_class, axis=0) * 10 + numpy.tile(
                numpy.array(range(self.N_class)), (perm.shape[0]))
            self.train_images = self.train_images[perm]
            self.train_codes = self.train_codes[perm]
        else:
            self.train_images = self.train_images[:self.args.training_samples]
            self.train_codes = self.train_codes[:self.args.training_samples]

        # Check that the dataset is balanced.
        number_samples = self.train_codes.shape[0] // self.N_class
        for c in range(self.N_class):
            number_samples_ = numpy.sum(self.train_codes == c)
            if number_samples_ != number_samples:
                log(
                    '[Training] dataset not balanced, class %d should have %d samples but has %d'
                    % (c, number_samples, number_samples_), LogLevel.WARNING)
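
The random_samples branch above assumes the training set is stored in class-cyclic order (0..9,0..9,...) with exactly ten classes hard-coded: it permutes whole blocks of ten and then expands each block index back into per-class indices, which keeps the subset balanced. A toy check of that trick:

import numpy

N_class = 10
codes = numpy.tile(numpy.arange(N_class), 50)   # 500 labels, cyclic
base = numpy.random.permutation(codes.shape[0] // N_class)[:20]
perm = numpy.repeat(base, N_class) * N_class + numpy.tile(numpy.arange(N_class), base.shape[0])
selected = codes[perm]
assert numpy.all(numpy.bincount(selected, minlength=N_class) == base.shape[0])
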
Code example #27
    def main(self):
        """
        Main method, intended to be overridden.
        """

        self.train_images = utils.read_hdf5(
            self.args.train_images_file).astype(numpy.float32)
        log('[Testing] read %s' % self.args.train_images_file)

        self.test_images = utils.read_hdf5(self.args.test_images_file).astype(
            numpy.float32)
        log('[Testing] read %s' % self.args.test_images_file)

        # For handling both color and gray images.
        if len(self.train_images.shape) < 4:
            self.train_images = numpy.expand_dims(self.train_images, axis=3)
            self.test_images = numpy.expand_dims(self.test_images, axis=3)
            log('[Testing] no color images, adjusted size')
        self.resolution = self.train_images.shape[2]
        log('[Testing] resolution %d' % self.resolution)

        self.train_codes = utils.read_hdf5(self.args.train_codes_file).astype(
            numpy.float32)
        log('[Testing] read %s' % self.args.train_codes_file)

        self.test_codes = utils.read_hdf5(self.args.test_codes_file).astype(
            numpy.float32)
        log('[Testing] read %s' % self.args.test_codes_file)

        self.train_codes = self.train_codes[:, self.args.label_index]
        self.test_codes = self.test_codes[:, self.args.label_index]

        if self.args.label >= 0:
            self.train_images = self.train_images[self.train_codes ==
                                                  self.args.label]
            self.test_images = self.test_images[self.test_codes ==
                                                self.args.label]

        log('[Testing] using %d input channels' % self.test_images.shape[3])
        network_units = list(map(int, self.args.network_units.split(',')))
        self.encoder = models.LearnedVariationalEncoder(
            self.args.latent_space_size,
            0,
            resolution=(self.train_images.shape[3], self.train_images.shape[1],
                        self.train_images.shape[2]),
            architecture=self.args.network_architecture,
            start_channels=self.args.network_channels,
            activation=self.args.network_activation,
            batch_normalization=not self.args.network_no_batch_normalization,
            units=network_units)
        self.decoder = models.LearnedDecoder(
            self.args.latent_space_size,
            resolution=(self.train_images.shape[3], self.train_images.shape[1],
                        self.train_images.shape[2]),
            architecture=self.args.network_architecture,
            start_channels=self.args.network_channels,
            activation=self.args.network_activation,
            batch_normalization=not self.args.network_no_batch_normalization,
            units=network_units)
        log(self.encoder)
        log(self.decoder)

        assert os.path.exists(self.args.encoder_file) and os.path.exists(
            self.args.decoder_file)
        state = State.load(self.args.encoder_file)
        log('[Testing] loaded %s' % self.args.encoder_file)

        self.encoder.load_state_dict(state.model)
        log('[Testing] loaded encoder')

        state = State.load(self.args.decoder_file)
        log('[Testing] loaded %s' % self.args.decoder_file)

        self.decoder.load_state_dict(state.model)
        log('[Testing] loaded decoder')

        if self.args.use_gpu and not cuda.is_cuda(self.encoder):
            self.encoder = self.encoder.cuda()
        if self.args.use_gpu and not cuda.is_cuda(self.decoder):
            self.decoder = self.decoder.cuda()

        log('[Testing] model needs %gMiB' %
            ((cuda.estimate_size(self.encoder) +
              cuda.estimate_size(self.decoder)) / (1024 * 1024)))
        self.test()
Code example #28
    def load_data(self):
        """
        Load data and model.
        """

        with logw('[Detection] read %s' % self.args.train_images_file):
            self.nearest_neighbor_images = utils.read_hdf5(
                self.args.train_images_file)
            assert len(self.nearest_neighbor_images.shape) == 3

        with logw('[Detection] read %s' % self.args.test_images_file):
            self.test_images = utils.read_hdf5(self.args.test_images_file)
            if len(self.test_images.shape) < 4:
                self.test_images = numpy.expand_dims(self.test_images, axis=3)

        with logw('[Detection] read %s' % self.args.train_codes_file):
            self.train_codes = utils.read_hdf5(self.args.train_codes_file)

        with logw('[Detection] read %s' % self.args.test_codes_file):
            self.test_codes = utils.read_hdf5(self.args.test_codes_file)

        with logw('[Detection] read %s' % self.args.test_theta_file):
            self.test_theta = utils.read_hdf5(self.args.test_theta_file)

        with logw('[Detection] read %s' % self.args.perturbations_file):
            self.perturbations = utils.read_hdf5(self.args.perturbations_file)
            assert len(self.perturbations.shape) == 3

        with logw('[Detection] read %s' % self.args.success_file):
            self.success = utils.read_hdf5(self.args.success_file)

        with logw('[Detection] read %s' % self.args.accuracy_file):
            self.accuracy = utils.read_hdf5(self.args.accuracy_file)

        self.perturbations = numpy.swapaxes(self.perturbations, 0, 1)
        num_attempts = self.perturbations.shape[1]
        self.test_images = self.test_images[:self.perturbations.shape[0]]
        self.train_images = self.nearest_neighbor_images[:self.perturbations.shape[0]]
        self.test_codes = self.test_codes[:self.perturbations.shape[0]]
        self.accuracy = self.accuracy[:self.perturbations.shape[0]]
        self.test_theta = self.test_theta[:self.perturbations.shape[0]]

        self.perturbations = self.perturbations.reshape(
            (self.perturbations.shape[0] * self.perturbations.shape[1],
             self.perturbations.shape[2]))
        self.success = numpy.swapaxes(self.success, 0, 1)
        self.success = self.success.reshape(
            (self.success.shape[0] * self.success.shape[1]))

        self.accuracy = numpy.repeat(self.accuracy, num_attempts, axis=0)
        self.test_images = numpy.repeat(self.test_images, num_attempts, axis=0)
        self.train_images = numpy.repeat(self.train_images,
                                         num_attempts,
                                         axis=0)
        self.test_codes = numpy.repeat(self.test_codes, num_attempts, axis=0)
        self.test_theta = numpy.repeat(self.test_theta, num_attempts, axis=0)

        max_samples = self.args.max_samples
        self.success = self.success[:max_samples]
        self.accuracy = self.accuracy[:max_samples]
        self.perturbations = self.perturbations[:max_samples]
        self.test_images = self.test_images[:max_samples]
        self.train_images = self.train_images[:max_samples]
        self.test_codes = self.test_codes[:max_samples]
        self.test_theta = self.test_theta[:max_samples]

        with logw('[Testing] read %s' % self.args.database_file):
            database = utils.read_hdf5(self.args.database_file)

            self.N_font = database.shape[0]
            self.N_class = database.shape[1]
            self.N_theta = self.test_theta.shape[1]

            database = database.reshape((database.shape[0] * database.shape[1],
                                         database.shape[2], database.shape[3]))
            database = torch.from_numpy(database)
            if self.args.use_gpu:
                database = database.cuda()
            database = torch.autograd.Variable(database, False)

            self.model = models.AlternativeOneHotDecoder(
                database, self.N_font, self.N_class, self.N_theta)
            self.model.eval()

        self.compute_images()
Code example #29
    def attack(self):
        """
        Run the attack against the model.
        """

        assert self.model is not None
        assert self.model.classifier.training is False

        concatenate_axis = -1
        if os.path.exists(self.args.perturbations_file) and os.path.exists(self.args.success_file):
            self.original_perturbations = utils.read_hdf5(self.args.perturbations_file)
            assert len(self.original_perturbations.shape) == 3, self.original_perturbations.shape
            log('[Attack] read %s' % self.args.perturbations_file)

            self.original_success = utils.read_hdf5(self.args.success_file)
            log('[Attack] read %s' % self.args.success_file)

            assert self.original_perturbations.shape[0] == self.original_success.shape[0]
            assert self.original_perturbations.shape[1] == self.original_success.shape[1]
            assert self.original_perturbations.shape[2] == self.test_theta.shape[1]

            if self.original_perturbations.shape[1] <= self.args.max_samples and self.original_perturbations.shape[0] <= self.args.max_attempts:
                log('[Attack] found %d attempts, %d samples, requested no more' % (self.original_perturbations.shape[0], self.original_perturbations.shape[1]))
                return
            elif self.original_perturbations.shape[0] == self.args.max_attempts or self.original_perturbations.shape[1] == self.args.max_samples:
                if self.original_perturbations.shape[0] == self.args.max_attempts:
                    self.test_theta = self.test_theta[self.original_perturbations.shape[1]:]
                    self.test_fonts = self.test_fonts[self.original_perturbations.shape[1]:]
                    self.test_classes = self.test_classes[self.original_perturbations.shape[1]:]
                    self.args.max_samples = self.args.max_samples - self.original_perturbations.shape[1]
                    concatenate_axis = 1
                    log('[Attack] found %d attempts with %d perturbations, computing %d more perturbations' % (
                    self.original_perturbations.shape[0], self.original_perturbations.shape[1], self.args.max_samples))
                elif self.original_perturbations.shape[1] == self.args.max_samples:
                    self.args.max_attempts = self.args.max_attempts - self.original_perturbations.shape[0]
                    concatenate_axis = 0
                    log('[Attack] found %d attempts with %d perturbations, computing %d more attempts' % (
                    self.original_perturbations.shape[0], self.original_perturbations.shape[1], self.args.max_attempts))

        self.perturbations = numpy.zeros((self.args.max_attempts, self.args.max_samples, self.test_theta.shape[1]))
        self.success = numpy.ones((self.args.max_attempts, self.args.max_samples), dtype=int) * -1

        if self.args.attack.find('Batch') >= 0:
            batch_size = min(self.args.batch_size, self.args.max_samples)
        else:
            batch_size = 1

        objective = self.objective_class()
        num_batches = int(math.ceil(self.args.max_samples/batch_size))

        for i in range(num_batches):
            if i*batch_size == self.args.max_samples:
                break

            i_start = i * batch_size
            i_end = min((i + 1) * batch_size, self.args.max_samples)

            batch_fonts = self.test_fonts[i_start: i_end]
            batch_classes = self.test_classes[i_start: i_end]
            batch_code = numpy.concatenate((common.numpy.one_hot(batch_fonts, self.N_font), common.numpy.one_hot(batch_classes, self.N_class)), axis=1).astype(numpy.float32)

            batch_classes = common.torch.as_variable(batch_classes, self.args.use_gpu)
            batch_inputs = common.torch.as_variable(self.test_theta[i_start: i_end], self.args.use_gpu)
            batch_code = common.torch.as_variable(batch_code, self.args.use_gpu)

            t = 0
            # Fix the font/class code so that the attack only optimizes over theta.
            self.model.decoder.set_code(batch_code)

            while t < self.args.max_attempts:
                attack = self.setup_attack(batch_inputs, batch_classes)
                success, perturbations, probabilities, norm, _ = attack.run(objective)
                assert not numpy.any(perturbations != perturbations), perturbations

                # Note that we save the perturbed theta, not only the perturbation!
                perturbations = perturbations.reshape(batch_inputs.size())  # hack for when only one dimensional latent space is used!
                self.perturbations[t][i_start: i_end] = perturbations + batch_inputs.cpu().numpy()
                self.success[t][i_start: i_end] = success
                t += 1

            log('[Attack] %d: completed' % i)

        if concatenate_axis >= 0:
            if self.perturbations.shape[0] == self.args.max_attempts:
                self.perturbations = numpy.concatenate((self.original_perturbations, self.perturbations), axis=concatenate_axis)
                self.success = numpy.concatenate((self.original_success, self.success), axis=concatenate_axis)
                log('[Attack] concatenated')

        utils.write_hdf5(self.args.perturbations_file, self.perturbations)
        log('[Attack] wrote %s' % self.args.perturbations_file)
        utils.write_hdf5(self.args.success_file, self.success)
        log('[Attack] wrote %s' % self.args.success_file)
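
common.numpy.one_hot is used above to build the font/class code; its implementation is not shown. A minimal sketch of such a helper (an assumption, not the original implementation):

import numpy

def one_hot(indices, N):
    # Convert integer class indices into one-hot rows of length N.
    result = numpy.zeros((indices.shape[0], N), dtype=numpy.float32)
    result[numpy.arange(indices.shape[0]), indices] = 1
    return result
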
Code example #30
    def load_data_and_model(self):
        """
        Load data and model.
        """

        database = utils.read_hdf5(self.args.database_file).astype(
            numpy.float32)
        log('[Visualization] read %s' % self.args.database_file)

        N_font = database.shape[0]
        N_class = database.shape[1]
        resolution = database.shape[2]

        database = database.reshape((database.shape[0] * database.shape[1],
                                     database.shape[2], database.shape[3]))
        database = torch.from_numpy(database)
        if self.args.use_gpu:
            database = database.cuda()
        database = torch.autograd.Variable(database, False)

        self.test_images = utils.read_hdf5(self.args.test_images_file).astype(
            numpy.float32)
        if len(self.test_images.shape) < 4:
            self.test_images = numpy.expand_dims(self.test_images, axis=3)

        self.perturbations = utils.read_hdf5(
            self.args.perturbations_file).astype(numpy.float32)
        self.perturbations = numpy.swapaxes(self.perturbations, 0, 1)
        log('[Visualization] read %s' % self.args.perturbations_file)

        self.success = utils.read_hdf5(self.args.success_file)
        self.success = numpy.swapaxes(self.success, 0, 1)
        log('[Visualization] read %s' % self.args.success_file)

        self.accuracy = utils.read_hdf5(self.args.accuracy_file)
        log('[Visualization] read %s' % self.args.accuracy_file)

        self.test_theta = utils.read_hdf5(self.args.test_theta_file).astype(
            numpy.float32)
        self.test_theta = self.test_theta[:self.perturbations.shape[0]]
        N_theta = self.test_theta.shape[1]
        log('[Visualization] using %d N_theta' % N_theta)
        log('[Visualization] read %s' % self.args.test_theta_file)

        self.test_codes = utils.read_hdf5(self.args.test_codes_file).astype(
            numpy.int)
        self.test_codes = self.test_codes[:self.perturbations.shape[0]]
        self.test_codes = self.test_codes[:, 1:3]
        self.test_codes = numpy.concatenate(
            (common.numpy.one_hot(self.test_codes[:, 0], N_font),
             common.numpy.one_hot(self.test_codes[:, 1], N_class)),
            axis=1).astype(numpy.float32)
        log('[Visualization] read %s' % self.args.test_codes_file)

        image_channels = 1 if N_theta <= 7 else 3
        network_units = list(map(int, self.args.network_units.split(',')))
        log('[Visualization] using %d input channels' % image_channels)
        self.classifier = models.Classifier(
            N_class,
            resolution=(image_channels, resolution, resolution),
            architecture=self.args.network_architecture,
            activation=self.args.network_activation,
            batch_normalization=not self.args.network_no_batch_normalization,
            start_channels=self.args.network_channels,
            dropout=self.args.network_dropout,
            units=network_units)
        self.decoder = models.AlternativeOneHotDecoder(database, N_font,
                                                       N_class, N_theta)
        self.decoder.eval()

        assert os.path.exists(
            self.args.classifier_file
        ), 'state file %s not found' % self.args.classifier_file
        state = State.load(self.args.classifier_file)
        log('[Visualization] read %s' % self.args.classifier_file)

        self.classifier.load_state_dict(state.model)
        if self.args.use_gpu and not cuda.is_cuda(self.classifier):
            log('[Visualization] classifier is not CUDA')
            self.classifier = self.classifier.cuda()
        log('[Visualization] loaded classifier')

        self.classifier.eval()
        log('[Visualization] set classifier to eval')