Example no. 1
    def forward(self, input):
        """
        Wrapper forward function that also allows calling forward on only the code or theta
        after fixing the other one.

        The fixed one should not allow gradients.

        :param input: code or theta
        :type input: torch.autograd.Variable
        :return: output image
        :rtype: torch.autograd.Variable
        """

        assert self._code is not None

        use_gpu = cuda.is_cuda(self.decoders[0])
        output = torch.zeros([
            self._code.size()[0], self.resolution[0], self.resolution[1],
            self.resolution[2]
        ])
        if use_gpu:
            output = output.cuda()
        output = torch.autograd.Variable(output)

        for c in range(len(self.decoders)):
            if torch.sum(self._code == c).item() > 0:
                input_ = input[self._code == c]
                output_ = self.decoders[c].forward(input_)
                # workaround for in-place assignments ...
                output[self._code == c] = output_

        return output
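The loop above routes each sample to the decoder selected by its code via boolean-mask indexing and scatters the per-decoder outputs back into a shared output tensor. Below is a minimal, self-contained sketch of this split/merge pattern; the decoder modules, shapes and code tensor are made up for illustration.

import torch

# Hypothetical per-code decoders: each maps a 2-d latent vector to a 1x4x4 "image".
decoders = [torch.nn.Linear(2, 16), torch.nn.Linear(2, 16)]

code = torch.tensor([0, 1, 0, 1])   # decoder index assigned to each sample
latents = torch.randn(4, 2)         # one latent vector per sample
output = torch.zeros(4, 1, 4, 4)

for c, decoder in enumerate(decoders):
    mask = code == c
    if mask.sum().item() > 0:
        # Decode only the samples assigned to decoder c and scatter the results back.
        output[mask] = decoder(latents[mask]).view(-1, 1, 4, 4)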
Example no. 2
    def load_models(self):
        """
        Load models.
        """

        self.N_class = numpy.max(self.test_codes) + 1
        network_units = list(map(int, self.args.network_units.split(',')))
        log('[Testing] using %d input channels' % self.test_images.shape[3])
        self.model = models.Classifier(
            self.N_class,
            resolution=(self.test_images.shape[3], self.test_images.shape[1],
                        self.test_images.shape[2]),
            architecture=self.args.network_architecture,
            activation=self.args.network_activation,
            batch_normalization=not self.args.network_no_batch_normalization,
            start_channels=self.args.network_channels,
            dropout=self.args.network_dropout,
            units=network_units)
        assert os.path.exists(
            self.args.classifier_file
        ), 'state file %s not found' % self.args.classifier_file
        state = State.load(self.args.classifier_file)
        log('[Testing] read %s' % self.args.classifier_file)

        self.model.load_state_dict(state.model)
        if self.args.use_gpu and not cuda.is_cuda(self.model):
            log('[Testing] classifier is not CUDA')
            self.model = self.model.cuda()
        log('[Testing] loaded classifier')

        # !
        self.model.eval()
        log('[Testing] set classifier to eval')
    def set_encoder_bound(self, encoder, min, max):
        """
        Set a bound on the encoder of the perturbation; this makes it easy to enforce "on-manifold constraints".
        If None is specified, bounds are not enforced.

        :param encoder: encoder
        :type encoder: torch.nn.Module
        :param min: minimum bound
        :type min: float, torch.Tensor or None
        :param max: maximum bound
        :type max: float, torch.Tensor or None
        """

        assert isinstance(min, torch.Tensor) or isinstance(
            min, float) or min is None, 'min needs to be float or torch.Tensor'
        assert isinstance(max, torch.Tensor) or isinstance(
            max, float) or max is None, 'max needs to be float or torch.Tensor'
        assert isinstance(
            encoder, torch.nn.Module), 'encoder needs to be torch.nn.Module'

        self.encoder = encoder

        if min is None:
            self.encoder_min_bound = None
        else:
            if isinstance(min, torch.Tensor):
                self.encoder_min_bound = min
            elif isinstance(min, float):
                self.encoder_min_bound = torch.ones_like(self.images) * min

            if cuda.is_cuda(self.model):
                self.encoder_min_bound = self.encoder_min_bound.cuda()

        if max is None:
            self.encoder_max_bound = None
        else:
            if isinstance(max, torch.Tensor):
                self.encoder_max_bound = max
            elif isinstance(max, float):
                self.encoder_max_bound = torch.ones_like(self.images) * max

            if cuda.is_cuda(self.model):
                self.encoder_max_bound = self.encoder_max_bound.cuda()
Example no. 4
    def initialize_optimizer(self):
        """
        Initialize optimizer and parameters to optimize.
        """

        if cuda.is_cuda(self.model):
            self.perturbations = self.perturbations.cuda()

        self.perturbations = torch.nn.Parameter(self.perturbations.data)
        self.optimizer = torch.optim.Adam([self.perturbations],
                                          lr=self.base_lr)
    def initialize_optimizer(self):
        """
        Initialize optimizer and parameters to optimize.
        """

        if cuda.is_cuda(self.model):
            self.w = self.w.cuda()

        # We directly optimize the reparameterized variables w!
        self.w = torch.nn.Parameter(self.w.data)
        self.optimizer = torch.optim.Adam([self.w], lr=self.base_lr)
Example no. 6
    def initialize_zero(self):
        """
        Initialize the attack.
        """

        zeros = numpy.zeros(self.images.size())
        self.perturbations = torch.from_numpy(zeros.astype(numpy.float32))
        if cuda.is_cuda(self.model):
            self.perturbations = self.perturbations.cuda()
        self.perturbations = torch.autograd.Variable(self.perturbations,
                                                     requires_grad=True)
Example no. 7
    def main(self):
        """
        Main method; should be overridden.
        """

        self.test_images = utils.read_hdf5(self.args.test_images_file).astype(
            numpy.float32)
        log('[Testing] read %s' % self.args.test_images_file)

        # For handling both color and gray images.
        if len(self.test_images.shape) < 4:
            self.test_images = numpy.expand_dims(self.test_images, axis=3)
            log('[Testing] no color images, adjusted size')
        self.resolution = self.test_images.shape[2]
        log('[Testing] resolution %d' % self.resolution)

        self.test_codes = utils.read_hdf5(self.args.test_codes_file).astype(
            numpy.int)
        self.test_codes = self.test_codes[:, self.args.label_index]
        log('[Testing] read %s' % self.args.test_codes_file)

        N_class = numpy.max(self.test_codes) + 1
        network_units = list(map(int, self.args.network_units.split(',')))
        log('[Testing] using %d input channels' % self.test_images.shape[3])
        self.model = models.Classifier(
            N_class,
            resolution=(self.test_images.shape[3], self.test_images.shape[1],
                        self.test_images.shape[2]),
            architecture=self.args.network_architecture,
            activation=self.args.network_activation,
            batch_normalization=not self.args.network_no_batch_normalization,
            start_channels=self.args.network_channels,
            dropout=self.args.network_dropout,
            units=network_units)

        assert os.path.exists(
            self.args.state_file
        ), 'state file %s not found' % self.args.state_file
        state = State.load(self.args.state_file)
        log('[Testing] read %s' % self.args.state_file)

        self.model.load_state_dict(state.model)
        if self.args.use_gpu and not cuda.is_cuda(self.model):
            log('[Testing] model is not CUDA')
            self.model = self.model.cuda()
        log('[Testing] loaded model')

        self.model.eval()
        log('[Testing] set classifier to eval')

        self.test()
    def load_model(self):
        """
        Load model.
        """

        database = utils.read_hdf5(self.args.database_file).astype(numpy.float32)
        log('[Attack] read %s' % self.args.database_file)

        self.N_font = database.shape[0]
        self.N_class = database.shape[1]
        resolution = database.shape[2]

        database = database.reshape((database.shape[0] * database.shape[1], database.shape[2], database.shape[3]))
        database = torch.from_numpy(database)
        if self.args.use_gpu:
            database = database.cuda()
        database = torch.autograd.Variable(database, False)

        N_theta = self.test_theta.shape[1]
        log('[Attack] using %d N_theta' % N_theta)
        decoder = models.AlternativeOneHotDecoder(database, self.N_font, self.N_class, N_theta)
        decoder.eval()

        image_channels = 1 if N_theta <= 7 else 3
        network_units = list(map(int, self.args.network_units.split(',')))
        log('[Attack] using %d input channels' % image_channels)
        classifier = models.Classifier(self.N_class, resolution=(image_channels, resolution, resolution),
                                       architecture=self.args.network_architecture,
                                       activation=self.args.network_activation,
                                       batch_normalization=not self.args.network_no_batch_normalization,
                                       start_channels=self.args.network_channels,
                                       dropout=self.args.network_dropout,
                                       units=network_units)

        assert os.path.exists(self.args.classifier_file), 'state file %s not found' % self.args.classifier_file
        state = State.load(self.args.classifier_file)
        log('[Attack] read %s' % self.args.classifier_file)

        classifier.load_state_dict(state.model)
        if self.args.use_gpu and not cuda.is_cuda(classifier):
            log('[Attack] classifier is not CUDA')
            classifier = classifier.cuda()
        log('[Attack] loaded classifier')

        # !
        classifier.eval()
        log('[Attack] set classifier to eval')

        self.model = models.DecoderClassifier(decoder, classifier)
Example no. 9
    def __init__(self, decoder, classifier):
        """
        Constructor.

        :param decoder: decoder
        :type decoder: torch.nn.Module
        :param classifier: classifier
        :type classifier: torch.nn.Module
        """

        assert isinstance(decoder, Decoder) or isinstance(
            decoder, LearnedDecoder) or isinstance(
                decoder, SelectiveDecoder) or isinstance(decoder, STNDecoder)
        assert isinstance(classifier, Classifier)

        super(DecoderClassifier, self).__init__()
        assert cuda.is_cuda(decoder) == cuda.is_cuda(
            classifier), 'decoder and classifier have to be both cuda or not'

        self.decoder = decoder
        """ (torch.nn.Module) Decoder. """

        self.classifier = classifier
        """ (torch.nn.Module) Classifier. """
    def set_bound(self, min, max):
        """
        Set minimum and maximum bound; if None is specified, bounds are not enforced.

        :param min: minimum bound
        :type min: float, torch.Tensor or None
        :param max: maximum bound
        :type max: float, torch.Tensor or None
        """

        assert isinstance(min, torch.Tensor) or isinstance(
            min, float) or min is None, 'min needs to be float or torch.Tensor'
        assert isinstance(max, torch.Tensor) or isinstance(
            max, float) or max is None, 'max needs to be float or torch.Tensor'

        if min is None:
            self.min_bound = None
        else:
            if isinstance(min, torch.Tensor):
                self.min_bound = min
            elif isinstance(min, float):
                self.min_bound = torch.ones_like(self.images) * min

            if cuda.is_cuda(self.model):
                self.min_bound = self.min_bound.cuda()

        if max is None:
            self.max_bound = None
        else:
            if isinstance(max, torch.Tensor):
                self.max_bound = max
            elif isinstance(max, float):
                self.max_bound = torch.ones_like(self.images) * max

            if cuda.is_cuda(self.model):
                self.max_bound = self.max_bound.cuda()
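How the stored min_bound/max_bound tensors are enforced is not shown in this excerpt; the project() step called during the attack presumably clamps the perturbed images element-wise. A minimal sketch under that assumption (the helper name clamp_to_bounds is made up):

import torch

def clamp_to_bounds(images, perturbations, min_bound=None, max_bound=None):
    # Clamp images + perturbations into [min_bound, max_bound] element-wise and
    # return the adjusted perturbations; None bounds are not enforced.
    perturbed = images + perturbations
    if max_bound is not None:
        perturbed = torch.min(perturbed, max_bound)
    if min_bound is not None:
        perturbed = torch.max(perturbed, min_bound)
    return perturbed - images

images = torch.rand(2, 1, 8, 8)
perturbations = 0.2 * torch.randn_like(images)
min_bound = torch.zeros_like(images)
max_bound = torch.ones_like(images)
perturbations = clamp_to_bounds(images, perturbations, min_bound, max_bound)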
    def initialize_zero(self):
        """
        Initialize the attack.
        """

        assert self.min_bound is not None, 'reparameterization only works with valid upper and lower bounds'
        assert self.max_bound is not None, 'reparameterization only works with valid upper and lower bounds'

        self.w = numpy.arctanh(
            (2 - self.EPS) *
            (self.images.data.cpu().numpy() - self.min_bound.cpu().numpy()) /
            (self.max_bound.cpu().numpy() - self.min_bound.cpu().numpy()) - 1 +
            self.EPS)
        self.w = torch.from_numpy(self.w)

        if cuda.is_cuda(self.model):
            self.w = torch.autograd.Variable(self.w.cuda(), requires_grad=True)
        else:
            self.w = torch.autograd.Variable(self.w, requires_grad=True)
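The arctanh expression above is the usual change of variables from Carlini-Wagner-style attacks: w can be optimized without constraints, and mapping it back through tanh keeps the corresponding images inside [min_bound, max_bound] up to EPS. A small self-contained round-trip check, with made-up bounds and images and an inverse map assumed from the initialization formula:

import numpy

EPS = 1e-6
min_bound, max_bound = 0.0, 1.0
images = numpy.array([0.1, 0.5, 0.9])

# Forward map used in the initialization: image -> unconstrained variable w.
w = numpy.arctanh((2 - EPS) * (images - min_bound) / (max_bound - min_bound) - 1 + EPS)

# Inverse map (assumed to be applied when evaluating perturbed images): any real-valued
# w lands back in (roughly) [min_bound, max_bound] because tanh(w) stays in (-1, 1).
reconstructed = min_bound + (max_bound - min_bound) * (numpy.tanh(w) + 1 - EPS) / (2 - EPS)

assert numpy.allclose(reconstructed, images)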
    def project_auto_encoder(self, perturbations):
        """
        Project the current image + perturbation onto the manifold and deduce the new perturbation from it.

        :param perturbations: current perturbations
        :type perturbations: torch.Tensor
        :return: projected perturbations
        :rtype: torch.Tensor
        """

        assert self.auto_encoder is not None, 'called project_auto_encoder without setting the auto encoder first'
        assert isinstance(
            perturbations,
            torch.Tensor), 'given perturbation needs to be torch.Tensor'

        images = torch.autograd.Variable(self.images + perturbations, False)
        if cuda.is_cuda(self.auto_encoder):
            images = images.cuda()

        reconstruction, _, _ = self.auto_encoder.forward(images)
        return reconstruction.data - self.images.data  # Retrieve the perturbation from the projected image!
    def main(self):
        """
        Main method; should be overridden.
        """

        test_images = utils.read_hdf5(self.args.test_images_file)
        log('[Sampling] read %s' % self.args.test_images_file)

        if len(test_images.shape) < 4:
            test_images = numpy.expand_dims(test_images, axis=3)

        network_units = list(map(int, self.args.network_units.split(',')))
        self.decoder = models.LearnedDecoder(
            self.args.latent_space_size,
            resolution=(test_images.shape[3], test_images.shape[1],
                        test_images.shape[2]),
            architecture=self.args.network_architecture,
            start_channels=self.args.network_channels,
            activation=self.args.network_activation,
            batch_normalization=not self.args.network_no_batch_normalization,
            units=network_units)
        log(self.decoder)

        assert os.path.exists(self.args.decoder_file)
        state = State.load(self.args.decoder_file)
        log('[Sampling] loaded %s' % self.args.decoder_file)

        self.decoder.load_state_dict(state.model)
        log('[Sampling] loaded decoder')

        if self.args.use_gpu and not cuda.is_cuda(self.decoder):
            self.decoder = self.decoder.cuda()

        log('[Sampling] model needs %gMiB' %
            ((cuda.estimate_size(self.decoder)) / (1024 * 1024)))
        self.sample()
Example no. 14
    def run(self, untargeted_objective, verbose=True):
        """
        Run the attack.

        :param untargeted_objective: untargeted objective
        :type untargeted_objective: UntargetedObjective
        :param verbose: output progress
        :type verbose: bool
        :return: success, perturbations, probabilities, norms, iteration
        :rtype: numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray, int
        """

        assert isinstance(
            untargeted_objective, UntargetedObjective
        ), 'expected an objective of type UntargetedObjective, got %s instead' % untargeted_objective

        # self.perturbations IS TRANSFERRED TO CUDA IN INITIALIZATION AS self.perturbations.grad
        # WILL NOT WORK ON NON-LEAF VARIABLES!

        self.gradients = torch.zeros_like(self.perturbations)
        #self.gradients = torch.autograd.Variable(self.gradients, requires_grad=False)
        if cuda.is_cuda(self.model):
            self.gradients = self.gradients.cuda()

        # Will hold the individually best results (however, optimization
        # is run for all samples until all are successful or the maximum number
        # of iterations is reached).
        success = numpy.ones(
            (self.perturbations.size()[0]), dtype=numpy.int32) * -1
        success_error = numpy.zeros((self.perturbations.size()[0]),
                                    dtype=numpy.float32)
        success_perturbations = numpy.zeros(self.perturbations.size(),
                                            dtype=numpy.float32)
        success_probabilities = numpy.zeros(
            (self.perturbations.size()[0], self.logits.size()[1]),
            dtype=numpy.float32)
        success_norms = numpy.zeros((self.perturbations.size()[0]),
                                    dtype=numpy.float32)

        i = 0
        gradient = 0

        for i in range(self.max_iterations + 1):
            # MAIN LOOP OF ATTACK
            # ORDER IMPORTANT
            self.gradients.zero_()

            # 0/
            # Project current perturbation.
            self.project()

            # 1/
            # Reset gradients and compute the logits for the current perturbations.
            # This is not applicable to batches, so check that the logits have been computed for one sample
            # only.
            self.gradients.zero_()
            output_logits = self.model.forward(self.images +
                                               self.perturbations)

            # 2/
            # Compute current probabilities and current class.
            # This is mainly used as return value and to check the break criterion, i.e.
            # if the class actually changes.
            output_probabilities = torch.nn.functional.softmax(
                output_logits, 1)
            _, other_classes = torch.max(output_probabilities, 1)

            # 3/
            # Compute the objective to take gradients from.
            error = untargeted_objective.f(output_logits, self.logits,
                                           self.classes)

            # 4/
            # Logging and break condition.
            check_norm = self.norm()
            batch_size = self.images.size()[0]

            for b in range(self.perturbations.size()[0]):
                # We explicitly do not check the norm here.
                # This allows evaluating success and average distance separately.
                if self.training_mode:
                    if error[b].item() < success_error[b] or success[b] < 0:
                        if other_classes.data[b] != self.classes.data[
                                b] and success[b] < 0:
                            success[b] = i
                        success_error[b] = error[b].data.cpu()
                        success_perturbations[b] = numpy.copy(
                            self.perturbations[b].data.cpu().numpy())
                        success_probabilities[b] = output_probabilities[
                            b].data.cpu()
                        success_norms[b] = check_norm[b].data.cpu()
                else:
                    if other_classes.data[b] != self.classes.data[b] and success[
                            b] < 0:  # and check_norm.data[b] <= self.epsilon
                        success[b] = i
                        success_error[b] = error[b].data.cpu()
                        success_perturbations[b] = numpy.copy(
                            self.perturbations[b].data.cpu().numpy())
                        success_probabilities[b] = output_probabilities[
                            b].data.cpu()
                        success_norms[b] = check_norm[b].data.cpu()

            self.history.append({
                'iteration': i,
                'class': other_classes.cpu().numpy(),
                'error': error.detach().cpu().numpy(),
                'probabilities': output_probabilities.detach().cpu().numpy(),
                'norms': check_norm.detach().cpu().numpy()
            })

            if verbose and i % self.skip == 0:
                log('[%s] %d: objective=%g success=%g gradient=%g' %
                    (self.__class__.__name__, i, torch.sum(error.data) /
                     batch_size, numpy.sum(success >= 0), gradient))

            # 5/
            # Break condition.
            if numpy.all(success >= 0) and not self.training_mode:
                if verbose:
                    log('[%s] %d: objective=%g success=%g gradient=%g' %
                        (self.__class__.__name__, i, torch.sum(error.data) /
                         batch_size, numpy.sum(success >= 0), gradient))
                break

            # Quick hack for handling the last iteration correctly.
            if i == self.max_iterations:
                if verbose:
                    log('[%s] %d: objective=%g success=%g gradient=%g' %
                        (self.__class__.__name__, i, torch.sum(error.data) /
                         batch_size, numpy.sum(success >= 0), gradient))
                break

            # 6/
            # Put together the error for differentiation and do backward pass.
            error = torch.sum(error, 0)
            error.backward()

            # 7/
            # Get the gradients and normalize.
            self.gradients = self.perturbations.grad.clone()
            self.normalize()

            # 8/
            # Update step according to learning rate.
            self.perturbations.data -= self.base_lr * self.gradients

            if verbose:
                gradient = torch.mean(torch.abs(self.perturbations.grad))

        for b in range(self.perturbations.size()[0]):
            # In any case, we return the current perturbations for non-successful attacks.
            if success[b] < 0:
                success_perturbations[b] = numpy.copy(
                    self.perturbations[b].data.cpu().numpy())
                success_probabilities[b] = output_probabilities[b].data.cpu()
                success_norms[b] = check_norm[b].data.cpu()

        return success, success_perturbations, success_probabilities, success_norms, i
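The core of the loop above is the update in steps 6-8: backpropagate the summed objective, normalize the gradient, and step on the perturbations. A toy, self-contained sketch of just that update; the model, objective and sign normalization below are stand-ins for illustration (the actual normalize() is not part of this excerpt):

import torch

images = torch.rand(4, 1, 8, 8)
perturbations = torch.zeros_like(images, requires_grad=True)
base_lr = 0.01

logits = (images + perturbations).flatten(1).sum(1)   # stand-in for model.forward(...)
error = torch.sum(logits)                             # stand-in for the untargeted objective
error.backward()

gradients = perturbations.grad.sign()                 # one possible normalization
with torch.no_grad():
    perturbations -= base_lr * gradients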
    def load_data_and_model(self):
        """
        Load data and model.
        """

        database = utils.read_hdf5(self.args.database_file).astype(
            numpy.float32)
        log('[Visualization] read %s' % self.args.database_file)

        N_font = database.shape[0]
        N_class = database.shape[1]
        resolution = database.shape[2]

        database = database.reshape((database.shape[0] * database.shape[1],
                                     database.shape[2], database.shape[3]))
        database = torch.from_numpy(database)
        if self.args.use_gpu:
            database = database.cuda()
        database = torch.autograd.Variable(database, False)

        self.test_images = utils.read_hdf5(self.args.test_images_file).astype(
            numpy.float32)
        if len(self.test_images.shape) < 4:
            self.test_images = numpy.expand_dims(self.test_images, axis=3)

        self.perturbations = utils.read_hdf5(
            self.args.perturbations_file).astype(numpy.float32)
        self.perturbations = numpy.swapaxes(self.perturbations, 0, 1)
        log('[Visualization] read %s' % self.args.perturbations_file)

        self.success = utils.read_hdf5(self.args.success_file)
        self.success = numpy.swapaxes(self.success, 0, 1)
        log('[Visualization] read %s' % self.args.success_file)

        self.accuracy = utils.read_hdf5(self.args.accuracy_file)
        log('[Visualization] read %s' % self.args.accuracy_file)

        self.test_theta = utils.read_hdf5(self.args.test_theta_file).astype(
            numpy.float32)
        self.test_theta = self.test_theta[:self.perturbations.shape[0]]
        N_theta = self.test_theta.shape[1]
        log('[Visualization] using %d N_theta' % N_theta)
        log('[Visualization] read %s' % self.args.test_theta_file)

        self.test_codes = utils.read_hdf5(self.args.test_codes_file).astype(
            numpy.int)
        self.test_codes = self.test_codes[:self.perturbations.shape[0]]
        self.test_codes = self.test_codes[:, 1:3]
        self.test_codes = numpy.concatenate(
            (common.numpy.one_hot(self.test_codes[:, 0], N_font),
             common.numpy.one_hot(self.test_codes[:, 1], N_class)),
            axis=1).astype(numpy.float32)
        log('[Visualization] read %s' % self.args.test_codes_file)

        image_channels = 1 if N_theta <= 7 else 3
        network_units = list(map(int, self.args.network_units.split(',')))
        log('[Visualization] using %d input channels' % image_channels)
        self.classifier = models.Classifier(
            N_class,
            resolution=(image_channels, resolution, resolution),
            architecture=self.args.network_architecture,
            activation=self.args.network_activation,
            batch_normalization=not self.args.network_no_batch_normalization,
            start_channels=self.args.network_channels,
            dropout=self.args.network_dropout,
            units=network_units)
        self.decoder = models.AlternativeOneHotDecoder(database, N_font,
                                                       N_class, N_theta)
        self.decoder.eval()

        assert os.path.exists(
            self.args.classifier_file
        ), 'state file %s not found' % self.args.classifier_file
        state = State.load(self.args.classifier_file)
        log('[Visualization] read %s' % self.args.classifier_file)

        self.classifier.load_state_dict(state.model)
        if self.args.use_gpu and not cuda.is_cuda(self.classifier):
            log('[Visualization] classifier is not CUDA')
            self.classifier = self.classifier.cuda()
        log('[Visualization] loaded classifier')

        self.classifier.eval()
        log('[Visualization] set classifier to eval')
Example no. 16
    def load_decoder(self):
        """
        Load the decoder.
        """

        assert self.args.decoder_files
        decoder_files = self.args.decoder_files.split(',')
        for decoder_file in decoder_files:
            assert os.path.exists(
                decoder_file), 'could not find %s' % decoder_file

        log('[Training] using %d input channels' % self.train_images.shape[3])
        decoder_units = list(map(int, self.args.decoder_units.split(',')))

        if len(decoder_files) > 1:
            log('[Training] loading multiple decoders')
            decoders = []
            for i in range(len(decoder_files)):
                decoder = models.LearnedDecoder(
                    self.args.latent_space_size,
                    resolution=(self.train_images.shape[3],
                                self.train_images.shape[1],
                                self.train_images.shape[2]),
                    architecture=self.args.decoder_architecture,
                    start_channels=self.args.decoder_channels,
                    activation=self.args.decoder_activation,
                    batch_normalization=not self.args.
                    decoder_no_batch_normalization,
                    units=decoder_units)

                state = State.load(decoder_files[i])
                decoder.load_state_dict(state.model)
                if self.args.use_gpu and not cuda.is_cuda(decoder):
                    decoder = decoder.cuda()
                decoders.append(decoder)

                decoder.eval()
                log('[Training] loaded %s' % decoder_files[i])
            self.decoder = models.SelectiveDecoder(
                decoders,
                resolution=(self.train_images.shape[3],
                            self.train_images.shape[1],
                            self.train_images.shape[2]))
        else:
            log('[Training] loading one decoder')
            decoder = models.LearnedDecoder(
                self.args.latent_space_size,
                resolution=(self.train_images.shape[3],
                            self.train_images.shape[1],
                            self.train_images.shape[2]),
                architecture=self.args.decoder_architecture,
                start_channels=self.args.decoder_channels,
                activation=self.args.decoder_activation,
                batch_normalization=not self.args.
                decoder_no_batch_normalization,
                units=decoder_units)

            state = State.load(decoder_files[0])
            decoder.load_state_dict(state.model)
            if self.args.use_gpu and not cuda.is_cuda(decoder):
                decoder = decoder.cuda()
            decoder.eval()
            log('[Training] read decoder')

            self.decoder = decoder

        self.decoder_classifier = models.DecoderClassifier(
            self.decoder, self.model)
Example no. 17
    def load_data(self):
        """
        Load data and model.
        """

        self.test_images = utils.read_hdf5(self.args.test_images_file).astype(numpy.float32)
        log('[Testing] read %s' % self.args.test_images_file)

        # For handling both color and gray images.
        if len(self.test_images.shape) < 4:
            self.test_images = numpy.expand_dims(self.test_images, axis=3)
            log('[Testing] no color images, adjusted size')
        self.resolution = self.test_images.shape[2]
        log('[Testing] resolution %d' % self.resolution)

        self.train_images = utils.read_hdf5(self.args.train_images_file).astype(numpy.float32)
        # !
        self.train_images = self.train_images.reshape((self.train_images.shape[0], -1))
        log('[Testing] read %s' % self.args.train_images_file)

        self.test_theta = utils.read_hdf5(self.args.test_theta_file).astype(numpy.float32)
        log('[Testing] read %s' % self.args.test_theta_file)

        self.train_theta = utils.read_hdf5(self.args.train_theta_file).astype(numpy.float32)
        log('[Testing] read %s' % self.args.train_theta_file)

        self.test_codes = utils.read_hdf5(self.args.test_codes_file).astype(numpy.int)
        self.test_codes = self.test_codes[:, self.args.label_index]
        self.N_class = numpy.max(self.test_codes) + 1
        log('[Testing] read %s' % self.args.test_codes_file)

        self.accuracy = utils.read_hdf5(self.args.accuracy_file)
        log('[Testing] read %s' % self.args.accuracy_file)

        self.perturbations = utils.read_hdf5(self.args.perturbations_file).astype(numpy.float32)
        self.N_attempts = self.perturbations.shape[0]
        assert not numpy.any(self.perturbations != self.perturbations), 'NaN in perturbations'

        # First, repeat relevant data.
        self.perturbation_theta = numpy.repeat(self.test_theta[:self.perturbations.shape[1]], self.N_attempts, axis=0)
        self.perturbation_codes = numpy.repeat(self.test_codes[:self.perturbations.shape[1]], self.N_attempts, axis=0)
        self.perturbation_codes = numpy.squeeze(self.perturbation_codes)
        self.accuracy = numpy.repeat(self.accuracy[:self.perturbations.shape[1]], self.N_attempts, axis=0)

        # Then, reshape the perturbations!
        self.perturbations = numpy.swapaxes(self.perturbations, 0, 1)
        self.perturbations = self.perturbations.reshape((self.perturbations.shape[0] * self.perturbations.shape[1], -1))
        log('[Testing] read %s' % self.args.perturbations_file)

        self.success = utils.read_hdf5(self.args.success_file)
        self.success = numpy.swapaxes(self.success, 0, 1)
        self.success = self.success.reshape((self.success.shape[0] * self.success.shape[1]))
        log('[Testing] read %s' % self.args.success_file)

        assert self.args.decoder_files
        decoder_files = self.args.decoder_files.split(',')
        for decoder_file in decoder_files:
            assert os.path.exists(decoder_file), 'could not find %s' % decoder_file

        log('[Testing] using %d input channels' % self.test_images.shape[3])
        decoder_units = list(map(int, self.args.decoder_units.split(',')))

        if len(decoder_files) > 1:
            log('[Testing] loading multiple decoders')
            decoders = []
            for i in range(len(decoder_files)):
                decoder = models.LearnedDecoder(self.args.latent_space_size, resolution=(self.test_images.shape[3], self.test_images.shape[1], self.test_images.shape[2]),
                                                architecture=self.args.decoder_architecture,
                                                start_channels=self.args.decoder_channels,
                                                activation=self.args.decoder_activation,
                                                batch_normalization=not self.args.decoder_no_batch_normalization,
                                                units=decoder_units)

                state = State.load(decoder_files[i])
                decoder.load_state_dict(state.model)
                if self.args.use_gpu and not cuda.is_cuda(decoder):
                    decoder = decoder.cuda()
                decoders.append(decoder)

                decoder.eval()
                log('[Testing] loaded %s' % decoder_files[i])
            self.model = models.SelectiveDecoder(decoders, resolution=(self.test_images.shape[3], self.test_images.shape[1], self.test_images.shape[2]))
        else:
            log('[Testing] loading one decoder')
            decoder = models.LearnedDecoder(self.args.latent_space_size, resolution=(self.test_images.shape[3], self.test_images.shape[1], self.test_images.shape[2]),
                                            architecture=self.args.decoder_architecture,
                                            start_channels=self.args.decoder_channels,
                                            activation=self.args.decoder_activation,
                                            batch_normalization=not self.args.decoder_no_batch_normalization,
                                            units=decoder_units)

            state = State.load(decoder_files[0])
            decoder.load_state_dict(state.model)
            if self.args.use_gpu and not cuda.is_cuda(decoder):
                decoder = decoder.cuda()
            decoder.eval()
            log('[Testing] read decoder')

            self.model = decoder
    def load_model_and_scheduler(self):
        """
        Load model.
        """

        params = {
            'lr': self.args.lr,
            'lr_decay': self.args.lr_decay,
            'lr_min': 0.0000001,
            'weight_decay': self.args.weight_decay,
        }

        log('[Training] using %d input channels' % self.train_images.shape[3])
        network_units = list(map(int, self.args.network_units.split(',')))
        self.model = models.Classifier(
            self.N_class,
            resolution=(self.train_images.shape[3], self.train_images.shape[1],
                        self.train_images.shape[2]),
            architecture=self.args.network_architecture,
            activation=self.args.network_activation,
            batch_normalization=not self.args.network_no_batch_normalization,
            start_channels=self.args.network_channels,
            dropout=self.args.network_dropout,
            units=network_units)

        self.epoch = 0
        if os.path.exists(self.args.state_file):
            state = State.load(self.args.state_file)
            log('[Training] loaded %s' % self.args.state_file)

            self.model.load_state_dict(state.model)

            # needs to be done before constructing the optimizer.
            if self.args.use_gpu and not cuda.is_cuda(self.model):
                log('[Training] model is not CUDA')
                self.model = self.model.cuda()
            log('[Training] loaded model')

            optimizer = torch.optim.Adam(self.model.parameters(), params['lr'])
            optimizer.load_state_dict(state.optimizer)
            self.scheduler = ADAMScheduler(optimizer, **params)

            self.epoch = state.epoch + 1
            self.scheduler.update(self.epoch)

            assert os.path.exists(self.args.training_file) and os.path.exists(
                self.args.testing_file)
            self.train_statistics = utils.read_hdf5(self.args.training_file)
            log('[Training] read %s' % self.args.training_file)
            self.test_statistics = utils.read_hdf5(self.args.testing_file)
            log('[Training] read %s' % self.args.testing_file)

            if utils.display():
                self.plot()
        else:
            if self.args.use_gpu and not cuda.is_cuda(self.model):
                log('[Training] model is not CUDA')
                self.model = self.model.cuda()
            log('[Training] did not load model, using new one')

            self.scheduler = ADAMScheduler(self.model.parameters(), **params)
            self.scheduler.initialize()  # !

        log(self.model)
    def loop(self):
        """
        Main loop for training and testing, saving ...
        """

        auto_encoder_params = {
            'lr': self.args.base_lr,
            'lr_decay': self.args.base_lr_decay,
            'lr_min': 0.000000001,
            'weight_decay': self.args.weight_decay
        }

        classifier_params = {
            'lr': self.args.base_lr,
            'lr_decay': self.args.base_lr_decay,
            'lr_min': 0.000000001,
            'weight_decay': self.args.weight_decay
        }

        e = 0
        if os.path.exists(self.args.encoder_file) and os.path.exists(
                self.args.decoder_file) and os.path.exists(
                    self.args.classifier_file):
            state = State.load(self.args.encoder_file)
            log('[Training] loaded %s' % self.args.encoder_file)
            self.encoder.load_state_dict(state.model)
            log('[Training] loaded encoder')

            if self.args.use_gpu and not cuda.is_cuda(self.encoder):
                self.encoder = self.encoder.cuda()

            optimizer = torch.optim.Adam(list(self.encoder.parameters()),
                                         auto_encoder_params['lr'])
            optimizer.load_state_dict(state.optimizer)
            self.encoder_scheduler = ADAMScheduler(optimizer,
                                                   **auto_encoder_params)

            state = State.load(self.args.decoder_file)
            log('[Training] loaded %s' % self.args.decoder_file)
            self.decoder.load_state_dict(state.model)
            log('[Training] loaded decoder')

            if self.args.use_gpu and not cuda.is_cuda(self.decoder):
                self.decoder = self.decoder.cuda()

            optimizer = torch.optim.Adam(list(self.decoder.parameters()),
                                         auto_encoder_params['lr'])
            optimizer.load_state_dict(state.optimizer)
            self.decoder_scheduler = ADAMScheduler(optimizer,
                                                   **auto_encoder_params)

            state = State.load(self.args.classifier_file)
            log('[Training] loaded %s' % self.args.classifier_file)
            self.classifier.load_state_dict(state.model)
            log('[Training] loaded classifier')

            if self.args.use_gpu and not cuda.is_cuda(self.classifier):
                self.classifier = self.classifier.cuda()

            optimizer = torch.optim.Adam(list(self.classifier.parameters()),
                                         classifier_params['lr'])
            optimizer.load_state_dict(state.optimizer)
            self.classifier_scheduler = ADAMScheduler(optimizer,
                                                      **classifier_params)

            e = state.epoch + 1
            self.encoder_scheduler.update(e)
            self.decoder_scheduler.update(e)
            self.classifier_scheduler.update(e)
        else:
            if self.args.use_gpu and not cuda.is_cuda(self.encoder):
                self.encoder = self.encoder.cuda()
            if self.args.use_gpu and not cuda.is_cuda(self.decoder):
                self.decoder = self.decoder.cuda()
            if self.args.use_gpu and not cuda.is_cuda(self.classifier):
                self.classifier = self.classifier.cuda()

            self.encoder_scheduler = ADAMScheduler(
                list(self.encoder.parameters()), **auto_encoder_params)
            self.encoder_scheduler.initialize()  # !

            self.decoder_scheduler = ADAMScheduler(
                list(self.decoder.parameters()), **auto_encoder_params)
            self.decoder_scheduler.initialize()  # !

            self.classifier_scheduler = ADAMScheduler(
                list(self.classifier.parameters()), **classifier_params)
            self.classifier_scheduler.initialize()  # !

        log('[Training] model needs %gMiB' %
            (cuda.estimate_size(self.encoder) / (1024 * 1024)))

        while e < self.args.epochs:
            log('[Training] %s' % self.encoder_scheduler.report())
            log('[Training] %s' % self.decoder_scheduler.report())
            log('[Training] %s' % self.classifier_scheduler.report())

            testing = elapsed(functools.partial(self.test, e))
            training = elapsed(functools.partial(self.train, e))
            log('[Training] %gs training, %gs testing' % (training, testing))

            #utils.remove(self.args.encoder_file + '.%d' % (e - 1))
            #utils.remove(self.args.decoder_file + '.%d' % (e - 1))
            #utils.remove(self.args.classifier_file + '.%d' % (e - 1))
            State.checkpoint(self.encoder, self.encoder_scheduler.optimizer, e,
                             self.args.encoder_file + '.%d' % e)
            State.checkpoint(self.decoder, self.decoder_scheduler.optimizer, e,
                             self.args.decoder_file + '.%d' % e)
            State.checkpoint(self.classifier,
                             self.classifier_scheduler.optimizer, e,
                             self.args.classifier_file + '.%d' % e)

            log('[Training] %d: checkpoint' % e)
            torch.cuda.empty_cache()  # necessary?

            # Save statistics and plots.
            if self.args.training_file:
                utils.write_hdf5(self.args.training_file,
                                 self.train_statistics)
                log('[Training] %d: wrote %s' % (e, self.args.training_file))
            if self.args.testing_file:
                utils.write_hdf5(self.args.testing_file, self.test_statistics)
                log('[Training] %d: wrote %s' % (e, self.args.testing_file))

            #if utils.display():
            #    self.plot()

            e += 1  # !

        testing = elapsed(functools.partial(self.test, e))
        log('[Training] %gs testing' % (testing))

        #utils.remove(self.args.encoder_file + '.%d' % (e - 1))
        #utils.remove(self.args.decoder_file + '.%d' % (e - 1))
        #utils.remove(self.args.classifier_file + '.%d' % (e - 1))
        State.checkpoint(self.encoder, self.encoder_scheduler.optimizer, e,
                         self.args.encoder_file)
        State.checkpoint(self.decoder, self.decoder_scheduler.optimizer, e,
                         self.args.decoder_file)
        State.checkpoint(self.classifier, self.classifier_scheduler.optimizer,
                         e, self.args.classifier_file)

        self.results = {
            'training_statistics': self.train_statistics,
            'testing_statistics': self.test_statistics,
        }
        if self.args.results_file:
            utils.write_pickle(self.args.results_file, self.results)
            log('[Training] wrote %s' % self.args.results_file)
Example no. 20
    def load_model(self):
        """
        Load model.
        """

        assert self.args.decoder_files
        decoder_files = self.args.decoder_files.split(',')
        for decoder_file in decoder_files:
            assert os.path.exists(
                decoder_file), 'could not find %s' % decoder_file

        decoder_units = list(map(int, self.args.decoder_units.split(',')))
        log('[Attack] using %d input channels' % self.test_images.shape[3])

        if len(decoder_files) > 1:
            log('[Attack] loading multiple decoders')
            decoders = []
            for i in range(len(decoder_files)):
                decoder = models.LearnedDecoder(
                    self.args.latent_space_size,
                    resolution=(self.test_images.shape[3],
                                self.test_images.shape[1],
                                self.test_images.shape[2]),
                    architecture=self.args.decoder_architecture,
                    start_channels=self.args.decoder_channels,
                    activation=self.args.decoder_activation,
                    batch_normalization=not self.args.
                    decoder_no_batch_normalization,
                    units=decoder_units)
                log(decoder)
                state = State.load(decoder_files[i])
                decoder.load_state_dict(state.model)
                if self.args.use_gpu and not cuda.is_cuda(decoder):
                    decoder = decoder.cuda()
                decoders.append(decoder)

                decoder.eval()
                log('[Attack] loaded %s' % decoder_files[i])
            decoder = models.SelectiveDecoder(
                decoders,
                resolution=(self.test_images.shape[3],
                            self.test_images.shape[1],
                            self.test_images.shape[2]))
        else:
            log('[Attack] loading one decoder')
            decoder = models.LearnedDecoder(
                self.args.latent_space_size,
                resolution=(self.test_images.shape[3],
                            self.test_images.shape[1],
                            self.test_images.shape[2]),
                architecture=self.args.decoder_architecture,
                start_channels=self.args.decoder_channels,
                activation=self.args.decoder_activation,
                batch_normalization=not self.args.
                decoder_no_batch_normalization,
                units=decoder_units)

            state = State.load(decoder_files[0])
            decoder.load_state_dict(state.model)
            if self.args.use_gpu and not cuda.is_cuda(decoder):
                decoder = decoder.cuda()
            decoder.eval()
            log('[Attack] read decoder')

        classifier_units = list(map(int, self.args.network_units.split(',')))
        classifier = models.Classifier(
            self.N_class,
            resolution=(self.test_images.shape[3], self.test_images.shape[1],
                        self.test_images.shape[2]),
            architecture=self.args.network_architecture,
            activation=self.args.network_activation,
            batch_normalization=not self.args.network_no_batch_normalization,
            start_channels=self.args.network_channels,
            dropout=self.args.network_dropout,
            units=classifier_units)

        assert os.path.exists(
            self.args.classifier_file
        ), 'state file %s not found' % self.args.classifier_file
        state = State.load(self.args.classifier_file)
        log('[Attack] read %s' % self.args.classifier_file)

        classifier.load_state_dict(state.model)
        if self.args.use_gpu and not cuda.is_cuda(classifier):
            log('[Attack] classifier is not CUDA')
            classifier = classifier.cuda()
        log('[Attack] loaded classifier')

        # !
        classifier.eval()
        log('[Attack] set classifier to eval')

        self.model = models.DecoderClassifier(decoder, classifier)
    def load_data(self):
        """
        Load data and model.
        """

        with logw('[Detection] read %s' % self.args.train_images_file):
            self.nearest_neighbor_images = utils.read_hdf5(self.args.train_images_file)
            assert len(self.nearest_neighbor_images.shape) == 3

        with logw('[Detection] read %s' % self.args.test_images_file):
            self.test_images = utils.read_hdf5(self.args.test_images_file)
            if len(self.test_images.shape) < 4:
                self.test_images = numpy.expand_dims(self.test_images, axis=3)

        with logw('[Detection] read %s' % self.args.perturbations_file):
            self.perturbations = utils.read_hdf5(self.args.perturbations_file)
            assert len(self.perturbations.shape) == 4

        with logw('[Detection] read %s' % self.args.success_file):
            self.success = utils.read_hdf5(self.args.success_file)

        with logw('[Detection] read %s' % self.args.accuracy_file):
            self.accuracy = utils.read_hdf5(self.args.accuracy_file)

        self.perturbations = numpy.swapaxes(self.perturbations, 0, 1)
        num_attempts = self.perturbations.shape[1]
        self.test_images = self.test_images[:self.perturbations.shape[0]]
        self.train_images = self.nearest_neighbor_images[:self.perturbations.shape[0]]
        self.accuracy = self.accuracy[:self.perturbations.shape[0]]

        self.perturbations = self.perturbations.reshape((self.perturbations.shape[0]*self.perturbations.shape[1], self.perturbations.shape[2], self.perturbations.shape[3]))
        self.success = numpy.swapaxes(self.success, 0, 1)
        self.success = self.success.reshape((self.success.shape[0]*self.success.shape[1]))

        self.accuracy = numpy.repeat(self.accuracy, num_attempts, axis=0)
        self.test_images = numpy.repeat(self.test_images, num_attempts, axis=0)
        self.train_images = numpy.repeat(self.train_images, num_attempts, axis=0)

        max_samples = self.args.max_samples
        self.success = self.success[:max_samples]
        self.accuracy = self.accuracy[:max_samples]
        self.perturbations = self.perturbations[:max_samples]
        self.test_images = self.test_images[:max_samples]
        self.train_images = self.train_images[:max_samples]

        if self.args.mode == 'true':
            assert self.args.database_file
            assert self.args.test_codes_file
            assert self.args.test_theta_file

            self.test_codes = utils.read_hdf5(self.args.test_codes_file)
            log('[Detection] read %s' % self.args.test_codes_file)

            self.test_theta = utils.read_hdf5(self.args.test_theta_file)
            log('[Detection] read %s' % self.args.test_theta_file)

            self.test_codes = self.test_codes[:self.perturbations.shape[0]]
            self.test_theta = self.test_theta[:self.perturbations.shape[0]]

            self.test_codes = numpy.repeat(self.test_codes, num_attempts, axis=0)
            self.test_theta = numpy.repeat(self.test_theta, num_attempts, axis=0)

            self.test_codes = self.test_codes[:max_samples]
            self.test_theta = self.test_theta[:max_samples]

            database = utils.read_hdf5(self.args.database_file)
            log('[Detection] read %s' % self.args.database_file)

            self.N_font = database.shape[0]
            self.N_class = database.shape[1]
            self.N_theta = self.test_theta.shape[1]

            database = database.reshape((database.shape[0]*database.shape[1], database.shape[2], database.shape[3]))
            database = torch.from_numpy(database)
            if self.args.use_gpu:
                database = database.cuda()
            database = torch.autograd.Variable(database, False)

            self.model = models.AlternativeOneHotDecoder(database, self.N_font, self.N_class, self.N_theta)
            self.model.eval()
            log('[Detection] initialized decoder')
        elif self.args.mode == 'appr':
            assert self.args.decoder_files
            assert self.args.test_codes_file
            assert self.args.test_theta_file

            self.test_codes = utils.read_hdf5(self.args.test_codes_file)
            log('[Detection] read %s' % self.args.test_codes_file)

            self.test_theta = utils.read_hdf5(self.args.test_theta_file)
            log('[Detection] read %s' % self.args.test_theta_file)

            self.test_codes = self.test_codes[:self.perturbations.shape[0]]
            self.test_theta = self.test_theta[:self.perturbations.shape[0]]

            self.test_codes = numpy.repeat(self.test_codes, num_attempts, axis=0)
            self.test_theta = numpy.repeat(self.test_theta, num_attempts, axis=0)

            self.test_codes = self.test_codes[:max_samples]
            self.test_theta = self.test_theta[:max_samples]

            assert self.args.decoder_files
            decoder_files = self.args.decoder_files.split(',')
            for decoder_file in decoder_files:
                assert os.path.exists(decoder_file), 'could not find %s' % decoder_file

            resolution = [1 if len(self.test_images.shape) <= 3 else self.test_images.shape[3], self.test_images.shape[1], self.test_images.shape[2]]
            decoder_units = list(map(int, self.args.decoder_units.split(',')))

            if len(decoder_files) > 1:
                log('[Detection] loading multiple decoders')
                decoders = []
                for i in range(len(decoder_files)):
                    decoder = models.LearnedDecoder(self.args.latent_space_size,
                                                    resolution=resolution,
                                                    architecture=self.args.decoder_architecture,
                                                    start_channels=self.args.decoder_channels,
                                                    activation=self.args.decoder_activation,
                                                    batch_normalization=not self.args.decoder_no_batch_normalization,
                                                    units=decoder_units)

                    state = State.load(decoder_files[i])
                    decoder.load_state_dict(state.model)
                    if self.args.use_gpu and not cuda.is_cuda(decoder):
                        decoder = decoder.cuda()
                    decoders.append(decoder)

                    decoder.eval()
                    log('[Detection] loaded %s' % decoder_files[i])
                self.model = models.SelectiveDecoder(decoders, resolution=resolution)
            else:
                log('[Detection] loading one decoder')
                decoder = models.LearnedDecoder(self.args.latent_space_size,
                                                resolution=resolution,
                                                architecture=self.args.decoder_architecture,
                                                start_channels=self.args.decoder_channels,
                                                activation=self.args.decoder_activation,
                                                batch_normalization=not self.args.decoder_no_batch_normalization,
                                                units=decoder_units)

                state = State.load(decoder_files[0])
                decoder.load_state_dict(state.model)
                if self.args.use_gpu and not cuda.is_cuda(decoder):
                    decoder = decoder.cuda()
                decoder.eval()
                log('[Detection] read decoder')

                self.model = decoder
Example no. 22
    def main(self):
        """
        Main method; should be overridden.
        """

        self.train_images = utils.read_hdf5(
            self.args.train_images_file).astype(numpy.float32)
        log('[Testing] read %s' % self.args.train_images_file)

        self.test_images = utils.read_hdf5(self.args.test_images_file).astype(
            numpy.float32)
        log('[Testing] read %s' % self.args.test_images_file)

        # For handling both color and gray images.
        if len(self.train_images.shape) < 4:
            self.train_images = numpy.expand_dims(self.train_images, axis=3)
            self.test_images = numpy.expand_dims(self.test_images, axis=3)
            log('[Testing] no color images, adjusted size')
        self.resolution = self.train_images.shape[2]
        log('[Testing] resolution %d' % self.resolution)

        self.train_codes = utils.read_hdf5(self.args.train_codes_file).astype(
            numpy.float32)
        log('[Testing] read %s' % self.args.train_codes_file)

        self.test_codes = utils.read_hdf5(self.args.test_codes_file).astype(
            numpy.float32)
        log('[Testing] read %s' % self.args.test_codes_file)

        self.train_codes = self.train_codes[:, self.args.label_index]
        self.test_codes = self.test_codes[:, self.args.label_index]

        if self.args.label >= 0:
            self.train_images = self.train_images[self.train_codes ==
                                                  self.args.label]
            self.test_images = self.test_images[self.test_codes ==
                                                self.args.label]

        log('[Testing] using %d input channels' % self.test_images.shape[3])
        network_units = list(map(int, self.args.network_units.split(',')))
        self.encoder = models.LearnedVariationalEncoder(
            self.args.latent_space_size,
            0,
            resolution=(self.train_images.shape[3], self.train_images.shape[1],
                        self.train_images.shape[2]),
            architecture=self.args.network_architecture,
            start_channels=self.args.network_channels,
            activation=self.args.network_activation,
            batch_normalization=not self.args.network_no_batch_normalization,
            units=network_units)
        self.decoder = models.LearnedDecoder(
            self.args.latent_space_size,
            resolution=(self.train_images.shape[3], self.train_images.shape[1],
                        self.train_images.shape[2]),
            architecture=self.args.network_architecture,
            start_channels=self.args.network_channels,
            activation=self.args.network_activation,
            batch_normalization=not self.args.network_no_batch_normalization,
            units=network_units)
        log(self.encoder)
        log(self.decoder)

        assert os.path.exists(self.args.encoder_file) and os.path.exists(
            self.args.decoder_file)
        state = State.load(self.args.encoder_file)
        log('[Testing] loaded %s' % self.args.encoder_file)

        self.encoder.load_state_dict(state.model)
        log('[Testing] loaded encoder')

        state = State.load(self.args.decoder_file)
        log('[Testing] loaded %s' % self.args.decoder_file)

        self.decoder.load_state_dict(state.model)
        log('[Testing] loaded decoder')

        if self.args.use_gpu and not cuda.is_cuda(self.encoder):
            self.encoder = self.encoder.cuda()
        if self.args.use_gpu and not cuda.is_cuda(self.decoder):
            self.decoder = self.decoder.cuda()

        log('[Testing] model needs %gMiB' %
            ((cuda.estimate_size(self.encoder) +
              cuda.estimate_size(self.decoder)) / (1024 * 1024)))
        self.test()
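
cuda.estimate_size is project-specific; assuming it approximates the memory occupied by a module's parameters and buffers (activations and CUDA overhead excluded), a rough equivalent in plain PyTorch might look like this:

import torch.nn as nn


def estimate_parameter_size(module):
    """Rough byte count of a module's parameters and buffers; an assumption about
    what cuda.estimate_size reports (activations are not included)."""
    tensors = list(module.parameters()) + list(module.buffers())
    return sum(t.numel() * t.element_size() for t in tensors)


# Report the size in MiB, as the log statement above does.
net = nn.Sequential(nn.Linear(10, 256), nn.ReLU(), nn.Linear(256, 28 * 28))
print('%g MiB' % (estimate_parameter_size(net) / (1024 * 1024)))
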
Example No. 23
0
    def load_data_and_model(self):
        """
        Load data and model.
        """

        self.test_images = utils.read_hdf5(self.args.test_images_file).astype(
            numpy.float32)
        if len(self.test_images.shape) < 4:
            self.test_images = numpy.expand_dims(self.test_images, axis=3)
        resolution = (self.test_images.shape[3], self.test_images.shape[1],
                      self.test_images.shape[2])
        log('[Visualization] read %s' % self.args.test_images_file)

        self.perturbations = utils.read_hdf5(
            self.args.perturbations_file).astype(numpy.float32)
        self.perturbations = numpy.swapaxes(self.perturbations, 0, 1)
        log('[Visualization] read %s' % self.args.perturbations_file)

        self.success = utils.read_hdf5(self.args.success_file)
        self.success = numpy.swapaxes(self.success, 0, 1)
        log('[Visualization] read %s' % self.args.success_file)

        self.accuracy = utils.read_hdf5(self.args.accuracy_file)
        log('[Visualization] read %s' % self.args.accuracy_file)

        self.test_theta = utils.read_hdf5(self.args.test_theta_file).astype(
            numpy.float32)
        self.test_theta = self.test_theta[:self.perturbations.shape[0]]
        log('[Visualization] read %s' % self.args.test_theta_file)

        self.test_codes = utils.read_hdf5(self.args.test_codes_file).astype(int)
        self.test_codes = self.test_codes[:, self.args.label_index]
        self.N_class = numpy.max(self.test_codes) + 1
        self.test_codes = self.test_codes[:self.perturbations.shape[0]]
        log('[Visualization] read %s' % self.args.test_codes_file)

        network_units = list(map(int, self.args.network_units.split(',')))
        self.classifier = models.Classifier(
            self.N_class,
            resolution=resolution,
            architecture=self.args.network_architecture,
            activation=self.args.network_activation,
            batch_normalization=not self.args.network_no_batch_normalization,
            start_channels=self.args.network_channels,
            dropout=self.args.network_dropout,
            units=network_units)

        assert os.path.exists(
            self.args.classifier_file
        ), 'state file %s not found' % self.args.classifier_file
        state = State.load(self.args.classifier_file)
        log('[Visualization] read %s' % self.args.classifier_file)

        self.classifier.load_state_dict(state.model)
        if self.args.use_gpu and not cuda.is_cuda(self.classifier):
            log('[Visualization] classifier is not CUDA')
            self.classifier = self.classifier.cuda()
        log('[Visualization] loaded classifier')

        self.classifier.eval()
        log('[Visualization] set classifier to eval')

        assert self.args.decoder_files
        decoder_files = self.args.decoder_files.split(',')
        for decoder_file in decoder_files:
            assert os.path.exists(
                decoder_file), 'could not find %s' % decoder_file

        log('[Visualization] using %d input channels' %
            self.test_images.shape[3])
        decoder_units = list(map(int, self.args.decoder_units.split(',')))

        if len(decoder_files) > 1:
            log('[Visualization] loading multiple decoders')
            decoders = []
            for i in range(len(decoder_files)):
                decoder = models.LearnedDecoder(
                    self.args.latent_space_size,
                    resolution=resolution,
                    architecture=self.args.decoder_architecture,
                    start_channels=self.args.decoder_channels,
                    activation=self.args.decoder_activation,
                    batch_normalization=not self.args.decoder_no_batch_normalization,
                    units=decoder_units)

                state = State.load(decoder_files[i])
                decoder.load_state_dict(state.model)
                if self.args.use_gpu and not cuda.is_cuda(decoder):
                    decoder = decoder.cuda()
                decoders.append(decoder)

                decoder.eval()
                log('[Visualization] loaded %s' % decoder_files[i])
            self.decoder = models.SelectiveDecoder(decoders,
                                                   resolution=resolution)
        else:
            log('[Visualization] loading one decoder')
            decoder = models.LearnedDecoder(
                self.args.latent_space_size,
                resolution=resolution,
                architecture=self.args.decoder_architecture,
                start_channels=self.args.decoder_channels,
                activation=self.args.decoder_activation,
                batch_normalization=not self.args.decoder_no_batch_normalization,
                units=decoder_units)

            state = State.load(decoder_files[0])
            decoder.load_state_dict(state.model)
            if self.args.use_gpu and not cuda.is_cuda(decoder):
                decoder = decoder.cuda()
            decoder.eval()
            log('[Visualization] read decoder')

            self.decoder = decoder
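
The swapaxes and truncation above suggest the perturbation arrays are stored attempt-major and re-ordered to sample-major before auxiliary arrays are cut to the same length. A small numpy sketch of that alignment, with made-up shapes:

import numpy

# Made-up shapes: 5 attack attempts per sample, 100 attacked test samples, 10 latent dims.
perturbations = numpy.zeros((5, 100, 10), dtype=numpy.float32)
success = -numpy.ones((5, 100), dtype=numpy.int32)

# Re-order to sample-major layout, as load_data_and_model does.
perturbations = numpy.swapaxes(perturbations, 0, 1)  # (100, 5, 10)
success = numpy.swapaxes(success, 0, 1)              # (100, 5)

# Cut companion arrays down to the attacked samples so indices line up.
test_theta = numpy.zeros((200, 10), dtype=numpy.float32)
test_theta = test_theta[:perturbations.shape[0]]     # (100, 10)
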
Example No. 24
0
    def load_data_and_model(self):
        """
        Load data and model.
        """

        self.test_images = utils.read_hdf5(self.args.test_images_file).astype(
            numpy.float32)
        if len(self.test_images.shape) < 4:
            self.test_images = numpy.expand_dims(self.test_images, axis=3)
        resolution = self.test_images.shape[2]
        log('[Visualization] read %s' % self.args.test_images_file)

        self.test_codes = utils.read_hdf5(self.args.test_codes_file).astype(int)
        self.test_codes = self.test_codes[:, self.args.label_index]
        N_class = numpy.max(self.test_codes) + 1
        log('[Visualization] read %s' % self.args.test_codes_file)

        self.perturbations = utils.read_hdf5(
            self.args.perturbations_file).astype(numpy.float32)
        if len(self.perturbations.shape) < 5:
            self.perturbations = numpy.expand_dims(self.perturbations, axis=4)

        self.perturbations = numpy.swapaxes(self.perturbations, 0, 1)
        self.test_images = self.test_images[:self.perturbations.shape[0]]
        log('[Visualization] read %s' % self.args.perturbations_file)

        self.success = utils.read_hdf5(self.args.success_file)
        self.success = numpy.swapaxes(self.success, 0, 1)
        self.success = self.success >= 0
        log('[Visualization] read %s' % self.args.success_file)

        if self.args.selection_file:
            selection = utils.read_hdf5(self.args.selection_file)
            log('[Visualization] read %s' % self.args.selection_file)

            selection = numpy.swapaxes(selection, 0, 1)
            selection = selection[:self.success.shape[0]]
            selection = selection >= 0

            assert len(selection.shape) == len(self.success.shape)
            self.success = numpy.logical_and(self.success, selection)
            log('[Visualization] updated selection')

        self.accuracy = utils.read_hdf5(self.args.accuracy_file)
        log('[Visualization] read %s' % self.args.accuracy_file)

        log('[Visualization] using %d input channels' %
            self.test_images.shape[3])
        network_units = list(map(int, self.args.network_units.split(',')))
        self.model = models.Classifier(
            N_class,
            resolution=(self.test_images.shape[3], self.test_images.shape[1],
                        self.test_images.shape[2]),
            architecture=self.args.network_architecture,
            activation=self.args.network_activation,
            batch_normalization=not self.args.network_no_batch_normalization,
            start_channels=self.args.network_channels,
            dropout=self.args.network_dropout,
            units=network_units)

        assert os.path.exists(
            self.args.classifier_file
        ), 'state file %s not found' % self.args.classifier_file
        state = State.load(self.args.classifier_file)
        log('[Visualization] read %s' % self.args.classifier_file)

        self.model.load_state_dict(state.model)
        if self.args.use_gpu and not cuda.is_cuda(self.model):
            log('[Visualization] classifier is not CUDA')
            self.model = self.model.cuda()
        log('[Visualization] loaded classifier')

        self.model.eval()
        log('[Visualization] set model to eval')
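
The masking above appears to treat negative success entries as failed attempts and intersects them with an optional selection mask; a toy numpy illustration (values are invented):

import numpy

# Invented success values: one row per sample, one column per attempt; negative = failed.
success = numpy.array([[3, -1], [-1, -1], [0, 2]])
success_mask = success >= 0

# Optional selection mask of the same shape (e.g. only correctly classified samples).
selection = numpy.array([[1, 1], [1, -1], [-1, -1]]) >= 0
combined = numpy.logical_and(success_mask, selection)
print(combined)  # [[ True False] [False False] [False False]]
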
Example No. 25
0
    def load_model(self):
        """
        Load the decoder.
        """

        assert self.args.N_theta > 0 and self.args.N_theta <= 9

        min_translation_x, max_translation_x = map(
            float, self.args.translation_x.split(','))
        min_translation_y, max_translation_y = map(
            float, self.args.translation_y.split(','))
        min_shear_x, max_shear_x = map(float, self.args.shear_x.split(','))
        min_shear_y, max_shear_y = map(float, self.args.shear_y.split(','))
        min_scale, max_scale = map(float, self.args.scale.split(','))
        min_rotation, max_rotation = map(float, self.args.rotation.split(','))
        min_color, max_color = self.args.color, 1

        self.min_bound = numpy.array([
            min_translation_x,
            min_translation_y,
            min_shear_x,
            min_shear_y,
            min_scale,
            min_rotation,
            min_color,
            min_color,
            min_color,
        ])
        self.max_bound = numpy.array([
            max_translation_x, max_translation_y, max_shear_x, max_shear_y,
            max_scale, max_rotation, max_color, max_color, max_color
        ])

        self.min_bound = self.min_bound[:self.args.N_theta].astype(
            numpy.float32)
        self.max_bound = self.max_bound[:self.args.N_theta].astype(
            numpy.float32)

        decoder = models.STNDecoder(self.args.N_theta)
        log('[Attack] set up STN decoder')

        classifier = models.Classifier(
            self.N_class,
            resolution=(self.test_images.shape[3], self.test_images.shape[1],
                        self.test_images.shape[2]),
            architecture='standard',
            activation=self.args.network_activation,
            batch_normalization=not self.args.network_no_batch_normalization,
            start_channels=self.args.network_channels,
            dropout=self.args.network_dropout)

        assert os.path.exists(
            self.args.classifier_file
        ), 'state file %s not found' % self.args.classifier_file
        state = State.load(self.args.classifier_file)
        log('[Attack] read %s' % self.args.classifier_file)

        classifier.load_state_dict(state.model)
        if self.args.use_gpu and not cuda.is_cuda(classifier):
            log('[Attack] classifier is not CUDA')
            classifier = classifier.cuda()
        classifier.eval()
        log('[Attack] loaded classifier')

        self.model = models.DecoderClassifier(decoder, classifier)
        log('[Attack] set up decoder classifier')
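
The bound arrays above stack one (min, max) pair per transformation parameter and are then cut down to the first N_theta entries. A short numpy sketch of that setup with hypothetical bound values, including how a theta could be sampled and clipped inside the box:

import numpy

# Hypothetical bounds for up to 9 parameters: translation x/y, shear x/y, scale,
# rotation, and three color channels, mirroring the ordering in load_model above.
N_theta = 6
min_bound = numpy.array([-0.2, -0.2, -0.5, -0.5, 0.9, -0.5, 0.5, 0.5, 0.5], dtype=numpy.float32)
max_bound = numpy.array([0.2, 0.2, 0.5, 0.5, 1.1, 0.5, 1.0, 1.0, 1.0], dtype=numpy.float32)
min_bound, max_bound = min_bound[:N_theta], max_bound[:N_theta]

# Sample a theta inside the box and clip a perturbed theta back into it.
theta = numpy.random.uniform(min_bound, max_bound).astype(numpy.float32)
theta = numpy.clip(theta + 0.05, min_bound, max_bound)
print(theta.shape)  # (6,)
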