    def compute_ppca(self):
        """
        Fit PPCA on the nearest neighbor images and compute reconstruction distances and angles for test images, perturbations and pure perturbations.
        """

        success = numpy.logical_and(self.success >= 0, self.accuracy)
        log('[Detection] %d valid attacked samples' % numpy.sum(success))

        nearest_neighbor_images = self.nearest_neighbor_images.reshape(self.nearest_neighbor_images.shape[0], -1)
        nearest_neighbor_images = nearest_neighbor_images[:self.args.n_fit]
        
        perturbations = self.perturbations.reshape(self.perturbations.shape[0], -1)
        test_images = self.test_images.reshape(self.test_images.shape[0], -1)
        pure_perturbations = perturbations - test_images

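        # Fit probabilistic PCA on the flattened nearest neighbor images; the
        # resulting low-dimensional subspace serves as a local approximation of
        # the data manifold around the test images.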
        ppca = PPCA(n_components=self.args.n_pca)
        ppca.fit(nearest_neighbor_images)
        log('[Experiment] computed PPCA on nearest neighbor images')

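        # transform() projects each sample onto the n_pca-dimensional PPCA
        # subspace and inverse_transform() maps it back to image space; the
        # difference to the original sample measures how far it lies off the
        # subspace.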
        reconstructed_test_images = ppca.inverse_transform(ppca.transform(test_images))
        reconstructed_perturbations = ppca.inverse_transform(ppca.transform(perturbations))
        reconstructed_pure_perturbations = ppca.inverse_transform(ppca.transform(pure_perturbations))

        self.distances['test'] = numpy.average(numpy.multiply(reconstructed_test_images - test_images, reconstructed_test_images - test_images), axis=1)
        self.distances['perturbation'] = numpy.average(numpy.multiply(reconstructed_perturbations - perturbations, reconstructed_perturbations - perturbations), axis=1)
        self.distances['true'] = numpy.average(numpy.multiply(reconstructed_pure_perturbations - pure_perturbations, reconstructed_pure_perturbations - pure_perturbations), axis=1)

        self.angles['test'] = numpy.rad2deg(common.numpy.angles(test_images.T, reconstructed_test_images.T))
        self.angles['perturbation'] = numpy.rad2deg(common.numpy.angles(reconstructed_perturbations.T, perturbations.T))
        self.angles['true'] = numpy.rad2deg(common.numpy.angles(reconstructed_pure_perturbations.T, pure_perturbations.T))

        self.distances['test'] = self.distances['test'][success]
        self.distances['perturbation'] = self.distances['perturbation'][success]
        self.distances['true'] = self.distances['true'][success]

        self.angles['test'] = self.angles['test'][success]
        self.angles['perturbation'] = self.angles['perturbation'][success]
        self.angles['true'] = self.angles['true'][success]

    def compute_normalized_ppca(self):
        """
        Compute PPCA on images and perturbations normalized to unit norm.
        """

        nearest_neighbor_images = self.nearest_neighbor_images.reshape(self.nearest_neighbor_images.shape[0], -1)
        nearest_neighbor_images = nearest_neighbor_images[:self.args.n_fit]

        perturbations = self.perturbations.reshape(self.perturbations.shape[0], -1)
        test_images = self.test_images.reshape(self.test_images.shape[0], -1)
        pure_perturbations = perturbations - test_images

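        # Per-sample L2 norms; all images and perturbations are normalized to
        # unit norm below so that reconstruction errors and angles become
        # scale-invariant, and near-zero perturbations are excluded to avoid
        # dividing by zero.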
        nearest_neighbor_images_norms = numpy.linalg.norm(nearest_neighbor_images, ord=2, axis=1)
        perturbations_norms = numpy.linalg.norm(perturbations, ord=2, axis=1)
        test_images_norms = numpy.linalg.norm(test_images, ord=2, axis=1)
        pure_perturbations_norms = numpy.linalg.norm(pure_perturbations, ord=2, axis=1)

        success = numpy.logical_and(numpy.logical_and(self.success >= 0, self.accuracy), pure_perturbations_norms > 1e-4)
        log('[Detection] %d valid attacked samples' % numpy.sum(success))

        perturbations_norms = perturbations_norms[success]
        test_images_norms = test_images_norms[success]
        pure_perturbations_norms = pure_perturbations_norms[success]

        perturbations = perturbations[success]
        test_images = test_images[success]
        pure_perturbations = pure_perturbations[success]

        nearest_neighbor_images /= nearest_neighbor_images_norms.reshape(-1, 1)
        perturbations /= perturbations_norms.reshape(-1, 1)
        test_images /= test_images_norms.reshape(-1, 1)
        pure_perturbations /= pure_perturbations_norms.reshape(-1, 1)

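        # x != x is only true for NaN entries, so these asserts guard against
        # NaNs introduced by the normalization above.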
        assert not numpy.any(nearest_neighbor_images != nearest_neighbor_images)
        assert not numpy.any(perturbations != perturbations)
        assert not numpy.any(test_images != test_images)
        assert not numpy.any(pure_perturbations != pure_perturbations)

        ppca = PPCA(n_components=self.args.n_pca)
        ppca.fit(nearest_neighbor_images)
        log('[Experiment] computed PPCA on nearest neighbor images')

        reconstructed_test_images = ppca.inverse_transform(ppca.transform(test_images))
        reconstructed_perturbations = ppca.inverse_transform(ppca.transform(perturbations))
        reconstructed_pure_perturbations = ppca.inverse_transform(ppca.transform(pure_perturbations))
        
        #self.probabilities['test'] = ppca.marginal(test_images)
        #self.probabilities['perturbation'] = ppca.marginal(perturbations)
        #self.probabilities['true'] = ppca.marginal(pure_perturbations)

        self.distances['test'] = numpy.average(numpy.multiply(reconstructed_test_images - test_images, reconstructed_test_images - test_images), axis=1)
        self.distances['perturbation'] = numpy.average(numpy.multiply(reconstructed_perturbations - perturbations, reconstructed_perturbations - perturbations), axis=1)
        self.distances['true'] = numpy.average(numpy.multiply(reconstructed_pure_perturbations - pure_perturbations, reconstructed_pure_perturbations - pure_perturbations), axis=1)

        self.angles['test'] = numpy.rad2deg(common.numpy.angles(test_images.T, reconstructed_test_images.T))
        self.angles['perturbation'] = numpy.rad2deg(common.numpy.angles(reconstructed_perturbations.T, perturbations.T))
        self.angles['true'] = numpy.rad2deg(common.numpy.angles(reconstructed_pure_perturbations.T, pure_perturbations.T))

    def compute_local_pca(self):
        """
        Compute local PCA, fitting a separate PCA on the nearest neighbors of each perturbation.
        """

        success = numpy.logical_and(self.success >= 0, self.accuracy)
        log('[Detection] %d valid attacked samples' % numpy.sum(success))

        nearest_neighbor_images = self.nearest_neighbor_images.reshape(self.nearest_neighbor_images.shape[0], -1)
        nearest_neighbor_images = nearest_neighbor_images[:self.args.n_fit]

        perturbations = self.perturbations.reshape(self.perturbations.shape[0], -1)
        test_images = self.test_images.reshape(self.test_images.shape[0], -1)
        pure_perturbations = perturbations - test_images

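        # compute_nearest_neighbors is expected to return, per perturbation, the
        # indices of its nearest neighbors within nearest_neighbor_images.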
        nearest_neighbors_indices = self.compute_nearest_neighbors(perturbations)

        self.distances['true'] = numpy.zeros((success.shape[0]))
        self.distances['test'] = numpy.zeros((success.shape[0]))
        self.distances['perturbation'] = numpy.zeros((success.shape[0]))

        self.angles['true'] = numpy.zeros((success.shape[0]))
        self.angles['test'] = numpy.zeros((success.shape[0]))
        self.angles['perturbation'] = numpy.zeros((success.shape[0]))

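        # For every successfully attacked sample, fit an incremental PCA on its
        # nearest neighbors (plus the test image itself) and measure how well the
        # test image, the perturbed image and the pure perturbation are
        # reconstructed by this local subspace.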
        for n in range(pure_perturbations.shape[0]):
            if success[n]:
                nearest_neighbors = nearest_neighbor_images[nearest_neighbors_indices[n, :]]
                nearest_neighbors = numpy.concatenate((nearest_neighbors, test_images[n].reshape(1, -1)), axis=0)

                pca = sklearn.decomposition.IncrementalPCA(n_components=self.args.n_pca)
                pca.fit(nearest_neighbors)

                reconstructed_test_images = pca.inverse_transform(pca.transform(test_images[n].reshape(1, -1)))
                reconstructed_perturbations = pca.inverse_transform(pca.transform(perturbations[n].reshape(1, -1)))
                reconstructed_pure_perturbations = pca.inverse_transform(pca.transform(pure_perturbations[n].reshape(1, -1)))

                self.distances['test'][n] = numpy.average(numpy.multiply(reconstructed_test_images - test_images[n], reconstructed_test_images - test_images[n]), axis=1)
                self.distances['perturbation'][n] = numpy.average(numpy.multiply(reconstructed_perturbations - perturbations[n], reconstructed_perturbations - perturbations[n]), axis=1)
                self.distances['true'][n] = numpy.average(numpy.multiply(reconstructed_pure_perturbations - pure_perturbations[n], reconstructed_pure_perturbations - pure_perturbations[n]), axis=1)

                self.angles['test'][n] = numpy.rad2deg(common.numpy.angles(reconstructed_test_images.T, test_images[n].T))
                self.angles['perturbation'][n] = numpy.rad2deg(common.numpy.angles(reconstructed_perturbations.T, perturbations[n].T))
                self.angles['true'][n] = numpy.rad2deg(common.numpy.angles(reconstructed_pure_perturbations.T, pure_perturbations[n].T))

                log('[Detection] %d: true distance=%g angle=%g' % (n, self.distances['true'][n], self.angles['true'][n]))
                log('[Detection] %d: perturbation distance=%g angle=%g' % (n, self.distances['perturbation'][n], self.angles['perturbation'][n]))
                log('[Detection] %d: test distance=%g angle=%g' % (n, self.distances['test'][n], self.angles['test'][n]))

        self.distances['test'] = self.distances['test'][success]
        self.distances['perturbation'] = self.distances['perturbation'][success]
        self.distances['true'] = self.distances['true'][success]

        self.angles['test'] = self.angles['test'][success]
        self.angles['perturbation'] = self.angles['perturbation'][success]
        self.angles['true'] = self.angles['true'][success]

    def compute_nn(self, inclusive=False):
        """
        Compute distances and angles to the subspace spanned by the nearest neighbors of each perturbation.
        """

        success = numpy.logical_and(self.success >= 0, self.accuracy)
        log('[Detection] %d valid attacked samples' % numpy.sum(success))

        nearest_neighbor_images = self.nearest_neighbor_images.reshape(self.nearest_neighbor_images.shape[0], -1)
        perturbations = self.perturbations.reshape(self.perturbations.shape[0], -1)
        test_images = self.test_images.reshape(self.test_images.shape[0], -1)

        nearest_neighbors_indices = self.compute_nearest_neighbors(perturbations)
        pure_perturbations = perturbations - test_images
        log('[Detection] computed nearest neighbors for perturbations')

        self.distances['true'] = numpy.zeros((success.shape[0]))
        self.distances['test'] = numpy.zeros((success.shape[0]))
        self.distances['perturbation'] = numpy.zeros((success.shape[0]))

        self.angles['true'] = numpy.zeros((success.shape[0]))
        self.angles['test'] = numpy.zeros((success.shape[0]))
        self.angles['perturbation'] = numpy.zeros((success.shape[0]))

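        # For every successfully attacked sample, span a local subspace by its
        # nearest neighbors; in the inclusive case the test image is added to the
        # neighbors and used as origin, otherwise the neighbor mean is used.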
        for n in range(pure_perturbations.shape[0]):
            if success[n]:
                nearest_neighbors = nearest_neighbor_images[nearest_neighbors_indices[n, :]]

                if inclusive:
                    nearest_neighbors = numpy.concatenate((nearest_neighbors, test_images[n].reshape(1, -1)), axis=0)
                    nearest_neighbor_mean = test_images[n]
                else:
                    nearest_neighbor_mean = numpy.average(nearest_neighbors, axis=0)

                nearest_neighbor_basis = nearest_neighbors - nearest_neighbor_mean

                relative_perturbation = perturbations[n] - nearest_neighbor_mean
                relative_test_image = test_images[n] - nearest_neighbor_mean

                if inclusive:
                    assert numpy.allclose(relative_test_image, nearest_neighbor_basis[-1])

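                # Stack the pure perturbation, the perturbation relative to the
                # origin and the test image relative to the origin as column
                # vectors and project them orthogonally onto the span of the
                # centered nearest neighbors (project_orthogonal is assumed to
                # project columns onto the column space of its first argument).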
                nearest_neighbor_vectors = numpy.stack((
                    pure_perturbations[n],
                    relative_perturbation,
                    relative_test_image
                ), axis=1)

                nearest_neighbor_projections = common.numpy.project_orthogonal(nearest_neighbor_basis.T, nearest_neighbor_vectors)
                assert nearest_neighbor_vectors.shape[0] == nearest_neighbor_projections.shape[0]
                assert nearest_neighbor_vectors.shape[1] == nearest_neighbor_projections.shape[1]

                angles = numpy.rad2deg(common.numpy.angles(nearest_neighbor_vectors, nearest_neighbor_projections))
                distances = numpy.linalg.norm(nearest_neighbor_vectors - nearest_neighbor_projections, ord=2, axis=0)

                assert distances.shape[0] == 3
                assert angles.shape[0] == 3

                self.distances['true'][n] = distances[0]
                self.distances['perturbation'][n] = distances[1]
                self.distances['test'][n] = distances[2]

                self.angles['true'][n] = angles[0]
                self.angles['perturbation'][n] = angles[1]
                self.angles['test'][n] = angles[2]

                log('[Detection] %d: true distance=%g angle=%g' % (n, self.distances['true'][n], self.angles['true'][n]))
                log('[Detection] %d: perturbation distance=%g angle=%g' % (n, self.distances['perturbation'][n], self.angles['perturbation'][n]))
                log('[Detection] %d: test distance=%g angle=%g' % (n, self.distances['test'][n], self.angles['test'][n]))

        self.distances['true'] = self.distances['true'][success]
        self.distances['test'] = self.distances['test'][success]
        self.distances['perturbation'] = self.distances['perturbation'][success]

        self.angles['true'] = self.angles['true'][success]
        self.angles['test'] = self.angles['test'][success]
        self.angles['perturbation'] = self.angles['perturbation'][success]

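        # In the inclusive case the test image is part of the basis, so its
        # projection is exact; its distances and angles are zero by construction.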
        if inclusive:
            self.distances['test'][:] = 0
            self.angles['test'][:] = 0

    def compute_appr(self):
        """
        Compute approximate projections onto the learned manifold by optimizing the decoder's latent codes.
        """

        assert self.test_codes is not None
        num_batches = int(math.ceil(self.perturbations.shape[0] / self.args.batch_size))

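        # For each batch, optimize the latent codes theta with Adam such that the
        # decoder output matches the perturbed images (first loop) and the clean
        # test images (second loop); the decoded results are the approximate
        # projections onto the learned manifold.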
        for b in range(num_batches):
            b_start = b * self.args.batch_size
            b_end = min((b + 1) * self.args.batch_size, self.perturbations.shape[0])

            batch_classes = common.torch.as_variable(self.test_codes[b_start: b_end], self.args.use_gpu)
            batch_theta = common.torch.as_variable(self.test_theta[b_start: b_end].astype(numpy.float32), self.args.use_gpu, True)
            batch_perturbation = common.torch.as_variable(self.perturbations[b_start: b_end].astype(numpy.float32), self.args.use_gpu)

            if isinstance(self.model, models.SelectiveDecoder):
                self.model.set_code(batch_classes)
            batch_theta = torch.nn.Parameter(batch_theta)
            optimizer = torch.optim.Adam([batch_theta], lr=0.1)

            log('[Detection] %d: start' % b)
            for t in range(100):
                optimizer.zero_grad()
                output_perturbation = self.model.forward(batch_theta)
                error = torch.mean(torch.mul(output_perturbation - batch_perturbation, output_perturbation - batch_perturbation))
                error.backward()
                optimizer.step()

                log('[Detection] %d: %d = %g' % (b, t, error.item()))

            output_perturbation = numpy.squeeze(output_perturbation.cpu().detach().numpy())
            self.projected_perturbations = common.numpy.concatenate(self.projected_perturbations, output_perturbation)

            batch_theta = common.torch.as_variable(self.test_theta[b_start: b_end].astype(numpy.float32), self.args.use_gpu, True)
            batch_images = common.torch.as_variable(self.test_images[b_start: b_end].astype(numpy.float32), self.args.use_gpu)

            batch_theta = torch.nn.Parameter(batch_theta)
            optimizer = torch.optim.Adam([batch_theta], lr=0.5)

            log('[Detection] %d: start' % b)
            for t in range(100):
                optimizer.zero_grad()
                output_images = self.model.forward(batch_theta)
                error = torch.mean(torch.mul(output_images - batch_images, output_images - batch_images))
                error.backward()
                optimizer.step()

                log('[Detection] %d: %d = %g' % (b, t, error.item()))

            output_images = numpy.squeeze(output_images.cpu().detach().numpy())
            self.projected_test_images = common.numpy.concatenate(self.projected_test_images, output_images)

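        # Distance to the approximate manifold: L2 norm of the difference between
        # each sample and its projection, plus the angle between them.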
        projected_perturbations = self.projected_perturbations.reshape((self.projected_perturbations.shape[0], -1))
        projected_test_images = self.projected_test_images.reshape((self.projected_test_images.shape[0], -1))

        perturbations = self.perturbations.reshape((self.perturbations.shape[0], -1))
        test_images = self.test_images.reshape((self.test_images.shape[0], -1))

        success = numpy.logical_and(self.success >= 0, self.accuracy)
        log('[Detection] %d valid attacked samples' % numpy.sum(success))

        self.distances['true'] = numpy.linalg.norm(perturbations - projected_perturbations, ord=2, axis=1)
        self.angles['true'] = numpy.rad2deg(common.numpy.angles(perturbations.T, projected_perturbations.T))

        self.distances['true'] = self.distances['true'][success]
        self.angles['true'] = self.angles['true'][success]

        self.distances['test'] = numpy.linalg.norm(test_images - projected_test_images, ord=2, axis=1)
        self.angles['test'] = numpy.rad2deg(common.numpy.angles(test_images.T, projected_test_images.T))

        self.distances['test'] = self.distances['test'][success]
        self.angles['test'] = self.angles['test'][success]

    def compute_true(self):
        """
        Compute projections onto the true manifold using the decoder conditioned on the known font/class codes.
        """

        assert self.test_codes is not None
        num_batches = int(math.ceil(self.perturbations.shape[0] / self.args.batch_size))

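        # Learning rate schedule for the project-specific ADAMScheduler used
        # below (assumed to wrap torch.optim.Adam with step-wise learning rate
        # decay).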
        params = {
            'lr': 0.09,
            'lr_decay': 0.95,
            'lr_min': 0.0000001,
            'weight_decay': 0,
        }

        for b in range(num_batches):
            b_start = b * self.args.batch_size
            b_end = min((b + 1) * self.args.batch_size, self.perturbations.shape[0])

            batch_fonts = self.test_codes[b_start: b_end, 1]
            batch_classes = self.test_codes[b_start: b_end, 2]
            batch_code = numpy.concatenate((common.numpy.one_hot(batch_fonts, self.N_font), common.numpy.one_hot(batch_classes, self.N_class)), axis=1).astype(numpy.float32)
            batch_code = common.torch.as_variable(batch_code, self.args.use_gpu)

            batch_images = common.torch.as_variable(self.test_images[b_start: b_end], self.args.use_gpu)
            batch_images = batch_images.permute(0, 3, 1, 2)

            batch_theta = common.torch.as_variable(self.test_theta[b_start: b_end].astype(numpy.float32), self.args.use_gpu, True)
            batch_perturbation = common.torch.as_variable(self.perturbations[b_start: b_end].astype(numpy.float32), self.args.use_gpu)

            self.model.set_code(batch_code)

            #output_images = self.model.forward(batch_theta)
            #test_error = torch.mean(torch.mul(output_images - batch_images, output_images - batch_images))
            #print(test_error.item())
            #vis.mosaic('true.png', batch_images.cpu().detach().numpy()[:, 0, :, :])
            #vis.mosaic('output.png', output_images.cpu().detach().numpy()[:, 0, :, :])
            # print(batch_images.cpu().detach().numpy()[0])
            # print(output_images.cpu().detach().numpy()[0, 0])

            #_batch_images = batch_images.cpu().detach().numpy()
            #_output_images = output_images.cpu().detach().numpy()[:, 0, :, :]
            #test_error = numpy.max(numpy.abs(_batch_images.reshape(_batch_images.shape[0], -1) - _output_images.reshape(_output_images.shape[0], -1)), axis=1)
            #print(test_error)
            #test_error = numpy.mean(numpy.multiply(_batch_images - _output_images, _batch_images - _output_images), axis=1)
            #print(test_error)

            batch_theta = torch.nn.Parameter(batch_theta)
            scheduler = ADAMScheduler([batch_theta], **params)

            log('[Detection] %d: start' % b)
            for t in range(100):
                scheduler.update(t//10, float(t)/10)
                scheduler.optimizer.zero_grad()
                output_perturbation = self.model.forward(batch_theta)
                error = torch.mean(torch.mul(output_perturbation - batch_perturbation, output_perturbation - batch_perturbation))
                test_error = torch.mean(torch.mul(output_perturbation - batch_images, output_perturbation - batch_images))
                error.backward()
                scheduler.optimizer.step()

                log('[Detection] %d: %d = %g, %g' % (b, t, error.item(), test_error.item()))

            output_perturbation = numpy.squeeze(numpy.transpose(output_perturbation.cpu().detach().numpy(), (0, 2, 3, 1)))
            self.projected_perturbations = common.numpy.concatenate(self.projected_perturbations, output_perturbation)

        projected_perturbations = self.projected_perturbations.reshape((self.projected_perturbations.shape[0], -1))
        perturbations = self.perturbations.reshape((self.perturbations.shape[0], -1))

        success = numpy.logical_and(self.success >= 0, self.accuracy)
        log('[Detection] %d valid attacked samples' % numpy.sum(success))

        self.distances['true'] = numpy.linalg.norm(perturbations - projected_perturbations, ord=2, axis=1)
        self.angles['true'] = numpy.rad2deg(common.numpy.angles(perturbations.T, projected_perturbations.T))

        self.distances['true'] = self.distances['true'][success]
        self.angles['true'] = self.angles['true'][success]

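        # Test images are treated as lying on the true manifold here, so their
        # distances and angles are simply set to zero.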
        self.distances['test'] = numpy.zeros((numpy.sum(success)))
        self.angles['test'] = numpy.zeros((numpy.sum(success)))