Exemplo n.º 1
0
    def test_loss_sensitivity(self):
        """Loss sensitivity of a briefly-trained MNIST CNN is non-negative."""
        # Load the MNIST training split and truncate it to the test budget.
        (x_train, y_train), (_, _), _, _ = load_mnist()
        x_train = x_train[:NB_TRAIN]
        y_train = y_train[:NB_TRAIN]

        # Build the Keras CNN classifier and train it for two quick epochs.
        classifier = self._cnn_mnist_k([28, 28, 1])
        classifier.fit(x_train, y_train, batch_size=BATCH_SIZE, nb_epochs=2)

        # The metric is a norm-based quantity, so it can never be negative.
        sensitivity = loss_sensitivity(classifier, x_train, y_train)
        self.assertGreaterEqual(sensitivity, 0)
Exemplo n.º 2
0
    def evaluate_robust(self, data_loader):
        """Return the size-weighted mean loss sensitivity over one batch
        drawn from ``data_loader``.

        Only the first batch yielded by the loader is evaluated; it is
        split internally into chunks of at most 256 examples.
        """
        # Draw a single batch from the loader and convert to NumPy arrays.
        examples, labels = next(iter(data_loader))
        examples = examples.cpu().numpy()
        labels = labels.cpu().numpy()
        # One-hot encode the integer class labels.
        labels_one_hot = np.eye(self.nb_classes)[labels]

        batch_size = 256
        num_examples = examples.shape[0]

        weighted_losses = []
        # Stride over the data in chunks of at most `batch_size` examples.
        for start in range(0, num_examples, batch_size):
            stop = start + batch_size
            chunk = examples[start:stop]
            chunk_labels = labels_one_hot[start:stop]

            loss = loss_sensitivity(self.classifier, chunk, chunk_labels)
            # Weight by chunk size so the final average is over examples,
            # not over chunks (the last chunk may be smaller).
            weighted_losses.append(loss * chunk.shape[0])

        return sum(weighted_losses) / num_examples