def test_preprocessing(self):
        # Get MNIST
        (_, _), (x_test, _) = self._mnist

        # Create classifier
        classifier = MXClassifier((0, 1), self._model, (1, 28, 28), 10,
                                  self._trainer)
        classifier_preproc = MXClassifier((0, 1),
                                          self._model, (1, 28, 28),
                                          10,
                                          self._trainer,
                                          preprocessing=(1, 2))

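        # preprocessing=(1, 2) makes the second classifier subtract 1 and divide by 2
        # internally, so scaling the input by hand for the plain classifier should
        # yield identical predictions.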
        preds = classifier.predict((x_test - 1.) / 2)
        preds_preproc = classifier_preproc.predict(x_test)
        self.assertEqual(np.sum(preds - preds_preproc), 0)

    def test_preprocessing(self):
        # Get MNIST
        (_, _), (x_test, _) = self.mnist

        # Create classifier
        classifier_preproc = MXClassifier(model=self.classifier._model, clip_values=(0, 1), input_shape=(1, 28, 28),
                                          nb_classes=10, optimizer=self.classifier._optimizer, preprocessing=(1, 2))

        preds = self.classifier.predict((x_test - 1.) / 2)
        preds_preproc = classifier_preproc.predict(x_test)
        self.assertEqual(np.sum(preds - preds_preproc), 0)

    def test_fit_predict(self):
        (x_train, y_train), (x_test, y_test) = self._mnist

        # Fit classifier
        classifier = MXClassifier((0, 1), self._model, (1, 28, 28), 10,
                                  self._trainer)
        classifier.fit(x_train, y_train, batch_size=128, nb_epochs=2)

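        # Accuracy on the clean test set should be well above chance (10 classes).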
        preds = classifier.predict(x_test)
        acc = np.sum(np.argmax(preds, axis=1) == np.argmax(y_test, axis=1)) / len(y_test)
        print("\nAccuracy: %.2f%%" % (acc * 100))
        self.assertGreater(acc, 0.1)
Example #4
    def test_preprocessing(self):
        # Create classifier
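        # This variant passes the Gluon loss explicitly, which newer MXClassifier
        # signatures expect (the earlier examples use older constructors without it).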
        loss = gluon.loss.SoftmaxCrossEntropyLoss()
        classifier_preproc = MXClassifier(
            model=self.classifier._model,
            loss=loss,
            clip_values=(0, 1),
            input_shape=(1, 28, 28),
            nb_classes=10,
            optimizer=self.classifier._optimizer,
            preprocessing=(1, 2),
        )

        preds = self.classifier.predict((self.x_test_mnist - 1.0) / 2)
        preds_preproc = classifier_preproc.predict(self.x_test_mnist)
        self.assertEqual(np.sum(preds - preds_preproc), 0)
Example #5
# Step 3: Create the ART classifier
# (`model`, `min_pixel_value` and `max_pixel_value` are assumed to be defined in the
#  earlier, omitted data-loading and model-definition steps)
classifier = MXClassifier(model=model,
                          clip_values=(min_pixel_value, max_pixel_value),
                          loss=loss,
                          input_shape=(28, 28, 1),
                          nb_classes=10,
                          optimizer=trainer,
                          ctx=None,
                          channel_index=1,
                          defences=None,
                          preprocessing=(0, 1))
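# preprocessing=(0, 1) means "subtract 0, divide by 1", i.e. inputs are used unchanged.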

# Step 4: Train the ART classifier

classifier.fit(x_train, y_train, batch_size=64, nb_epochs=3)

# Step 5: Evaluate the ART classifier on benign test examples

predictions = classifier.predict(x_test)
accuracy = np.sum(
    np.argmax(predictions, axis=1) == np.argmax(y_test, axis=1)) / len(y_test)
print('Accuracy on benign test examples: {}%'.format(accuracy * 100))

# Step 6: Generate adversarial test examples
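# eps is the attack budget: the maximum perturbation FGSM may add to each input feature.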
attack = FastGradientMethod(classifier=classifier, eps=0.2)
x_test_adv = attack.generate(x=x_test)

# Step 7: Evaluate the ART classifier on adversarial test examples

predictions = classifier.predict(x_test_adv)
accuracy = np.sum(
    np.argmax(predictions, axis=1) == np.argmax(y_test, axis=1)) / len(y_test)
print('Accuracy on adversarial test examples: {}%'.format(accuracy * 100))