def _test_backend_mnist(self, classifier, x_test, y_test):
        x_test_original = x_test.copy()

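        # Build the VAT attack; batch_size controls how many samples are processed at a time.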
        df = VirtualAdversarialMethod(classifier, batch_size=100)

        from art.classifiers import TensorFlowClassifier
        if isinstance(classifier, TensorFlowClassifier):
            with self.assertRaises(TypeError) as context:
                x_test_adv = df.generate(x_test)

            self.assertIn('This attack requires a classifier predicting probabilities in the range [0, 1] as output.'
                          'Values smaller than 0.0 or larger than 1.0 have been detected.', str(context.exception))
        else:
            x_test_adv = df.generate(x_test)

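            # The attack should perturb at least some samples and change at least some predicted labels.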
            self.assertFalse((x_test == x_test_adv).all())

            y_pred = get_labels_np_array(classifier.predict(x_test_adv))
            self.assertFalse((y_test == y_pred).all())

            acc = np.sum(np.argmax(y_pred, axis=1) == np.argmax(y_test, axis=1)) / y_test.shape[0]
            logger.info('Accuracy on adversarial examples: %.2f%%', (acc * 100))

            # Check that x_test has not been modified by attack and classifier
            self.assertAlmostEqual(float(np.max(np.abs(x_test_original - x_test))), 0.0, delta=0.00001)

    def _test_backend_mnist(self, classifier):
        # Get MNIST
        (_, _), (x_test, y_test) = self.mnist
        x_test, y_test = x_test[:NB_TEST], y_test[:NB_TEST]

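        # Instantiate the attack; classifiers whose outputs fall outside [0, 1]
        # (the TensorFlow one here) are rejected below with a TypeError.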
        df = VirtualAdversarialMethod(classifier, batch_size=100)

        from art.classifiers import TensorFlowClassifier
        if isinstance(classifier, TensorFlowClassifier):
            with self.assertRaises(TypeError) as context:
                x_test_adv = df.generate(x_test)

            self.assertIn(
                'This attack requires a classifier predicting probabilities in the range [0, 1] as output.'
                'Values smaller than 0.0 or larger than 1.0 have been detected.',
                str(context.exception))
        else:
            x_test_adv = df.generate(x_test)

            self.assertFalse((x_test == x_test_adv).all())

            y_pred = get_labels_np_array(classifier.predict(x_test_adv))
            self.assertFalse((y_test == y_pred).all())

            acc = np.sum(np.argmax(y_pred, axis=1) == np.argmax(y_test, axis=1)) / y_test.shape[0]
            logging.info('Accuracy on adversarial examples: %.2f%%', (acc * 100))

    def test_pytorch_iris(self):
        (_, _), (x_test, y_test) = self.iris
        classifier = get_iris_classifier_pt()

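        # The PyTorch Iris classifier produces outputs outside [0, 1],
        # so generate() is expected to raise a TypeError.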
        attack = VirtualAdversarialMethod(classifier, eps=.1)

        with self.assertRaises(TypeError) as context:
            x_test_adv = attack.generate(x_test.astype(np.float32))

        self.assertIn('This attack requires a classifier predicting probabilities in the range [0, 1] as output.'
                      'Values smaller than 0.0 or larger than 1.0 have been detected.', str(context.exception))

    def test_tensorflow_iris(self):
        classifier, _ = get_tabular_classifier_tf()

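        # As with the PyTorch classifier above, the TensorFlow Iris classifier's
        # outputs fall outside [0, 1], so generate() should raise a TypeError.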
        attack = VirtualAdversarialMethod(classifier, eps=0.1)

        with self.assertRaises(TypeError) as context:
            x_test_iris_adv = attack.generate(self.x_test_iris)

        self.assertIn(
            "This attack requires a classifier predicting probabilities in the range [0, 1] as output."
            "Values smaller than 0.0 or larger than 1.0 have been detected.",
            str(context.exception),
        )

    def test_keras_iris_clipped(self):
        classifier = get_tabular_classifier_kr()

        # Test untargeted attack
        attack = VirtualAdversarialMethod(classifier, eps=0.1)
        x_test_iris_adv = attack.generate(self.x_test_iris)
        self.assertFalse((self.x_test_iris == x_test_iris_adv).all())
        self.assertTrue((x_test_iris_adv <= 1).all())
        self.assertTrue((x_test_iris_adv >= 0).all())

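        # The attack should flip the predicted class for at least some samples.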
        preds_adv = np.argmax(classifier.predict(x_test_iris_adv), axis=1)
        self.assertFalse((np.argmax(self.y_test_iris, axis=1) == preds_adv).all())
        acc = np.sum(preds_adv == np.argmax(self.y_test_iris, axis=1)) / self.y_test_iris.shape[0]
        logger.info("Accuracy on Iris with VAT adversarial examples: %.2f%%", (acc * 100))
    def test_keras_iris_unbounded(self):
        classifier = get_tabular_classifier_kr()

        # Recreate a classifier without clip values
        classifier = KerasClassifier(model=classifier._model, use_logits=False, channel_index=1)
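        # Without clip_values and with eps=1, the perturbed samples are expected
        # to leave the [0, 1] range, as asserted below.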
        attack = VirtualAdversarialMethod(classifier, eps=1)
        x_test_iris_adv = attack.generate(self.x_test_iris)
        self.assertFalse((self.x_test_iris == x_test_iris_adv).all())
        self.assertTrue((x_test_iris_adv > 1).any())
        self.assertTrue((x_test_iris_adv < 0).any())

        preds_adv = np.argmax(classifier.predict(x_test_iris_adv), axis=1)
        self.assertFalse((np.argmax(self.y_test_iris, axis=1) == preds_adv).all())
        acc = np.sum(preds_adv == np.argmax(self.y_test_iris, axis=1)) / self.y_test_iris.shape[0]
        logger.info("Accuracy on Iris with VAT adversarial examples: %.2f%%", (acc * 100))
    def test_keras_iris_clipped(self):
        (_, _), (x_test, y_test) = self.iris
        classifier = get_iris_classifier_kr()

        # Test untargeted attack
        attack = VirtualAdversarialMethod(classifier, eps=.1)
        x_test_adv = attack.generate(x_test)
        self.assertFalse((x_test == x_test_adv).all())
        self.assertTrue((x_test_adv <= 1).all())
        self.assertTrue((x_test_adv >= 0).all())

        preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1)
        self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all())
        acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0]
        logger.info('Accuracy on Iris with VAT adversarial examples: %.2f%%', (acc * 100))

    def _test_backend_mnist(self, classifier, x_test, y_test):
        x_test_original = x_test.copy()

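        # Limit the attack to two iterations; batch_size only controls how many samples are processed at once.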
        df = VirtualAdversarialMethod(classifier, batch_size=100, max_iter=2)

        x_test_adv = df.generate(x_test)

        self.assertFalse((x_test == x_test_adv).all())

        y_pred = get_labels_np_array(classifier.predict(x_test_adv))
        self.assertFalse((y_test == y_pred).all())

        acc = np.sum(np.argmax(y_pred, axis=1) == np.argmax(y_test, axis=1)) / y_test.shape[0]
        logger.info("Accuracy on adversarial examples: %.2f%%", (acc * 100))

        # Check that x_test has not been modified by attack and classifier
        self.assertAlmostEqual(float(np.max(np.abs(x_test_original - x_test))), 0.0, delta=0.00001)
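

# ---------------------------------------------------------------------------
# Hedged standalone sketch (not part of the original test suite): a minimal
# end-to-end run of VirtualAdversarialMethod on random data. The import paths
# mirror the older ART API used above (art.classifiers / art.attacks), and the
# clip_values keyword is an assumption; both may differ in other ART versions,
# and keras may need to be replaced by tensorflow.keras.
# ---------------------------------------------------------------------------
def _demo_virtual_adversarial_method():
    import numpy as np
    from keras.models import Sequential
    from keras.layers import Dense
    from art.classifiers import KerasClassifier
    from art.attacks import VirtualAdversarialMethod

    # Tiny softmax model so the wrapped classifier outputs probabilities in
    # [0, 1]; VAT raises a TypeError otherwise (see the assertions above).
    model = Sequential([
        Dense(16, activation='relu', input_shape=(4,)),
        Dense(3, activation='softmax'),
    ])
    model.compile(loss='categorical_crossentropy', optimizer='adam')

    classifier = KerasClassifier(model=model, use_logits=False, clip_values=(0, 1))
    attack = VirtualAdversarialMethod(classifier, eps=0.1, max_iter=2, batch_size=16)

    x = np.random.uniform(0, 1, size=(32, 4)).astype(np.float32)
    x_adv = attack.generate(x)

    # With clip_values set, adversarial samples are expected to stay in [0, 1],
    # as asserted in test_keras_iris_clipped above.
    assert x_adv.shape == x.shape
    assert (x_adv >= 0).all() and (x_adv <= 1).all()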