    def _test_backend_mnist(self, classifier, x_test, y_test):
        x_test_original = x_test.copy()

        df = VirtualAdversarialMethod(classifier, batch_size=100)

        from art.classifiers import TensorFlowClassifier
        if isinstance(classifier, TensorFlowClassifier):
            with self.assertRaises(TypeError) as context:
                x_test_adv = df.generate(x_test)

            self.assertIn('This attack requires a classifier predicting probabilities in the range [0, 1] as output.'
                          'Values smaller than 0.0 or larger than 1.0 have been detected.', str(context.exception))
        else:
            x_test_adv = df.generate(x_test)

            self.assertFalse((x_test == x_test_adv).all())

            y_pred = get_labels_np_array(classifier.predict(x_test_adv))
            self.assertFalse((y_test == y_pred).all())

            acc = np.sum(np.argmax(y_pred, axis=1) == np.argmax(y_test, axis=1)) / y_test.shape[0]
            logger.info('Accuracy on adversarial examples: %.2f%%', (acc * 100))

            # Check that x_test has not been modified by attack and classifier
            self.assertAlmostEqual(float(np.max(np.abs(x_test_original - x_test))), 0.0, delta=0.00001)
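# Illustrative sketch, not part of the original example: VirtualAdversarialMethod
# only accepts classifiers whose predictions are probabilities in [0, 1], which is
# why the TensorFlowClassifier branch above expects a TypeError. One way to satisfy
# the requirement, assuming the pre-1.3 ART API used in these examples, is to wrap
# a softmax-output Keras model in KerasClassifier:
import numpy as np
import tensorflow as tf
from art.attacks import VirtualAdversarialMethod
from art.classifiers import KerasClassifier

tf.compat.v1.disable_eager_execution()  # older ART versions expect graph-mode Keras

model = tf.keras.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28, 1)),
    tf.keras.layers.Dense(128, activation="relu"),
    tf.keras.layers.Dense(10, activation="softmax"),  # probabilities in [0, 1]
])
model.compile(optimizer="adam", loss="categorical_crossentropy")

# use_logits=False declares that the model already outputs probabilities
classifier = KerasClassifier(model=model, use_logits=False, clip_values=(0.0, 1.0))
attack = VirtualAdversarialMethod(classifier, eps=0.1, max_iter=2, batch_size=100)
x_adv = attack.generate(np.random.rand(100, 28, 28, 1).astype(np.float32))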
Example #2
    def _test_backend_mnist(self, classifier):
        # Get MNIST
        (_, _), (x_test, y_test) = self.mnist
        x_test, y_test = x_test[:NB_TEST], y_test[:NB_TEST]

        df = VirtualAdversarialMethod(classifier, batch_size=100)

        from art.classifiers import TensorFlowClassifier
        if isinstance(classifier, TensorFlowClassifier):
            with self.assertRaises(TypeError) as context:
                x_test_adv = df.generate(x_test)

            self.assertIn(
                'This attack requires a classifier predicting probabilities in the range [0, 1] as output.'
                'Values smaller than 0.0 or larger than 1.0 have been detected.',
                str(context.exception))
        else:
            x_test_adv = df.generate(x_test)

            self.assertFalse((x_test == x_test_adv).all())

            y_pred = get_labels_np_array(classifier.predict(x_test_adv))
            self.assertFalse((y_test == y_pred).all())

            acc = np.sum(np.argmax(y_pred, axis=1) == np.argmax(y_test, axis=1)) / y_test.shape[0]
            logging.info('Accuracy on adversarial examples: %.2f%%', (acc * 100))

    def test_pytorch_iris(self):
        (_, _), (x_test, y_test) = self.iris
        classifier = get_iris_classifier_pt()

        attack = VirtualAdversarialMethod(classifier, eps=.1)

        with self.assertRaises(TypeError) as context:
            x_test_adv = attack.generate(x_test.astype(np.float32))

        self.assertIn('This attack requires a classifier predicting probabilities in the range [0, 1] as output.'
                      'Values smaller than 0.0 or larger than 1.0 have been detected.', str(context.exception))

    def test_tensorflow_iris(self):
        classifier, _ = get_tabular_classifier_tf()

        attack = VirtualAdversarialMethod(classifier, eps=0.1)

        with self.assertRaises(TypeError) as context:
            x_test_iris_adv = attack.generate(self.x_test_iris)

        self.assertIn(
            "This attack requires a classifier predicting probabilities in the range [0, 1] as output."
            "Values smaller than 0.0 or larger than 1.0 have been detected.",
            str(context.exception),
        )

    def test_keras_iris_clipped(self):
        classifier = get_tabular_classifier_kr()

        # Test untargeted attack
        attack = VirtualAdversarialMethod(classifier, eps=0.1)
        x_test_iris_adv = attack.generate(self.x_test_iris)
        self.assertFalse((self.x_test_iris == x_test_iris_adv).all())
        self.assertTrue((x_test_iris_adv <= 1).all())
        self.assertTrue((x_test_iris_adv >= 0).all())

        preds_adv = np.argmax(classifier.predict(x_test_iris_adv), axis=1)
        self.assertFalse((np.argmax(self.y_test_iris, axis=1) == preds_adv).all())
        acc = np.sum(preds_adv == np.argmax(self.y_test_iris, axis=1)) / self.y_test_iris.shape[0]
        logger.info("Accuracy on Iris with VAT adversarial examples: %.2f%%", (acc * 100))

    def test_keras_iris_unbounded(self):
        classifier = get_tabular_classifier_kr()

        # Recreate a classifier without clip values
        classifier = KerasClassifier(model=classifier._model, use_logits=False, channel_index=1)
        attack = VirtualAdversarialMethod(classifier, eps=1)
        x_test_iris_adv = attack.generate(self.x_test_iris)
        self.assertFalse((self.x_test_iris == x_test_iris_adv).all())
        self.assertTrue((x_test_iris_adv > 1).any())
        self.assertTrue((x_test_iris_adv < 0).any())

        preds_adv = np.argmax(classifier.predict(x_test_iris_adv), axis=1)
        self.assertFalse((np.argmax(self.y_test_iris, axis=1) == preds_adv).all())
        acc = np.sum(preds_adv == np.argmax(self.y_test_iris, axis=1)) / self.y_test_iris.shape[0]
        logger.info("Accuracy on Iris with VAT adversarial examples: %.2f%%", (acc * 100))

    def test_keras_iris_clipped(self):
        (_, _), (x_test, y_test) = self.iris
        classifier = get_iris_classifier_kr()

        # Test untargeted attack
        attack = VirtualAdversarialMethod(classifier, eps=.1)
        x_test_adv = attack.generate(x_test)
        self.assertFalse((x_test == x_test_adv).all())
        self.assertTrue((x_test_adv <= 1).all())
        self.assertTrue((x_test_adv >= 0).all())

        preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1)
        self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all())
        acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0]
        logger.info('Accuracy on Iris with VAT adversarial examples: %.2f%%', (acc * 100))

    def _test_backend_mnist(self, classifier, x_test, y_test):
        x_test_original = x_test.copy()

        df = VirtualAdversarialMethod(classifier, batch_size=100, max_iter=2)

        x_test_adv = df.generate(x_test)

        self.assertFalse((x_test == x_test_adv).all())

        y_pred = get_labels_np_array(classifier.predict(x_test_adv))
        self.assertFalse((y_test == y_pred).all())

        acc = np.sum(np.argmax(y_pred, axis=1) == np.argmax(y_test, axis=1)) / y_test.shape[0]
        logger.info("Accuracy on adversarial examples: %.2f%%", (acc * 100))

        # Check that x_test has not been modified by attack and classifier
        self.assertAlmostEqual(float(np.max(np.abs(x_test_original - x_test))), 0.0, delta=0.00001)
Example #9
import time

# Imports assumed for this example (pre-1.3 `art.attacks` layout, matching the
# `art.classifiers` imports used in the examples above):
from art.attacks import (Attack, AdversarialPatch, BasicIterativeMethod, BoundaryAttack,
                         CarliniL2Method, CarliniLInfMethod, DeepFool, ElasticNet,
                         FastGradientMethod, HopSkipJump, NewtonFool,
                         ProjectedGradientDescent, SaliencyMapMethod,
                         UniversalPerturbation, VirtualAdversarialMethod, ZooAttack)


def GetAttackers(classifier, x_test, attacker_name):
    """
    Build the attack named by `attacker_name` for the given classifier and
    generate adversarial samples for `x_test`.

    Returns the adversarial samples and the elapsed time in seconds.
    """
    t_start = time.time()
    if attacker_name == "FGSM":
        attacker = FastGradientMethod(classifier=classifier, eps=0.3)
    elif attacker_name == "Elastic":
        attacker = ElasticNet(classifier=classifier, confidence=0.5)
    elif attacker_name == "BasicIterativeMethod":
        attacker = BasicIterativeMethod(classifier=classifier, eps=0.3)
    elif attacker_name == "NewtonFool":
        attacker = NewtonFool(classifier=classifier, max_iter=20)
    elif attacker_name == "HopSkipJump":
        attacker = HopSkipJump(classifier=classifier, max_iter=20)
    elif attacker_name == "ZooAttack":
        attacker = ZooAttack(classifier=classifier, max_iter=20)
    elif attacker_name == "VirtualAdversarialMethod":
        attacker = VirtualAdversarialMethod(classifier=classifier, max_iter=20)
    elif attacker_name == "UniversalPerturbation":
        attacker = UniversalPerturbation(classifier=classifier, max_iter=20)
    elif attacker_name == "AdversarialPatch":
        attacker = AdversarialPatch(classifier=classifier, max_iter=20)
    elif attacker_name == "Attack":
        attacker = Attack(classifier=classifier)
    elif attacker_name == "BoundaryAttack":
        attacker = BoundaryAttack(classifier=classifier,
                                  targeted=False,
                                  epsilon=0.05,
                                  max_iter=20)  #, max_iter=20
    elif attacker_name == "CarliniL2":
        attacker = CarliniL2Method(classifier=classifier,
                                   confidence=0.5,
                                   learning_rate=0.001,
                                   max_iter=15)
    elif attacker_name == "CarliniLinf":
        attacker = CarliniLInfMethod(classifier=classifier,
                                     confidence=0.5,
                                     learning_rate=0.001,
                                     max_iter=15)
    elif attacker_name == "DeepFool":
        attacker = DeepFool(classifier)
    elif attacker_name == "SMM":
        attacker = SaliencyMapMethod(classifier=classifier, theta=2)
    elif attacker_name == "PGD":
        attacker = ProjectedGradientDescent(classifier=classifier,
                                            norm=2,
                                            eps=1,
                                            eps_step=0.5)
    else:
        raise ValueError("Unrecognized attacker name: {}".format(attacker_name))
    test_adv = attacker.generate(x_test)
    dt = time.time() - t_start
    return test_adv, dt
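# Usage sketch, not from the original source; the classifier and data below are
# illustrative stand-ins (a tiny softmax Keras model on Iris-shaped inputs).
import numpy as np
import tensorflow as tf
from art.classifiers import KerasClassifier

tf.compat.v1.disable_eager_execution()  # older ART versions expect graph-mode Keras

iris_model = tf.keras.Sequential([
    tf.keras.layers.Dense(10, activation="relu", input_shape=(4,)),
    tf.keras.layers.Dense(3, activation="softmax"),
])
iris_model.compile(optimizer="adam", loss="categorical_crossentropy")
iris_classifier = KerasClassifier(model=iris_model, use_logits=False, clip_values=(0.0, 1.0))

x_test = np.random.rand(16, 4).astype(np.float32)
x_test_adv, elapsed = GetAttackers(iris_classifier, x_test, "FGSM")
print("FGSM generated %d adversarial samples in %.2f s" % (len(x_test_adv), elapsed))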

    def test_classifier_type_check_fail_classifier(self):
        # Use a useless test classifier to test basic classifier properties
        class ClassifierNoAPI:
            pass

        classifier = ClassifierNoAPI
        with self.assertRaises(TypeError) as context:
            _ = VirtualAdversarialMethod(classifier=classifier)

        self.assertIn('For `VirtualAdversarialMethod` classifier must be an instance of '
                      '`art.classifiers.classifier.Classifier`, the provided classifier is instance of '
                      '(<class \'object\'>,).', str(context.exception))

    def test_classifier_type_check_fail_gradients(self):
        # Use a test classifier not providing gradients required by white-box attack
        from art.classifiers.scikitlearn import ScikitlearnDecisionTreeClassifier
        from sklearn.tree import DecisionTreeClassifier

        classifier = ScikitlearnDecisionTreeClassifier(model=DecisionTreeClassifier())
        with self.assertRaises(TypeError) as context:
            _ = VirtualAdversarialMethod(classifier=classifier)

        self.assertIn('For `VirtualAdversarialMethod` classifier must be an instance of '
                      '`art.classifiers.classifier.ClassifierNeuralNetwork` and '
                      '`art.classifiers.classifier.ClassifierGradients`, the provided classifier is instance of '
                      '(<class \'art.classifiers.scikitlearn.ScikitlearnClassifier\'>,).', str(context.exception))