def test_4_pytorch_iris(self):
    """A PyTorch classifier emitting values outside [0, 1] must be rejected with a TypeError."""
    classifier = get_tabular_classifier_pt()
    attack = VirtualAdversarialMethod(classifier, eps=0.1, verbose=False)

    with self.assertRaises(TypeError) as context:
        _ = attack.generate(self.x_test_iris.astype(np.float32))

    # NOTE: the expected message is reproduced exactly as raised by the attack,
    # including the missing space between the two sentences.
    expected_message = (
        "This attack requires a classifier predicting probabilities in the range [0, 1] as output."
        "Values smaller than 0.0 or larger than 1.0 have been detected."
    )
    self.assertIn(expected_message, str(context.exception))
def test_tensorflow_iris(self):
    """A TensorFlow classifier emitting values outside [0, 1] must be rejected with a TypeError.

    :raises AssertionError: If the attack does not raise the expected ``TypeError``.
    """
    classifier, _ = get_tabular_classifier_tf()
    # verbose=False added for consistency with the sibling tests and to keep
    # the attack's progress bar out of the test logs.
    attack = VirtualAdversarialMethod(classifier, eps=0.1, verbose=False)

    with self.assertRaises(TypeError) as context:
        _ = attack.generate(self.x_test_iris)

    # The expected message matches the attack's raised text byte-for-byte,
    # including the missing space between the two sentences.
    self.assertIn(
        "This attack requires a classifier predicting probabilities in the range [0, 1] as output."
        "Values smaller than 0.0 or larger than 1.0 have been detected.",
        str(context.exception),
    )
def test_6_keras_iris_clipped(self):
    """Untargeted attack on a clipped Keras Iris classifier: stays in [0, 1] and flips some labels."""
    classifier = get_tabular_classifier_kr()

    # Untargeted attack on the clipped classifier.
    attack = VirtualAdversarialMethod(classifier, eps=0.1, verbose=False)
    adv = attack.generate(self.x_test_iris)

    # The input was actually perturbed, but every feature respects the clip range.
    self.assertTrue((adv != self.x_test_iris).any())
    self.assertTrue(np.all(adv <= 1))
    self.assertTrue(np.all(adv >= 0))

    adv_labels = np.argmax(classifier.predict(adv), axis=1)
    true_labels = np.argmax(self.y_test_iris, axis=1)
    # At least one prediction changed under the attack.
    self.assertTrue((adv_labels != true_labels).any())

    accuracy = np.mean(adv_labels == true_labels)
    logger.info("Accuracy on Iris with VAT adversarial examples: %.2f%%", (accuracy * 100))
def test_7_keras_iris_unbounded(self):
    """With clip values removed, eps=1 must drive some features outside [0, 1]."""
    classifier = get_tabular_classifier_kr()

    # Rebuild the classifier without clip values so perturbations are unbounded.
    classifier = KerasClassifier(model=classifier._model, use_logits=False, channels_first=True)
    attack = VirtualAdversarialMethod(classifier, eps=1, verbose=False)
    adv = attack.generate(self.x_test_iris)

    self.assertTrue((adv != self.x_test_iris).any())
    # Without clipping, some values escape the unit interval in both directions.
    self.assertTrue(np.any(adv > 1))
    self.assertTrue(np.any(adv < 0))

    adv_labels = np.argmax(classifier.predict(adv), axis=1)
    true_labels = np.argmax(self.y_test_iris, axis=1)
    self.assertTrue((adv_labels != true_labels).any())

    accuracy = np.mean(adv_labels == true_labels)
    logger.info("Accuracy on Iris with VAT adversarial examples: %.2f%%", (accuracy * 100))
def _test_backend_mnist(self, classifier, x_test, y_test):
    """Shared MNIST check: the attack perturbs inputs, changes predictions, and leaves x_test intact."""
    x_test_original = x_test.copy()

    attack = VirtualAdversarialMethod(classifier, batch_size=100, max_iter=2, verbose=False)
    x_test_adv = attack.generate(x_test)

    # The adversarial batch differs from the clean batch somewhere.
    self.assertTrue((x_test_adv != x_test).any())

    y_pred = get_labels_np_array(classifier.predict(x_test_adv))
    self.assertTrue((y_pred != y_test).any())

    accuracy = np.mean(np.argmax(y_pred, axis=1) == np.argmax(y_test, axis=1))
    logger.info("Accuracy on adversarial examples: %.2f%%", (accuracy * 100))

    # Neither the attack nor the classifier may mutate the caller's input array.
    self.assertAlmostEqual(float(np.max(np.abs(x_test_original - x_test))), 0.0, delta=0.00001)
def test_check_params(self):
    """Each invalid constructor argument must raise a ValueError."""
    ptc = get_image_classifier_pt(from_logits=True)

    # One invalid keyword per case; every one must be rejected at construction time.
    invalid_cases = (
        {"max_iter": 1.0},
        {"max_iter": -1},
        {"eps": -1},
        {"finite_diff": 1},
        {"finite_diff": -1.0},
        {"batch_size": -1},
        {"verbose": "true"},
    )
    for kwargs in invalid_cases:
        with self.assertRaises(ValueError):
            _ = VirtualAdversarialMethod(ptc, **kwargs)