def test_tfclassifier(self):
    """
    First test with the TFClassifier, crafting an adversarial patch on MNIST.

    :return: `None`
    """
    # Build TFClassifier
    tfc, sess = get_classifier_tf()

    # Get MNIST; only the training images are needed to craft the patch
    (x_train, _), (_, _) = self.mnist

    # Attack
    attack_ap = AdversarialPatch(tfc, rotation_max=22.5, scale_min=0.1, scale_max=1.0, learning_rate=5.0,
                                 patch_shape=(28, 28, 1), batch_size=10)
    patch_adv, _ = attack_ap.generate(x_train)

    # Compare patch values against known reference values. NOTE: the original
    # assertions checked the *signed* difference against 0.01, which is
    # trivially true whenever the patch value undershoots the reference;
    # the absolute difference makes the regression check meaningful.
    self.assertLess(abs(patch_adv[8, 8, 0] - (-3.1106631027725005)), 0.01)
    self.assertLess(abs(patch_adv[14, 14, 0] - 18.954278294246386), 0.01)
    self.assertLess(abs(np.sum(patch_adv) - 794.2447019737851), 0.01)

    # Clean-up session
    sess.close()
    tf.reset_default_graph()
def test_tfclassifier(self):
    """
    First test with the TFClassifier.

    :return: `None`
    """
    # Build TFClassifier
    tfc, sess = get_classifier_tf()

    # Get MNIST
    (x_train, y_train), (x_test, y_test) = self.mnist

    # Craft a universal perturbation on the training set
    attack = UniversalPerturbation(tfc, max_iter=1, attacker="newtonfool", attacker_params={"max_iter": 5})
    adv_train = attack.generate(x_train)
    self.assertTrue(not attack.converged or attack.fooling_rate >= 0.2)

    # Apply the same universal noise to the test set; the data must change
    adv_test = x_test + attack.noise
    self.assertFalse((x_test == adv_test).all())

    # Adversarial predictions must not all agree with the true labels
    preds_train = np.argmax(tfc.predict(adv_train), axis=1)
    preds_test = np.argmax(tfc.predict(adv_test), axis=1)
    self.assertFalse((np.argmax(y_test, axis=1) == preds_test).all())
    self.assertFalse((np.argmax(y_train, axis=1) == preds_train).all())
def test_tfclassifier(self):
    """
    First test with the TFClassifier, running targeted and untargeted
    Carlini L_inf attacks on MNIST.

    :return: `None`
    """
    # Build TFClassifier
    tfc, sess = get_classifier_tf()

    # Get MNIST
    (_, _), (x_test, y_test) = self.mnist

    # First attack: targeted, with random target classes
    clinfm = CarliniLInfMethod(classifier=tfc, targeted=True, max_iter=10, eps=0.5)
    params = {'y': random_targets(y_test, tfc.nb_classes)}
    x_test_adv = clinfm.generate(x_test, **params)
    self.assertFalse((x_test == x_test_adv).all())
    # Adversarial samples must stay inside the [0, 1] data range (small tolerance)
    self.assertTrue((x_test_adv <= 1.0001).all())
    self.assertTrue((x_test_adv >= -0.0001).all())
    target = np.argmax(params['y'], axis=1)
    y_pred_adv = np.argmax(tfc.predict(x_test_adv), axis=1)
    logger.debug('CW0 Target: %s', target)
    logger.debug('CW0 Actual: %s', y_pred_adv)
    logger.info('CW0 Success Rate: %.2f', (np.sum(target == y_pred_adv) / float(len(target))))
    self.assertTrue((target == y_pred_adv).any())

    # Second attack: untargeted, batch_size=1 (no batching)
    clinfm = CarliniLInfMethod(classifier=tfc, targeted=False, max_iter=10, eps=0.5, batch_size=1)
    x_test_adv = clinfm.generate(x_test)
    self.assertTrue((x_test_adv <= 1.0001).all())
    self.assertTrue((x_test_adv >= -0.0001).all())
    # Bug fix: the untargeted attack must be measured against the true labels,
    # not the stale random targets left over from the first (targeted) attack.
    target = np.argmax(y_test, axis=1)
    y_pred_adv = np.argmax(tfc.predict(x_test_adv), axis=1)
    logger.debug('CW0 Target: %s', target)
    logger.debug('CW0 Actual: %s', y_pred_adv)
    logger.info('CW0 Success Rate: %.2f', (np.sum(target != y_pred_adv) / float(len(target))))
    self.assertTrue((target != y_pred_adv).any())

    # Clean-up session
    sess.close()
    tf.reset_default_graph()
def test_krclassifier(self):
    """
    Second test with the KerasClassifier.

    :return: `None`
    """
    # Build KerasClassifier. Bug fix: the original called get_classifier_tf(),
    # so this "Keras" test actually exercised the TensorFlow classifier again
    # (the k.clear_session() clean-up below confirms Keras was intended).
    krc, sess = get_classifier_kr()

    # Get MNIST
    (_, _), (x_test, y_test) = self.mnist

    # First attack: targeted, with random target classes
    clinfm = CarliniLInfMethod(classifier=krc, targeted=True, max_iter=10, eps=0.5)
    params = {'y': random_targets(y_test, krc.nb_classes)}
    x_test_adv = clinfm.generate(x_test, **params)
    self.assertFalse((x_test == x_test_adv).all())
    # Adversarial samples must stay inside the [0, 1] data range (small tolerance)
    self.assertTrue((x_test_adv <= 1.0001).all())
    self.assertTrue((x_test_adv >= -0.0001).all())
    target = np.argmax(params['y'], axis=1)
    y_pred_adv = np.argmax(krc.predict(x_test_adv), axis=1)
    logger.debug('CW0 Target: %s', target)
    logger.debug('CW0 Actual: %s', y_pred_adv)
    logger.info('CW0 Success Rate: %.2f', (np.sum(target == y_pred_adv) / float(len(target))))
    self.assertTrue((target == y_pred_adv).any())

    # Second attack: untargeted
    clinfm = CarliniLInfMethod(classifier=krc, targeted=False, max_iter=10, eps=0.5)
    x_test_adv = clinfm.generate(x_test)
    self.assertTrue((x_test_adv <= 1.0001).all())
    self.assertTrue((x_test_adv >= -0.0001).all())
    # Bug fix: the untargeted attack must be measured against the true labels,
    # not the stale random targets left over from the first (targeted) attack.
    target = np.argmax(y_test, axis=1)
    y_pred_adv = np.argmax(krc.predict(x_test_adv), axis=1)
    logger.debug('CW0 Target: %s', target)
    logger.debug('CW0 Actual: %s', y_pred_adv)
    logger.info('CW0 Success Rate: %.2f', (np.sum(target != y_pred_adv) / float(len(target))))
    self.assertTrue((target != y_pred_adv).any())

    # Clean-up
    k.clear_session()
def test_tfclassifier(self):
    """
    First test with the TFClassifier, running the NewtonFool attack on MNIST.

    :return: `None`
    """
    # Build TFClassifier
    tfc, sess = get_classifier_tf()

    # Get MNIST
    (_, _), (x_test, _) = self.mnist

    # Attack (commented-out timing/benchmark scaffolding for other batch
    # sizes removed; batch_size=100 is the configuration actually tested)
    nf = NewtonFool(tfc, max_iter=5)
    x_test_adv = nf.generate(x_test, batch_size=100)
    self.assertFalse((x_test == x_test_adv).all())

    y_pred = tfc.predict(x_test)
    y_pred_adv = tfc.predict(x_test_adv)
    # NewtonFool should lower the classifier's confidence in the originally
    # predicted class: select each sample's original top-class probability and
    # compare it with the probability of that same class after the attack.
    # (keepdims=True replaces the non-idiomatic keepdims=1.)
    y_pred_bool = y_pred.max(axis=1, keepdims=True) == y_pred
    y_pred_max = y_pred.max(axis=1)
    y_pred_adv_max = y_pred_adv[y_pred_bool]
    self.assertTrue((y_pred_max >= y_pred_adv_max).all())
def test_failure_attack(self):
    """
    Test the corner case when the attack fails: with zero iterations, zero
    binary-search steps and zero learning rate, ZOO must return the input
    (almost) unchanged.

    :return: `None`
    """
    # Build TFClassifier
    tfc, sess = get_classifier_tf()

    # Get MNIST. Bug fix: self.mnist is ((x_train, y_train), (x_test, y_test));
    # the original `x_test, _ = self.mnist` bound x_test to the whole
    # (x_train, y_train) tuple instead of the test images.
    (_, _), (x_test, _) = self.mnist

    # Failure attack
    zoo = ZooAttack(classifier=tfc, max_iter=0, binary_search_steps=0, learning_rate=0)
    x_test_adv = zoo.generate(x_test)
    # Output must stay inside the [0, 1] data range (small tolerance)
    self.assertTrue((x_test_adv <= 1.0001).all())
    self.assertTrue((x_test_adv >= -0.0001).all())
    # A no-op attack must leave the samples essentially untouched
    np.testing.assert_almost_equal(x_test, x_test_adv, 3)

    # Clean-up session
    sess.close()
    tf.reset_default_graph()