def test_check_params(art_warning, image_dl_estimator_for_attack):
    """Invalid constructor arguments must raise ValueError."""
    try:
        from art.attacks.evasion import FastGradientMethod

        estimator = image_dl_estimator_for_attack(FrameSaliencyAttack, from_logits=True)
        fgm = FastGradientMethod(estimator=estimator)

        # Each kwargs dict below violates one parameter check of FrameSaliencyAttack.
        invalid_kwargs = [
            {"attacker": "attack"},                      # attacker must be an attack object
            {"attacker": fgm, "method": "test"},         # unknown saliency method
            {"attacker": fgm, "frame_index": 0},         # frame_index must be >= 1
            {"attacker": fgm, "batch_size": -1},         # batch_size must be positive
            {"attacker": fgm, "verbose": "true"},        # verbose must be a bool
        ]
        for kwargs in invalid_kwargs:
            with pytest.raises(ValueError):
                _ = FrameSaliencyAttack(estimator, **kwargs)
    except ARTTestException as e:
        art_warning(e)
def test_iterative_saliency(art_warning, fix_get_mnist_subset, image_dl_estimator_for_attack):
    """Check the iterative-saliency method for the default and a custom frame axis."""
    try:
        estimator = image_dl_estimator_for_attack(FastGradientMethod)

        expected_axis_1 = {
            "nb_perturbed_frames": ExpectedValue(np.asarray([10, 1, 2, 12, 16, 1, 2, 7, 4, 11, 5]), 2)
        }
        expected_axis_2 = {
            "nb_perturbed_frames": ExpectedValue(np.asarray([11, 1, 2, 6, 14, 2, 2, 13, 4, 8, 4]), 2)
        }

        fgm = FastGradientMethod(estimator, eps=0.3, batch_size=128)

        # Default frame index.
        frame_attack = FrameSaliencyAttack(estimator, fgm, "iterative_saliency")
        backend_check_adverse_frames(frame_attack, fix_get_mnist_subset, expected_axis_1)

        # test with non-default frame index:
        frame_attack = FrameSaliencyAttack(estimator, fgm, "iterative_saliency", frame_index=2)
        backend_check_adverse_frames(frame_attack, fix_get_mnist_subset, expected_axis_2)
    except ARTTestException as e:
        art_warning(e)
def test_iterative_saliency(fix_get_mnist_subset, image_dl_estimator_for_attack):
    """Legacy-style check of the iterative-saliency method over all available classifiers."""
    classifier_list = image_dl_estimator_for_attack(FastGradientMethod)
    # TODO this if statement must be removed once we have a classifier for both image and tabular data
    if classifier_list is None:
        logging.warning("Couldn't perform this test because no classifier is defined")
        return

    expected_axis_1 = {
        "nb_perturbed_frames": ExpectedValue(np.asarray([10, 1, 2, 12, 16, 1, 2, 7, 4, 11, 5]), 2)
    }
    expected_axis_2 = {
        "nb_perturbed_frames": ExpectedValue(np.asarray([11, 1, 2, 6, 14, 2, 2, 13, 4, 8, 4]), 2)
    }

    for clf in classifier_list:
        inner = FastGradientMethod(clf, eps=0.3, batch_size=128)

        # Default frame index.
        backend_check_adverse_frames(
            FrameSaliencyAttack(clf, inner, "iterative_saliency"),
            fix_get_mnist_subset,
            expected_axis_1,
        )

        # test with non-default frame index:
        backend_check_adverse_frames(
            FrameSaliencyAttack(clf, inner, "iterative_saliency", frame_index=2),
            fix_get_mnist_subset,
            expected_axis_2,
        )
def get_frame_saliency(classifier, inner_config=None, **kwargs):
    """Build a FrameSaliencyAttack whose inner attack is loaded from an armory config.

    :param classifier: the ART estimator to attack.
    :param inner_config: armory attack-config dict for the inner (per-frame) attack.
    :param kwargs: forwarded to the FrameSaliencyAttack constructor.
    :return: the configured FrameSaliencyAttack instance.
    """
    from art.attacks.evasion import FrameSaliencyAttack
    from armory.utils import config_loading

    inner_attack = config_loading.load_attack(inner_config, classifier)
    return FrameSaliencyAttack(classifier, inner_attack, **kwargs)
def test_one_shot(fix_get_mnist_subset, image_dl_estimator_for_attack):
    """Legacy-style check: with method "one_shot" the attack reduces to plain FGM."""
    classifier_list = image_dl_estimator_for_attack(FastGradientMethod)
    # TODO this if statement must be removed once we have a classifier for both image and tabular data
    if classifier_list is None:
        logging.warning("Couldn't perform this test because no classifier is defined")
        return

    # for the one-shot method, frame saliency attack should resort to plain FastGradientMethod
    expected = {
        "x_test_mean": ExpectedValue(0.2346725, 0.002),
        "x_test_min": ExpectedValue(-1.0, 0.00001),
        "x_test_max": ExpectedValue(1.0, 0.00001),
        "y_test_pred_adv_expected": ExpectedValue(np.asarray([4, 4, 4, 7, 7, 4, 7, 2, 2, 3, 0]), 2),
    }

    for clf in classifier_list:
        inner = FastGradientMethod(clf, eps=1, batch_size=128)
        backend_check_adverse_values(
            FrameSaliencyAttack(clf, inner, "one_shot"),
            fix_get_mnist_subset,
            expected,
        )
def test_one_shot(art_warning, fix_get_mnist_subset, image_dl_estimator_for_attack):
    """With method "one_shot" the frame saliency attack reduces to plain FGM."""
    try:
        estimator = image_dl_estimator_for_attack(FastGradientMethod)

        # for the one-shot method, frame saliency attack should resort to plain FastGradientMethod
        expected = {
            "x_test_mean": ExpectedValue(0.2346725, 0.002),
            "x_test_min": ExpectedValue(-1.0, 0.00001),
            "x_test_max": ExpectedValue(1.0, 0.00001),
            "y_test_pred_adv_expected": ExpectedValue(np.asarray([4, 4, 4, 7, 7, 4, 7, 2, 2, 3, 0]), 2),
        }

        fgm = FastGradientMethod(estimator, eps=1.0, batch_size=128)
        frame_attack = FrameSaliencyAttack(estimator, fgm, "one_shot")
        backend_check_adverse_values(frame_attack, fix_get_mnist_subset, expected)
    except ARTTestException as e:
        art_warning(e)
model_path = ""  # load model here

# Step 3: Create the ART classifier
classifier = TensorFlowClassifier(model=model_path)

# Step 4: Train the ART classifier
classifier.fit(x_train, y_train, batch_size=4, nb_epochs=3)

# Step 5: Evaluate the ART classifier on benign test examples
benign_preds = classifier.predict(x_test)
# Fraction of test samples whose predicted class matches the label.
accuracy = np.sum(np.argmax(benign_preds, axis=1) == np.argmax(y_test, axis=1)) / len(y_test)
print("Accuracy on benign test examples: {}%".format(accuracy * 100))

# Step 6: Generate adversarial test examples
attack = FrameSaliencyAttack(
    classifier=classifier,
    attacker=FastGradientMethod(estimator=classifier),
    method="iterative_saliency",
    frame_index=2,
    batch_size=4,
)
x_test_adv = attack.generate(x=x_test)

# Step 7: Evaluate the ART classifier on adversarial test examples
adv_preds = classifier.predict(x_test_adv)
accuracy = np.sum(np.argmax(adv_preds, axis=1) == np.argmax(y_test, axis=1)) / len(y_test)
print("Accuracy on adversarial test examples: {}%".format(accuracy * 100))