def test_minimal_perturbations_images(fix_get_mnist_subset,
                                      get_image_classifier_list_for_attack):
    classifier_list = get_image_classifier_list_for_attack(FastGradientMethod)
    # TODO this if statement must be removed once we have a classifier for both image and tabular data
    if classifier_list is None:
        logging.warning(
            "Couldn't perform this test because no classifier is defined")
        return

    for classifier in classifier_list:
        attack = FastGradientMethod(classifier, eps=1.0, batch_size=11)
        attack_params = {"minimal": True, "eps_step": 0.1, "eps": 5.0}
        attack.set_params(**attack_params)

        expected_values = {
            "x_test_mean": ExpectedValue(0.03896513, 0.01),
            "x_test_min": ExpectedValue(-0.30000000, 0.00001),
            "x_test_max": ExpectedValue(0.30000000, 0.00001),
            "y_test_pred_adv_expected": ExpectedValue(np.asarray([4, 2, 4, 7, 0, 4, 7, 2, 0, 7, 0]), 2),
        }
        backend_check_adverse_values(attack, fix_get_mnist_subset,
                                     expected_values)
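For orientation, here is a self-contained sketch of the minimal-perturbation mode exercised above. It swaps the MNIST fixtures for a scikit-learn logistic regression on Iris (an assumption for illustration only): with minimal=True, FGSM grows the perturbation in steps of eps_step, up to eps, until each prediction flips.

import numpy as np
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from art.estimators.classification import SklearnClassifier
from art.attacks.evasion import FastGradientMethod

# Illustrative stand-in for the test fixtures: a logistic regression on Iris.
x, y = load_iris(return_X_y=True)
model = LogisticRegression(max_iter=1000).fit(x, y)
classifier = SklearnClassifier(model=model, clip_values=(x.min(), x.max()))

# minimal=True searches per sample for the smallest multiple of eps_step
# (capped at eps) that changes the predicted class.
attack = FastGradientMethod(classifier, eps=1.0, eps_step=0.1, minimal=True)
x_adv = attack.generate(x=x)

clean_acc = np.mean(np.argmax(classifier.predict(x), axis=1) == y)
adv_acc = np.mean(np.argmax(classifier.predict(x_adv), axis=1) == y)
print(f"clean accuracy: {clean_acc:.3f}, adversarial accuracy: {adv_acc:.3f}")
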
 def attack(self, eps=0.05):
     # Create Adversarial Examples using FGSM.
     self.utility.print_message(
         NOTE, 'Creating Adversarial Examples using FGSM.')
     attack = FastGradientMethod(estimator=self.model, eps=eps)
     X_adv = attack.generate(x=self.dataset)
     return X_adv
Example #3
def test_inverse_gan(art_warning, fix_get_mnist_subset, image_dl_estimator_for_attack):
    try:
        (x_train_mnist, y_train_mnist, x_test_mnist, y_test_mnist) = fix_get_mnist_subset

        gan, inverse_gan, sess = get_gan_inverse_gan_ft()
        if gan is None:
            logging.warning("Couldn't perform this test because no gan is defined for this framework configuration")
            return

        classifier = image_dl_estimator_for_attack(FastGradientMethod)

        attack = FastGradientMethod(classifier, eps=0.2)
        x_test_adv = attack.generate(x=x_test_mnist)

        inverse_gan = InverseGAN(sess=sess, gan=gan, inverse_gan=inverse_gan)

        x_test_defended = inverse_gan(x_test_adv, maxiter=1)

        np.testing.assert_array_almost_equal(
            float(np.mean(x_test_defended - x_test_adv)),
            0.08818667382001877,
            decimal=2,  # 'decimal' is the number of decimal places and must be an integer
        )
    except ARTTestException as e:
        art_warning(e)
def test_inverse_gan(fix_get_mnist_subset, image_dl_estimator_for_attack):
    (x_train_mnist, y_train_mnist, x_test_mnist,
     y_test_mnist) = fix_get_mnist_subset

    gan, inverse_gan, sess = get_gan_inverse_gan_ft()
    if gan is None:
        logging.warning(
            "Couldn't perform this test because no gan is defined for this framework configuration"
        )
        return

    classifier_list = image_dl_estimator_for_attack(FastGradientMethod)

    if classifier_list is None:
        logging.warning(
            "Couldn't perform this test because no classifier is defined")
        return

    classifier = classifier_list[0]

    attack = FastGradientMethod(classifier, eps=0.2)
    x_test_adv = attack.generate(x=x_test_mnist)

    inverse_gan = InverseGAN(sess=sess, gan=gan, inverse_gan=inverse_gan)

    x_test_defended = inverse_gan(x_test_adv, maxiter=1)

    assert np.mean(x_test_defended - x_test_adv) == pytest.approx(0.33819187,
                                                                  abs=0.05)
Example #5
 def generate_adv_samples(self, model_path, name):
     model = models.load_model(model_path)
     x_train, x_test, y_train, y_test = self.data_pre_process(name)
     classifier = KerasClassifier(clip_values=(np.min(x_train), np.max(x_train)), model=model)
     fgsm = FastGradientMethod(classifier, eps=0.01, minimal=True, eps_step=0.01, num_random_init=35,
                               targeted=False, batch_size=128)
     x_adv_test = fgsm.generate(x=x_test)
     return x_adv_test
def test_targeted_images(art_warning, fix_get_mnist_subset, image_dl_estimator_for_attack):
    try:
        classifier = image_dl_estimator_for_attack(FastGradientMethod)
        attack = FastGradientMethod(classifier, eps=1.0, targeted=True)
        attack_params = {"minimal": True, "eps_step": 0.01, "eps": 1.0}
        attack.set_params(**attack_params)

        backend_targeted_images(attack, fix_get_mnist_subset)
    except ARTTestException as e:
        art_warning(e)
def test_tabular(art_warning, tabular_dl_estimator, framework, get_iris_dataset, targeted, clipped):
    try:
        classifier = tabular_dl_estimator(clipped=clipped)

        if targeted:
            attack = FastGradientMethod(classifier, targeted=True, eps=0.1, batch_size=128)
            backend_targeted_tabular(attack, get_iris_dataset)
        else:
            attack = FastGradientMethod(classifier, eps=0.1)
            backend_untargeted_tabular(attack, get_iris_dataset, clipped=clipped)
    except ARTTestException as e:
        art_warning(e)
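A hedged sketch of what the targeted branch above exercises: with targeted=True, the attack needs target labels passed to generate as y, and it moves samples towards those labels rather than away from the true ones. The scikit-learn Iris setup here is an illustrative stand-in for the tabular_dl_estimator fixture.

import numpy as np
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from art.estimators.classification import SklearnClassifier
from art.attacks.evasion import FastGradientMethod
from art.utils import to_categorical

x, y = load_iris(return_X_y=True)
classifier = SklearnClassifier(model=LogisticRegression(max_iter=1000).fit(x, y),
                               clip_values=(x.min(), x.max()))

# Push every sample towards class 0: with targeted=True the attack descends the
# loss of the supplied labels instead of ascending the loss of the true labels.
targets = to_categorical(np.zeros(len(x), dtype=int), nb_classes=3)
attack = FastGradientMethod(classifier, targeted=True, eps=0.5, batch_size=128)
x_adv = attack.generate(x=x, y=targets)
print("target hit rate:", np.mean(np.argmax(classifier.predict(x_adv), axis=1) == 0))
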
Example #8
 def generate_adv_samples(self):
     model = models.load_model(self.model_path)
     x_train, x_test, y_train, y_test = self.data_pre_process()
     classifier = KerasClassifier(clip_values=(np.min(x_train), np.max(x_train)), model=model)
     if self.attack == 'fgsm':
         fgsm = FastGradientMethod(classifier, eps=0.01, minimal=True, eps_step=0.01, num_random_init=35,
                                   targeted=False, batch_size=128)
         x_adv_test = fgsm.generate(x=x_test)
         # both the defended and undefended paths return the same data
         return x_test, x_adv_test, y_test
Example #9
def attack_FGSM_nontargeted(dataloader, model, model_info, args,
                            checkpoint_dir):
    """
    FGSM attack
    """
    device = args.device
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=0.01)

    img_size = model_info["model_img_size"]
    n_classes = model_info["num_classes"]

    classifier = PyTorchClassifier(
        model=model,
        loss=criterion,
        clip_values=(0.0, 1.0),
        optimizer=optimizer,
        input_shape=(img_size, img_size),
        nb_classes=n_classes,
        device_type=device,
    )

    # attack = FastGradientMethod(estimator=classifier, batch_size=args.batch_size)
    attack = FastGradientMethod(estimator=classifier,
                                batch_size=args.batch_size)

    # Launching a non-targeted attack
    # t = args.target_class
    print("Launching FGSM non-targeted attack")
    dest_images = os.path.join(checkpoint_dir, args.model_name)
    os.makedirs(dest_images, exist_ok=True)

    # Running over the entire-batch to compute a universal perturbation
    for data in tqdm(dataloader):
        sample, label, img_path = data
        sample = sample.to(device)
        # Launch attack
        sample_adv = attack.generate(x=sample.cpu().numpy())  # ART expects NumPy arrays

        # Code to save these images
        img_path = [it.split("/")[-1] for it in img_path]

        for i in range(len(sample_adv)):
            _img = sample_adv[i].transpose(1, 2, 0)
            skimage.io.imsave(os.path.join(dest_images, img_path[i]),
                              img_as_ubyte(_img))

    with open(os.path.join(dest_images, "stats.txt"), "w") as f:
        f.write("Fooling-rate was nan\n")

    return dest_images
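For context, a minimal stand-alone version of the PyTorchClassifier + FastGradientMethod wiring used above, with a tiny randomly initialised network and random inputs standing in for the real model and dataloader (illustration only).

import numpy as np
import torch.nn as nn
import torch.optim as optim
from art.estimators.classification import PyTorchClassifier
from art.attacks.evasion import FastGradientMethod

# Tiny placeholder network instead of the real model.
model = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))
classifier = PyTorchClassifier(
    model=model,
    loss=nn.CrossEntropyLoss(),
    optimizer=optim.Adam(model.parameters(), lr=0.01),
    input_shape=(1, 28, 28),
    nb_classes=10,
    clip_values=(0.0, 1.0),
)

attack = FastGradientMethod(estimator=classifier, eps=0.1)
x = np.random.rand(8, 1, 28, 28).astype(np.float32)  # random stand-in for the dataloader batch
x_adv = attack.generate(x=x)
print(x_adv.shape, float(np.abs(x_adv - x).max()))  # L_inf perturbation is capped at eps
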
def test_targeted_images(fix_get_mnist_subset, image_dl_estimator_for_attack):
    classifier_list = image_dl_estimator_for_attack(FastGradientMethod)
    # TODO this if statement must be removed once we have a classifier for both image and tabular data
    if classifier_list is None:
        logging.warning(
            "Couldn't perform this test because no classifier is defined")
        return

    for classifier in classifier_list:
        attack = FastGradientMethod(classifier, eps=1.0, targeted=True)
        attack_params = {"minimal": True, "eps_step": 0.01, "eps": 1.0}
        attack.set_params(**attack_params)

        backend_targeted_images(attack, fix_get_mnist_subset)
 def adv_model_fit(self):
     model = self.choose_model
     x_train, x_test, y_train, y_test = self.data_pre_process()
     classifier = KerasClassifier(clip_values=(np.min(x_train), np.max(x_train)), model=model)
     fgsm = FastGradientMethod(classifier, eps=0.01, minimal=True, eps_step=0.01, num_random_init=35,
                               targeted=False, batch_size=128)
     x_train, x_test = self.data_reshape(self.model_choice, x_train, x_test)
     x_adv_train = fgsm.generate(x=x_train)
     history = model.fit(x_adv_train, y_train, epochs=self.epochs, batch_size=32, validation_split=0.2)
     data_record = DataRecord()
     data_record.model = model
     data_record.summary = model.to_yaml()
     data_record.history = history
     data_record.epochs = self.epochs
     self.result_save(data_record, self.save_adv_dir)
def test_classifier_defended_images(art_warning, fix_get_mnist_subset, image_dl_estimator_for_attack):
    try:
        classifier = image_dl_estimator_for_attack(FastGradientMethod, defended=True)
        attack = FastGradientMethod(classifier, eps=1.0, batch_size=128)
        backend_test_defended_images(attack, fix_get_mnist_subset)
    except ARTTestException as e:
        art_warning(e)
def test_norm_images(art_warning, norm, fix_get_mnist_subset, image_dl_estimator_for_attack):
    try:
        classifier = image_dl_estimator_for_attack(FastGradientMethod)

        if norm == np.inf:
            expected_values = {
                "x_test_mean": ExpectedValue(0.2346725, 0.002),
                "x_test_min": ExpectedValue(-1.0, 0.00001),
                "x_test_max": ExpectedValue(1.0, 0.00001),
                "y_test_pred_adv_expected": ExpectedValue(np.asarray([4, 4, 4, 7, 7, 4, 7, 2, 2, 3, 0]), 2),
            }

        elif norm == 1:
            expected_values = {
                "x_test_mean": ExpectedValue(0.00051374, 0.002),
                "x_test_min": ExpectedValue(-0.01486498, 0.001),
                "x_test_max": ExpectedValue(0.014761963, 0.001),
                "y_test_pred_adv_expected": ExpectedValue(np.asarray([7, 1, 1, 4, 4, 1, 4, 4, 4, 4, 4]), 4),
            }
        elif norm == 2:
            expected_values = {
                "x_test_mean": ExpectedValue(0.007636416, 0.001),
                "x_test_min": ExpectedValue(-0.211054801, 0.001),
                "x_test_max": ExpectedValue(0.209592223, 0.001),
                "y_test_pred_adv_expected": ExpectedValue(np.asarray([7, 2, 4, 4, 4, 7, 7, 4, 0, 4, 4]), 2),
            }

        attack = FastGradientMethod(classifier, eps=1.0, norm=norm, batch_size=128)

        backend_check_adverse_values(attack, fix_get_mnist_subset, expected_values)
    except ARTTestException as e:
        art_warning(e)
Example #14
def test_iterative_saliency(fix_get_mnist_subset,
                            image_dl_estimator_for_attack):
    classifier_list = image_dl_estimator_for_attack(FastGradientMethod)
    # TODO this if statement must be removed once we have a classifier for both image and tabular data
    if classifier_list is None:
        logging.warning(
            "Couldn't perform this test because no classifier is defined")
        return

    expected_values_axis_1 = {
        "nb_perturbed_frames": ExpectedValue(np.asarray([10, 1, 2, 12, 16, 1, 2, 7, 4, 11, 5]), 2)
    }

    expected_values_axis_2 = {
        "nb_perturbed_frames": ExpectedValue(np.asarray([11, 1, 2, 6, 14, 2, 2, 13, 4, 8, 4]), 2)
    }

    for classifier in classifier_list:
        attacker = FastGradientMethod(classifier, eps=0.3, batch_size=128)
        attack = FrameSaliencyAttack(classifier, attacker,
                                     "iterative_saliency")
        backend_check_adverse_frames(attack, fix_get_mnist_subset,
                                     expected_values_axis_1)

        # test with non-default frame index:
        attack = FrameSaliencyAttack(classifier,
                                     attacker,
                                     "iterative_saliency",
                                     frame_index=2)
        backend_check_adverse_frames(attack, fix_get_mnist_subset,
                                     expected_values_axis_2)
Example #15
def test_one_shot(fix_get_mnist_subset, image_dl_estimator_for_attack):
    classifier_list = image_dl_estimator_for_attack(FastGradientMethod)
    # TODO this if statement must be removed once we have a classifier for both image and tabular data
    if classifier_list is None:
        logging.warning(
            "Couldn't perform this test because no classifier is defined")
        return

    # for the one-shot method, frame saliency attack should resort to plain FastGradientMethod
    expected_values = {
        "x_test_mean": ExpectedValue(0.2346725, 0.002),
        "x_test_min": ExpectedValue(-1.0, 0.00001),
        "x_test_max": ExpectedValue(1.0, 0.00001),
        "y_test_pred_adv_expected": ExpectedValue(np.asarray([4, 4, 4, 7, 7, 4, 7, 2, 2, 3, 0]), 2),
    }

    for classifier in classifier_list:
        attacker = FastGradientMethod(classifier, eps=1, batch_size=128)
        attack = FrameSaliencyAttack(classifier, attacker, "one_shot")
        backend_check_adverse_values(attack, fix_get_mnist_subset,
                                     expected_values)
Example #16
def test_fgsm_defences(fix_get_mnist_subset, image_dl_estimator,
                       is_tf_version_2):
    if is_tf_version_2:

        clip_values = (0, 1)
        smooth_3x3 = SpatialSmoothingTensorFlowV2(window_size=3,
                                                  channels_first=False)
        smooth_5x5 = SpatialSmoothingTensorFlowV2(window_size=5,
                                                  channels_first=False)
        smooth_7x7 = SpatialSmoothingTensorFlowV2(window_size=7,
                                                  channels_first=False)
        classifier_, _ = image_dl_estimator(one_classifier=True)

        loss_object = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
        classifier = TensorFlowV2Classifier(
            clip_values=clip_values,
            model=classifier_.model,
            preprocessing_defences=[smooth_3x3, smooth_5x5, smooth_7x7],
            loss_object=loss_object,
            input_shape=(28, 28, 1),
            nb_classes=10,
        )
        assert len(classifier.preprocessing_defences) == 3

        attack = FastGradientMethod(classifier, eps=1, batch_size=128)
        backend_test_defended_images(attack, fix_get_mnist_subset)
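The defended tests above chain framework-specific smoothers into the classifier; ART also ships a framework-agnostic SpatialSmoothing preprocessor that can be applied to arrays directly. A short sketch with random NHWC data (shapes assumed, for illustration only):

import numpy as np
from art.defences.preprocessor import SpatialSmoothing

x = np.random.rand(4, 28, 28, 1).astype(np.float32)  # assumed NHWC batch
smooth = SpatialSmoothing(window_size=3, channels_first=False)
x_smoothed, _ = smooth(x)  # preprocessors return an (x, y) tuple
print(x_smoothed.shape)
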
def test_masked_images(art_warning, fix_get_mnist_subset, image_dl_estimator_for_attack):
    try:
        classifier = image_dl_estimator_for_attack(FastGradientMethod)
        attack = FastGradientMethod(classifier, eps=1.0, num_random_init=1)
        backend_masked_images(attack, fix_get_mnist_subset)
    except ARTTestException as e:
        art_warning(e)
def test_iterative_saliency(art_warning, fix_get_mnist_subset,
                            image_dl_estimator_for_attack):
    try:
        classifier = image_dl_estimator_for_attack(FastGradientMethod)

        expected_values_axis_1 = {
            "nb_perturbed_frames": ExpectedValue(np.asarray([10, 1, 2, 12, 16, 1, 2, 7, 4, 11, 5]), 2)
        }

        expected_values_axis_2 = {
            "nb_perturbed_frames": ExpectedValue(np.asarray([11, 1, 2, 6, 14, 2, 2, 13, 4, 8, 4]), 2)
        }

        attacker = FastGradientMethod(classifier, eps=0.3, batch_size=128)
        attack = FrameSaliencyAttack(classifier, attacker,
                                     "iterative_saliency")
        backend_check_adverse_frames(attack, fix_get_mnist_subset,
                                     expected_values_axis_1)

        # test with non-default frame index:
        attack = FrameSaliencyAttack(classifier,
                                     attacker,
                                     "iterative_saliency",
                                     frame_index=2)
        backend_check_adverse_frames(attack, fix_get_mnist_subset,
                                     expected_values_axis_2)
    except ARTTestException as e:
        art_warning(e)
Example #19
def test_check_params(art_warning, image_dl_estimator_for_attack):
    try:
        from art.attacks.evasion import FastGradientMethod

        classifier = image_dl_estimator_for_attack(FrameSaliencyAttack, from_logits=True)

        attacker = FastGradientMethod(estimator=classifier)

        with pytest.raises(ValueError):
            _ = FrameSaliencyAttack(classifier, attacker="attack")

        with pytest.raises(ValueError):
            _ = FrameSaliencyAttack(classifier, attacker=attacker, method="test")

        with pytest.raises(ValueError):
            _ = FrameSaliencyAttack(classifier, attacker=attacker, frame_index=0)

        with pytest.raises(ValueError):
            _ = FrameSaliencyAttack(classifier, attacker=attacker, batch_size=-1)

        with pytest.raises(ValueError):
            _ = FrameSaliencyAttack(classifier, attacker=attacker, verbose="true")

    except ARTTestException as e:
        art_warning(e)
Example #20
def test_check_params(art_warning, image_dl_estimator_for_attack):
    try:
        from art.attacks.evasion import FastGradientMethod

        classifier = image_dl_estimator_for_attack(AutoAttack)

        attacks = [FastGradientMethod(estimator=classifier)]

        with pytest.raises(ValueError):
            _ = AutoAttack(classifier, attacks=attacks, norm=0)

        with pytest.raises(ValueError):
            _ = AutoAttack(classifier, attacks=attacks, eps="1")
        with pytest.raises(ValueError):
            _ = AutoAttack(classifier, attacks=attacks, eps=-1.0)

        with pytest.raises(ValueError):
            _ = AutoAttack(classifier, attacks=attacks, eps_step="1")
        with pytest.raises(ValueError):
            _ = AutoAttack(classifier, attacks=attacks, eps_step=-1.0)

        with pytest.raises(ValueError):
            _ = AutoAttack(classifier, attacks=attacks, batch_size=1.0)
        with pytest.raises(ValueError):
            _ = AutoAttack(classifier, attacks=attacks, batch_size=-1)

    except ARTTestException as e:
        art_warning(e)
def test_fgsm_defences(fix_get_mnist_subset, image_dl_estimator, device_type):

    clip_values = (0, 1)
    smooth_3x3 = SpatialSmoothingPyTorch(window_size=3,
                                         channels_first=True,
                                         device_type=device_type)
    smooth_5x5 = SpatialSmoothingPyTorch(window_size=5,
                                         channels_first=True,
                                         device_type=device_type)
    smooth_7x7 = SpatialSmoothingPyTorch(window_size=7,
                                         channels_first=True,
                                         device_type=device_type)
    classifier_, _ = image_dl_estimator(one_classifier=True)

    criterion = nn.CrossEntropyLoss()
    classifier = PyTorchClassifier(
        clip_values=clip_values,
        model=classifier_.model,
        preprocessing_defences=[smooth_3x3, smooth_5x5, smooth_7x7],
        loss=criterion,
        input_shape=(1, 28, 28),
        nb_classes=10,
        device_type=device_type,
    )
    assert len(classifier.preprocessing_defences) == 3

    attack = FastGradientMethod(classifier, eps=1, batch_size=128)
    backend_test_defended_images(attack, fix_get_mnist_subset)
Example #22
 def __init__(self, linf_args={}, cond_ratio=0.01, ref_data=None, **kwds):
     super().__init__(**kwds)
     self.metric = LInf(**linf_args)
     self.cond_ratio = cond_ratio
     self.ref_data = ref_data
     ## define a global attacker
     classifier = KerasClassifier(clip_values=(MIN, -MIN), model=self.dnn)
     self.adv_crafter = FastGradientMethod(classifier)
Example #23
def FGSM(points=10):
    from art.attacks.evasion import FastGradientMethod
    from art.estimators.classification import TensorFlowV2Classifier

    loss_object = tf.keras.losses.SparseCategoricalCrossentropy()

    classifier = TensorFlowV2Classifier(model=model,
                                        nb_classes=10,
                                        input_shape=(28, 28, 1),
                                        loss_object=loss_object,
                                        clip_values=(0, 1),
                                        channels_first=False)

    # Craft adversarial samples with FGSM
    epsilons = [0.05 * i for i in range(points)]  # Maximum perturbation
    preds = np.argmax(classifier.predict(x_test), axis=1)
    acc = np.sum(preds == np.argmax(y_test, axis=1)) / y_test.shape[0]
    print("\nTest accuracy on normal sample: %.2f%% eps: %.2f" %
          (acc * 100, 0))
    accuracies = [acc]
    examples = []
    for epsilon in epsilons[1:]:
        adv_crafter = FastGradientMethod(classifier, eps=epsilon)
        x_test_adv = adv_crafter.generate(x=x_test)

        # Evaluate the classifier on the adversarial examples
        preds = np.argmax(classifier.predict(x_test_adv), axis=1)
        acc = np.sum(preds == np.argmax(y_test, axis=1)) / y_test.shape[0]
        print("\nTest accuracy on adversarial sample: %.2f%% eps: %.2f" %
              (acc * 100, epsilon))
        accuracies.append(acc)
        example = []
        labels = np.argmax(y_test, axis=1)
        for i in range(len(preds)):
            p, l = preds[i], labels[i]
            if p != l:
                orig = l
                adv = p
                ex = x_test_adv[i]
                example.append((orig, adv, ex))
            if len(example) == 5:
                break
        examples.append(example)
    plot_accuracies(epsilons, accuracies)
    plot_examples(epsilons[1:], examples)
Example #24
def test_non_classification(art_warning, fix_get_mnist_subset,
                            image_dl_estimator_for_attack, fix_get_rcnn):
    try:
        classifier = fix_get_rcnn
        attack = FastGradientMethod(classifier, num_random_init=3)
        backend_test_random_initialisation_images(attack, fix_get_mnist_subset)
    except ARTTestException as e:
        art_warning(e)
def test_masked_images(fix_get_mnist_subset, image_dl_estimator_for_attack):
    classifier_list = image_dl_estimator_for_attack(FastGradientMethod)
    # TODO this if statement must be removed once we have a classifier for both image and tabular data
    if classifier_list is None:
        logging.warning(
            "Couldn't perform this test because no classifier is defined")
        return

    for classifier in classifier_list:
        attack = FastGradientMethod(classifier, eps=1.0, num_random_init=1)
        backend_masked_images(attack, fix_get_mnist_subset)
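backend_masked_images feeds a mask into generate; the sketch below shows the same mechanism stand-alone on Iris (an illustrative assumption, not the test fixture). Features whose mask entry is zero are left unperturbed, including by the random initialisation.

import numpy as np
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from art.estimators.classification import SklearnClassifier
from art.attacks.evasion import FastGradientMethod

x, y = load_iris(return_X_y=True)
classifier = SklearnClassifier(model=LogisticRegression(max_iter=1000).fit(x, y),
                               clip_values=(x.min(), x.max()))

mask = np.zeros_like(x)   # broadcastable to x: 1 = perturbable, 0 = frozen
mask[:, :2] = 1.0         # only allow changes to the first two features
x_adv = FastGradientMethod(classifier, eps=0.5, num_random_init=1).generate(x=x, mask=mask)
print(float(np.abs(x_adv - x)[:, 2:].max()))  # effectively zero on the masked-out features
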
Example #26
 def __init__(self, linf_args={}, cond_ratio=0.01, ref_data=None, **kwds):
     """
 TODO: FGM also accepts the L1 and L2 norms, in addition to Linf.
 """
     super().__init__(**kwds)
     self.metric = LInf(**linf_args)
     self.cond_ratio = cond_ratio
     self.ref_data = ref_data
     ## define a global attacker
     classifier = KerasClassifier(clip_values=(MIN, -MIN), model=self.dnn)
     self.adv_crafter = FastGradientMethod(classifier)
def test_tabular(get_tabular_classifier_list, framework, get_iris_dataset,
                 targeted, clipped):
    classifier_list = get_tabular_classifier_list(FastGradientMethod,
                                                  clipped=clipped)

    if classifier_list is None:
        logging.warning(
            "Couldn't perform this test because no classifier is defined")
        return
    for classifier in classifier_list:
        if targeted:
            attack = FastGradientMethod(classifier,
                                        targeted=True,
                                        eps=0.1,
                                        batch_size=128)
            backend_targeted_tabular(attack, get_iris_dataset)
        else:
            attack = FastGradientMethod(classifier, eps=0.1)
            backend_untargeted_tabular(attack,
                                       get_iris_dataset,
                                       clipped=clipped)
def test_norm_images(norm, fix_get_mnist_subset,
                     get_image_classifier_list_for_attack):
    classifier_list = get_image_classifier_list_for_attack(FastGradientMethod)
    # TODO this if statement must be removed once we have a classifier for both image and tabular data
    if classifier_list is None:
        logging.warning(
            "Couldn't perform this test because no classifier is defined")
        return

    if norm == np.inf:
        expected_values = {
            "x_test_mean": ExpectedValue(0.2346725, 0.002),
            "x_test_min": ExpectedValue(-1.0, 0.00001),
            "x_test_max": ExpectedValue(1.0, 0.00001),
            "y_test_pred_adv_expected": ExpectedValue(np.asarray([4, 4, 4, 7, 7, 4, 7, 2, 2, 3, 0]), 2),
        }

    elif norm == 1:
        expected_values = {
            "x_test_mean": ExpectedValue(0.00051374, 0.002),
            "x_test_min": ExpectedValue(-0.01486498, 0.001),
            "x_test_max": ExpectedValue(0.014761963, 0.001),
            "y_test_pred_adv_expected": ExpectedValue(np.asarray([7, 1, 1, 4, 4, 1, 4, 4, 4, 4, 4]), 4),
        }
    elif norm == 2:
        expected_values = {
            "x_test_mean": ExpectedValue(0.007636416, 0.001),
            "x_test_min": ExpectedValue(-0.211054801, 0.001),
            "x_test_max": ExpectedValue(0.209592223, 0.001),
            "y_test_pred_adv_expected": ExpectedValue(np.asarray([7, 2, 4, 4, 4, 7, 7, 4, 0, 4, 4]), 2),
        }

    for classifier in classifier_list:
        attack = FastGradientMethod(classifier,
                                    eps=1,
                                    norm=norm,
                                    batch_size=128)

        backend_check_adverse_values(attack, fix_get_mnist_subset,
                                     expected_values)
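To make the effect of the norm parameter concrete, here is a small sketch on the scikit-learn Iris setup (an illustrative assumption; the MNIST expectations above do not carry over):

import numpy as np
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from art.estimators.classification import SklearnClassifier
from art.attacks.evasion import FastGradientMethod

x, y = load_iris(return_X_y=True)
classifier = SklearnClassifier(model=LogisticRegression(max_iter=1000).fit(x, y),
                               clip_values=(x.min(), x.max()))

# norm selects the ball of radius eps the perturbation is drawn from:
# np.inf uses eps * sign(gradient), while 1 and 2 scale the gradient to unit L1/L2 norm.
for norm in (np.inf, 1, 2):
    x_adv = FastGradientMethod(classifier, eps=0.5, norm=norm).generate(x=x)
    print(norm, "mean |delta| =", float(np.mean(np.abs(x_adv - x))))
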
 def pre_adv_model_fit(self):
     model = models.load_model(os.path.join(self.save_dir, f'{self.model_choice}/model.h5'))
     x_train, x_test, y_train, y_test = self.data_pre_process()
     classifier = KerasClassifier(clip_values=(np.min(x_train), np.max(x_train)), model=model)
     fgsm = FastGradientMethod(classifier, eps=0.01, minimal=True, eps_step=0.01, num_random_init=35,
                               targeted=False, batch_size=32)
     x_train, x_test = self.data_reshape(self.model_choice, x_train, x_test)
     x_adv_train = fgsm.generate(x=x_train)
     # x_adv_test = fgsm.generate(x=x_test)
     # adv_trainer = AdversarialTrainer(classifier, attacks=fgsm, ratio=1.0)
     # # samples = np.array(list(range(0, y_train.shape[0])))
     # # y_train = np.column_stack((samples, y_train))
     # y_train = np.reshape(y_train, (y_train.shape[0],))
     # print(y_train.shape)
     # adv_trainer.fit(x_adv_train, y_train, batch_size=128, nb_epochs=10)
     history = model.fit(x_adv_train, y_train, epochs=self.epochs, batch_size=32, validation_split=0.2)
     data_record = DataRecord()
     data_record.model = model
     data_record.summary = model.to_yaml()
     data_record.history = history
     data_record.epochs = self.epochs
     self.result_save(data_record, self.save_pre_adv_dir)
def test_classifier_defended_images(fix_get_mnist_subset,
                                    image_dl_estimator_for_attack):

    classifier_list = image_dl_estimator_for_attack(FastGradientMethod,
                                                    defended=True)
    # TODO this if statement must be removed once we have a classifier for both image and tabular data
    if classifier_list is None:
        logging.warning(
            "Couldn't perform this test because no classifier is defined")
        return

    for classifier in classifier_list:
        attack = FastGradientMethod(classifier, eps=1, batch_size=128)
        backend_test_defended_images(attack, fix_get_mnist_subset)