import numpy as np
import pytest

from art.attacks.poisoning import PoisoningAttackBackdoor, PoisoningAttackCleanLabelBackdoor
from art.attacks.poisoning.perturbations import add_pattern_bd
from art.utils import to_categorical
from tests.utils import ARTTestException


def test_poison(art_warning, get_default_mnist_subset, image_dl_estimator):
    try:
        (x_train, y_train), (_, _) = get_default_mnist_subset
        classifier, _ = image_dl_estimator()
        target = to_categorical([9], 10)[0]  # one-hot target label (class 9 of 10)
        backdoor = PoisoningAttackBackdoor(add_pattern_bd)
        attack = PoisoningAttackCleanLabelBackdoor(backdoor, classifier, target)
        poison_data, poison_labels = attack.poison(x_train, y_train)

        # Poisoning should preserve the shapes of both the data and the labels.
        np.testing.assert_equal(poison_data.shape, x_train.shape)
        np.testing.assert_equal(poison_labels.shape, y_train.shape)
    except ARTTestException as e:
        art_warning(e)
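

# Illustrative sketch, not part of the original tests: ``add_pattern_bd`` stamps
# a small pixel pattern near the image corner without changing the array shape.
# The single-channel 28x28 shape below is an assumption matching MNIST-style data.
def demo_add_pattern_bd():
    clean = np.zeros((1, 28, 28))      # one blank grayscale image
    triggered = add_pattern_bd(clean)  # same shape, with the trigger pixels set
    assert triggered.shape == clean.shape
    return triggered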


def test_failure_modes(art_warning, get_default_mnist_subset, image_dl_estimator, params):
    try:
        (x_train, y_train), (_, _) = get_default_mnist_subset
        classifier, _ = image_dl_estimator()
        target = to_categorical([9], 10)[0]
        backdoor = PoisoningAttackBackdoor(add_pattern_bd)
        # Invalid hyperparameters in ``params`` should be rejected at construction time.
        with pytest.raises(ValueError):
            PoisoningAttackCleanLabelBackdoor(backdoor, classifier, target, **params)
    except ARTTestException as e:
        art_warning(e)
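

# Hedged sketch of the ``params`` fixture the test above expects; the concrete
# invalid values are assumptions chosen to trip parameter validation
# (``pp_poison``, the fraction of data to poison, is expected to lie in (0, 1]).
@pytest.fixture(params=[{"pp_poison": -0.1}, {"pp_poison": 1.5}])
def params(request):
    return request.param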


def poison_loader_clbd(**kwargs):
    """Build a clean-label backdoor attack together with its underlying backdoor."""
    backdoor_kwargs = kwargs.pop("backdoor_kwargs")
    backdoor = poison_loader_GTSRB(**backdoor_kwargs)

    # The attack expects a one-hot target array, so convert the sparse class index.
    target = kwargs.pop("target")
    n_classes = kwargs.pop("n_classes")
    targets = to_categorical([target], n_classes)[0]

    return (
        PoisoningAttackCleanLabelBackdoor(backdoor=backdoor, target=targets, **kwargs),
        backdoor,
    )
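

# Hedged usage sketch: the kwargs layout below is an assumption, since the
# signature of ``poison_loader_GTSRB`` is not shown here. ``backdoor_kwargs``
# is forwarded to it unchanged; every other keyword reaches the
# PoisoningAttackCleanLabelBackdoor constructor.
def demo_poison_loader_clbd(proxy_classifier):
    attack, backdoor = poison_loader_clbd(
        backdoor_kwargs={},                 # hypothetical poison_loader_GTSRB options
        target=9,                           # sparse class index, one-hot encoded inside
        n_classes=43,                       # GTSRB has 43 traffic-sign classes
        proxy_classifier=proxy_classifier,  # assumed constructor kwarg (an ART classifier)
        pp_poison=0.33,                     # assumed constructor kwarg: fraction to poison
    )
    return attack, backdoor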