Example #1
def poison_loader_GTSRB(**kwargs):
    poison_type = kwargs["poison_type"]
    if poison_type == "pattern":

        def mod(x):
            return perturbations.add_pattern_bd(x, pixel_value=255)

    elif poison_type == "pixel":

        def mod(x):
            return perturbations.add_single_bd(x, pixel_value=255)

    elif poison_type == "image":
        backdoor_path = kwargs.get("backdoor_path")
        if backdoor_path is None:
            raise ValueError(
                "poison_type 'image' requires 'backdoor_path' kwarg path to image"
            )
        size = kwargs.get("size")
        if size is None:
            raise ValueError("poison_type 'image' requires 'size' kwarg tuple")
        size = tuple(size)

        def mod(x):
            return perturbations.insert_image(x,
                                              backdoor_path=backdoor_path,
                                              size=size)

    else:
        raise ValueError(f"Unknown poison_type {poison_type}")

    return PoisoningAttackBackdoor(mod)
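A minimal usage sketch, not part of the original loader: it assumes ART's PoisoningAttackBackdoor.poison(x, y) interface, placeholder inputs, and the usual 43 GTSRB classes.

import numpy as np

# Placeholder batch of 48x48 RGB images in the 0-255 range this variant expects (pixel_value=255).
x_batch = np.random.randint(0, 256, size=(4, 48, 48, 3)).astype(np.float32)
# One-hot target labels for the poisoned samples (43 classes assumed).
y_target = np.zeros((4, 43), dtype=np.float32)
y_target[:, 0] = 1.0

attack = poison_loader_GTSRB(poison_type="pattern")
poisoned_x, poisoned_y = attack.poison(x_batch, y=y_target)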
Example #2
def poison_loader_GTSRB(**kwargs):
    poison_type = kwargs["poison_type"]
    if poison_type == "pattern":

        def mod(x):
            return perturbations.add_pattern_bd(x, pixel_value=1)

    elif poison_type == "pixel":

        def mod(x):
            return perturbations.add_single_bd(x, pixel_value=1)

    elif poison_type == "image":
        backdoor_path = kwargs.get("backdoor_path")
        if backdoor_path is None:
            raise ValueError(
                "poison_type 'image' requires 'backdoor_path' kwarg path to image"
            )
        backdoor_packaged_with_armory = kwargs.get(
            "backdoor_packaged_with_armory", False
        )
        if backdoor_packaged_with_armory:
            backdoor_path = os.path.join(
                # Get base directory where armory is pip installed
                os.path.dirname(os.path.dirname(armory.__file__)),
                backdoor_path,
            )
        size = kwargs.get("size")
        if size is None:
            raise ValueError("poison_type 'image' requires 'size' kwarg tuple")
        size = tuple(size)
        mode = kwargs.get("mode", "RGB")
        blend = kwargs.get("blend", 0.6)
        base_img_size_x = kwargs.get("base_img_size_x", 48)
        base_img_size_y = kwargs.get("base_img_size_y", 48)
        channels_first = kwargs.get("channels_first", False)
        x_shift = kwargs.get("x_shift", (base_img_size_x - size[0]) // 2)
        y_shift = kwargs.get("y_shift", (base_img_size_y - size[1]) // 2)

        def mod(x):
            return perturbations.insert_image(
                x,
                backdoor_path=backdoor_path,
                size=size,
                mode=mode,
                x_shift=x_shift,
                y_shift=y_shift,
                channels_first=channels_first,
                blend=blend,
                random=False,
            )

    else:
        raise ValueError(f"Unknown poison_type {poison_type}")

    return PoisoningAttackBackdoor(mod)
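A hedged sketch of the "image" branch of the same loader; the trigger path, size, and blend values below are placeholders rather than values from the original.

attack = poison_loader_GTSRB(
    poison_type="image",
    backdoor_path="path/to/trigger.png",  # placeholder path to a trigger image
    size=(10, 10),                        # trigger is resized to 10x10 before insertion
    mode="RGB",
    blend=0.6,                            # alpha-blend weight for the inserted trigger
)
# With no explicit x_shift/y_shift, the trigger is centered on the assumed 48x48 base image.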
Example #3
def test_failure_modes(art_warning, get_default_mnist_subset, image_dl_estimator, params):
    try:
        (x_train, y_train), (_, _) = get_default_mnist_subset
        classifier, _ = image_dl_estimator()
        target = to_categorical([9], 10)[0]
        backdoor = PoisoningAttackBackdoor(add_pattern_bd)
        with pytest.raises(ValueError):
            attack = PoisoningAttackCleanLabelBackdoor(backdoor, classifier, target, **params)
    except ARTTestException as e:
        art_warning(e)
Example #4
def test_poison(art_warning, get_default_mnist_subset, image_dl_estimator):
    try:
        (x_train, y_train), (_, _) = get_default_mnist_subset
        classifier, _ = image_dl_estimator()
        target = to_categorical([9], 10)[0]
        backdoor = PoisoningAttackBackdoor(add_pattern_bd)
        attack = PoisoningAttackCleanLabelBackdoor(backdoor, classifier, target)
        poison_data, poison_labels = attack.poison(x_train, y_train)

        np.testing.assert_equal(poison_data.shape, x_train.shape)
        np.testing.assert_equal(poison_labels.shape, y_train.shape)
    except ARTTestException as e:
        art_warning(e)
Example #5
def test_poison(art_warning, get_default_mnist_subset, image_dl_estimator):
    try:
        (x_train, y_train), (_, _) = get_default_mnist_subset
        classifier, _ = image_dl_estimator(functional=True)

        if isinstance(classifier, PyTorchClassifier):

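            # PyTorch estimators take channels-first (NCHW) inputs, while add_pattern_bd
            # expects channels-last, so transpose before and after applying the trigger.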
            def mod(x):
                original_dtype = x.dtype
                x = np.transpose(x, (0, 2, 3, 1)).astype(np.float32)
                x = add_pattern_bd(x)
                x = np.transpose(x, (0, 3, 1, 2)).astype(np.float32)
                return x.astype(original_dtype)

        else:

            def mod(x):
                original_dtype = x.dtype
                x = add_pattern_bd(x)
                return x.astype(original_dtype)

        backdoor = PoisoningAttackBackdoor(mod)
        target = y_train[0]
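        # Use the label of the first sample whose class differs from the target as the source class.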
        diff_index = list(
            set(np.arange(len(y_train))) -
            set(np.where(np.all(y_train == target, axis=1))[0]))[0]
        source = y_train[diff_index]
        attack = HiddenTriggerBackdoor(
            classifier,
            eps=0.3,
            target=target,
            source=source,
            feature_layer=len(classifier.layer_names) - 2,
            backdoor=backdoor,
            decay_coeff=0.95,
            decay_iter=1,
            max_iter=2,
            batch_size=1,
        )
        poison_data, poison_inds = attack.poison(x_train, y_train)

        with pytest.raises(AssertionError):
            np.testing.assert_equal(poison_data, x_train[poison_inds])

    except ARTTestException as e:
        art_warning(e)
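A brief follow-up sketch (an assumption, not part of the test): poison_inds indexes the samples that were perturbed, so a poisoned training set can be assembled by splicing them back in.

x_train_poisoned = x_train.copy()
x_train_poisoned[poison_inds] = poison_data  # replace only the perturbed samples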
Example #6
def test_check_params(art_warning, get_default_mnist_subset,
                      image_dl_estimator):
    try:
        (x_train, y_train), (_, _) = get_default_mnist_subset
        classifier, _ = image_dl_estimator(functional=True)

        if isinstance(classifier, PyTorchClassifier):

            def mod(x):
                original_dtype = x.dtype
                x = np.transpose(x, (0, 2, 3, 1)).astype(np.float32)
                x = add_pattern_bd(x)
                x = np.transpose(x, (0, 3, 1, 2)).astype(np.float32)
                return x.astype(original_dtype)

        else:

            def mod(x):
                original_dtype = x.dtype
                x = add_pattern_bd(x)
                return x.astype(original_dtype)

        backdoor = PoisoningAttackBackdoor(mod)
        target = y_train[0]
        diff_index = list(
            set(np.arange(len(y_train))) -
            set(np.where(np.all(y_train == target, axis=1))[0]))[0]
        source = y_train[diff_index]

        # Test non-array target
        with pytest.raises(ValueError):
            _ = HiddenTriggerBackdoor(
                classifier,
                eps=0.3,
                target=0,
                source=source,
                feature_layer=len(classifier.layer_names) - 2,
                backdoor=backdoor,
                decay_coeff=0.95,
                decay_iter=1,
                max_iter=2,
                batch_size=1,
                poison_percent=0.1,
                learning_rate=-1,
            )
        # Test negative LR
        with pytest.raises(ValueError):
            _ = HiddenTriggerBackdoor(
                classifier,
                eps=0.3,
                target=target,
                source=source,
                feature_layer=len(classifier.layer_names) - 2,
                backdoor=backdoor,
                decay_coeff=0.95,
                decay_iter=1,
                max_iter=2,
                batch_size=1,
                poison_percent=0.1,
                learning_rate=-1,
            )
        # Test same target/source
        with pytest.raises(ValueError):
            _ = HiddenTriggerBackdoor(
                classifier,
                eps=0.3,
                target=source,
                source=source,
                feature_layer=len(classifier.layer_names) - 2,
                backdoor=backdoor,
                decay_coeff=0.95,
                decay_iter=1,
                max_iter=2,
                batch_size=1,
                poison_percent=0.1,
            )
        # Test Bad Backdoor type
        with pytest.raises(TypeError):
            _ = HiddenTriggerBackdoor(
                classifier,
                eps=0.3,
                target=target,
                source=source,
                feature_layer=len(classifier.layer_names) - 2,
                backdoor=source,
                decay_coeff=0.95,
                decay_iter=1,
                max_iter=2,
                batch_size=1,
                poison_percent=0.1,
            )
        # Test eps
        with pytest.raises(ValueError):
            _ = HiddenTriggerBackdoor(
                classifier,
                eps=-1,
                target=target,
                source=source,
                feature_layer=len(classifier.layer_names) - 2,
                backdoor=backdoor,
                decay_coeff=0.95,
                decay_iter=1,
                max_iter=2,
                batch_size=1,
                poison_percent=0.1,
            )
        # Test bad feature layer
        with pytest.raises(TypeError):
            _ = HiddenTriggerBackdoor(
                classifier,
                eps=0.3,
                target=target,
                source=source,
                feature_layer=2.5,
                backdoor=backdoor,
                decay_coeff=0.95,
                decay_iter=1,
                max_iter=2,
                batch_size=1,
                poison_percent=0.1,
            )
        # Test negative feature layer
        with pytest.raises(ValueError):
            _ = HiddenTriggerBackdoor(
                classifier,
                eps=0.3,
                target=target,
                source=source,
                feature_layer=-1,
                backdoor=backdoor,
                decay_coeff=0.95,
                decay_iter=1,
                max_iter=2,
                batch_size=1,
                poison_percent=0.1,
            )
        # Test negative decay
        with pytest.raises(ValueError):
            _ = HiddenTriggerBackdoor(
                classifier,
                eps=0.3,
                target=target,
                source=source,
                feature_layer=len(classifier.layer_names) - 2,
                backdoor=backdoor,
                decay_coeff=-1,
                decay_iter=1,
                max_iter=2,
                batch_size=1,
                poison_percent=0.1,
            )
        # Test invalid poison_percent
        with pytest.raises(ValueError):
            _ = HiddenTriggerBackdoor(
                classifier,
                eps=0.3,
                target=target,
                source=source,
                feature_layer=len(classifier.layer_names) - 2,
                backdoor=backdoor,
                decay_coeff=0.95,
                decay_iter=1,
                max_iter=2,
                batch_size=1,
                poison_percent=1.1,
            )

    except ARTTestException as e:
        art_warning(e)