# NOTE: import paths follow the foolbox dev API these tests were written
# against (attacks constructed with the model and called as attack(x, y));
# adjust them if your installed foolbox version differs.
import numpy as np
import torch
import torch.nn as nn
import eagerpy as ep

from foolbox.models import PyTorchModel
from foolbox.devutils import flatten
from foolbox.criteria import misclassification
from foolbox.attacks import (
    GaussianBlurAttack,
    LinearSearchContrastReductionAttack,
    L2BasicIterativeAttack,
    L1BrendelBethgeAttack,
    DatasetAttack,
    BinarySearchContrastReductionAttack,
    BinarizationRefinementAttack,
    EADAttack,
    L2CarliniWagnerAttack,
)


def test_gaussian_blur_attack():
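    """GaussianBlurAttack against a model whose logits are per-channel
    means of local pixel differences: blurring smooths those differences
    away, so at least one blurred input should change the predicted
    class."""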
    channels = 3
    batch_size = 8
    h = w = 32
    bounds = (0, 1)

    class Model(nn.Module):
        def forward(self, x):
            # instead of our usual model that's robust to the BlurAttack,
            # we use a slightly different model that can be attacked
            x = x[:, :, 1:, :] - x[:, :, :-1, :]
            x = x[:, :, :, 1:] - x[:, :, :, :-1]
            x = torch.mean(x, 3)
            x = torch.mean(x, 2)
            return x

    model = Model().eval()
    fmodel = PyTorchModel(model, bounds=bounds)

    np.random.seed(0)
    x = np.random.uniform(*bounds,
                          size=(batch_size, channels, h, w)).astype(np.float32)
    x = torch.from_numpy(x).to(fmodel.device)
    y = fmodel.forward(x).argmax(axis=-1)

    attack = GaussianBlurAttack(fmodel, channel_axis=1)
    advs = attack(x, y)

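    # per-sample L2 norm of the perturbation (each sample flattened to a vector)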
    perturbations = ep.astensor(advs - x)
    norms = flatten(perturbations).square().sum(axis=-1).sqrt()
    y_advs = fmodel.forward(advs).argmax(axis=-1)

    assert x.shape == advs.shape
    assert norms.max().item() <= 20.0 + 1e-7
    assert (y_advs == y).float().mean() < 1  # at least one sample must flip

def test_linear_search_contrast_reduction_attack():
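    """LinearSearchContrastReductionAttack reduces image contrast until the
    channel-mean model misclassifies; checks shapes, an L2 budget of 20,
    and that at least one sample is flipped."""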
    channels = 3
    batch_size = 8
    h = w = 32
    bounds = (0, 1)

    class Model(nn.Module):
        def forward(self, x):
            x = torch.mean(x, 3)
            x = torch.mean(x, 2)
            return x

    model = Model().eval()
    fmodel = PyTorchModel(model, bounds=bounds)

    np.random.seed(0)
    x = np.random.uniform(*bounds,
                          size=(batch_size, channels, h, w)).astype(np.float32)
    x = torch.from_numpy(x).to(fmodel.device)
    y = fmodel.forward(x).argmax(axis=-1)

    attack = LinearSearchContrastReductionAttack(fmodel)
    advs = attack(x, y)

    perturbations = ep.astensor(advs - x)
    norms = flatten(perturbations).square().sum(axis=-1).sqrt()
    y_advs = fmodel.forward(advs).argmax(axis=-1)

    assert x.shape == advs.shape
    assert norms.max().item() <= 20.0 + 1e-7
    assert (y_advs == y).float().mean() < 1
def test_l2_basic_iterative_attack():
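    """L2 Basic Iterative Method with epsilon=2.0 and step_size=0.4; the
    perturbation norms must respect the epsilon budget and at least one
    sample must be misclassified."""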
    channels = 3
    batch_size = 8
    h = w = 32
    bounds = (0, 1)

    class Model(nn.Module):
        def forward(self, x):
            x = torch.mean(x, 3)
            x = torch.mean(x, 2)
            return x

    model = Model().eval()
    fmodel = PyTorchModel(model, bounds=bounds)

    np.random.seed(0)
    x = np.random.uniform(*bounds,
                          size=(batch_size, channels, h, w)).astype(np.float32)
    x = torch.from_numpy(x).to(fmodel.device)
    y = fmodel.forward(x).argmax(axis=-1)

    attack = L2BasicIterativeAttack(fmodel)
    advs = attack(x, y, rescale=False, epsilon=2.0, step_size=0.4)

    perturbations = ep.astensor(advs - x)
    norms = flatten(perturbations).square().sum(axis=-1).sqrt()
    y_advs = fmodel.forward(advs).argmax(axis=-1)

    assert x.shape == advs.shape
    assert norms.max().item() <= 2.0 + 1e-7
    assert (y_advs == y).float().mean() < 1

def test_l1_brendel_bethge_attack():
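    """L1 Brendel-Bethge attack minimizes the L1 norm of the perturbation;
    the test bounds the worst case by half the input volume (32 * 32 * 3 / 2)
    and expects every sample to be misclassified."""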
    channels = 3
    batch_size = 8
    h = w = 32
    bounds = (0, 1)

    class Model(nn.Module):
        def forward(self, x):
            x = torch.mean(x, 3)
            x = torch.mean(x, 2)
            return x

    model = Model().eval()
    fmodel = PyTorchModel(model, bounds=bounds)

    np.random.seed(0)
    x = np.random.uniform(*bounds,
                          size=(batch_size, channels, h, w)).astype(np.float32)
    x = torch.from_numpy(x).to(fmodel.device)
    y = fmodel.forward(x).argmax(axis=-1)

    attack = L1BrendelBethgeAttack(fmodel)
    advs = attack(x, y, steps=100, lr_num_decay=10)

    perturbations = ep.astensor(advs - x)
    norms = flatten(perturbations).abs().sum(axis=-1)
    y_advs = fmodel.forward(advs).argmax(axis=-1)

    assert x.shape == advs.shape
    assert norms.max().item() <= 32 * 32 * 3 / 2
    assert (y_advs == y).float().mean() < 1e-5
def test_dataset_attack():
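    """DatasetAttack draws adversarials from a pool of previously fed
    images that the model classifies differently; every attacked sample
    should end up misclassified."""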
    channels = 3
    batch_size = 64
    h = w = 32
    bounds = (0, 1)

    class Model(nn.Module):
        def forward(self, x):
            x = torch.mean(x, 3)
            x = torch.mean(x, 2)
            return x

    model = Model().eval()
    fmodel = PyTorchModel(model, bounds=bounds)

    np.random.seed(0)
    x = np.random.uniform(*bounds,
                          size=(batch_size, channels, h, w)).astype(np.float32)
    x = torch.from_numpy(x).to(fmodel.device)
    y = fmodel.forward(x).argmax(axis=-1)

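    # feed the full 64-image batch as the candidate pool, then attack only
    # the first 8 inputs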
    attack = DatasetAttack(fmodel)
    attack.feed(x)
    x = x[:8]
    y = y[:8]
    advs = attack(x, y)

    perturbations = ep.astensor(advs - x)
    norms = flatten(perturbations).square().sum(axis=-1).sqrt()
    y_advs = fmodel.forward(advs).argmax(axis=-1)

    assert x.shape == advs.shape
    assert norms.max().item() <= 40.0
    assert (y_advs == y).float().mean() == 0

def test_binary_search_contrast_reduction_attack():
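    """The model binarizes pixels at a 0.5 threshold before averaging, so a
    contrast-reduction adversarial can be refined by moving pixels back
    toward the threshold without changing the binarized prediction; the
    refined norms must never be larger and must be strictly smaller for at
    least one sample."""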
    channels = 3
    batch_size = 8
    h = w = 32
    bounds = (0, 1)

    class Model(nn.Module):
        def forward(self, x):
            x = x.clone()
            x[x >= 0.5] = 1.0
            x[x < 0.5] = 0.0
            x = torch.mean(x, 3)
            x = torch.mean(x, 2)
            return x

    model = Model().eval()
    fmodel = PyTorchModel(model, bounds=bounds)

    np.random.seed(0)
    x = np.random.uniform(*bounds,
                          size=(batch_size, channels, h, w)).astype(np.float32)
    x = torch.from_numpy(x).to(fmodel.device)
    y = fmodel.forward(x).argmax(axis=-1)

    attack = BinarySearchContrastReductionAttack(fmodel)
    advs = attack(x, y)

    perturbations = ep.astensor(advs - x)
    norms = flatten(perturbations).square().sum(axis=-1).sqrt()
    y_advs = fmodel.forward(advs).argmax(axis=-1)
    assert (y_advs == y).float().mean() < 1

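    # refine the adversarials: pixels only need to cross the 0.5 threshold,
    # so the perturbation can be shrunk without changing the prediction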
    attack2 = BinarizationRefinementAttack(fmodel)
    advs2 = attack2(x, y, adversarials=advs, criterion=misclassification)

    perturbations2 = ep.astensor(advs2 - x)
    norms2 = flatten(perturbations2).square().sum(axis=-1).sqrt()
    y_advs2 = fmodel.forward(advs2).argmax(axis=-1)
    assert (y_advs == y_advs2).all()
    assert (norms2 <= norms).all()
    assert (norms2 < norms).any()
def test_ead_attack_cw():
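    """With regularization=0 the elastic-net term of EAD reduces to a pure
    L2 penalty, so EAD should behave like the L2 Carlini-Wagner attack:
    their adversarials stay close and yield identical predictions."""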
    channels = 3
    batch_size = 8
    h = w = 32
    bounds = (0, 1)

    class Model(nn.Module):
        def forward(self, x):
            x = torch.mean(x, 3)
            x = torch.mean(x, 2)
            return x

    model = Model().eval()
    fmodel = PyTorchModel(model, bounds=bounds)

    np.random.seed(0)
    x = np.random.uniform(*bounds,
                          size=(batch_size, channels, h, w)).astype(np.float32)
    x = torch.from_numpy(x).to(fmodel.device)
    y = fmodel.forward(x).argmax(axis=-1)

    attack = EADAttack(fmodel)
    cw_attack = L2CarliniWagnerAttack(fmodel)
    advs = attack(x,
                  y,
                  regularization=0,
                  binary_search_steps=5,
                  max_iterations=1000)
    advs_cw = cw_attack(x, y, binary_search_steps=5, max_iterations=1000)

    perturbations = ep.astensor(advs - x)
    perturbations_cw = ep.astensor(advs_cw - x)
    y_advs = fmodel.forward(advs).argmax(axis=-1)
    y_advs_cw = fmodel.forward(advs_cw).argmax(axis=-1)

    diff = flatten(perturbations -
                   perturbations_cw).square().sum(axis=-1).sqrt()

    assert x.shape == advs.shape
    assert diff.max().item() <= 40.0 + 1e-7
    assert (y_advs == y_advs_cw).float().mean() == 1