Example #1
# Imports assumed by this snippet (only __init__ was shown in the original).
import numpy as np
from torch import nn
from torch.distributions import laplace, normal


class RandModel(nn.Module):  # assumed to subclass nn.Module, which the super() call implies

    def __init__(self, classifier, num_classes=10, noise=None, sigma=0.25):
        super(RandModel, self).__init__()
        self.classifier = classifier
        if noise == "Normal":
            self.noise = normal.Normal(0, sigma)
            self.sigma = sigma
        elif noise == "Laplace":
            # Laplace(0, b) has standard deviation b * sqrt(2), so a scale of
            # sigma / sqrt(2) matches the Normal branch's standard deviation.
            self.noise = laplace.Laplace(0, sigma / np.sqrt(2))
            self.sigma = sigma
        else:
            self.noise = None
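A minimal usage sketch for this example (not from the source): the classifier below is a hypothetical placeholder, and only the constructor and the stored noise distribution are exercised, since the snippet does not show RandModel's forward pass.

import torch
from torch import nn

# Hypothetical stand-in classifier; any module mapping images to logits works.
classifier = nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 32, 10))
model = RandModel(classifier, num_classes=10, noise="Laplace", sigma=0.25)

# Draw additive noise with the same shape as a batch of 32x32 RGB images.
x = torch.rand(4, 3, 32, 32)
noisy_x = x + model.noise.sample(x.shape)
print(noisy_x.shape)  # torch.Size([4, 3, 32, 32])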
Example #2
import numpy as np
import torch
from torch.distributions import laplace, uniform

# batch_multiply, clamp_by_pnorm, normalize_by_pnorm and clamp are helper
# utilities assumed to be provided by the surrounding library.


def rand_init_delta(delta, x, ord, eps, clip_min, clip_max):
    # TODO: currently only one way of "uniform" sampling is considered
    # for Linf, there are 3 ways:
    #   1) true uniform sampling by first computing the rectangle, then sampling
    #   2) uniform in the eps box, then truncate using the data domain (implemented)
    #   3) uniform sample in the data domain, then truncate with the eps box
    # for L2, true uniform sampling is hard, since it requires uniform sampling
    #   inside an intersection of a cube and a ball, so there are 2 ways:
    #   1) uniform sample in the data domain, then truncate using the L2 ball
    #       (implemented)
    #   2) uniform sample in the L2 ball, then truncate using the data domain
    # for L1: uniform L1 ball init, then truncate using the data domain

    if isinstance(eps, torch.Tensor):
        # a per-example eps tensor must match the batch size
        assert len(eps) == len(delta)

    if ord == np.inf:
        delta.data.uniform_(-1, 1)
        delta.data = batch_multiply(eps, delta.data)
    elif ord == 2:
        delta.data.uniform_(clip_min, clip_max)
        delta.data = delta.data - x
        delta.data = clamp_by_pnorm(delta.data, ord, eps)
    elif ord == 1:
        ini = laplace.Laplace(
            loc=delta.new_tensor(0), scale=delta.new_tensor(1))
        delta.data = ini.sample(delta.data.shape)
        delta.data = normalize_by_pnorm(delta.data, p=1)
        ray = uniform.Uniform(0, eps).sample()
        delta.data *= ray
        delta.data = clamp(x.data + delta.data, clip_min, clip_max) - x.data
    else:
        error = "Only ord = inf, ord = 1 and ord = 2 have been implemented"
        raise NotImplementedError(error)

    delta.data = clamp(
        x + delta.data, min=clip_min, max=clip_max) - x
    return delta.data
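A minimal usage sketch (not from the source), assuming the helper utilities referenced above (batch_multiply, clamp_by_pnorm, normalize_by_pnorm, clamp) are importable; the eps value and tensor shapes are illustrative.

import numpy as np
import torch

x = torch.rand(8, 3, 32, 32)   # batch of images in [0, 1]
delta = torch.zeros_like(x)    # perturbation, initialized in place below

rand_init_delta(delta, x, np.inf, 8.0 / 255, clip_min=0.0, clip_max=1.0)

# the final clamp guarantees x + delta stays inside the data domain
assert (x + delta).min().item() >= 0.0
assert (x + delta).max().item() <= 1.0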
Example #3
import torch
import torch.nn.functional as F
from torch.distributions import laplace

# clamp, norms_l1, l1_dir_topk and proj_l1ball are helper functions assumed
# to be defined elsewhere in the same repository.


def attack_pgd(model, X, y, epsilon, alpha, attack_iters, restarts, norm,
               init):
    max_loss = torch.zeros(y.shape[0]).cuda()
    max_delta = torch.zeros_like(X).cuda()
    for _ in range(restarts):
        # initialize the perturbation for this restart
        if init == 'zero':
            delta = torch.zeros_like(X).cuda()
        elif init == 'random':
            if norm == 'l2-scaled':
                delta = torch.zeros_like(X).cuda().normal_()
                dnorm = delta.view(delta.size(0), -1).norm(p=2, dim=1).view(
                    delta.size(0), 1, 1, 1)
                r = torch.zeros_like(dnorm).uniform_(0, 1)
                delta.data *= r * epsilon / dnorm
            elif norm == 'l1':
                delta = torch.zeros_like(X).cuda()
                ini = laplace.Laplace(loc=delta.new_tensor(0),
                                      scale=delta.new_tensor(1))
                delta.data = ini.sample(delta.data.shape)
                delta.data = (2.0 * delta.data - 1.0) * epsilon
                delta.data /= norms_l1(delta.detach()).clamp(min=epsilon)
                delta.data = clamp(delta, 0 - X, 1 - X)
            else:
                delta = torch.zeros_like(X).cuda().uniform_(-epsilon, epsilon)
        delta.requires_grad = True

        for _ in range(attack_iters):
            output = model(X + delta)
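            # keep attacking only the examples the model still classifies correctly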
            index = torch.where(output.max(1)[1] == y)[0]
            if len(index) == 0:
                break
            loss = F.cross_entropy(output, y)
            loss.backward()
            grad = delta.grad.detach()
            d = delta[index, :, :, :]
            g = grad[index, :, :, :]
            x = X[index, :, :, :]

            if norm == 'linf':
                d = torch.clamp(d + alpha * torch.sign(g), -epsilon, epsilon)
            elif norm == 'l2':  # using the sign of the gradient limits the update directions
                d = d + alpha * torch.sign(g)
                d_flat = d.view(d.size(0), -1)
                # use a local name so the 'norm' argument is not shadowed
                d_norm = d_flat.norm(p=2, dim=1).clamp(min=epsilon).view(
                    d.size(0), 1, 1, 1)
                d *= epsilon / d_norm
            elif norm == 'l2-scaled':
                g_norm = torch.norm(g.view(g.shape[0], -1),
                                    dim=1).view(-1, 1, 1, 1)
                scaled_g = g / (g_norm + 1e-10)
                d = (d + scaled_g * alpha).view(d.size(0), -1).renorm(
                    p=2, dim=0, maxnorm=epsilon).view_as(d)
            elif norm == 'l1':
                k = 20
                d = d + alpha * l1_dir_topk(g, d, x, k)
                d = proj_l1ball(d, epsilon=epsilon)

            d = clamp(d, 0 - x, 1 - x)
            delta.data[index, :, :, :] = d
            delta.grad.zero_()
        all_loss = F.cross_entropy(model(X + delta), y, reduction='none')
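        # keep, per example, the perturbation from the restart with the highest loss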
        max_delta[all_loss >= max_loss] = delta.detach()[all_loss >= max_loss]
        max_loss = torch.max(max_loss, all_loss)
    return max_delta
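A minimal usage sketch (illustrative only): the model, data, and hyperparameters are assumptions, a CUDA device is required because the function moves tensors with .cuda(), and the repository helpers noted above (clamp, norms_l1, l1_dir_topk, proj_l1ball) must be importable.

import torch
from torch import nn

# Hypothetical toy model and batch, sized for 32x32 RGB inputs.
model = nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 32, 10)).cuda()
X = torch.rand(16, 3, 32, 32).cuda()
y = torch.randint(0, 10, (16,)).cuda()

delta = attack_pgd(model, X, y, epsilon=8.0 / 255, alpha=2.0 / 255,
                   attack_iters=10, restarts=1, norm='linf', init='random')
robust_acc = (model(X + delta).max(1)[1] == y).float().mean().item()
print(f"robust accuracy on this batch: {robust_acc:.3f}")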