    # Momentum iterative FGSM-style attack with optional random restarts.
    # (ai and h are project helper modules; nn is torch.nn.)
    def attack(self, model, xo, untargeted, target, w, loss_function=ai.stdLoss, **kargs):
        # Resolve the concrete perturbation width w (self.epsilon may be a scheduled value).
        w = self.epsilon.getVal(c=w, **kargs)

        x = nn.Parameter(xo.clone(), requires_grad=True)
        gradorg = h.zeros(x.shape)   # momentum buffer for the accumulated gradient
        is_eq = 1                    # per-sample mask; finished samples stop moving when should_end is set

        w = h.ones(x.shape) * w      # broadcast the width to a per-element tensor
        for i in range(self.k):
            # Periodically restart from a random point inside the allowed box.
            if self.restart is not None and i % int(self.k / self.restart) == 0:
                x = is_eq * (torch.rand_like(xo) * w + xo) + (1 - is_eq) * x
                x = nn.Parameter(x, requires_grad=True)

            model.optimizer.zero_grad()

            out = model(x).vanillaTensorPart()   # plain tensor part of the (possibly abstract) output
            loss = loss_function(out, target)

            loss.sum().backward(retain_graph=True)
            with torch.no_grad():
                oth = x.grad / torch.norm(x.grad, p=1)             # L1-normalized gradient
                gradorg *= self.mu                                 # decay the momentum buffer
                gradorg += oth                                     # accumulate the new gradient
                grad = (self.r * w / self.k) * ai.mysign(gradorg)  # signed step of size r * w / k
                if self.should_end:
                    is_eq = ai.mulIfEq(grad, out, target)
                # Ascend the loss for untargeted attacks, descend it for targeted ones.
                x = (x + grad * is_eq) if untargeted else (x - grad * is_eq)

                # Project back onto the L-infinity box of half-width w around xo.
                x = xo + torch.min(torch.max(x - xo, -w), w)
                x.requires_grad_()

        model.optimizer.zero_grad()

        return x
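Both snippets end each step by projecting x back into an L-infinity box around xo. This first example uses torch.min/torch.max because w is a per-element tensor; with a scalar radius the same projection can be written with torch.clamp, as the next example does. A minimal check of that equivalence (the shapes and the 0.1 radius below are made up for illustration):

import torch

x  = torch.randn(4, 3)          # current (perturbed) point
xo = torch.zeros(4, 3)          # original input
w  = torch.full_like(xo, 0.1)   # per-element radius, here constant

proj_minmax = xo + torch.min(torch.max(x - xo, -w), w)   # projection as in this example
proj_clamp  = xo + torch.clamp(x - xo, -0.1, 0.1)        # scalar-bound form used below

assert torch.allclose(proj_minmax, proj_clamp)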
Example #2
    # Same momentum-FGSM loop as above, but with a scalar epsilon and a plain
    # (non-abstract) model output.
    def attack(self,
               model,
               epsilon,
               xo,
               untargeted,
               target,
               loss_function=ai.stdLoss):
        if self.epsilon is not None:
            # An epsilon configured on the attack overrides the one passed in.
            epsilon = self.epsilon
        x = nn.Parameter(xo.clone(), requires_grad=True)
        gradorg = h.zeros(x.shape)   # momentum buffer for the accumulated gradient
        is_eq = 1                    # per-sample mask, updated when should_end is set
        for i in range(self.k):
            # Periodically restart from a random point around xo.
            if self.restart is not None and i % int(self.k / self.restart) == 0:
                x = is_eq * (torch.randn_like(xo) * epsilon + xo) + (1 - is_eq) * x
                x = nn.Parameter(x, requires_grad=True)

            model.optimizer.zero_grad()

            out = model(x)
            loss = loss_function(out, target)

            loss.backward()
            with torch.no_grad():
                oth = x.grad / torch.norm(x.grad, p=1)
                gradorg *= self.mu
                gradorg += oth
                grad = (self.r * epsilon / self.k) * ai.mysign(gradorg)
                if self.should_end:
                    is_eq = ai.mulIfEq(grad, out, target)
                x = (x + grad * is_eq) if untargeted else (x - grad * is_eq)
                x = xo + torch.clamp(x - xo, -epsilon, epsilon)
                x.requires_grad_()

        model.optimizer.zero_grad()
        return x
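For reference, the same momentum-and-sign update can be written without the project-specific ai / h helpers. The sketch below is an assumption-laden reconstruction, not the library's code: it swaps ai.stdLoss for cross-entropy and ai.mysign for torch.sign, drops the restart and mulIfEq early-stopping logic, and uses hypothetical names (momentum_pgd and all of its parameters).

import torch
import torch.nn.functional as F

def momentum_pgd(model, xo, target, eps=8 / 255, steps=20, mu=0.9, r=1.0,
                 untargeted=True):
    # Start from the clean input and track gradients with respect to it.
    x = xo.clone().detach().requires_grad_(True)
    grad_acc = torch.zeros_like(xo)   # momentum buffer (gradorg above)
    step = r * eps / steps            # per-iteration step size, as in self.r * epsilon / self.k
    for _ in range(steps):
        model.zero_grad()
        loss = F.cross_entropy(model(x), target)
        loss.backward()
        with torch.no_grad():
            g = x.grad / x.grad.abs().sum().clamp_min(1e-12)   # L1-normalized gradient
            grad_acc = mu * grad_acc + g                       # momentum accumulation
            delta = step * torch.sign(grad_acc)
            x = x + delta if untargeted else x - delta         # ascend / descend the loss
            x = xo + torch.clamp(x - xo, -eps, eps)            # project onto the eps-box
        x.requires_grad_(True)
    return x.detach()

Calling x_adv = momentum_pgd(net, images, labels) on a classifier net (placeholder names) would then return perturbed inputs that stay within eps of images in the L-infinity norm.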