    def attack(self, model, xo, untargeted, target, w, loss_function=ai.stdLoss, **kargs):
        w = self.epsilon.getVal(c = w, **kargs)

        x = nn.Parameter(xo.clone(), requires_grad=True)
        gradorg = h.zeros(x.shape)
        is_eq = 1

        w = h.ones(x.shape) * w
        for i in range(self.k):
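            # Periodic restart: re-sample x as xo plus uniform noise in [0, w);
            # entries where is_eq is 0 keep their current x.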
            if self.restart is not None and i % int(self.k / self.restart) == 0:
                x = is_eq * (torch.rand_like(xo) * w + xo) + (1 - is_eq) * x
                x = nn.Parameter(x, requires_grad = True)

            model.optimizer.zero_grad()

            out = model(x).vanillaTensorPart()
            loss = loss_function(out, target)

            loss.sum().backward(retain_graph=True)
            with torch.no_grad():
                oth = x.grad / torch.norm(x.grad, p=1)
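                # Momentum: decay the accumulated gradient by mu, add the
                # L1-normalized current gradient, then step by its sign.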
                gradorg *= self.mu 
                gradorg += oth
                grad = (self.r * w / self.k) * ai.mysign(gradorg)
                if self.should_end:
                    is_eq = ai.mulIfEq(grad, out, target)
                x = (x + grad * is_eq) if untargeted else (x - grad * is_eq)

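                # Project back into the L-infinity box of radius w around xo.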
                x = xo + torch.min(torch.max(x - xo, -w),w)
                x.requires_grad_()

        model.optimizer.zero_grad()

        return x
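The last step of the loop is a standard L-infinity projection: the accumulated perturbation is clipped back into the box of radius w around the original input. A minimal self-contained sketch of just that step (project_linf is an illustrative name, not part of the library):

import torch

def project_linf(x, xo, w):
    # clamp the perturbation x - xo elementwise to [-w, w]
    return xo + torch.clamp(x - xo, min=-w, max=w)

xo = torch.zeros(3)
x = torch.tensor([0.5, -0.2, 0.05])
print(project_linf(x, xo, 0.1))  # tensor([ 0.1000, -0.1000,  0.0500])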
Example No. 2
 def box(original, diameter):
     """
     This version of it takes advantage of betas being uncorrelated.  
     Unfortunately they stay uncorrelated forever.  
     Counterintuitively, tests show more accuracy - this is because the other box
     creates lots of 0 errors which get accounted for by the calculation of the newhead in relu
     which is apparently worse than not accounting for errors.
     """
     return Box(original,
                h.ones(original.size()) * diameter, None).checkSizes()
 def box(self, original, w, **kargs):  
     """
     This version of it takes advantage of betas being uncorrelated.  
     Unfortunately they stay uncorrelated forever.  
     Counterintuitively, tests show more accuracy - this is because the other box
     creates lots of 0 errors which get accounted for by the calculation of the newhead in relu
     which is apparently worse than not accounting for errors.
     """
     radius = self.w.getVal(c = w, **kargs)
     return self.domain(original, h.ones(original.size()) * radius, None).checkSizes()
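Both box constructors build an interval abstraction: a center ("head") together with a per-coordinate half-width ("beta"), representing the set [head - beta, head + beta] elementwise. A rough standalone illustration with plain tensors instead of the library's Box/domain classes:

import torch

original = torch.tensor([0.2, 0.7])
radius = 0.1
head = original                             # box center
beta = torch.ones_like(original) * radius   # per-coordinate half-width
lower, upper = head - beta, head + beta
print(lower, upper)  # tensor([0.1000, 0.6000]) tensor([0.3000, 0.8000])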
Example No. 4
    def stochasticCorrelate(self, num_correlate, choices=None):
        if num_correlate == 0:
            return self

        domshape = self.head.shape
        batch_size = domshape[0]
        num_pixs = h.product(domshape[1:])
        num_correlate = min(num_correlate, num_pixs)
        ucc_mask = h.ones([batch_size, num_pixs]).long()

        cc_indx_batch_beta = h.cudify(
            torch.multinomial(
                ucc_mask.to_dtype(), num_correlate,
                replacement=False)) if choices is None else choices
        return self.correlate(cc_indx_batch_beta)
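The index sampling above picks, for every batch element, num_correlate distinct pixel indices uniformly at random: torch.multinomial over a row of equal weights with replacement=False does exactly that. A small standalone check of the pattern:

import torch

batch_size, num_pixs, num_correlate = 2, 6, 3
ucc_mask = torch.ones(batch_size, num_pixs)
idx = torch.multinomial(ucc_mask, num_correlate, replacement=False)
print(idx.shape)  # torch.Size([2, 3]); each row holds 3 distinct indices in [0, 6)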
Example No. 5
    def stochasticDecorrelate(self,
                              num_decorrelate,
                              choices=None,
                              num_to_keep=False):
        dummy = self.dummyDecorrelate(num_decorrelate)
        if dummy is not None:
            return dummy
        num_error_terms = self.errors.shape[0]
        batch_size = self.head.shape[0]

        ucc_mask = h.ones([batch_size, self.errors.shape[0]]).long()
        num_to_sample = (num_decorrelate if num_to_keep
                         else num_error_terms - num_decorrelate)
        cc_indx_batch_err = h.cudify(
            torch.multinomial(ucc_mask.to_dtype(), num_to_sample,
                              replacement=False)) if choices is None else choices
        return self.decorrelate(cc_indx_batch_err)
Example No. 6
 def forward(self, x, time=0, **kargs):
     if self.training:
         with torch.no_grad():
             p = self.p.getVal(time=time)
             mask = (F.dropout2d if self.use_2d else F.dropout)(
                 h.ones(x.size()), p=p, training=True)
         if self.alpha_dropout:
             with torch.no_grad():
                 keep_prob = 1 - p
                 alpha = -1.7580993408473766
                 a = math.pow(
                     keep_prob + alpha * alpha * keep_prob *
                     (1 - keep_prob), -0.5)
                 b = -a * alpha * (1 - keep_prob)
                 mask = mask * a
             return x * mask + b
         else:
             return x * mask
     else:
         return x
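The a and b in the alpha-dropout branch are the affine correction from self-normalizing (SELU-style) dropout, intended to keep the activation statistics roughly stable when units are dropped. Plugging a concrete rate into the same formula, just to see the resulting constants (values are approximate):

import math

p = 0.1
keep_prob = 1 - p
alpha = -1.7580993408473766   # SELU negative saturation value, as in the code above
a = math.pow(keep_prob + alpha * alpha * keep_prob * (1 - keep_prob), -0.5)
b = -a * alpha * (1 - keep_prob)
print(a, b)  # approximately 0.9213 and 0.1620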
Example No. 7
    def softplus(self):
        if self.errors is None:
            if self.beta is None:
                return self.new(F.softplus(self.head), None, None)
            tp = F.softplus(self.head + self.beta)
            bt = F.softplus(self.head - self.beta)
            return self.new((tp + bt) / 2, (tp - bt) / 2, None)

        errors = self.concreteErrors()
        o = h.ones(self.head.size())

        def sp(hd):
            # equivalent to torch.log(o + torch.exp(hd)), which is not very stable
            return F.softplus(hd)

        def spp(hd):
            ehd = torch.exp(hd)
            return ehd.div(ehd + o)

        def sppp(hd):
            ehd = torch.exp(hd)
            md = ehd + o
            return ehd.div(md.mul(md))

        fa = sp(self.head)
        fpa = spp(self.head)

        a = self.head

        k = torch.sum(errors.abs(), 0)

        def evalG(r):
            return r.mul(r).mul(sppp(a + r))

        m = torch.max(evalG(h.zeros(k.size())), torch.max(evalG(k), evalG(-k)))
        m = h.ifThenElse(a.abs().lt(k),
                         torch.max(m, torch.max(evalG(a), evalG(-a))), m)
        m /= 2

        return self.new(fa, m if self.beta is None else m + self.beta.mul(fpa),
                        None if self.errors is None else self.errors.mul(fpa))
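The helpers sp, spp and sppp are softplus and its first and second derivatives in closed form (spp is the sigmoid, sppp is sigmoid * (1 - sigmoid)); m then bounds the error of the linear approximation of softplus over the abstract input range. A quick standalone check of the derivative identities against autograd:

import torch
import torch.nn.functional as F

x = torch.tensor(0.3, requires_grad=True)
first = torch.autograd.grad(F.softplus(x), x, create_graph=True)[0]
second = torch.autograd.grad(first, x)[0]
print(first.item(), torch.sigmoid(x).item())                              # both ~0.5744
print(second.item(), (torch.sigmoid(x) * (1 - torch.sigmoid(x))).item())  # both ~0.2445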
Example No. 8
 def calcData(data, target):
     box = m.model.boxSpec(data, target)[0]
     with torch.no_grad():
         if m.model.ty in POINT_DOMAINS:
             preder = m.model(box[0]).data
             # get the index of the max log-probability
             pred = preder.max(1, keepdim=True)[1]
             org = m.model(data).max(1, keepdim=True)[1]
             stat.proved += float(org.eq(pred).sum())
             stat.safe += float(
                 pred.eq(target.data.view_as(pred)).sum())
         else:
             bs = m.model(box[1])
             stat.width += m.model.widthL(bs).data[0]  # sum up batch loss
             stat.safe += m.model.isSafeDom(bs, target).sum().item()
             stat.proved += sum([
                 m.model.isSafeDom(bs, (h.ones(target.size()) * n).long()).sum().item()
                 for n in range(num_classes)
             ])
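For point domains, the counters above compare argmax predictions: "proved" counts inputs whose prediction on the boxed point agrees with the prediction on the clean input, and "safe" counts those agreeing with the label. The counting pattern in isolation:

import torch

logits = torch.tensor([[0.1, 2.0, -1.0], [1.5, 0.2, 0.3]])
target = torch.tensor([1, 2])
pred = logits.max(1, keepdim=True)[1]              # argmax class per row: [[1], [0]]
safe = float(pred.eq(target.view_as(pred)).sum())  # 1.0: only the first row matches its label
print(pred.squeeze(1).tolist(), safe)              # [1, 0] 1.0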
Example No. 9
 def line(self, o1, o2, w=0, **kargs):
     if self.w is not None:
         w = self.w
     return self.Domain((o1 + o2) / 2,
                        ((o2 - o1) / 2).abs() + h.ones(o2.size()) * w,
                        None).checkSizes()
Example No. 10
 def line(self, o1, o2, **kargs):
     w = self.w.getVal(c = 0, **kargs)
     return self.domain((o1 + o2) / 2, ((o2 - o1) / 2).abs() + h.ones(o2.size()) * w, None).checkSizes()
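Both line constructors wrap the segment between o1 and o2 in a box: the center is the midpoint and the per-coordinate radius is half the elementwise distance, optionally widened by w. A standalone illustration of that geometry:

import torch

o1 = torch.tensor([0.0, 1.0])
o2 = torch.tensor([0.4, 0.8])
w = 0.05
center = (o1 + o2) / 2              # tensor([0.2000, 0.9000])
radius = ((o2 - o1) / 2).abs() + w  # tensor([0.2500, 0.1500])
print(center, radius)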
Example No. 11
 def lineSpec(self, x, target):
     eps = h.ones(x.size()) * self.w
     return [(x, self.ty.line(x - eps, x + eps, None), target)]