Example #1
    def pending_f(self, x_p):
        """Return True if the loss Z(x)_i - Z(x)_t is below -confidence,
        i.e. the perturbed input is classified as the target class with the
        required margin.
        """
        # Requires numpy as np, torch, and onehot_like (e.g. from foolbox.utils).
        # One-hot mask for the target class and a mask for every other class.
        targetlabel_mask = torch.from_numpy(onehot_like(np.zeros(self.classnum), self.target))
        secondlargest_mask = torch.from_numpy(np.ones(self.classnum)) - targetlabel_mask
        targetlabel_mask = targetlabel_mask.to(self.device)
        secondlargest_mask = secondlargest_mask.to(self.device)

        # Compute the logits once and reuse them for both maxima.
        logits = self.model.get_logits(x_p).double().to(self.device)
        Zx_i = np.max((logits * secondlargest_mask).cpu().detach().numpy())  # best non-target logit
        Zx_t = np.max((logits * targetlabel_mask).cpu().detach().numpy())    # target-class logit

        return Zx_i - Zx_t < -self.confidence
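
A rough, hypothetical illustration of how such a check might be used: it typically gates an iterative attack loop, which keeps perturbing the input until the targeted margin is reached. The names `run`, `attack_step`, and `max_iterations` below are illustrative assumptions, not taken from the example above.

    def run(self, x, max_iterations=100):
        # Hypothetical driver loop: perturb x until pending_f reports that
        # the targeted margin has been reached.
        x_p = x.detach().clone()
        for _ in range(max_iterations):
            if self.pending_f(x_p):
                break  # Z(x)_i - Z(x)_t < -confidence: adversarial with margin
            x_p = self.attack_step(x_p)  # hypothetical single optimization step
        return x_p
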
Example #2
    def loss_function(self, x_p, const, target, reconstructed_original,
                      confidence, min_, max_):
        """Returns the loss and the gradient of the loss w.r.t. x,
        assuming that logits = model(x)."""

        ## get the output of the model before the softmax
        x_p.requires_grad = True
        logits = self.model.get_logits(x_p).to(self.device)

        ## find the class with the largest logit other than the target class
        targetlabel_mask = (torch.from_numpy(
            onehot_like(np.zeros(self.classnum), target))).double()
        secondlargest_mask = (torch.from_numpy(np.ones(self.classnum)) -
                              targetlabel_mask).to(self.device)

        secondlargest = np.argmax(
            (logits.double() * secondlargest_mask).cpu().detach().numpy(),
            axis=1)

        is_adv_loss = logits[0][secondlargest] - logits[0][target]

        # is_adv is True as soon as the is_adv_loss goes below 0
        # but sometimes we want additional confidence
        is_adv_loss += confidence

        # The hinge max(0, is_adv_loss) is flat once the loss is non-positive,
        # so its gradient w.r.t. x is zero there; otherwise backpropagate.
        if is_adv_loss <= 0:
            is_adv_loss_grad = 0
        else:
            is_adv_loss.backward()
            is_adv_loss_grad = x_p.grad

        # Hinge: the adversarial term only contributes while it is positive.
        is_adv_loss = max(0, is_adv_loss)

        s = max_ - min_
        squared_l2_distance = np.sum(
            ((x_p - reconstructed_original)**2).cpu().detach().numpy()) / s**2
        total_loss = squared_l2_distance + const * is_adv_loss

        # Analytic gradient of the scaled squared L2 term; detached so the
        # returned gradient is not attached to the autograd graph.
        squared_l2_distance_grad = ((2 / s**2) * (x_p - reconstructed_original)).detach()

        total_loss_grad = squared_l2_distance_grad + const * is_adv_loss_grad
        return total_loss, total_loss_grad
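
As a rough sketch of how such a loss/gradient pair might be consumed, a plain gradient-descent driver could look like the following. The method name `optimize`, the step size, the iteration count, and the `self.confidence` attribute are assumptions made for illustration and do not come from the example above.

    def optimize(self, x, target, const, learning_rate=1e-2, steps=100,
                 min_=0.0, max_=1.0):
        # Hypothetical driver: gradient descent on the combined loss returned
        # by loss_function, using the clean input x as reconstructed_original.
        x_p = x.detach().clone()
        for _ in range(steps):
            _, grad = self.loss_function(x_p, const, target,
                                         reconstructed_original=x,
                                         confidence=self.confidence,
                                         min_=min_, max_=max_)
            with torch.no_grad():
                # Step against the gradient and keep the image in valid bounds.
                x_p = (x_p - learning_rate * grad).clamp(min_, max_)
        return x_p
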