Example #1
    def _run(self, x, y, x_init=None):
        self.f_model.reset()
        if self.y_target is None:
            criterion = fb.criteria.Misclassification(
                as_tensor(y.ravel().astype('int64')))
        else:
            criterion = fb.criteria.TargetedMisclassification(
                torch.tensor([self.y_target]))

        x_t = as_tensor(x, requires_grad=False)
        advx, clipped, is_adv = self.attack(self.f_model,
                                            x_t,
                                            criterion,
                                            epsilons=self.epsilon)

        if isinstance(clipped, list):
            if len(clipped) == 1:
                clipped = clipped[0]
            else:
                raise ValueError("This attack returned a list of results. "
                                 "Please use a single value of epsilon.")

        # f_opt is computed only in class-specific wrappers;
        # use a NaN placeholder here
        f_opt = float('nan')

        self._last_f_eval = self.f_model.f_eval
        self._last_grad_eval = self.f_model.grad_eval
        path = self.f_model.x_path
        self._x_seq = CArray(path.numpy())

        # reset again to clean cached data
        self.f_model.reset()
        return as_carray(clipped), f_opt
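
The wrapper above delegates the actual optimization to foolbox. A minimal standalone sketch of the same foolbox 3 call pattern (model, dataset, and epsilon are placeholders, not taken from secml):

import foolbox as fb
import torchvision.models as models

model = models.resnet18(pretrained=True).eval()
fmodel = fb.PyTorchModel(model, bounds=(0, 1))
images, labels = fb.utils.samples(fmodel, dataset='imagenet', batchsize=4)

attack = fb.attacks.LinfPGD()
criterion = fb.criteria.Misclassification(labels)
# a scalar epsilon returns tensors; a list of epsilons returns lists,
# which is why the wrapper above unpacks single-element lists
raw, clipped, is_adv = attack(fmodel, images, criterion, epsilons=0.03)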
Example #2
    def _run(self, x, y, x_init=None):
        self._x0 = as_tensor(x)
        self._y0 = as_tensor(y)
        out, _ = super(CFoolboxBasicIterative, self)._run(x, y, x_init)
        self._f_seq = self.objective_function(self.x_seq)
        f_opt = self.objective_function(out)
        return out, f_opt
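
Once the attack has run, the recorded path can be inspected through x_seq and _f_seq. A hypothetical inspection, where `attack` stands for a CFoolboxBasicIterative instance and the plotting part is purely illustrative:

import matplotlib.pyplot as plt

y_pred, scores, adv_ds, f_obj = attack.run(x0, y0)
plt.plot(attack._f_seq.tondarray().ravel())  # objective value at each iterate of x_seq
plt.xlabel('iteration')
plt.ylabel('objective')
plt.show()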
Example #3
    def _run(self, x, y, x_init=None):
        self._x0 = as_tensor(x)
        self._y0 = as_tensor(y)
        out, _ = super(CFoolboxL2CarliniWagner, self)._run(x, y, x_init)
        self._consts = self.attack.consts
        self._f_seq = self.objective_function(self.x_seq)
        self.best_c_ = self._consts[self.attack._best_const]
        f_opt = self.objective_function(out)
        return out, f_opt
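
The consts recorded here come from the Carlini-Wagner search over the trade-off constant c. A minimal sketch of that idea (an assumption about the strategy, not foolbox's exact code): increase c until the attack succeeds, then binary-search downwards, keeping the smallest constant that still yields an adversarial example.

def search_const(attack_succeeds, c=1e-2, steps=9):
    consts, best, lo, hi = [], None, 0.0, None
    for _ in range(steps):
        consts.append(c)
        if attack_succeeds(c):
            best, hi = c, c          # success: remember c, try smaller
        else:
            lo = c                   # failure: c was too small
        c = c * 10 if hi is None else (lo + hi) / 2
    return consts, best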
Example #4
    def _check_adv_example(self, secml_attack, fb_attack):
        x0_tensor = as_tensor(self.x0.atleast_2d())
        y0_tensor = as_tensor(self.y0.ravel())

        y_target = secml_attack.y_target

        if y_target is None:
            criterion = fb.criteria.Misclassification(y0_tensor)
        else:
            criterion = fb.criteria.TargetedMisclassification(torch.tensor([y_target]))

        y_pred, scores, adv_ds, f_obj = secml_attack.run(self.x0, self.y0)
        _, adv_fb, _ = fb_attack(secml_attack.f_model, x0_tensor, criterion, epsilons=secml_attack.epsilon)
        adv_fb = CArray(adv_fb.numpy())
        return adv_ds, adv_fb
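
A hypothetical use inside a unit test, asserting that the secml wrapper and the native foolbox attack land on the same adversarial point (the assertion helper name is assumed):

adv_ds, adv_fb = self._check_adv_example(secml_attack, fb_attack)
self.assert_array_almost_equal(adv_ds.X, adv_fb)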
Example #5
    def objective_function_gradient(self, x):
        x_t = as_tensor(x).detach()
        x_t.requires_grad_()
        loss = self._adv_objective_function(x_t)
        loss.sum().backward()
        gradient = x_t.grad
        return as_carray(gradient)
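
The same autograd pattern in a self-contained form: detach the input, enable gradient tracking, backpropagate a scalar, and read the gradient from x.grad.

import torch

def objective_gradient(f, x):
    x_t = x.detach().clone()
    x_t.requires_grad_()
    f(x_t).sum().backward()
    return x_t.grad

g = objective_gradient(lambda z: (z ** 2).sum(dim=1), torch.randn(4, 3))
print(g.shape)  # torch.Size([4, 3])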
Example #6
    def _run(self, x, y, x_init=None):
        self._x0 = as_tensor(x)
        self._y0 = as_tensor(y)
        out, _ = super(CFoolboxDeepfool, self)._run(x, y, x_init)
        # pad the x_seq path so that it always has the same
        # length as the number of attack steps
        num_effective_steps = self.x_seq.shape[0]
        if num_effective_steps < self.attack.steps:
            added_vals = CArray.zeros((self.attack.steps - num_effective_steps,
                                       *self.x_seq.shape[1:]))
            added_vals += self.x_seq[-1, :]
            self._x_seq = self._x_seq.append(added_vals, axis=0)
        self.num_effective_steps = num_effective_steps  # steps actually performed
        self._f_seq = self.objective_function(self.x_seq)
        f_opt = self.objective_function(out)

        return out, f_opt
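
The padding step above, shown standalone with plain torch tensors (shapes assumed: x_seq is (num_effective_steps, *input_shape)):

import torch

def pad_path(x_seq, steps):
    # replicate the last iterate so the path always has `steps` entries
    missing = steps - x_seq.shape[0]
    if missing > 0:
        tail = x_seq[-1:].expand(missing, *x_seq.shape[1:])
        x_seq = torch.cat([x_seq, tail], dim=0)
    return x_seq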
Example #7
    def _run(self, x, y, x_init=None):
        self._y0 = as_tensor(y)
        out, _ = super(CFoolboxFGM, self)._run(x, y, x_init)
        self._f_seq = self.objective_function(out)
        f_opt = self._f_seq[-1]
        # add the last point of the path
        self._x_seq = self._x_seq.append(out, axis=0)
        return out, f_opt
Example #8
    def objective_function_gradient(self, x):
        """
        DeepFool uses the gradient to find the closest class.
        For this reason, requesting the gradient separately would
        run the backward pass twice; this is avoided by saving
        the gradient during the first pass.
        """
        x_t = as_tensor(x).detach()
        x_t.requires_grad_()
        loss, gradient = self._adv_objective_function(x_t)
        return as_carray(gradient)
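
The "save the gradient at the first pass" idea, sketched with torch.autograd.grad so the loss and the gradient come out of a single backward pass:

import torch

def loss_and_grad(f, x):
    x_t = x.detach().requires_grad_()
    loss = f(x_t)
    grad, = torch.autograd.grad(loss.sum(), x_t)
    return loss.detach(), grad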
Example #9
File: cw_loss.py Project: pralab/secml
    def _adv_objective_function(self, x):
        if self._x0 is None:
            raise Exception('Attack not run yet')
        l2dist = torch.norm(self._x0 - x.flatten(start_dim=1), dim=1, p=2)**2

        loss = super(CWLoss, self)._adv_objective_function(x)
        if x.shape[0] == self._consts.shape[0]:
            c = as_tensor(self._consts)
        else:
            c = self._consts[-1].item()
        total_loss = c * loss + l2dist
        return total_loss
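
Assembled in full, this is the standard Carlini-Wagner L2 objective, total = c * f(x) + ||x - x0||_2^2. A minimal sketch with an explicit untargeted margin loss f (the f below is an assumption: the snippet above delegates it to its parent class):

import torch

def cw_total_loss(logits, y_true, x, x0, c, kappa=0.0):
    one_hot = torch.nn.functional.one_hot(y_true, logits.shape[1]).bool()
    true_logit = logits[one_hot]
    other_best = logits.masked_fill(one_hot, float('-inf')).max(dim=1).values
    # untargeted margin: push the true-class logit below the best other class
    f = torch.clamp(true_logit - other_best, min=-kappa)
    l2dist = torch.norm((x - x0).flatten(start_dim=1), dim=1, p=2) ** 2
    return c * f + l2dist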
Example #10
    def _run(self, x, y, x_init=None):
        self._y0 = as_tensor(y)
        out, _ = super(CFoolboxFGM, self)._run(x, y, x_init)
        self._f_seq = self.objective_function(out)
        f_opt = self._f_seq[-1]
        return out, f_opt
Example #11
    def objective_function(self, x):
        """
        DeepFool's _adv_objective_function returns both the loss
        and the gradient, so only the first returned value (the
        loss) is kept here.
        """
        return as_carray(self._adv_objective_function(as_tensor(x))[0])
Example #12
    def objective_function(self, x):
        return as_carray(self._adv_objective_function(as_tensor(x)))