Example #1
 def _suggest(self, xs_t, loss_fct, metric_fct):
     _shape = list(xs_t.shape)
     dim = np.prod(_shape[1:])
     add_queries = 0
     if self.is_new_batch:
         self.xo_t = xs_t.clone()
         self.i = 0
     if self.i == 0:
         # fresh batch: start from the all-ones sign vector and estimate
         # the directional derivative of the loss along it
         self.sgn_t = sign(ch.ones(_shape[0], dim))
         fxs_t = lp_step(self.xo_t, self.sgn_t.view(_shape), self.epsilon, self.p)
         bxs_t = self.xo_t
         est_deriv = (loss_fct(fxs_t.cpu().numpy()) - loss_fct(bxs_t.cpu().numpy())) / self.epsilon
         self.best_est_deriv = est_deriv
         add_queries = 2  # the two loss evaluations above
     # flip the sign of the i-th coordinate and re-estimate the derivative
     self.sgn_t[:, self.i] *= -1
     fxs_t = lp_step(self.xo_t, self.sgn_t.view(_shape), self.epsilon, self.p)
     bxs_t = self.xo_t
     est_deriv = (loss_fct(fxs_t.cpu().numpy()) - loss_fct(bxs_t.cpu().numpy())) / self.epsilon
     # undo the flip for batch elements whose estimate did not improve
     undo = [i for i, worse in enumerate(est_deriv < self.best_est_deriv) if worse]
     self.sgn_t[undo, self.i] *= -1.
     # keep the best directional-derivative estimate seen so far
     self.best_est_deriv = (est_deriv >= self.best_est_deriv) * est_deriv + (
             est_deriv < self.best_est_deriv) * self.best_est_deriv
     # compute the cosine similarity
     cos_sims, ham_sims = metric_fct(self.xo_t.cpu().numpy(), self.sgn_t.cpu().numpy())
     # perform the step
     new_xs = lp_step(self.xo_t, self.sgn_t.view(_shape), self.epsilon, self.p)
     self.i += 1
     if self.i == dim:
         self.xo_t = new_xs.clone()
         self.i = 0
     return new_xs, np.ones(_shape[0]) + add_queries, cos_sims, ham_sims
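All of these examples share a few helpers: ch is torch, t converts numpy arrays to tensors, sign is an elementwise sign, and lp_step takes one step of size lr under an lp geometry. A minimal sketch of plausible definitions, inferred from usage rather than taken from the project (in particular, mapping sign(0) to +1 and the p = '2' normalization are assumptions):

 import numpy as np
 import torch as ch

 def t(x):
     # numpy -> torch (helper name taken from the snippets above)
     return ch.from_numpy(np.asarray(x, dtype=np.float32))

 def sign(x):
     # elementwise sign; assumed to map 0 to +1 so directions stay dense
     return ch.sign(x) + (x == 0).float()

 def lp_step(x, g, lr, p):
     # sign step for p = 'inf', normalized gradient step for p = '2'
     if p == 'inf':
         return x + lr * ch.sign(g)
     batch = g.size(0)
     norms = g.reshape(batch, -1).norm(dim=1).view(-1, *([1] * (g.dim() - 1)))
     return x + lr * g / (norms + 1e-12)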
Example #2
 def _suggest(self, xs_t, loss_fct, metric_fct):
     _shape = list(xs_t.shape)
     dim = np.prod(_shape[1:])
     # additional queries at the start
     add_queries = 0
     if self.is_new_batch:
         self.xo_t = xs_t.clone()
         self.h = 0
         self.i = 0
     if self.i == 0 and self.h == 0:
         self.sgn_t = sign(ch.ones(_shape[0], dim))
         fxs_t = lp_step(self.xo_t, self.sgn_t.view(_shape), self.epsilon,
                         self.p)
         bxs_t = self.xo_t  # - self.epsilon * self.sgn_t.view(_shape)
         est_deriv = (loss_fct(fxs_t.cpu().numpy()) -
                      loss_fct(bxs_t.cpu().numpy())) / self.epsilon
         self.best_est_deriv = est_deriv
         add_queries = 3  # the two setup evaluations above plus bxs_t below; the base count of 1 covers fxs_t
     # flip the signs of a random chunk of coordinates; the chunk length
     # is halved each time h increases
     chunk_len = np.ceil(dim / (2**self.h)).astype(int)
     iend = min(dim, (self.i + 1) * chunk_len)
     randinds = np.random.choice(dim, chunk_len, replace=False)
     self.sgn_t[:, randinds] *= -1.
     fxs_t = lp_step(self.xo_t, self.sgn_t.view(_shape), self.epsilon,
                     self.p)
     bxs_t = self.xo_t
     est_deriv = (loss_fct(fxs_t.cpu().numpy()) -
                  loss_fct(bxs_t.cpu().numpy())) / self.epsilon
     # undo the flip for batch elements whose estimate did not improve
     # (np.where keeps the index array integer-typed even when it is empty)
     switch_flag = np.where(est_deriv < self.best_est_deriv)[0]
     self.sgn_t[switch_flag[:, None], randinds] *= -1.
     self.best_est_deriv = (
         est_deriv >= self.best_est_deriv) * est_deriv + (
             est_deriv < self.best_est_deriv) * self.best_est_deriv
     # compute the cosine similarity
     cos_sims, ham_sims = metric_fct(self.xo_t.cpu().numpy(),
                                     self.sgn_t.cpu().numpy())
     # perform the step
     new_xs = lp_step(self.xo_t, self.sgn_t.view(_shape), self.epsilon,
                      self.p)
     # update i and h for next iteration
     self.i += 1
     if self.i == 2**self.h or iend == dim:
         self.h += 1
         self.i = 0
         # if h is exhausted, restart the halving schedule from the current iterate
         if self.h == np.ceil(np.log2(dim)).astype(int) + 1:
             self.xo_t = xs_t.clone()
             self.h = 0
     return new_xs, np.ones(_shape[0]) + add_queries, cos_sims, ham_sims
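The i/h bookkeeping above implements a halving schedule: at level h the dim coordinates are covered by 2**h chunks of roughly dim / 2**h coordinates each (the example flips a random subset of chunk_len coordinates, so iend only drives the schedule). A toy illustration, not part of the original code:

 import numpy as np

 dim = 8  # toy dimensionality
 for h in range(int(np.ceil(np.log2(dim))) + 1):
     chunk_len = int(np.ceil(dim / 2**h))
     chunks = [(i * chunk_len, min(dim, (i + 1) * chunk_len))
               for i in range(2**h) if i * chunk_len < dim]
     print(h, chunk_len, chunks)
 # h=0: [(0, 8)]  h=1: [(0, 4), (4, 8)]  h=2: four chunks of 2  h=3: eight of 1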
Example #3
 def _suggest(self, xs_t, loss_fct, metric_fct):
     _shape = list(xs_t.shape)
     dim = np.prod(_shape[1:])
     b_sz = _shape[0]
     add_queries = 0
     if self.is_new_batch:
         self.i = 0
         # a random order in which to probe the coordinates, per batch element
         self.perm = ch.rand(b_sz, dim).argsort(dim=1)
     if self.i == 0:
         # one extra query to initialize the best loss seen so far
         loss = loss_fct(xs_t.cpu().numpy())
         self.best_loss = loss
         add_queries = 1
     diff = ch.zeros(b_sz, dim)
     # wrap around with % in case the iteration count exceeds dim
     idx = self.perm[:, self.i % dim]
     diff = diff.scatter(1, idx.unsqueeze(1), 1)
     new_xs = xs_t.clone().view(b_sz, -1)
     # left attempt
     left_xs = lp_step(xs_t, diff.view_as(xs_t), self.delta, self.p)
     left_loss = loss_fct(left_xs.cpu().numpy())
     replace_flag = ch.tensor(
         (left_loss > self.best_loss).astype(np.float32)).unsqueeze(1)
     self.best_loss = replace_flag.squeeze(1).cpu().numpy() * left_loss + (
         1 - replace_flag.squeeze(1).cpu().numpy()) * self.best_loss
     new_xs = replace_flag * left_xs.view(b_sz,
                                          -1) + (1. - replace_flag) * new_xs
     # right attempt
     right_xs = lp_step(xs_t, diff.view_as(xs_t), -self.delta, self.p)
     right_loss = loss_fct(right_xs.cpu().numpy())
     # replace only those that have greater right loss and were not
     # replaced in the left attempt
     replace_flag = ch.tensor((right_loss > self.best_loss).astype(
         np.float32)).unsqueeze(1) * (1 - replace_flag)
     self.best_loss = replace_flag.squeeze(1).cpu().numpy() * right_loss + (
         1 - replace_flag.squeeze(1).cpu().numpy()) * self.best_loss
     new_xs = replace_flag * right_xs.view(b_sz,
                                           -1) + (1 - replace_flag) * new_xs
     # compute the cosine similarity
     cos_sims, ham_sims = metric_fct(
         xs_t.cpu().numpy(), (new_xs - xs_t.view(b_sz, -1)).cpu().numpy())
     self.i += 1
     # number of queries: add_queries (only on the first iteration, to init
     # best_loss) + one left query + one right query counted only where the
     # right attempt replaced (right_loss is evaluated unconditionally above)
     num_queries = add_queries + np.ones(
         b_sz) + np.ones(b_sz) * replace_flag.squeeze(1).cpu().numpy()
     return new_xs.view_as(xs_t), num_queries, cos_sims, ham_sims
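A toy, self-contained illustration of the scatter trick above, which builds a one-hot probe direction per batch element (made-up sizes):

 import torch as ch

 b_sz, dim = 2, 5
 perm = ch.rand(b_sz, dim).argsort(dim=1)  # random coordinate order per element
 i = 0
 idx = perm[:, i % dim]                    # coordinate probed at iteration i
 diff = ch.zeros(b_sz, dim).scatter(1, idx.unsqueeze(1), 1)
 print(diff)  # each row is one-hot at its selected coordinate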
Example #4
    def perturb(self, x_nat, y, sess):
        """Given a set of examples (x_nat, y), returns a set of adversarial
           examples within epsilon of x_nat in l_infinity norm."""
        if self.rand:
            x = x_nat + np.random.uniform(-self.epsilon, self.epsilon,
                                          x_nat.shape)
            x = np.clip(x, self.lb, self.ub)
        else:
            x = np.copy(x_nat)

        for i in range(self.num_steps):
            grad = sess.run(self.grad,
                            feed_dict={
                                self.model.x_input: x,
                                self.model.y_input: y
                            })

            g = noisy_sign(grad,
                           crit=self.crit,
                           retain_p=self.retain_p,
                           is_ns_sign=self.is_ns_sign)

            x = lp_step(x, g, self.step_size, self.p)

            # x = np.clip(x, x_nat - self.epsilon, x_nat + self.epsilon)
            x = np.clip(x, self.lb, self.ub)  # ensure valid pixel range
        return x
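noisy_sign is not defined in this snippet; below is a hypothetical sketch consistent with how it is called above (the real implementation may use crit, e.g. gradient magnitude, to pick which sign bits to corrupt):

 import numpy as np

 def noisy_sign(grad, crit='random', retain_p=1.0, is_ns_sign=True):
     # return the gradient sign with each bit retained with probability
     # retain_p and flipped otherwise (crit is ignored in this sketch)
     g = np.sign(grad)
     if is_ns_sign and retain_p < 1.0:
         flip = np.random.rand(*g.shape) > retain_p  # bits to corrupt
         g[flip] *= -1
     return g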
Example #5
 def _suggest(self, xs_t, loss_fct, metric_fct):
     _shape = list(xs_t.shape)
     _gs = self.grad_fct(xs_t.cpu().numpy())  # query the gradient oracle
     # compute the cosine similarity
     cos_sims, ham_sims = metric_fct(xs_t.cpu().numpy(), _gs)
     # perform the step
     new_xs = lp_step(xs_t, t(_gs.reshape(_shape)), self.lr, self.p)
     return new_xs, 2 * np.ones(_shape[0]), cos_sims, ham_sims
Example #6
    def _suggest(self, xs_t, loss_fct, metric_fct):
        _shape = list(xs_t.shape)
        dim = np.prod(_shape[1:])
        if self.is_new_batch:
            self.xo_t = xs_t.clone()
        sgn_t = sign(ch.rand(_shape[0], dim) - 0.5)  # uniformly random sign vector

        # compute the cosine similarity
        cos_sims, ham_sims = metric_fct(self.xo_t.cpu().numpy(),
                                        sgn_t.cpu().numpy())
        # perform the step
        new_xs = lp_step(self.xo_t, sgn_t.view(_shape), self.epsilon, self.p)
        return new_xs, np.ones(_shape[0]), cos_sims, ham_sims
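Every example returns cos_sims and ham_sims from metric_fct, whose definition is not shown. A plausible sketch, assuming it scores a suggested direction against the true gradient through a diagnostics-only oracle (grad_oracle is a hypothetical extra parameter; the project's actual signature may differ):

 import numpy as np

 def metric_fct(xs, direction, grad_oracle):
     # cosine similarity and sign agreement (Hamming similarity) between the
     # true gradient at xs and the suggested direction, per batch element
     g = grad_oracle(xs).reshape(xs.shape[0], -1)
     d = direction.reshape(xs.shape[0], -1)
     cos = (g * d).sum(1) / (np.linalg.norm(g, axis=1) *
                             np.linalg.norm(d, axis=1) + 1e-12)
     ham = (np.sign(g) == np.sign(d)).mean(1)
     return cos, ham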
Example #7
 def _suggest(self, xs_t, loss_fct, metric_fct):
     _shape = list(xs_t.shape)
     dim = np.prod(_shape[1:])
     num_axes = len(_shape[1:])
     gs_t = ch.zeros_like(xs_t)
     for i in range(self.q):
         # antithetic pair of queries along a random Gaussian direction,
         # scaled so the direction has roughly unit norm
         exp_noise = ch.randn_like(xs_t) / (dim**0.5)
         fxs_t = xs_t + self.fd_eta * exp_noise
         bxs_t = xs_t - self.fd_eta * exp_noise
         est_deriv = (loss_fct(fxs_t.cpu().numpy()) -
                      loss_fct(bxs_t.cpu().numpy())) / (2. * self.fd_eta)
         gs_t += t(est_deriv.reshape(-1, *[1] * num_axes)) * exp_noise
     # compute the cosine similarity
     cos_sims, ham_sims = metric_fct(xs_t.cpu().numpy(),
                                     gs_t.view(_shape[0], -1).cpu().numpy())
     # perform the step
     new_xs = lp_step(xs_t, gs_t, self.lr, self.p)
     return new_xs, 2 * self.q * np.ones(_shape[0]), cos_sims, ham_sims
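Example #7 is a two-sided (antithetic) finite-difference gradient estimator: each of the q Gaussian directions u contributes est_deriv * u with est_deriv = (L(x + eta*u) - L(x - eta*u)) / (2*eta), where eta is fd_eta. Every direction costs two loss queries, hence the 2*q query count returned above.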
Example #8
 def _suggest(self, xs_t, loss_fct, metric_fct):
     _shape = list(xs_t.shape)
     dim = np.prod(_shape[1:])
     num_axes = len(_shape[1:])
     gs_t = ch.zeros_like(xs_t)
     for i in range(self.q):
         exp_noise = ch.randn_like(xs_t) / (dim**0.5)
         fxs_t = xs_t + self.fd_eta * exp_noise
         bxs_t = xs_t
         est_deriv = (loss_fct(fxs_t.cpu().numpy()) -
                      loss_fct(bxs_t.cpu().numpy())) / self.fd_eta
         gs_t += t(est_deriv.reshape(-1, *[1] * num_axes)) * exp_noise
     # compute the cosine similarity
     cos_sims, ham_sims = metric_fct(xs_t.cpu().numpy(),
                                     gs_t.view(_shape[0], -1).cpu().numpy())
     # take a sign step regardless of the attack's lp-ball constraint;
     # this is the main difference of the method
     new_xs = lp_step(xs_t, gs_t, self.lr, 'inf')
     # forward differences cost q queries (one per direction) plus one at xs_t
     return new_xs, (self.q + 1) * np.ones(_shape[0]), cos_sims, ham_sims
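Example #8 is the one-sided variant of the estimator in Example #7: est_deriv = (L(x + eta*u) - L(x)) / eta. The loss at xs_t is counted once across all q directions (though this snippet recomputes it in every loop iteration), giving the q + 1 query count; the final update then always takes a sign step via lp_step(..., 'inf').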
Example #9
 def _suggest(self, xs_t, loss_fct, metric_fct):
     """
     The core of the bandit algorithm
     since this is compute intensive, it is implemented with torch support to push ops into gpu (if available)
     however, the input / output are numpys
     :param xs: numpy
     :return new_xs: returns a torch tensor
     """
     if xs_t.dim(
     ) != 2 and self.prior_size is not None:  # for cifar10 and imagenet data needs to be transpose into c x  h x w for upsample method
         xs_t = xs_t.transpose(1, 3)
     _shape = list(xs_t.shape)
     eff_shape = list(xs_t.shape)
     # the upsampling assumes xs_t is batch_size x c x h x w; this is not the
     # case for mnist (batch_size x dim), so take care of that below
     if len(_shape) == 2:
         eff_shape = [_shape[0], 1, self.data_size, self.data_size]
     if self.prior_size is None:
         prior_shape = eff_shape
     else:
         prior_shape = eff_shape[:-2] + [self.prior_size] * 2
     # reset the prior if xs_t is a new batch
     if self.is_new_batch:
         self.prior = ch.zeros(prior_shape)
     # create noise for exploration, estimate the gradient, and take a PGD step
     exp_noise = ch.randn(prior_shape) / (np.prod(prior_shape[1:])**0.5)  # according to the paper
     # Query deltas for finite difference estimator
     if self.prior_size is None:
         q1 = step(self.prior, exp_noise, self.prior_exploration)
         q2 = step(self.prior, exp_noise, -self.prior_exploration)
     else:
         q1 = self.prior_upsample_fct(
             step(self.prior, exp_noise, self.prior_exploration))
         q2 = self.prior_upsample_fct(
             step(self.prior, exp_noise, -self.prior_exploration))
     # Loss points for finite difference estimator
     if xs_t.dim() != 2 and self.prior_size is not None:
         l1 = loss_fct(
             l2_step(xs_t, q1.view(_shape),
                     self.fd_eta).transpose(1, 3).cpu().numpy())
         l2 = loss_fct(
             l2_step(xs_t, q2.view(_shape),
                     self.fd_eta).transpose(1, 3).cpu().numpy())
     else:
         l1 = loss_fct(
             l2_step(xs_t, q1.view(_shape), self.fd_eta).cpu().numpy())
         l2 = loss_fct(
             l2_step(xs_t, q2.view(_shape), self.fd_eta).cpu().numpy())
     # finite differences estimate of directional derivative
     est_deriv = (l1 - l2) / (self.fd_eta * self.prior_exploration)
     # 2-query gradient estimate
     # TODO: to make it consistent with Ilyas' implementation multiply the below by self.prior_exploration
     est_grad = ch.Tensor(est_deriv.reshape(
         -1, *[1] * len(prior_shape[1:]))) * exp_noise
     # update prior with the estimated gradient:
     self.prior = self.prior_step(self.prior, est_grad, self.prior_lr)
     # gradient step in the data space
     if self.prior_size is None:
         gs = self.prior.clone()
     else:
         gs = self.prior_upsample_fct(self.prior)
     if xs_t.dim() != 2 and self.prior_size is not None:
         gs = gs.transpose(1, 3)
         xs_t = xs_t.transpose(1, 3)
         _shape = list(xs_t.shape)
     # compute the cosine similarity
     cos_sims, ham_sims = metric_fct(
         xs_t.cpu().numpy(),
         gs.cpu().numpy().reshape(_shape[0], -1))
     # perform the step
     new_xs = lp_step(xs_t, gs.view(_shape), self.lr, self.p)
     return new_xs, 2 * np.ones(_shape[0]), cos_sims, ham_sims
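The bandits example relies on step, l2_step, and self.prior_step, none of which are shown. A sketch of plausible definitions, following the exponentiated-gradient prior update of Ilyas et al.'s bandits attack (assumptions, not the project's actual code):

 import torch as ch

 def step(v, g, lr):
     # plain gradient step, used to probe the prior in both directions
     return v + lr * g

 def l2_step(x, g, lr):
     # l2-normalized step in the image space
     batch = g.size(0)
     norms = g.reshape(batch, -1).norm(dim=1).view(-1, *([1] * (g.dim() - 1)))
     return x + lr * g / (norms + 1e-12)

 def eg_step(v, g, lr):
     # exponentiated-gradient step that keeps the prior inside [-1, 1];
     # a natural choice for prior_step in the l_inf case
     real_v = (v + 1.0) / 2.0               # map [-1, 1] -> [0, 1]
     pos = real_v * ch.exp(lr * g)
     neg = (1.0 - real_v) * ch.exp(-lr * g)
     new_v = pos / (pos + neg)
     return new_v * 2.0 - 1.0               # map back to [-1, 1]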