def project_onto_l1_ball(x: ep.Tensor, eps: ep.Tensor) -> ep.Tensor:
    """Computes the Euclidean projection onto the L1 ball for a batch:

        min ||x - u||_2  s.t.  ||u||_1 <= eps

    Inspired by the corresponding numpy version by Adrien Gaidon.
    Adapted from the pytorch version by Tony Duan:
    https://gist.github.com/tonyduan/1329998205d88c566588e57e3e2c0c55

    Args:
        x: Batch of arbitrary-size tensors to project, possibly on GPU.
        eps: Radius of the L1 ball to project onto.

    Returns:
        Batch of projected tensors with the same shape as the input.

    Notes:
        The complexity of this algorithm is in O(d log d) as it involves
        sorting x.

    References:
        ..[#Duchi08] Efficient Projections onto the l1-Ball for Learning
            in High Dimensions.
            John Duchi, Shai Shalev-Shwartz, Yoram Singer, and Tushar Chandra.
            International Conference on Machine Learning (ICML 2008)
    """
    original_shape = x.shape
    x = flatten(x)

    # Samples that already satisfy ||x||_1 <= eps are passed through unchanged.
    mask = (ep.norms.l1(x, axis=1) <= eps).astype(x.dtype).expand_dims(1)

    # Sort the absolute values in descending order.
    mu = ep.flip(ep.sort(ep.abs(x)), axis=-1).astype(x.dtype)
    cumsum = ep.cumsum(mu, axis=-1)
    arange = ep.arange(x, 1, x.shape[1] + 1).astype(x.dtype)

    # rho is the (0-based) index of the last coordinate that stays non-zero
    # after the projection.
    rho = (
        ep.max(
            (mu * arange > (cumsum - eps.expand_dims(1))).astype(x.dtype) * arange,
            axis=-1,
        )
        - 1
    )
    # Samples already inside the ball would select rho = -1; clamp to 0
    # (the mask above makes their projected value irrelevant anyway).
    rho = ep.maximum(rho, 0)

    # Per-sample soft-thresholding value.
    theta = (
        cumsum[ep.arange(x, x.shape[0]), rho.astype(ep.arange(x, 1).dtype)] - eps
    ) / (rho + 1.0)

    # Shrink the magnitudes and restore the signs.
    proj = (ep.abs(x) - theta.expand_dims(1)).clip(min_=0, max_=ep.inf)
    x = mask * x + (1 - mask) * proj * ep.sign(x)
    return x.reshape(original_shape)
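A minimal usage sketch, assuming an EagerPy NumPy backend and that the `flatten` helper used above is in scope (in Foolbox it lives in `foolbox.devutils`); the batch values and radii are illustrative:

import eagerpy as ep
import numpy as np

# Illustrative batch of two 2x2 "images" and per-sample L1 radii.
x = ep.astensor(
    np.array(
        [[[1.0, -2.0], [0.5, 0.0]], [[0.1, 0.1], [0.1, 0.1]]], dtype=np.float32
    )
)
eps = ep.astensor(np.array([1.0, 10.0], dtype=np.float32))

u = project_onto_l1_ball(x, eps)

# The first sample (L1 norm 3.5 > 1.0) is shrunk onto the ball; the second
# already satisfies ||x||_1 <= eps and is returned unchanged.
print(ep.norms.l1(u.reshape((2, -1)), axis=1))  # approximately [1.0, 0.4]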
def test_sign(t: Tensor) -> Tensor:
    return ep.sign(t)
def _binary_search_on_alpha(
    self,
    function_evolution: Callable[[ep.Tensor], ep.Tensor],
    lower: ep.Tensor,
) -> ep.Tensor:
    # Upper --> not adversarial / Lower --> adversarial
    v_type = function_evolution(lower)

    def get_alpha(theta: ep.Tensor) -> ep.Tensor:
        return 1 - ep.astensor(self._cos(theta.raw * np.pi / 180))

    check_opposite = lower > 0  # if param < 0: abs(param) doesn't work

    # Get the upper range.
    upper = ep.where(
        abs(lower) != self.theta_max,
        lower + ep.sign(lower) * self.theta_max / self.T,
        ep.zeros_like(lower),
    )

    mask_upper = upper == 0
    while mask_upper.any():
        # Find the correct lower/upper range: while an entry of mask_upper
        # is True, its range hasn't been found yet.
        new_upper = lower + ep.sign(lower) * self.theta_max / self.T
        potential_x = function_evolution(new_upper)
        x = ep.where(
            atleast_kd(mask_upper, potential_x.ndim),
            potential_x,
            ep.zeros_like(potential_x),
        )
        is_advs = self._is_adversarial(x)
        lower = ep.where(ep.logical_and(mask_upper, is_advs), new_upper, lower)
        upper = ep.where(
            ep.logical_and(mask_upper, is_advs.logical_not()), new_upper, upper
        )
        mask_upper = mask_upper * is_advs

    step = 0
    over_gamma = abs(get_alpha(upper) - get_alpha(lower)) > self._BS_gamma
    while step < self._BS_max_iteration and over_gamma.any():
        mid_bound = (upper + lower) / 2
        mid = ep.where(
            atleast_kd(ep.logical_and(mid_bound != 0, over_gamma), v_type.ndim),
            function_evolution(mid_bound),
            ep.zeros_like(v_type),
        )
        is_adv = self._is_adversarial(mid)

        mid_opp = ep.where(
            atleast_kd(
                ep.logical_and(ep.astensor(check_opposite), over_gamma), mid.ndim
            ),
            function_evolution(-mid_bound),
            ep.zeros_like(mid),
        )
        is_adv_opp = self._is_adversarial(mid_opp)

        lower = ep.where(over_gamma * is_adv, mid_bound, lower)
        lower = ep.where(
            over_gamma * is_adv.logical_not() * check_opposite * is_adv_opp,
            -mid_bound,
            lower,
        )
        upper = ep.where(
            over_gamma * is_adv.logical_not() * check_opposite * is_adv_opp,
            -upper,
            upper,
        )
        upper = ep.where(
            over_gamma * (abs(lower) != abs(mid_bound)), mid_bound, upper
        )

        check_opposite = over_gamma * check_opposite * is_adv_opp * (lower > 0)
        over_gamma = abs(get_alpha(upper) - get_alpha(lower)) > self._BS_gamma
        step += 1

    return ep.astensor(lower)
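For intuition, here is a stripped-down scalar sketch of the same bisection pattern: the lower bound stays adversarial, the upper bound does not, and the loop halves the interval until the gap measured through `get_alpha` drops below a tolerance. The predicate, threshold, and tolerances below are hypothetical; the real method above works on batched tensors and additionally probes the mirrored angle `-mid_bound`:

import math

def get_alpha(theta_deg: float) -> float:
    # Same monotone reparametrization as above: 0 at 0 degrees, growing with |theta|.
    return 1 - math.cos(math.radians(theta_deg))

def binary_search_on_alpha(is_adversarial, lower: float, upper: float,
                           gamma: float = 1e-5, max_iteration: int = 40) -> float:
    # Invariant: lower is adversarial, upper is not.
    step = 0
    while step < max_iteration and abs(get_alpha(upper) - get_alpha(lower)) > gamma:
        mid = (upper + lower) / 2
        if is_adversarial(mid):
            lower = mid  # mid is still adversarial: move the lower bound up
        else:
            upper = mid  # mid is clean: tighten the upper bound
        step += 1
    return lower

# Toy predicate: angles below 13.7 degrees remain adversarial.
best = binary_search_on_alpha(lambda theta: theta < 13.7, lower=1.0, upper=30.0)
print(best)  # converges to just under 13.7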
def run(
    self,
    model: Model,
    inputs: T,
    criterion: Union[Criterion, T],
    *,
    early_stop: Optional[float] = None,
    starting_points: Optional[T] = None,
    **kwargs: Any,
) -> T:
    raise_if_kwargs(kwargs)
    originals, restore_type = ep.astensor_(inputs)
    del inputs, kwargs

    verify_input_bounds(originals, model)

    criterion = get_criterion(criterion)
    is_adversarial = get_is_adversarial(criterion, model)

    if starting_points is None:
        init_attack: MinimizationAttack
        if self.init_attack is None:
            init_attack = LinearSearchBlendedUniformNoiseAttack(steps=50)
            logging.info(
                f"Neither starting_points nor init_attack given. Falling"
                f" back to {init_attack!r} for initialization."
            )
        else:
            init_attack = self.init_attack
        # TODO: use call and support all types of attacks (once early_stop is
        # possible in __call__)
        x_advs = init_attack.run(model, originals, criterion, early_stop=early_stop)
    else:
        x_advs = ep.astensor(starting_points)

    is_adv = is_adversarial(x_advs)
    if not is_adv.all():
        failed = is_adv.logical_not().float32().sum()
        if starting_points is None:
            raise ValueError(
                f"init_attack failed for {failed} of {len(is_adv)} inputs"
            )
        else:
            raise ValueError(
                f"{failed} of {len(is_adv)} starting_points are not adversarial"
            )
    del starting_points

    tb = TensorBoard(logdir=self.tensorboard)

    # Project the initialization onto the decision boundary.
    x_advs = self._binary_search(is_adversarial, originals, x_advs)
    assert ep.all(is_adversarial(x_advs))

    distances = self.distance(originals, x_advs)

    for step in range(self.steps):
        delta = self.select_delta(originals, distances, step)

        # Choose the number of gradient estimation steps.
        num_gradient_estimation_steps = int(
            min([self.initial_num_evals * math.sqrt(step + 1), self.max_num_evals])
        )

        gradients = self.approximate_gradients(
            is_adversarial, x_advs, num_gradient_estimation_steps, delta
        )

        if self.constraint == "linf":
            update = ep.sign(gradients)
        else:
            update = gradients

        if self.stepsize_search == "geometric_progression":
            # Find the step size: start large and halve each sample's step
            # until it stays adversarial.
            epsilons = distances / math.sqrt(step + 1)

            while True:
                x_advs_proposals = ep.clip(
                    x_advs + atleast_kd(epsilons, x_advs.ndim) * update, 0, 1
                )
                success = is_adversarial(x_advs_proposals)
                epsilons = ep.where(success, epsilons, epsilons / 2.0)

                if ep.all(success):
                    break

            # Update the sample.
            x_advs = ep.clip(
                x_advs + atleast_kd(epsilons, update.ndim) * update, 0, 1
            )
            assert ep.all(is_adversarial(x_advs))

            # Binary search to return to the boundary.
            x_advs = self._binary_search(is_adversarial, originals, x_advs)
            assert ep.all(is_adversarial(x_advs))

        elif self.stepsize_search == "grid_search":
            # Grid search for the step size.
            epsilons_grid = ep.expand_dims(
                ep.from_numpy(
                    distances,
                    np.logspace(-4, 0, num=20, endpoint=True, dtype=np.float32),
                ),
                1,
            ) * ep.expand_dims(distances, 0)

            proposals_list = []
            for epsilons in epsilons_grid:
                x_advs_proposals = (
                    x_advs + atleast_kd(epsilons, update.ndim) * update
                )
                x_advs_proposals = ep.clip(x_advs_proposals, 0, 1)

                mask = is_adversarial(x_advs_proposals)

                x_advs_proposals = self._binary_search(
                    is_adversarial, originals, x_advs_proposals
                )

                # Only use new values where the initial guess was already
                # adversarial.
                x_advs_proposals = ep.where(
                    atleast_kd(mask, x_advs.ndim), x_advs_proposals, x_advs
                )

                proposals_list.append(x_advs_proposals)

            proposals = ep.stack(proposals_list, 0)
            proposals_distances = self.distance(
                ep.expand_dims(originals, 0), proposals
            )
            minimal_idx = ep.argmin(proposals_distances, 0)

            x_advs = proposals[minimal_idx]

        distances = self.distance(originals, x_advs)

        # Log stats.
        tb.histogram("norms", distances, step)

    return restore_type(x_advs)
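A hedged end-to-end sketch of how a `run` method like this is typically invoked, assuming it belongs to a Foolbox-style `MinimizationAttack`; the class name `HopSkipJump`, the ResNet-18 weights, and the input bounds are illustrative assumptions, not confirmed by this section:

import foolbox as fb
import torchvision.models as models

# Illustrative setup: any eval-mode PyTorch classifier with inputs in [0, 1].
model = models.resnet18(weights="DEFAULT").eval()
fmodel = fb.PyTorchModel(model, bounds=(0, 1))

images, labels = fb.utils.samples(fmodel, dataset="imagenet", batchsize=4)
criterion = fb.criteria.Misclassification(labels)

# Decision-based attack: only the model's decisions are queried, no gradients.
attack = fb.attacks.HopSkipJump()  # assumed class name for the run() above
advs = attack.run(fmodel, images, criterion)

# starting_points is optional; if given, every starting point must already be
# adversarial, otherwise run() raises ValueError (see the check above).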