from typing import Callable, List

import eagerpy as ep
from foolbox.devutils import atleast_kd, flatten


# Method of foolbox's HopSkipJumpAttack (uses self.constraint).
def approximate_gradients(
    self,
    is_adversarial: Callable[[ep.Tensor], ep.Tensor],
    x_advs: ep.Tensor,
    steps: int,
    delta: ep.Tensor,
) -> ep.Tensor:
    # Draw `steps` random noise directions per sample: (steps, bs, ...).
    noise_shape = tuple([steps] + list(x_advs.shape))
    if self.constraint == "l2":
        rv = ep.normal(x_advs, noise_shape)
    elif self.constraint == "linf":
        rv = ep.uniform(x_advs, low=-1, high=1, shape=noise_shape)
    rv /= atleast_kd(ep.norms.l2(flatten(rv, keep=1), -1), rv.ndim) + 1e-12
    scaled_rv = atleast_kd(ep.expand_dims(delta, 0), rv.ndim) * rv

    perturbed = ep.expand_dims(x_advs, 0) + scaled_rv
    perturbed = ep.clip(perturbed, 0, 1)

    # Recompute the effective directions after clipping to the input domain.
    rv = (perturbed - x_advs) / atleast_kd(ep.expand_dims(delta + 1e-8, 0), rv.ndim)

    # Query the model once per noise direction: +1 if the perturbed point is
    # still adversarial, -1 otherwise.
    multipliers_list: List[ep.Tensor] = []
    for step in range(steps):
        decision = is_adversarial(perturbed[step])
        multipliers_list.append(
            ep.where(
                decision,
                ep.ones(x_advs, len(decision)),
                -ep.ones(x_advs, len(decision)),
            )
        )
    # (steps, bs)
    multipliers = ep.stack(multipliers_list, 0)

    # Subtract the mean decision as a baseline to reduce variance, except
    # when all decisions agree (subtracting would zero out the signal).
    vals = ep.where(
        ep.abs(ep.mean(multipliers, axis=0, keepdims=True)) == 1,
        multipliers,
        multipliers - ep.mean(multipliers, axis=0, keepdims=True),
    )
    grad = ep.mean(atleast_kd(vals, rv.ndim) * rv, axis=0)

    # Normalize the aggregated gradient estimate.
    grad /= ep.norms.l2(atleast_kd(flatten(grad), grad.ndim)) + 1e-12

    return grad
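# The method above is HopSkipJump's Monte Carlo estimate of the gradient
# direction at the decision boundary. Below is a minimal self-contained
# NumPy sketch of the same estimator, assuming a toy linear decision
# boundary (sum of features > 1) in place of a real model; the names
# `toy_gradient_estimate` and `decisions` are illustrative, not foolbox API.
import numpy as np


def toy_gradient_estimate(x_adv: np.ndarray, steps: int, delta: float) -> np.ndarray:
    # Unit-norm noise directions, one per query: (steps, d).
    rv = np.random.normal(size=(steps,) + x_adv.shape)
    rv /= np.linalg.norm(rv, axis=1, keepdims=True) + 1e-12
    # Perturb, clip to the valid domain, recover the effective directions.
    perturbed = np.clip(x_adv + delta * rv, 0, 1)
    rv = (perturbed - x_adv) / (delta + 1e-8)
    # Stand-in for is_adversarial: one "model query" per noise direction.
    decisions = perturbed.sum(axis=1) > 1.0
    multipliers = np.where(decisions, 1.0, -1.0)
    # Baseline subtraction reduces variance unless every query agreed.
    if abs(multipliers.mean()) != 1.0:
        multipliers = multipliers - multipliers.mean()
    grad = (multipliers[:, np.newaxis] * rv).mean(axis=0)
    return grad / (np.linalg.norm(grad) + 1e-12)


# The boundary normal of sum(x) > 1 is the all-ones direction, so the
# estimate should roughly align with it:
estimate = toy_gradient_estimate(np.full(4, 0.25), steps=200, delta=0.05)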
import pytest
import eagerpy as ep
from eagerpy import Tensor


def test_mean_none_keepdims(t: Tensor) -> Tensor:
    return ep.mean(t, axis=None, keepdims=True)
def test_mean_keepdims(t: Tensor) -> Tensor:
    return ep.mean(t, axis=0, keepdims=True)
def test_mean_axes(dummy: Tensor) -> Tensor:
    t = ep.ones(dummy, 30).float32().reshape((3, 5, 2))
    return ep.mean(t, axis=(0, 1))
def test_mean_axis(t: Tensor) -> Tensor:
    return ep.mean(t, axis=0)
def test_mean(t: Tensor) -> Tensor:
    return ep.mean(t)
def test_mean_int(t: Tensor) -> None:
    # ep.mean rejects integer tensors.
    with pytest.raises(ValueError):
        ep.mean(ep.arange(t, 5))
def test_mean_bool(t: Tensor) -> None:
    # ep.mean rejects boolean tensors.
    with pytest.raises(ValueError):
        ep.mean(t != 0)
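# Hedged usage sketch tying the tests above to concrete behaviour, assuming
# eagerpy's NumPy backend; the array values are illustrative only.
import numpy as np
import eagerpy as ep

t = ep.astensor(np.arange(6, dtype=np.float64).reshape(2, 3))
ep.mean(t)                          # scalar mean over all elements
ep.mean(t, axis=0)                  # shape (3,): column means
ep.mean(t, axis=0, keepdims=True)   # shape (1, 3): reduced axis kept
ep.mean(t, axis=(0, 1))             # reduce over both axes at once
# Integer and boolean tensors are rejected, which is what test_mean_int
# and test_mean_bool assert:
# ep.mean(ep.arange(t, 5))          # raises ValueError
# ep.mean(t != 0)                   # raises ValueError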