Example #1
    # can be overridden by users
    def value_and_grad(
        self,
        loss_fn: Callable[[ep.Tensor], ep.Tensor],
        x: ep.Tensor,
    ) -> Tuple[ep.Tensor, ep.Tensor]:
        return ep.value_and_grad(loss_fn, x)
Example #2
    def __call__(self, model: Model, inputs: T,
                 criterion: Union[Misclassification, T]) -> T:
        x0, restore_type = ep.astensor_(inputs)
        criterion_ = get_criterion(criterion)
        del inputs, criterion

        if not isinstance(criterion_, Misclassification):
            raise ValueError("unsupported criterion")

        labels = criterion_.labels

        def loss_fn(inputs: ep.Tensor) -> ep.Tensor:
            logits = model(inputs)
            return ep.crossentropy(logits, labels).sum()

        x = x0

        if self.random_start:
            x = x + ep.uniform(x, x.shape, -self.epsilon, self.epsilon)
            x = ep.clip(x, *model.bounds)

        for _ in range(self.steps):
            _, gradients = ep.value_and_grad(loss_fn, x)
            gradients = gradients.sign()
            x = x + self.stepsize * gradients
            x = x0 + ep.clip(x - x0, -self.epsilon, self.epsilon)
            x = ep.clip(x, *model.bounds)

        return restore_type(x)
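The attack above relies on ep.astensor_, which returns both the EagerPy-wrapped tensor and a function that converts results back to the caller's framework type. A minimal round-trip sketch, assuming PyTorch is installed (any framework supported by EagerPy would do):

import torch
import eagerpy as ep

x_native = torch.zeros(2, 3)
x, restore_type = ep.astensor_(x_native)  # wrap and remember the original type
assert isinstance(x, ep.Tensor)

y = restore_type(x + 1.0)                 # convert the result back
assert isinstance(y, torch.Tensor)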
Example #3
    def __call__(
        self,
        inputs,
        labels,
        *,
        rescale=False,
        epsilon=2.0,
        step_size=0.4,
        num_steps=10,
    ):
        def loss_fn(inputs: ep.Tensor, labels: ep.Tensor) -> ep.Tensor:
            logits = ep.astensor(self.model.forward(inputs.tensor))
            return ep.crossentropy(logits, labels).sum()

        if rescale:
            min_, max_ = self.model.bounds()
            scale = (max_ - min_) * np.sqrt(np.prod(inputs.shape[1:]))
            epsilon = epsilon * scale
            step_size = step_size * scale

        x = ep.astensor(inputs)
        y = ep.astensor(labels)
        assert x.shape[0] == y.shape[0]
        assert y.ndim == 1

        x0 = x

        for _ in range(num_steps):
            _, gradients = ep.value_and_grad(loss_fn, x, y)
            gradients = normalize_l2_norms(gradients)
            x = x + step_size * gradients
            x = x0 + clip_l2_norms(x - x0, epsilon)
            x = ep.clip(x, *self.model.bounds())

        return x.tensor
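normalize_l2_norms and clip_l2_norms are not shown in the snippet above. A plausible sketch of what they do, assuming per-sample L2 normalization and projection onto an L2 ball of radius epsilon (the names come from the snippet, the bodies are an assumption):

import eagerpy as ep

def normalize_l2_norms(x: ep.Tensor) -> ep.Tensor:
    # assumed helper: rescale each sample in the batch to unit L2 norm
    flat = x.reshape((x.shape[0], -1))
    norms = (flat ** 2).sum(axis=-1) ** 0.5
    norms = ep.maximum(norms, 1e-12)  # avoid division by zero
    factors = (1.0 / norms).reshape((-1,) + (1,) * (x.ndim - 1))
    return x * factors

def clip_l2_norms(x: ep.Tensor, epsilon: float) -> ep.Tensor:
    # assumed helper: project each sample onto the L2 ball of radius epsilon
    flat = x.reshape((x.shape[0], -1))
    norms = (flat ** 2).sum(axis=-1) ** 0.5
    norms = ep.maximum(norms, 1e-12)
    factors = ep.minimum(ep.ones_like(norms), epsilon / norms)
    return x * factors.reshape((-1,) + (1,) * (x.ndim - 1))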
Example #4
def test_value_and_grad_multiple_args(dummy: Tensor) -> None:
    if isinstance(dummy, ep.NumPyTensor):
        pytest.skip()

    def f(x: Tensor, y: Tensor) -> Tensor:
        return (x * y).sum()

    t = ep.arange(dummy, 8).float32().reshape((2, 4))
    v, g = ep.value_and_grad(f, t, t)
    assert v.item() == 140
    assert (g == t).all()
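The same call can be reproduced outside the test fixture with any framework that supports automatic differentiation; a minimal sketch assuming PyTorch. Note that ep.value_and_grad differentiates with respect to the first tensor argument only, which is why the gradient equals t here:

import torch
import eagerpy as ep

def f(x: ep.Tensor, y: ep.Tensor) -> ep.Tensor:
    return (x * y).sum()

t = ep.astensor(torch.arange(8, dtype=torch.float32).reshape(2, 4))
v, g = ep.value_and_grad(f, t, t)  # gradient w.r.t. the first argument
assert v.item() == 140.0
assert (g == t).all()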
Example #5
    def __call__(self, model, input_data, labels, epsilon):
        labels = ep.astensor(labels)
        loss_function = self.get_loss_function(model, labels)
        modified_data = input_data

        # FGSM algorithm
        _, gradients = ep.value_and_grad(loss_function, input_data)
        gradient_sign = gradients.sign()
        modified_data = input_data + epsilon * gradient_sign
        modified_data = ep.clip(modified_data, *model.bounds)
        return modified_data
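get_loss_function is not shown in this snippet. Judging from the loss closures in the other examples, it presumably builds a cross-entropy closure over the model; a sketch under that assumption:

    def get_loss_function(self, model, labels):
        # assumed shape of the helper: a cross-entropy closure over the model,
        # mirroring the loss_fn definitions in the other examples
        def loss_function(inputs: ep.Tensor) -> ep.Tensor:
            logits = model(inputs)
            return ep.crossentropy(logits, labels).sum()
        return loss_function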
Example #6
    def __call__(self, model: Model, inputs, labels):
        inputs, labels, restore = wrap(inputs, labels)

        def loss_fn(inputs):
            logits = model.forward(inputs)
            return ep.crossentropy(logits, labels).sum()

        x = x0 = inputs

        for _ in range(self.steps):
            _, gradients = ep.value_and_grad(loss_fn, x)
            gradients = normalize_l2_norms(gradients)
            x = x + self.stepsize * gradients
            x = x0 + clip_l2_norms(x - x0, self.epsilon)
            x = ep.clip(x, *model.bounds())

        return restore(x)
Example #7
    def __call__(self, model: Model, inputs, labels):
        inputs, labels, restore = wrap(inputs, labels)

        def loss_fn(inputs):
            logits = model.forward(inputs)
            return ep.crossentropy(logits, labels).sum()

        x = x0 = inputs

        if self.random_start:
            x = x + ep.uniform(x, x.shape, -self.epsilon, self.epsilon)
            x = ep.clip(x, *model.bounds())

        for _ in range(self.steps):
            _, gradients = ep.value_and_grad(loss_fn, x)
            gradients = gradients.sign()
            x = x + self.stepsize * gradients
            x = x0 + ep.clip(x - x0, -self.epsilon, self.epsilon)
            x = ep.clip(x, *model.bounds())

        return restore(x)
Example #8
    def __call__(
        self,
        inputs,
        labels,
        *,
        rescale=False,
        epsilon=0.3,
        step_size=0.05,
        num_steps=10,
        random_start=False,
    ):
        def loss_fn(inputs: ep.Tensor, labels: ep.Tensor) -> ep.Tensor:
            logits = ep.astensor(self.model.forward(inputs.tensor))
            return ep.crossentropy(logits, labels).sum()

        if rescale:
            min_, max_ = self.model.bounds()
            scale = max_ - min_
            epsilon = epsilon * scale
            step_size = step_size * scale

        x = ep.astensor(inputs)
        y = ep.astensor(labels)
        assert x.shape[0] == y.shape[0]
        assert y.ndim == 1

        x0 = x

        if random_start:
            x = x + ep.uniform(x, x.shape, -epsilon, epsilon)
            x = ep.clip(x, *self.model.bounds())

        for _ in range(num_steps):
            _, gradients = ep.value_and_grad(loss_fn, x, y)
            gradients = gradients.sign()
            x = x + step_size * gradients
            x = x0 + ep.clip(x - x0, -epsilon, epsilon)
            x = ep.clip(x, *self.model.bounds())

        return x.tensor
Example #9
def value_and_grad(loss_fn: Callable[[ep.Tensor], ep.Tensor],
                   x: ep.Tensor) -> Tuple[ep.Tensor, ep.Tensor]:
    return ep.value_and_grad(loss_fn, x)
Example #10
    def value_and_grad(self, loss_fn, x):
        # the value comes from the given loss_fn, while the gradient is taken
        # from a second loss built around model2 (presumably available from the
        # enclosing scope)
        val1 = loss_fn(x)
        loss_fn2 = self.get_loss_fn(model2, self.labels)
        _, grad2 = ep.value_and_grad(loss_fn2, x)
        return val1, grad2