def _gradient(self, inputs, labels):
        """
        Calculate the gradient of the loss w.r.t. input samples.

        Args:
            inputs (Union[numpy.ndarray, tuple]): Input samples.
            labels (Union[numpy.ndarray, tuple]): Original/target labels. \
                For each input if it has more than one label, it is wrapped in a tuple.

        Returns:
            numpy.ndarray, gradient of labels w.r.t inputs.

        Examples:
            >>> grad = self._gradient([[0.5, 0.3, 0.4]],
            ...                       [[0, 0, 0, 1, 0, 0, 0, 0, 0, 0]])
        """
        # get grad of loss over x
        inputs_tensor = to_tensor_tuple(inputs)
        labels_tensor = to_tensor_tuple(labels)
        out_grad = self._loss_grad(*inputs_tensor, *labels_tensor)
        # the grad network may return a tuple; its first element is d(loss)/d(x)
        if isinstance(out_grad, tuple):
            out_grad = out_grad[0]
        gradient = out_grad.asnumpy()

        # for a targeted attack, descend the loss toward the target label
        if self._is_targeted:
            gradient = -gradient
        return normalize_value(gradient, self._norm_level)
def _projection(values, eps, norm_level):
    """
    Implementation of values normalization within eps.

    Args:
        values (numpy.ndarray): Input data.
        eps (float): Project radius.
        norm_level (Union[int, char, numpy.inf]): Order of the norm. Possible
            values: np.inf, 1 or 2.

    Returns:
        numpy.ndarray, normalized values.

    Raises:
        NotImplementedError: If the norm_level is not in [1, 2, np.inf, '1',
            '2', 'inf'].
    """
    if norm_level in (1, '1'):
        sample_batch = values.shape[0]
        x_flat = values.reshape(sample_batch, -1)
        proj_flat = _reshape_l1_projection(x_flat, eps)
        return proj_flat.reshape(values.shape)
    if norm_level in (2, '2'):
        return eps * normalize_value(values, norm_level)
    if norm_level in (np.inf, 'inf'):
        return eps * np.sign(values)
    msg = 'Values of `norm_level` different from 1, 2 and `np.inf` are ' \
          'currently not supported.'
    LOGGER.error(TAG, msg)
    raise NotImplementedError(msg)
    def _gradient(self, inputs, labels):
        """
        Calculate the gradient of the loss w.r.t. input samples.

        Args:
            inputs (numpy.ndarray): Input samples.
            labels (numpy.ndarray): Original/target labels.

        Returns:
            numpy.ndarray, gradient of labels w.r.t inputs.

        Examples:
            >>> grad = self._gradient([[0.5, 0.3, 0.4]],
            ...                       [[0, 0, 0, 1, 0, 0, 0, 0, 0, 0]])
        """
        # output sensitivity used to seed back-propagation
        sens = Tensor(np.array([1.0], inputs.dtype))
        # get grad of loss over x
        out_grad = self._loss_grad(Tensor(inputs), Tensor(labels), sens)
        if isinstance(out_grad, tuple):
            out_grad = out_grad[0]
        raw = out_grad.asnumpy()
        # for a targeted attack, move against the gradient direction
        gradient = -raw if self._is_targeted else raw
        return normalize_value(gradient, self._norm_level)
# --- Example 4 ---
    def _gradient(self, inputs, labels):
        """
        Calculate gradients based on input samples and original/target labels.

        Args:
            inputs (numpy.ndarray): Input sample.
            labels (Union[numpy.ndarray, tuple]): Original/target labels. \
                For each input if it has more than one label, it is wrapped in a tuple.

        Returns:
            numpy.ndarray, gradient of inputs.
        """
        # Normalize labels to a tuple of Tensors so they can be unpacked
        # uniformly into the grad network (idiomatic generator expression
        # instead of a manual tuple-concatenation loop).
        if isinstance(labels, tuple):
            labels_tensor = tuple(Tensor(item) for item in labels)
        else:
            labels_tensor = (Tensor(labels), )
        out_grad = self._grad_all(Tensor(inputs), *labels_tensor)
        # the grad network may return a tuple; its first element is d(loss)/d(x)
        if isinstance(out_grad, tuple):
            out_grad = out_grad[0]
        gradient = out_grad.asnumpy()

        # for a targeted attack, descend the loss toward the target label
        if self._is_targeted:
            gradient = -gradient
        return normalize_value(gradient, self._norm_level)
# --- Example 5 ---
    def _gradient(self, inputs, labels):
        """
        Calculate gradients based on input samples and original/target labels.

        Args:
            inputs (numpy.ndarray): Input sample.
            labels (numpy.ndarray): Original/target label.

        Returns:
            numpy.ndarray, gradient of inputs.
        """
        raw_grad = self._grad_all(Tensor(inputs), Tensor(labels))
        # the grad network may return a tuple; take the gradient w.r.t. x
        if isinstance(raw_grad, tuple):
            raw_grad = raw_grad[0]
        gradient = raw_grad.asnumpy()
        # for a targeted attack, move against the gradient direction
        gradient = -gradient if self._is_targeted else gradient
        return normalize_value(gradient, self._norm_level)
# --- Example 6 ---
    def _gradient(self, inputs, labels):
        """
        Calculate gradients based on input samples and original/target labels.

        Args:
            inputs (numpy.ndarray): Input sample.
            labels (numpy.ndarray): Original/target label.

        Returns:
            numpy.ndarray, gradient of inputs.

        Examples:
            >>> grad = self._gradient([[0.2, 0.3, 0.4]],
            ...                       [[0, 1, 0, 0, 0, 0, 0, 0, 0, 0]])
        """
        # output sensitivity used to seed back-propagation
        sens = Tensor(np.array([1.0], self._dtype))
        out_grad = self._grad_all(Tensor(inputs), Tensor(labels), sens)
        # the grad network may return a tuple; its first element is d(loss)/d(x)
        if isinstance(out_grad, tuple):
            out_grad = out_grad[0]
        gradient = out_grad.asnumpy()

        # for a targeted attack, descend the loss toward the target label
        if self._is_targeted:
            gradient = -gradient
        return normalize_value(gradient, self._norm_level)
# --- Example 7 ---
    def _generate_one(self, one_input, label, epsilons=10):
        """
        Increases the amount of salt and pepper noise to generate adversarial
        samples.

        Args:
            one_input (numpy.ndarray): The original, unperturbed input.
            label (numpy.ndarray): The target label.
            epsilons (int) : Number of steps to try probability between 0
                and 1. Default: 10

        Returns:
            - bool, whether an adversarial example was found.

            - numpy.ndarray, adversarial example.

            - int, query times for this sample.

        Examples:
            >>> one_adv = self._generate_one(input, label)
        """
        # use binary search to get epsilons
        low_ = 0.0
        high_ = 1.0
        query_count = 0
        # remember original shape/dtype; work on a flattened copy
        input_shape = one_input.shape
        input_dtype = one_input.dtype
        one_input = one_input.reshape(-1)
        # magnitude of the data range, used as the salt/pepper pixel value
        depth = np.abs(np.subtract(self._bounds[0], self._bounds[1]))
        best_adv = np.copy(one_input)
        best_eps = high_
        find_adv = False
        for _ in range(self._max_iter):
            # reset the eps search interval for this round of noise
            min_eps = low_
            max_eps = (low_ + high_) / 2
            for _ in range(epsilons):
                adv = np.copy(one_input)
                # one uniform draw per element decides salt / pepper / unchanged
                noise = np.random.uniform(low=low_, high=high_, size=one_input.size)
                eps = (min_eps + max_eps) / 2
                # add salt
                adv[noise < eps] = -depth
                # add pepper
                adv[noise >= (high_ - eps)] = depth
                # normalized sample
                adv = normalize_value(np.expand_dims(adv, axis=0), 'l2').astype(input_dtype)
                query_count += 1
                # every candidate costs one model query
                ite_bool = self._model.is_adversarial(adv.reshape(input_shape),
                                                      label,
                                                      is_targeted=self._is_targeted)
                if ite_bool:
                    find_adv = True
                    # keep the adversarial example with the smallest noise rate
                    if best_eps > eps:
                        best_adv = adv
                        best_eps = eps
                    # success: binary-search smaller noise rates
                    max_eps = eps
                    LOGGER.debug(TAG, 'Attack succeed, epsilon is {}'.format(eps))
                else:
                    # failure: binary-search larger noise rates
                    min_eps = eps
            if find_adv:
                break
        return find_adv, best_adv.reshape(input_shape), query_count