Code example #1
import numpy as np

# Import paths assumed from the ART repository layout.
from art.preprocessing.standardisation_mean_std.utils import broadcastable_mean_std
from tests.utils import ARTTestException


def test_broadcastable_mean_std(art_warning):
    try:
        mean, std = broadcastable_mean_std(np.ones((1, 3, 20, 20)), np.ones(3), np.ones(3))
        assert mean.shape == std.shape == (1, 3, 1, 1)

        mean, std = broadcastable_mean_std(np.ones((1, 3, 20, 20)), np.ones((1, 3, 1, 1)), np.ones((1, 3, 1, 1)))
        assert mean.shape == std.shape == (1, 3, 1, 1)
    except ARTTestException as e:
        art_warning(e)
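The helper itself is not shown above; inferred purely from the shapes the test asserts, a minimal sketch of its behaviour could look like the following. This is a hypothetical reimplementation for illustration, not the ART source, and the name broadcastable_mean_std_sketch is made up here.

import numpy as np


def broadcastable_mean_std_sketch(x, mean, std):
    """Hypothetical sketch: lift per-channel statistics to shapes that broadcast against `x`."""
    mean = np.asarray(mean, dtype=np.float32)
    std = np.asarray(std, dtype=np.float32)
    if mean.ndim == 1:
        # A (C,) statistic becomes (1, C, 1, 1) so it lines up with the channel
        # axis of a channels-first (N, C, H, W) batch; statistics that already
        # broadcast, e.g. shape (1, C, 1, 1), pass through unchanged.
        shape = [1] * x.ndim
        shape[1] = -1
        mean = mean.reshape(shape)
        std = std.reshape(shape)
    return mean, std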
Code example #2
    def __call__(
        self,
        x: np.ndarray,
        y: Optional[np.ndarray] = None,
    ) -> Tuple[np.ndarray, Optional[np.ndarray]]:
        """
        Apply StandardisationMeanStd to inputs `x`.

        :param x: Input samples to standardise.
        :param y: Label data, will not be affected by this preprocessing.
        :return: Standardised input samples and unmodified labels.
        """
        if x.dtype in [np.uint8, np.uint16, np.uint32, np.uint64]:
            raise TypeError(
                "The data type of input data `x` is {} and cannot represent negative values. Consider "
                "changing the data type of the input data `x` to a type that supports negative values e.g. "
                "np.float32.".format(x.dtype))

        if self._broadcastable_mean is None:
            self._broadcastable_mean, self._broadcastable_std = broadcastable_mean_std(
                x, self.mean, self.std)

        x_norm = x - self._broadcastable_mean
        x_norm = x_norm / self._broadcastable_std
        x_norm = x_norm.astype(ART_NUMPY_DTYPE)

        return x_norm, y
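A minimal usage sketch of this __call__, assuming the class path art.preprocessing.standardisation_mean_std.numpy.StandardisationMeanStd and a constructor taking per-channel mean and std keyword arguments (both assumptions about the ART API, not confirmed by the excerpt above):

import numpy as np

# Class path and keyword arguments assumed from the ART API.
from art.preprocessing.standardisation_mean_std.numpy import StandardisationMeanStd

# Input must be a float dtype: the guard above rejects unsigned integer
# arrays, which cannot represent the negative values standardisation yields.
x = np.random.rand(4, 3, 20, 20).astype(np.float32)
standardiser = StandardisationMeanStd(mean=np.array([0.485, 0.456, 0.406]),
                                      std=np.array([0.229, 0.224, 0.225]))
x_norm, _ = standardiser(x)
assert x_norm.shape == x.shape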
Code example #3
    def estimate_gradient(self, x: np.ndarray, gradient: np.ndarray) -> np.ndarray:
        """
        Provide an estimate of the gradients of the preprocessor for the backward pass. If the preprocessor is not
        differentiable, this is an estimate of the gradient, most often replacing the computation performed by the
        preprocessor with the identity function (the default).

        :param x: Input data for which the gradient is estimated. First dimension is the batch size.
        :param gradient: Gradient value so far.
        :return: The gradient (estimate) of the defence.
        """
        _, std = broadcastable_mean_std(x, self.mean, self.std)
        gradient_back = gradient / std

        return gradient_back
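The division by std follows from the chain rule: standardisation is elementwise affine, x_norm = (x - mean) / std, so its derivative with respect to x is 1 / std and the incoming gradient is simply scaled by that factor. A self-contained finite-difference sanity check (illustrative only, no ART imports needed):

import numpy as np

mean, std = np.float32(0.5), np.float32(0.25)
x, eps = np.float32(1.3), np.float32(1e-3)

# Central difference of x_norm = (x - mean) / std around x.
fd = (((x + eps) - mean) / std - ((x - eps) - mean) / std) / (2 * eps)

# Matches the analytic derivative 1 / std used by estimate_gradient.
assert np.isclose(fd, 1.0 / std, atol=1e-3)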
Code example #4
    def forward(
        self, x: "torch.Tensor", y: Optional["torch.Tensor"] = None
    ) -> Tuple["torch.Tensor", Optional["torch.Tensor"]]:
        """
        Apply standardisation with mean and standard deviation to input `x`.

        :param x: Input samples to standardise.
        :param y: Label data, will not be affected by this preprocessing.
        :return: Standardised input samples and unmodified labels.
        """
        import torch  # lgtm [py/repeated-import]

        if self._broadcastable_mean is None:
            self._broadcastable_mean, self._broadcastable_std = broadcastable_mean_std(x, self.mean, self.std)

        x_norm = x - torch.tensor(self._broadcastable_mean, device=self._device, dtype=torch.float32)
        x_norm = x_norm / torch.tensor(self._broadcastable_std, device=self._device, dtype=torch.float32)

        return x_norm, y
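In the PyTorch version no hand-written gradient estimate is needed: autograd differentiates the affine forward itself. A small plain-torch check (no ART imports) that backpropagation divides the upstream gradient by std, matching estimate_gradient above:

import torch

x = torch.rand(2, 3, 4, 4, requires_grad=True)
mean = torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1)
std = torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1)

x_norm = (x - mean) / std
upstream = torch.ones_like(x_norm)
x_norm.backward(upstream)

# d((x - mean) / std) / dx = 1 / std, so the gradient is upstream / std.
assert torch.allclose(x.grad, upstream / std)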
Code example #5
    def forward(
        self,
        x: "tf.Tensor",
        y: Optional["tf.Tensor"] = None
    ) -> Tuple["tf.Tensor", Optional["tf.Tensor"]]:
        """
        Apply standardisation with mean and standard deviation to input `x`.

        :param x: Input samples to standardise.
        :param y: Label data, will not be affected by this preprocessing.
        :return: Standardised input samples and unmodified labels.
        """
        import tensorflow as tf  # lgtm [py/repeated-import]

        if self._broadcastable_mean is None:
            self._broadcastable_mean, self._broadcastable_std = broadcastable_mean_std(
                x, self.mean, self.std)

        x_norm = x - self._broadcastable_mean
        x_norm = x_norm / self._broadcastable_std
        x_norm = tf.cast(x_norm, dtype=ART_NUMPY_DTYPE)  # pylint: disable=E1123,E1120

        return x_norm, y
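The TensorFlow port behaves the same way under tf.GradientTape; a self-contained check (plain TensorFlow, no ART imports) that the gradient of the standardised output with respect to x is 1 / std at every position:

import numpy as np
import tensorflow as tf

x = tf.Variable(np.random.rand(2, 3, 4, 4).astype(np.float32))
mean = np.array([0.485, 0.456, 0.406], dtype=np.float32).reshape(1, 3, 1, 1)
std = np.array([0.229, 0.224, 0.225], dtype=np.float32).reshape(1, 3, 1, 1)

with tf.GradientTape() as tape:
    x_norm = (x - mean) / std
    loss = tf.reduce_sum(x_norm)

grad = tape.gradient(loss, x)

# Broadcasting yields 1 / std everywhere, as in estimate_gradient.
assert np.allclose(grad.numpy(), 1.0 / std)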