Example #1
    def __call__(self, input_or_adv, label=None, unpack=True, net=None):
        """
        Binary search for magnitudes to zero out.

        :param input_or_adv: the adversarial image
        :param label: the correct label
        :param unpack: not used
        :param net: the ml model
        :return: an adversarial image
        """
        onesided = True
        is_next_power2 = False
        net = self._default_model._model  # we operate directly in PyTorch
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        net.to(device)
        net = pytorch_net(net)
        input = torch.tensor(input_or_adv).unsqueeze(dim=0).to(device)
        xfft, _, _ = get_xfft_hw(input=input,
                                 onesided=onesided,
                                 is_next_power2=is_next_power2)
        spectrum = get_spectrum(xfft, squeeze=False)
        min = spectrum.min()
        max = spectrum.max()

        def decrease_func(image, high):
            return fft_zero_low_magnitudes(input=image,
                                           high=high,
                                           low=min,
                                           is_next_power2=is_next_power2,
                                           onesided=onesided)

        _, high = bisearch_to_decrease_rate(input=input,
                                            label=label,
                                            net=net,
                                            low=min,
                                            high=max,
                                            func=decrease_func)

        if high is None:
            return None

        def increase_func(image, low):
            return fft_zero_low_magnitudes(input=image,
                                           high=high,
                                           low=low,
                                           is_next_power2=is_next_power2,
                                           onesided=onesided)

        adv_image, _ = bisearch_to_increase_rate(input=input,
                                                 label=label,
                                                 net=net,
                                                 low=min,
                                                 high=high,
                                                 func=increase_func)
        if adv_image is None:
            return None
        else:
            return adv_image.detach().squeeze().cpu().numpy()
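
The helpers used above (get_xfft_hw, fft_zero_low_magnitudes, bisearch_to_decrease_rate, bisearch_to_increase_rate) are project-specific. A minimal, self-contained sketch of the same idea, using numpy's FFT in place of the torch-based helpers and a hypothetical is_adversarial predicate in place of the model query, might look like this:

import numpy as np

def zero_low_magnitudes(image, threshold):
    # Zero all 2D-FFT coefficients whose magnitude is below `threshold`.
    # `image` is a (C, H, W) float array; the FFT is taken per channel.
    xfft = np.fft.fft2(image, axes=(-2, -1))
    xfft[np.abs(xfft) < threshold] = 0.0
    return np.real(np.fft.ifft2(xfft, axes=(-2, -1)))

def bisect_zeroing_threshold(image, is_adversarial, steps=20):
    # Search for (roughly) the smallest magnitude threshold whose
    # filtered image is still misclassified by the model.
    spectrum = np.abs(np.fft.fft2(image, axes=(-2, -1)))
    low, high = spectrum.min(), spectrum.max()
    best = None
    for _ in range(steps):
        mid = (low + high) / 2.0
        candidate = zero_low_magnitudes(image, mid)
        if is_adversarial(candidate):
            best, high = candidate, mid   # adversarial: try zeroing out less
        else:
            low = mid                     # not adversarial yet: zero out more
    return best

The original method additionally refines the lower end of the zeroed band with bisearch_to_increase_rate; the sketch only bisects a single threshold.
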
Example #2
    def __call__(self,
                 input_or_adv,
                 label=None,
                 unpack=True,
                 max_frequencies=100000,
                 debug=True):
        """Sets the smallest frequency coefficients in their magnitudes to 0.

        Parameters
        ----------
        input_or_adv : `numpy.ndarray` or :class:`Adversarial`
            The original, correctly classified image. If image is a
            numpy array, label must be passed as well. If image is
            an :class:`Adversarial` instance, label must not be passed.
        label : int
            The reference label of the original image. Must be passed
            if image is a numpy array, must not be passed if image is
            an :class:`Adversarial` instance.
        unpack : bool
            If true, returns the adversarial image, otherwise returns
            the Adversarial object.
        max_frequencies : int
            Maximum number of frequency coefficients to try.
        debug : bool
            If true, verifies the magnitude ordering and prints progress.

        """
        a = input_or_adv
        del input_or_adv
        del label
        del unpack

        # Get the axis of the color channel.
        channel_axis = a.channel_axis(batch=False)
        assert channel_axis == 0
        image = a.original_image
        axes = [i for i in range(image.ndim) if i != channel_axis]
        assert len(axes) == 2
        H = image.shape[axes[0]]
        W = image.shape[axes[1]]

        # Add the batch dimension.
        image_torch = torch.from_numpy(image).unsqueeze(0)
        is_next_power2 = False
        onesided = True
        xfft, H_fft, W_fft = get_xfft_hw(input=image_torch,
                                         is_next_power2=is_next_power2,
                                         onesided=onesided)
        freqs = get_sorted_spectrum_indices(xfft=xfft)
        W_xfft = xfft.shape[-2]
        perturbed_xfft = xfft.clone()
        channel_size = H_fft * W_xfft
        zero_value = torch.tensor([0.0, 0.0])
        last_magnitude = 0.0
        # freqs = freqs[:max_frequencies]
        for i, freq in enumerate(freqs):
            # Decode the flat spectrum index into (channel, row, column).
            c = freq // channel_size
            w = freq % W_xfft
            h = (freq - c * channel_size) // W_xfft

            location = [c, h, w]
            # Add the batch dim.
            location.insert(0, 0)
            location = tuple(location)
            if debug:
                elem = perturbed_xfft[location]
                re = elem[0]
                im = elem[1]
                magnitude = np.sqrt(re**2 + im**2)
                assert magnitude >= last_magnitude
                last_magnitude = magnitude
            perturbed_xfft[location] = zero_value
            perturbed = get_ifft_hw(xfft=perturbed_xfft,
                                    H_fft=H_fft,
                                    W_fft=W_fft,
                                    H=H,
                                    W=W)
            perturbed = perturbed.detach().cpu().numpy().squeeze()
            _, is_adv = a.predictions(perturbed)
            if is_adv:
                if debug:
                    print('# of frequencies zeroed out: ', i + 1)
                return
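
As a rough, self-contained illustration of the same sorted-magnitude zeroing (numpy in place of get_xfft_hw/get_ifft_hw, and a hypothetical is_adversarial callback standing in for a.predictions), one could write:

import numpy as np

def zero_smallest_coefficients(image, is_adversarial, max_frequencies=100000):
    # Zero per-channel FFT coefficients in order of increasing magnitude
    # until the reconstruction is misclassified.
    xfft = np.fft.fft2(image, axes=(-2, -1))          # (C, H, W), complex
    order = np.argsort(np.abs(xfft), axis=None)       # flat indices, ascending magnitude
    for i, flat in enumerate(order[:max_frequencies]):
        c, h, w = np.unravel_index(flat, xfft.shape)  # same decoding as c, h, w above
        xfft[c, h, w] = 0.0
        perturbed = np.real(np.fft.ifft2(xfft, axes=(-2, -1)))
        if is_adversarial(perturbed):
            return perturbed, i + 1                   # number of coefficients zeroed out
    return None, max_frequencies
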
Example #3
    def __call__(self, input_or_adv, label=None, unpack=True):
        """Perturbs just a single frequency and sets it to the min or max.

        Parameters
        ----------
        input_or_adv : `numpy.ndarray` or :class:`Adversarial`
            The original, correctly classified image. If image is a
            numpy array, label must be passed as well. If image is
            an :class:`Adversarial` instance, label must not be passed.
        label : int
            The reference label of the original image. Must be passed
            if image is a numpy array, must not be passed if image is
            an :class:`Adversarial` instance.
        unpack : bool
            If true, returns the adversarial image, otherwise returns
            the Adversarial object.

        """
        a = input_or_adv
        del input_or_adv
        del label
        del unpack

        # Get the axis of the color channel.
        channel_axis = a.channel_axis(batch=False)
        assert channel_axis == 0
        image = a.original_image
        axes = [i for i in range(image.ndim) if i != channel_axis]
        assert len(axes) == 2
        H = image.shape[axes[0]]
        W = image.shape[axes[1]]

        # Add the batch dimension.
        image_torch = torch.from_numpy(image).unsqueeze(0)
        is_next_power2 = False
        onesided = True
        xfft, H_fft, W_fft = get_xfft_hw(input=image_torch,
                                         is_next_power2=is_next_power2,
                                         onesided=onesided)
        # maxf, minf = get_max_min_complex(xfft=xfft)
        value = torch.tensor([0.0, 0.0])
        W_xfft = xfft.shape[-2]
        total_freqs = H_fft * W_xfft
        max_frequencies = int(total_freqs * self.max_frequencies_percent / 100)
        for iter in range(self.iterations):
            freqs = nprng.permutation(total_freqs)
            freqs = freqs[:max_frequencies]
            perturbed_xfft = xfft.clone()
            for num_freqs, freq in enumerate(freqs):
                w = freq % W_xfft
                h = freq // W_xfft

                location = [h, w]
                # Add the channel dimension.
                location.insert(channel_axis, slice(None))
                # Add the batch dim.
                location.insert(0, slice(None))
                location = tuple(location)

                # if np.random.randint(0, 2) == 1:
                #     value = minf
                # else:
                #     value = maxf
                # value = check_real_vals(
                #     H_fft=H_fft, W_fft=W_fft, h=h, w=w, value=value)
                perturbed_xfft[location] = value
                perturbed = get_ifft_hw(xfft=perturbed_xfft,
                                        H_fft=H_fft,
                                        W_fft=W_fft,
                                        H=H,
                                        W=W)
                perturbed = perturbed.detach().cpu().numpy().squeeze()
                if self.is_strict:
                    perturbed = np.clip(perturbed,
                                        a_min=self.args.min,
                                        a_max=self.args.max)
                _, is_adv, _, dist = a.predictions(perturbed,
                                                   return_details=True)
                if is_adv:
                    if self.is_debug:
                        dist = np.sqrt(dist.value)
                        print(f'iterations: {iter}, '
                              f'number of modified frequencies: {num_freqs + 1}, '
                              f'dist: {dist}')
                    if self.is_fast:
                        return
                    break
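
A compact numpy sketch of the same strategy, zeroing a growing random subset of frequencies in every channel at once and clipping the result to a valid pixel range (again with a hypothetical is_adversarial predicate in place of a.predictions), could look like:

import numpy as np

def random_zeroing_attack(image, is_adversarial, iterations=10,
                          max_percent=10.0, pixel_min=0.0, pixel_max=1.0,
                          rng=None):
    # Zero up to `max_percent` percent of randomly chosen frequencies,
    # one at a time, restarting with a fresh permutation each iteration.
    rng = rng or np.random.default_rng()
    xfft = np.fft.fft2(image, axes=(-2, -1))
    h_fft, w_fft = xfft.shape[-2:]
    total = h_fft * w_fft
    budget = int(total * max_percent / 100)
    for it in range(iterations):
        perturbed_xfft = xfft.copy()
        for n, flat in enumerate(rng.permutation(total)[:budget], start=1):
            h, w = divmod(int(flat), w_fft)
            perturbed_xfft[:, h, w] = 0.0             # zero this frequency in every channel
            perturbed = np.real(np.fft.ifft2(perturbed_xfft, axes=(-2, -1)))
            perturbed = np.clip(perturbed, pixel_min, pixel_max)
            if is_adversarial(perturbed):
                return perturbed, it, n               # image, restart index, #frequencies
    return None, iterations, 0
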
Example #4
    def __call__(self, input_or_adv, label=None, unpack=True):
        """Perturbs multiple frequencies and sets them to zero.

        Parameters
        ----------
        input_or_adv : `numpy.ndarray` or :class:`Adversarial`
            The original, correctly classified image. If image is a
            numpy array, label must be passed as well. If image is
            an :class:`Adversarial` instance, label must not be passed.
        label : int
            The reference label of the original image. Must be passed
            if image is a numpy array, must not be passed if image is
            an :class:`Adversarial` instance.
        unpack : bool
            Not used; kept only to preserve the inherited signature.
        """
        a = input_or_adv
        del input_or_adv
        del label
        del unpack

        # Get the axis of the color channel.
        channel_axis = a.channel_axis(batch=False)
        assert channel_axis == 0
        image = a.original_image
        axes = [i for i in range(image.ndim) if i != channel_axis]
        assert len(axes) == 2
        H = image.shape[axes[0]]
        W = image.shape[axes[1]]
        # We need the batch dim.
        image_torch = torch.from_numpy(image).unsqueeze(0)
        is_next_power2 = False
        onesided = True
        xfft, H_fft, W_fft = get_xfft_hw(input=image_torch,
                                         is_next_power2=is_next_power2,
                                         onesided=onesided)
        # maxf, minf = get_max_min_complex(xfft=xfft)
        value = torch.tensor([0.0, 0.0])
        W_xfft = xfft.shape[-2]
        total_freqs = H_fft * W_xfft

        for iter in range(self.iterations):
            low = 0
            high = total_freqs
            freqs = nprng.permutation(total_freqs)
            while low <= high:
                # mid is the number of frequencies to zero out.
                mid = (low + high) // 2
                # Slice into a new name so that later, larger values of mid
                # still see the full permutation.
                chosen_freqs = freqs[:mid]
                perturbed_xfft = xfft.clone()
                for num_freqs, freq in enumerate(chosen_freqs):
                    w = freq % W_xfft
                    h = freq // W_xfft
                    perturbed_xfft[:, :, h, w] = value
                perturbed = get_ifft_hw(xfft=perturbed_xfft,
                                        H_fft=H_fft,
                                        W_fft=W_fft,
                                        H=H,
                                        W=W)
                perturbed = perturbed.detach().cpu().numpy().squeeze()
                if self.is_strict:
                    perturbed = np.clip(perturbed,
                                        a_min=self.args.min,
                                        a_max=self.args.max)
                _, is_adv, _, dist = a.predictions(perturbed,
                                                   return_details=True)
                if is_adv:
                    high = mid - self.resolution
                    if self.is_debug:
                        dist = np.sqrt(dist.value)
                        print(f'iterations: {iter}, '
                              f'number of modified frequencies: {mid}, '
                              f'dist: {dist}')
                else:
                    low = mid + self.resolution
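
The same bisection over the number of zeroed frequencies can be sketched without the project helpers; note that the slice is re-taken from the fixed permutation on every probe, the same point the chosen_freqs slice above addresses. Function names, the numpy FFT, and the is_adversarial predicate are assumptions standing in for a.predictions:

import numpy as np

def bisect_num_zeroed(image, is_adversarial, resolution=1, rng=None):
    # Binary-search the number of randomly chosen FFT coefficients to zero:
    # fewer zeroed coefficients means a smaller perturbation, so shrink `high`
    # whenever the result is adversarial and grow `low` otherwise.
    rng = rng or np.random.default_rng()
    xfft = np.fft.fft2(image, axes=(-2, -1))
    h_fft, w_fft = xfft.shape[-2:]
    order = rng.permutation(h_fft * w_fft)            # one fixed random order per search
    best, low, high = None, 0, h_fft * w_fft
    while low <= high:
        mid = (low + high) // 2
        perturbed_xfft = xfft.copy()
        hs, ws = np.divmod(order[:mid], w_fft)
        perturbed_xfft[:, hs, ws] = 0.0               # zero the first `mid` frequencies
        perturbed = np.real(np.fft.ifft2(perturbed_xfft, axes=(-2, -1)))
        if is_adversarial(perturbed):
            best, high = perturbed, mid - resolution
        else:
            low = mid + resolution
    return best
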
Example #5
    def __call__(self,
                 input_or_adv,
                 label=None,
                 unpack=True,
                 max_frequencies=1000):
        """Perturbs just a single frequency and sets it to the min or max.

        Parameters
        ----------
        input_or_adv : `numpy.ndarray` or :class:`Adversarial`
            The original, correctly classified image. If image is a
            numpy array, label must be passed as well. If image is
            an :class:`Adversarial` instance, label must not be passed.
        label : int
            The reference label of the original image. Must be passed
            if image is a numpy array, must not be passed if image is
            an :class:`Adversarial` instance.
        unpack : bool
            If true, returns the adversarial image, otherwise returns
            the Adversarial object.
        max_frequencies : int
            Maximum number of frequencies to try.

        """
        a = input_or_adv
        del input_or_adv
        del label
        del unpack

        # Get the axis of the color channel.
        channel_axis = a.channel_axis(batch=False)
        assert channel_axis == 0
        image = a.original_image
        axes = [i for i in range(image.ndim) if i != channel_axis]
        assert len(axes) == 2
        H = image.shape[axes[0]]
        W = image.shape[axes[1]]

        # Add the batch dimension.
        image_torch = torch.from_numpy(image).unsqueeze(0)
        is_next_power2 = False
        onesided = True
        xfft, H_fft, W_fft = get_xfft_hw(input=image_torch,
                                         is_next_power2=is_next_power2,
                                         onesided=onesided)
        maxf, minf = get_max_min_complex(xfft=xfft)
        W_xfft = xfft.shape[-2]
        total_freqs = H_fft * W_xfft
        freqs = nprng.permutation(total_freqs)
        # freqs = freqs[:max_frequencies]
        for i, freq in enumerate(freqs):
            w = freq % W_xfft
            h = freq // W_xfft

            location = [h, w]
            # Add the channel dimension.
            location.insert(channel_axis, slice(None))
            # Add the batch dim.
            location.insert(0, slice(None))
            location = tuple(location)

            for value in [minf, maxf]:
                perturbed_xfft = xfft.clone()
                value = check_real_vals(H_fft=H_fft,
                                        W_fft=W_fft,
                                        h=h,
                                        w=w,
                                        value=value)
                perturbed_xfft[location] = value
                perturbed = get_ifft_hw(xfft=perturbed_xfft,
                                        H_fft=H_fft,
                                        W_fft=W_fft,
                                        H=H,
                                        W=W)
                perturbed = perturbed.detach().cpu().numpy().squeeze()
                _, is_adv = a.predictions(perturbed)
                if is_adv:
                    return
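
A self-contained approximation of this single-frequency attack: it uses the full complex FFT instead of the onesided one, so the Hermitian-symmetry handling of check_real_vals is skipped, and the two extreme values are simply the smallest- and largest-magnitude coefficients of the spectrum. is_adversarial is again a hypothetical stand-in for a.predictions:

import numpy as np

def single_frequency_attack(image, is_adversarial, rng=None):
    # Overwrite one frequency at a time, in every channel, with one of the
    # two extreme spectrum values, and stop as soon as the model is fooled.
    rng = rng or np.random.default_rng()
    xfft = np.fft.fft2(image, axes=(-2, -1))
    h_fft, w_fft = xfft.shape[-2:]
    flat = xfft.reshape(-1)
    extremes = (flat[np.argmin(np.abs(flat))], flat[np.argmax(np.abs(flat))])
    for freq in rng.permutation(h_fft * w_fft):
        h, w = divmod(int(freq), w_fft)
        for value in extremes:
            perturbed_xfft = xfft.copy()
            perturbed_xfft[:, h, w] = value           # same value in every channel
            perturbed = np.real(np.fft.ifft2(perturbed_xfft, axes=(-2, -1)))
            if is_adversarial(perturbed):
                return perturbed
    return None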