def get_image_and_mask(dataset_root, image_reference, mask_path=None):
    """
    Given a reference to an image (it's name or path), return its path
    :param image_reference: name of an imge belonging to one of the datasets, or a path orf an external image
    :return: path of the image
    """

    # if image_reference and mask_path are both provided, they must be direct paths
    if mask_path is not None and Path(mask_path).exists():

        if image_reference is None or not Path(image_reference).exists():
            raise Exception(
                "Provided a valid mask path but an invalid image path")

        return Picture(image_reference), Picture(
            mask_2_binary(Picture(mask_path)), mask_path)

    # select the first dataset having an image with the corresponding name
    dataset = find_dataset_of_image(dataset_root, image_reference)
    if not dataset:
        raise ImageNotFoundError(image_reference)

    dataset = dataset(dataset_root)

    image_path = dataset.get_image(image_reference)
    mask, mask_path = dataset.get_mask_of_image(image_reference)

    # load the image as a 3 dimensional numpy array
    image = Picture(image_path)

    # load mask as a picture
    mask = Picture(mask, mask_path)

    return image, mask
    def attacked_image(self):
        """
        Compute the attacked image using the original image and the cumulative noise to reduce
        rounding artifacts caused by translating the noise from one to 3 channels and vice versa multiple times.
        This operation is done only once here, so some rounding error is still present.
        Use attacked_image_monochannel to get the one-channel version of the image without rounding errors.
        :return: attacked image as a Picture
        """
        return Picture((self.target_image - Picture(self.noise)).clip(0, 255))
Code example #3
    def read_arguments(dataset_root) -> dict:
        """
        Read arguments from the command line or ask for them if they are not present, validating them and raising
        an exception if they are invalid. This function is called by the launcher script.
        :param dataset_root: root folder containing the datasets
        :return: kwargs to pass to the attack
        """
        kwarg = BaseNoiseprintAttack.read_arguments(dataset_root)

        parser = argparse.ArgumentParser()
        parser.add_argument(
            '--source_image',
            required=True,
            help='Name of the image to use as source for the noiseprint')
        args = parser.parse_known_args()[0]

        image_path = args.source_image

        try:
            image, mask = get_image_and_mask(dataset_root, image_path)
        except ImageNotFoundError:
            # the image is not present in the datasets; check whether a direct path has been given
            image_path = Path(image_path)

            if image_path.exists():
                image = Picture(str(image_path))
                mask = np.where(np.all(image == (255, 255, 255), axis=-1), 0,
                                1)
            else:
                raise

        kwarg["source_image"] = image
        kwarg["source_image_mask"] = mask
        return kwarg
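
# A minimal standalone sketch (plain numpy, no Picture/dataset classes) of the
# white-background fallback used a few lines above: pixels that are exactly
# (255, 255, 255) are treated as authentic (0) and everything else as forged (1).
# Array values are illustrative only.
import numpy as np

rgb = np.zeros((4, 4, 3), dtype=np.uint8)
rgb[1:3, 1:3] = (255, 255, 255)  # a white 2x2 region

mask = np.where(np.all(rgb == (255, 255, 255), axis=-1), 0, 1)
print(mask)  # 0 where the image is pure white, 1 elsewhere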
Code example #4
    def attacked_image_monochannel(self):
        """
        Compute the attacked image using the original image and the cumulative noise to reduce
        rounding artifacts caused by translating the noise from one to 3 channels and vice versa multiple times.
        :return: attacked one-channel image as a Picture
        """
        return Picture(
            (self.target_image.one_channel() - self.noise).clip(0, 255))
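
# Hedged sketch of the rounding issue that attacked_image / attacked_image_monochannel
# try to limit: casting a float perturbation to integers at every step accumulates
# error, while applying the cumulative float noise once keeps it to a single rounding.
# Pure numpy, illustrative values only.
import numpy as np

rng = np.random.default_rng(0)
image = rng.integers(0, 256, size=(8, 8)).astype(float)
steps = [rng.normal(0, 0.4, size=image.shape) for _ in range(50)]

# apply each noise step and round to integers every time (error accumulates)
rounded = image.copy()
for s in steps:
    rounded = np.clip(np.rint(rounded - s), 0, 255)

# apply the cumulative noise once (a single rounding at the end)
cumulative = np.clip(np.rint(image - np.sum(steps, axis=0)), 0, 255)

print(np.abs(rounded - cumulative).max())  # typically nonzero: the two strategies diverge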
Code example #5
    def attack(self, image_to_attack: Picture, *args, **kwargs):
        """
        Perform a step of the attack, executing the following steps:
            (1) -> prepare the image to be used by noiseprint
            (2) -> compute the gradient
            (3) -> normalize the gradient
            (4) -> apply the gradient to the image with the desired strength
            (5) -> return the image
        :return: attacked image
        """

        # compute the attacked image using the original image and the cumulative noise to reduce
        # rounding artifacts caused by translating the noise from one to 3 channels and vice versa multiple times
        image_one_channel = Picture(
            (image_to_attack.one_channel() - self.noise).clip(0, 255))

        return super(BaseNoiseprintAttack,
                     self).attack(image_one_channel, *args, **kwargs)
    def _on_after_attack(self, attacked_image: Picture):
        """
        Instructions executed after performing the attack
        :return:
        """

        psnr = PSNR(self.target_image, np.array(attacked_image, int))

        path = os.path.join(self.debug_folder, "attacked_image.png")
        attacked_image.save(path)

        attacked_image = Picture(path=path)

        self.detector.prediction_pipeline(attacked_image,
                                          os.path.join(self.debug_folder, "final result"),
                                          original_picture=self.target_image,
                                          omask=self.target_image_mask,
                                          note="final result PSNR:{:.2f}".format(psnr))

        end_time = datetime.now()
        timedelta = end_time - self.start_time

        self.write_to_logs("Attack pipeline terminated in {}".format(timedelta))
Code example #7
    def _compute_target_representation(
            self, target_representation_source_image: Picture,
            target_representation_source_image_mask: Picture):
        """
        This type of attack tries to "paste" the noiseprint generated for an authentic image on top of a
        forged one. The target representation is simply the noiseprint of the authentic image.
        """
        image = prepare_image_noiseprint(target_representation_source_image)

        # generate an image wise noiseprint representation on the entire image
        original_noiseprint = Picture(self._engine.predict(image))
        return original_noiseprint
    def attack(self, image_to_attack: Picture, *args, **kwargs):
        qf = self.initial_quality_level - int(
            (self.initial_quality_level - self.final_quality_level) *
            self.progress_proportion)
        self.write_to_logs("quality level: {}".format(qf))

        path = os.path.join(self.debug_folder, 'compressed_image.jpg')
        img = Image.fromarray(np.array(self.target_image, np.uint8))
        img.save(path, quality=qf)

        if isinstance(self.detector, NoiseprintVisualizer):
            self.detector.load_quality(max(51, min(101, qf)))

        return Picture(path=path)
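
# Hedged sketch of the quality schedule used in the attack above: the JPEG quality
# factor is linearly interpolated from an initial to a final level as the attack
# progresses. The values below are illustrative, not the project defaults.
initial_quality_level, final_quality_level = 95, 55
total_steps = 5

for step in range(total_steps + 1):
    progress_proportion = step / total_steps
    qf = initial_quality_level - int(
        (initial_quality_level - final_quality_level) * progress_proportion)
    print(step, qf)  # 95, 87, 79, 71, 63, 55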
    def predict(self, image: Picture, path=None):

        image_one_channel = image.one_channel().to_float()

        heatmap, mask = self._engine.detect(image_one_channel)

        plt.imshow(mask)

        if path:
            plt.savefig(path)
            plt.close()
        else:
            return plt
    def attack(self, image_to_attack: Picture, *args, **kwargs):
        """
        Add noise to the image and return the result
        :param image_to_attack: image on which to perform the attack
        :param args: none
        :param kwargs: none
        :return: image + gaussian noise
        """
        mean = self.initial_mean + (
            self.final_mean - self.initial_mean) * self.progress_proportion
        standard_deviation = self.initial_standard_deviation + (
            self.final_standard_deviation -
            self.initial_standard_deviation) * self.progress_proportion

        self.write_to_logs("Mean:{:.2f}, Standard deviation:{:.2f}".format(
            mean, standard_deviation))

        noise = np.random.normal(mean,
                                 standard_deviation,
                                 size=image_to_attack.shape)
        return Picture((image_to_attack + noise).clip(0, 255))
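
# Hedged usage sketch of the additive-noise step above with plain numpy arrays
# standing in for Picture objects; the mean and standard deviation are illustrative.
import numpy as np

rng = np.random.default_rng(0)
image_to_attack = rng.integers(0, 256, size=(16, 16, 3)).astype(float)

mean, standard_deviation = 0.0, 5.0
noise = rng.normal(mean, standard_deviation, size=image_to_attack.shape)
attacked = (image_to_attack + noise).clip(0, 255)

print(attacked.min() >= 0, attacked.max() <= 255)  # True True: clip keeps the valid range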
Code example #11
    def _compute_target_representation(
            self, target_representation_source_image: Picture,
            target_representation_source_image_mask: Picture):
        """
        Compute the target representation. Since this is a mimicking attack, the target representation is just
        a list of target feature vectors towards which we have to drive the individual patches of our image.
        :param target_representation_source_image: image on which to compute the target feature vectors
        :param target_representation_source_image_mask: mask of the image
        :return: list of 4096-dimensional feature vectors
        """
        target_representation = []

        patches = Picture(
            target_representation_source_image).divide_in_patches(
                (128, 128), force_shape=False, stride=self.stride)

        with self._sess.as_default():
            for batch_idx in tqdm(
                    range(0, (len(patches) + self.batch_size - 1) //
                          self.batch_size, 1)):
                starting_idx = self.batch_size * batch_idx

                batch_patches = patches[
                    starting_idx:min(starting_idx +
                                     self.batch_size, len(patches))]

                for i, patch in enumerate(batch_patches):
                    batch_patches[i] = prepare_image(patch)

                patch = np.array(batch_patches)
                tensor_patch = tf.convert_to_tensor(patch, dtype=tf.float32)

                features = self._engine.extract_features_resnet50(
                    tensor_patch, "test", reuse=True).eval()

                target_representation = target_representation + [
                    np.array(t) for t in features.tolist()
                ]

            return target_representation
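
# Hedged sketch of the batching pattern used above (ceil division over the number of
# patches, then slicing out each batch); plain Python integers stand in for the patch
# objects.
patches = list(range(10))  # pretend these are 10 image patches
batch_size = 4

n_batches = (len(patches) + batch_size - 1) // batch_size
for batch_idx in range(n_batches):
    starting_idx = batch_size * batch_idx
    batch_patches = patches[starting_idx:min(starting_idx + batch_size, len(patches))]
    print(batch_idx, batch_patches)  # [0, 1, 2, 3], [4, 5, 6, 7], [8, 9]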
    def attack(self, image_to_attack: Picture, *args, **kwargs):
        """
        Perform a step of the attack, executing the following steps:
            (1) -> compute the gradient
            (2) -> normalize the gradient
            (3) -> apply the gradient to the image with the desired strength
            (4) -> return the image
        :return: attacked image
        """

        # apply Nesterov momentum
        image_to_attack = np.array(image_to_attack,
                                   dtype=float) - self.moving_avg_gradient

        # compute the gradient
        image_gradient, loss = self._get_gradient_of_image(
            image_to_attack, self.target_representation, Picture(self.noise))

        # save loss value to plot it
        self.loss_steps.append(loss)

        # compute the decaying alpha
        alpha = self.alpha / (1 + 0.05 * self.step_counter)

        # normalize the gradient
        image_gradient = normalize_gradient(image_gradient, 0) * alpha

        # update the moving average
        self.moving_avg_gradient = self.moving_avg_gradient * self.momentum_coeficient + (
            1 - self.momentum_coeficient) * image_gradient

        # add this iteration contribution to the cumulative noise
        self.noise += self.moving_avg_gradient / (
            1 - self.momentum_coeficient**(1 + self.step_counter))

        return self.attacked_image
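
# Hedged toy sketch (a scalar instead of an image) of the update rule used above:
# a Nesterov look-ahead, an exponentially decaying step size, an exponential moving
# average of the gradient, and the 1 / (1 - m ** (t + 1)) bias correction that keeps
# the early averages from being underestimated. All constants and the names x_start,
# x_target and alpha_0 are illustrative, not the project's values.
import numpy as np

momentum_coefficient = 0.9
alpha_0 = 1.0
moving_avg_gradient = 0.0
noise = 0.0
x_start, x_target = 10.0, 3.0  # toy "image" value pushed towards a target

for step_counter in range(20):
    lookahead = (x_start - noise) - moving_avg_gradient  # Nesterov look-ahead
    gradient = 2 * (lookahead - x_target)                # gradient of (x - target)^2
    alpha = alpha_0 / (1 + 0.05 * step_counter)          # decaying step size
    gradient = np.sign(gradient) * alpha                 # crude stand-in for normalize_gradient
    moving_avg_gradient = (momentum_coefficient * moving_avg_gradient +
                           (1 - momentum_coefficient) * gradient)
    noise += moving_avg_gradient / (1 - momentum_coefficient ** (1 + step_counter))

print(x_start - noise)  # ends up close to x_target (around 3)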
    def _get_gradient_of_image(self,
                               image: Picture,
                               target: Picture,
                               old_perturbation: Picture = None):
        """
        Compute the gradient on the entire image by executing the following steps:
            1) Divide the entire image into patches
            2) Compute the gradient of each patch with respect to the patch-target representation
            3) Recombine all the patch-gradients to obtain an image-wide gradient
            4) Apply the image-gradient to the image
            5) Then convert the image to the integer range [0, 255] and back to the range [0, 1]
        :return: gradient, loss
        """

        # variable to store the cumulative loss across all patches
        cumulative_loss = 0

        # image wide gradient
        image_gradient = np.zeros((image.shape[0:2]))

        # divide the image into patches
        img_patches = image.divide_in_patches(self.patch_size,
                                              self.padding_size,
                                              zero_padding=True)

        # analyze the image patch by patch
        for patch in tqdm(img_patches):

            # check if we are on a border and therefore have to "cut" the target representation
            target_patch_representation = target

            # if we are on a border, cut away the "overflowing target representation"
            if target_patch_representation.shape != patch.shape:
                target_patch_representation = target_patch_representation[:patch.shape[0], :patch.shape[1]]

            # compute the gradient of the input w.r.t. the target representation
            patch_gradient, patch_loss = self._get_gradient_of_patch(
                patch, target_patch_representation)

            # check that the retrieved gradient has the correct shape
            assert (patch_gradient.shape == patch.shape)

            # add this patch's loss contribution
            cumulative_loss += patch_loss

            # remove padding from the gradient
            patch_gradient = patch.no_paddings(patch_gradient)

            # Add the contribution of this patch to the image wide gradient removing the padding
            image_gradient = patch.add_to_image(image_gradient, patch_gradient)

        return image_gradient, cumulative_loss
    def compute_difference(self, original_image, image, enhance_factor=100):
        return Picture(1 - np.abs(original_image - image) * enhance_factor).clip(0, 1).one_channel()
    def prediction_pipeline(self,
                            image: Picture,
                            path=None,
                            original_picture=None,
                            note="",
                            omask=None,
                            debug=False,
                            adversarial_noise=None):

        if image.max() > 1:
            image = image.to_float()

        image_one_channel = image
        if len(image_one_channel.shape
               ) > 2 and image_one_channel.shape[2] == 3:
            image_one_channel = image.one_channel()

        if original_picture is not None:
            original_picture = prepare_image_noiseprint(original_picture)

        n_cols = 4

        if original_picture is not None:
            n_cols += 1
        else:
            debug = False

        if debug:
            fig, axs = plt.subplots(2, n_cols, figsize=(n_cols * 5, 5))

            axs0, axs1 = axs[0], axs[1]
        else:
            fig, axs0 = plt.subplots(1, n_cols, figsize=(n_cols * 5, 5))

        noiseprint = self.get_noiseprint(image_one_channel)

        heatmap = self._engine.detect(image_one_channel)

        # this is the first computation; compute the best f1 threshold initially

        axs0[0].imshow(image)
        axs0[0].set_title('Image')

        axs0[1].imshow(normalize_noiseprint(noiseprint),
                       clim=[0, 1],
                       cmap='gray')
        axs0[1].set_title('Noiseprint')

        axs0[2].imshow(heatmap,
                       clim=[np.nanmin(heatmap),
                             np.nanmax(heatmap)],
                       cmap='jet')
        axs0[2].set_title('Heatmap')

        if omask is not None:
            threshold = find_best_theshold(heatmap, omask)
            mask = np.array(heatmap > threshold, int).clip(0, 1)

            axs0[3].imshow(mask, clim=[0, 1], cmap='gray')
            axs0[3].set_title('Mask')

        # remove the x and y ticks
        for ax in axs0:
            ax.set_xticks([])
            ax.set_yticks([])

        if original_picture is not None:
            noise = self.compute_difference(original_picture.one_channel(),
                                            image_one_channel)
            axs0[4].imshow(noise, clim=[0, 1], cmap='gray')
            axs0[4].set_title('Difference')

        if debug:
            original_noiseprint = self._engine.predict(
                original_picture.one_channel())
            axs1[0].imshow(original_picture)
            axs1[0].set_title('Original Image')

            axs1[1].imshow(normalize_noiseprint(original_noiseprint),
                           clim=[0, 1],
                           cmap='gray')
            axs1[1].set_title('Original Noiseprint')

            axs1[2].imshow(omask, clim=[0, 1], cmap='gray')
            axs1[2].set_title('Original Mask')

            noise = self.compute_difference(noiseprint,
                                            original_noiseprint,
                                            enhance_factor=100)
            axs1[3].imshow(noise, cmap='gray')
            axs1[3].set_title('Noiseprint differences')

            axs1[4].imshow(np.where(np.abs(adversarial_noise) > 0, 1, 0),
                           clim=[0, 1],
                           cmap='gray')
            axs1[4].set_title('Gradient')

            # remove the x and y ticks
            for ax in axs1:
                ax.set_xticks([])
                ax.set_yticks([])

        if note:
            fig.text(0.9,
                     0.2,
                     note,
                     size=14,
                     horizontalalignment='right',
                     verticalalignment='top')

        if path:
            plt.savefig(path, bbox_inches='tight')
            plt.close()
        else:
            return plt
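
# find_best_theshold is defined elsewhere in the project; this is a hedged,
# self-contained sketch of the general idea: sweep candidate thresholds over the
# heatmap and keep the one maximising F1 against the known mask. sklearn is assumed
# to be available and the arrays are illustrative only.
import numpy as np
from sklearn.metrics import f1_score

rng = np.random.default_rng(0)
omask = np.zeros((32, 32), dtype=int)
omask[8:24, 8:24] = 1
heatmap = omask + rng.normal(0, 0.3, size=omask.shape)

best_threshold, best_f1 = None, -1.0
for threshold in np.linspace(heatmap.min(), heatmap.max(), 50):
    predicted = (heatmap > threshold).astype(int)
    score = f1_score(omask.flatten(), predicted.flatten())
    if score > best_f1:
        best_threshold, best_f1 = threshold, score

print(best_threshold, best_f1)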
Code example #16
    def _get_gradient_of_image(self,
                               image: Picture,
                               target: list,
                               old_perturbation: Picture = None):
        """
        Function to compute the gradient of the exif model on the entire image
        :param image: image for which we want to compute the gradient
        :param target: target representation we are trying to approximate, in this case it is a list of feature vectors
        :param old_perturbation: old perturbation that has been already applied to the image
        :return: numpy array containing the gradient, loss
        """

        # create object to store the combined gradient of all the patches
        gradient_map = np.zeros(image.shape)

        # create object to store how many patches have contributed to the gradient of a single pixel
        count_map = np.zeros(image.shape)

        # divide the image into patches of (128,128) pixels
        patches = Picture(image).divide_in_patches((128, 128),
                                                   force_shape=False,
                                                   stride=self.stride)

        # verify that the number of patches and corresponding target feature vectors is the same
        assert (len(patches) == len(target))

        # object to store the cumulative loss between all the patches of all the batches
        loss = 0

        # iterate through the batches to process
        for batch_idx in tqdm(
                range(0,
                      (len(patches) + self.batch_size - 1) // self.batch_size,
                      1)):

            # compute the index of the first element in the batch
            starting_idx = self.batch_size * batch_idx

            # get a list of all the elements in the batch
            batch_patches = patches[
                starting_idx:min(starting_idx + self.batch_size, len(patches))]

            # prepare all the patches to be fed into the model
            batch_patches_ready = [
                prepare_image(patch) for patch in batch_patches
            ]

            # get corresponding list of target patches
            target_batch_patches = target[
                starting_idx:min(starting_idx + self.batch_size, len(patches))]

            # prepare patches and target vectors to be fed into the model
            x_tensor = np.array(batch_patches_ready, dtype=np.float32)
            y_tensor = np.array(target_batch_patches, dtype=np.float32)

            batch_gradients, batch_loss = self._sess.run(
                [self.gradient_op, self.loss_op],
                feed_dict={
                    self.x: x_tensor,
                    self.y: y_tensor
                })

            # add the batch loss to the cumulative loss
            loss += batch_loss

            # construct an image-wide gradient map by combining the gradients computed on single patches
            for i, patch_gradient in enumerate(list(batch_gradients[0])):
                gradient_map = batch_patches[i].add_to_image(
                    gradient_map, patch_gradient)
                count_map = batch_patches[i].add_to_image(
                    count_map, np.ones(patch_gradient.shape))

        gradient_map[np.isnan(gradient_map)] = 0

        # average the patches' contributions for each pixel in the gradient
        gradient_map = gradient_map / count_map

        return gradient_map, loss
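
# Hedged sketch of the count-map averaging performed above: when patches overlap,
# each pixel of the image-wide gradient becomes the mean of every patch gradient
# that covered it. Plain numpy arrays stand in for the Picture patch objects.
import numpy as np

image_shape = (6, 6)
gradient_map = np.zeros(image_shape)
count_map = np.zeros(image_shape)

# two overlapping 4x4 "patch gradients" at different offsets
patches = [((0, 0), np.full((4, 4), 1.0)), ((2, 2), np.full((4, 4), 3.0))]
for (x, y), patch_gradient in patches:
    gradient_map[x:x + 4, y:y + 4] += patch_gradient
    count_map[x:x + 4, y:y + 4] += 1

count_map[count_map == 0] = 1    # avoid dividing by zero where no patch contributed
print(gradient_map / count_map)  # the overlap region averages to 2.0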
    def _get_gradient_of_image(self,
                               image: Picture,
                               target: Picture,
                               old_perturbation: Picture = None):
        """
        Compute the gradient on the entire image by executing the following steps:

            1) Divide the entire image into patches
            2) Compute the gradient of each patch with respect to the patch-target representation
            3) Recombine all the patch-gradients to obtain an image-wide gradient
        :return: image_gradient, cumulative_loss
        """

        assert (len(image.shape) == 2)

        pad_size = ((self.padding_size[0], self.padding_size[2]),
                    (self.padding_size[3], self.padding_size[1]))

        image = image.pad(pad_size, mode="reflect")
        target = target.pad(pad_size, mode="reflect")

        if old_perturbation is not None:
            old_perturbation = old_perturbation.pad(pad_size, "reflect")

        # variable to store the cumulative loss across all patches
        cumulative_loss = 0

        # image wide gradient
        image_gradient = np.zeros(image.shape)

        if image.shape[0] * image.shape[1] < NoiseprintEngine.large_limit:
            # the image can be processed as a single patch
            image_gradient, cumulative_loss = self._get_gradient_of_patch(
                image, target)

        else:
            # the image is too big, we have to divide it in patches to process separately
            # iterate over x and y, strides = self.slide, window size = self.slide+2*self.overlap
            for x in range(0, image.shape[0], self._engine.slide):
                x_start = x - self._engine.overlap
                x_end = x + self._engine.slide + self._engine.overlap
                for y in range(0, image.shape[1], self._engine.slide):
                    y_start = y - self._engine.overlap
                    y_end = y + self._engine.slide + self._engine.overlap

                    # get the patch we are currently working on
                    patch = image[max(x_start, 0):min(x_end, image.shape[0]),
                                  max(y_start, 0):min(y_end, image.shape[1])]

                    # get the desired target representation for this patch
                    target_patch = target[
                        max(x_start, 0):min(x_end, image.shape[0]),
                        max(y_start, 0):min(y_end, image.shape[1])]

                    perturbation_patch = None
                    if old_perturbation is not None:
                        perturbation_patch = old_perturbation[
                            max(x_start, 0):min(x_end, image.shape[0]),
                            max(y_start, 0):min(y_end, image.shape[1])]

                    patch_gradient, patch_loss = self._get_gradient_of_patch(
                        patch, target_patch)

                    # discard the initial overlap if not the first row or column
                    if x > 0:
                        patch_gradient = patch_gradient[
                            self._engine.overlap:, :]
                    if y > 0:
                        patch_gradient = patch_gradient[:,
                                                        self._engine.overlap:]

                    # add this patch loss to the total loss
                    cumulative_loss += patch_loss

                    # add this patch's gradient to the image gradient
                    # discard data beyond image size
                    patch_gradient = patch_gradient[:min(
                        self._engine.slide, patch.shape[0]
                    ), :min(self._engine.slide, patch.shape[1])]

                    # copy data to output buffer
                    image_gradient[
                        x:min(x + self._engine.slide, image_gradient.shape[0]),
                        y:min(y + self._engine.slide, image_gradient.shape[1]
                              )] = patch_gradient

        if self.padding_size[0] > 0:
            image_gradient = image_gradient[self.padding_size[0]:, :]

        if self.padding_size[1] > 0:
            image_gradient = image_gradient[:, self.padding_size[1]:]

        if self.padding_size[2] > 0:
            image_gradient = image_gradient[:-self.padding_size[2], :]

        if self.padding_size[3] > 0:
            image_gradient = image_gradient[:, :-self.padding_size[3]]

        return image_gradient, cumulative_loss
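
# Hedged toy sketch of the sliding-window logic above: windows of size
# slide + 2 * overlap are processed, the leading overlap is discarded for every
# window except the first row/column, and the result is copied back into a
# slide-sized tile of the output. Using an identity "gradient" makes the indexing
# easy to sanity-check: the reassembled output must equal the input.
import numpy as np

slide, overlap = 4, 2
image = np.arange(100, dtype=float).reshape(10, 10)
out = np.zeros_like(image)

for x in range(0, image.shape[0], slide):
    x_start, x_end = x - overlap, x + slide + overlap
    for y in range(0, image.shape[1], slide):
        y_start, y_end = y - overlap, y + slide + overlap
        patch = image[max(x_start, 0):min(x_end, image.shape[0]),
                      max(y_start, 0):min(y_end, image.shape[1])]
        result = patch  # identity stand-in for _get_gradient_of_patch
        if x > 0:
            result = result[overlap:, :]
        if y > 0:
            result = result[:, overlap:]
        result = result[:min(slide, patch.shape[0]), :min(slide, patch.shape[1])]
        out[x:min(x + slide, out.shape[0]),
            y:min(y + slide, out.shape[1])] = result

print(np.array_equal(out, image))  # True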
    def _compute_target_representation(
            self, target_representation_source_image: Picture,
            target_representation_source_image_mask: Picture):
        """
        Generate the target representation executing the following steps:

            1) Generate an image wise noiseprint representation on the entire image
            2) Divide this noiseprint map into patches
            3) Average these patches
            4) Create an image wide target representation by tiling these patches together

        :return: the target representation in the shape of a numpy array
        """

        # convert the image into the format required by noiseprint
        image = prepare_image_noiseprint(target_representation_source_image)

        # generate an image wise noiseprint representation on the entire image
        original_noiseprint = Picture(self._engine.predict(image))

        # cut away section along borders
        original_noiseprint[0:self.patch_size[0], :] = 0
        original_noiseprint[-self.patch_size[0]:, :] = 0
        original_noiseprint[:, 0:self.patch_size[1]] = 0
        original_noiseprint[:, -self.patch_size[1]:] = 0

        # extract the authentic patches from the image
        authentic_patches = original_noiseprint.get_authentic_patches(
            target_representation_source_image_mask,
            self.patch_size,
            force_shape=True,
            zero_padding=False)

        # create target patch object
        target_patch = np.zeros(self.patch_size)

        patches_map = np.zeros(image.shape)

        for patch in tqdm(authentic_patches):
            assert (patch.clean_shape == target_patch.shape)

            target_patch += patch / len(authentic_patches)

            patches_map = patch.no_paddings().add_to_image(patches_map)

        # compute the tiling factors along the X and Y axis
        repeat_factors = (ceil(image.shape[0] / target_patch.shape[0]),
                          ceil(image.shape[1] / target_patch.shape[1]))

        # tile the target representations together
        image_target_representation = np.tile(target_patch, repeat_factors)

        # cut away "overflowing" margins
        image_target_representation = image_target_representation[:image.shape[0], :image.shape[1]]

        # save tile visualization
        visuallize_matrix_values(
            target_patch,
            os.path.join(self.debug_folder, "image-target-raw.png"))

        patches_map = Picture(normalize_noiseprint(patches_map))
        patches_map.save(os.path.join(self.debug_folder, "patches-map.png"))

        return Picture(image_target_representation)
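
# Hedged sketch of the tile-and-crop step above: the averaged target patch is
# repeated enough times to cover the whole image and the overflow is trimmed away.
# Plain numpy, illustrative sizes.
import numpy as np
from math import ceil

image_shape = (10, 7)
target_patch = np.arange(9, dtype=float).reshape(3, 3)

repeat_factors = (ceil(image_shape[0] / target_patch.shape[0]),
                  ceil(image_shape[1] / target_patch.shape[1]))
tiled = np.tile(target_patch, repeat_factors)
image_target_representation = tiled[:image_shape[0], :image_shape[1]]

print(image_target_representation.shape)  # (10, 7)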
    cumulative_f1 = 0
    cumulative_mcc = 0

    for image_name in tqdm(images):

        dataset = find_dataset_of_image(DATASETS_ROOT, image_name)
        if not dataset:
            raise InvalidArgumentException(
                "Impossible to find the dataset this image belongs to")

        dataset = dataset(DATASETS_ROOT)

        image_path = dataset.get_image(image_name)
        mask, _ = dataset.get_mask_of_image(image_name)

        image = Picture(image_path)

        image = (image + np.random.normal(
            0, standard_deviation * 0.5, size=image.shape)).clip(0, 255)

        heatmap, predicted_mask = engine.detect(image.to_float())

        predicted_mask = np.rint(predicted_mask)

        cv2.imwrite(
            os.path.join(root_folder_level,
                         '{}.png'.format(basename(image_name))),
            predicted_mask * 255)

        cumulative_mcc += matthews_corrcoef(mask.flatten(),
                                            predicted_mask.flatten())
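
# Hedged sketch of the per-image score accumulated above: sklearn's matthews_corrcoef
# compares the flattened ground-truth mask with the flattened predicted mask.
# The small arrays below are illustrative only.
import numpy as np
from sklearn.metrics import matthews_corrcoef

mask = np.array([[0, 0, 1, 1],
                 [0, 0, 1, 1]])
predicted_mask = np.array([[0, 0, 1, 0],
                           [0, 1, 1, 1]])

print(matthews_corrcoef(mask.flatten(), predicted_mask.flatten()))  # 0.5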
    def _compute_target_representation(
            self, target_representation_source_image: Picture,
            target_representation_source_image_mask: Picture):
        """
        Generate the target representation executing the following steps:

            1) Divide the image into patches
            2) Select only the authentic patches
            3) For each patch compute its noiseprint
            4) Average all the noiseprint maps

        :return: the target representation in the shape of a numpy array
        """

        # format the target image in the standard that the noiseprint requires
        target_representation_source_image = prepare_image_noiseprint(
            target_representation_source_image)

        # split the image into patches
        authentic_patches = target_representation_source_image.get_authentic_patches(
            target_representation_source_image_mask,
            self.patch_size,
            self.padding_size,
            force_shape=True,
            zero_padding=True)

        complete_patch_size = (self.patch_size[0] + self.padding_size[1] +
                               self.padding_size[3], self.patch_size[1] +
                               self.padding_size[0] + self.padding_size[2])

        # create target patch object
        target_patch = np.zeros(complete_patch_size)

        # create a map in which to store the used patches for visualization
        patches_map = np.zeros(target_representation_source_image.shape)

        # generate authentic target representation
        self.write_to_logs("Generating target representation...")

        # for each authentic patch
        for original_patch in tqdm(authentic_patches):
            assert (original_patch.shape == target_patch.shape)

            # compute its noiseprint
            noiseprint_patch = np.squeeze(
                self._engine.model(original_patch[np.newaxis, :, :,
                                                  np.newaxis]))

            # add the noiseprint to the mean target patch object
            target_patch += noiseprint_patch / len(authentic_patches)

            # add the result to the map of patches
            patches_map = original_patch.no_paddings().add_to_image(
                patches_map)

        self.write_to_logs("Target representation generated")

        t_no_padding = authentic_patches[0].no_paddings(target_patch)

        # save a visualization of the target representation
        normalized_noiseprint = normalize_noiseprint_no_margins(t_no_padding)

        plt.imsave(fname=os.path.join(self.debug_folder, "image-target.png"),
                   arr=normalized_noiseprint,
                   cmap='gray',
                   format='png')

        visuallize_matrix_values(
            t_no_padding,
            os.path.join(self.debug_folder, "image-target-raw.png"))

        patches_map = Picture(patches_map)
        patches_map.save(os.path.join(self.debug_folder, "patches-map.png"))

        # save the target representation in an 8x8 grid for visualization purposes
        if t_no_padding.shape[0] % 8 == 0 and t_no_padding.shape[1] % 8 == 0:

            patch_8 = np.zeros((8, 8))
            n_patches8 = (t_no_padding.shape[0] //
                          8) * (t_no_padding.shape[1] // 8)
            for x in range(0, t_no_padding.shape[0], 8):
                for y in range(0, t_no_padding.shape[1], 8):
                    patch_8 += t_no_padding[x:x + 8, y:y + 8] / n_patches8

            visuallize_matrix_values(
                patch_8,
                os.path.join(self.debug_folder, "clean_target_patch.png"))

        return target_patch
Code example #21
    "normal-42.png",
    "splicing-93.png",
    "splicing-86.png",
]

source_image_path = "./Data/custom/splicing-70-artificial.png"

images_to_attack_bb = ["splicing-28.png", "splicing-29.png", "splicing-30.png"]

if __name__ == "__main__":

    debug_folder = os.path.join(create_debug_folder(DEBUG_ROOT))
    mimicking_debug_folder = os.path.join(debug_folder, "mimicking attack")
    os.makedirs(mimicking_debug_folder)

    source_image = Picture(str(source_image_path))
    source_image_mask = Picture(
        np.where(np.all(source_image == (255, 255, 255), axis=-1), 0, 1))

    noise = np.zeros(source_image_mask.shape)

    for image_path in images_for_geneating_noise:

        image, mask = get_image_and_mask(DATASETS_ROOT, image_path)

        attack = NoiseprintMimickingAttack(image,
                                           mask,
                                           source_image,
                                           source_image_mask,
                                           50,
                                           5,