    def _forward_pass(self, file_path):
        # Load the image from disk; only 3-channel (RGB) inputs are supported.
        lr_img = imageio.imread(file_path)
        if lr_img.shape[2] == 3:
            # Prepare the array for the network, run inference and convert the
            # output back into an image array.
            lr_img = process_array(lr_img)
            sr_img = self.model.model.predict(lr_img)
            sr_img = process_output(sr_img)
            return sr_img
        else:
            self.logger.error('{} is not an image with 3 channels.'.format(file_path))
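All of these examples lean on process_array and process_output from the surrounding library; their bodies are not shown here. The sketch below is only an assumption of what such helpers typically do (float normalization on the way in, clipping back to uint8 on the way out), with signatures matching the calls above.

import numpy as np

def process_array(image_array, expand=True):
    # Assumed behaviour: scale uint8 pixels into [0, 1] floats and, unless
    # expand=False, add a leading batch dimension for the network.
    image_batch = image_array.astype(np.float32) / 255.0
    if expand:
        image_batch = np.expand_dims(image_batch, axis=0)
    return image_batch

def process_output(output_array):
    # Assumed behaviour: clip the network output into [0, 1], rescale to
    # 0-255 and cast to uint8 so it can be written out as an image.
    return np.uint8(np.clip(output_array, 0.0, 1.0) * 255.0)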
Example #2
    def predict(self, input_image_array, by_patch_of_size=None, batch_size=10, padding_size=2):
        """
        Processes the image array into a suitable format
        and transforms the network output into a suitable image format.

        Args:
            input_image_array: input image array.
            by_patch_of_size: for large image inference. Splits the image into
                patches of the given size.
            padding_size: for large image inference. Padding between the patches.
                Increase the value if there are seam lines.
            batch_size: for large image inference. Number of patches processed at a time.
                Keep low and increase by_patch_of_size instead.
        Returns:
            sr_img: image output.
        """

        # Number of magnification levels produced by the network
        # (e.g. self.scale == 4 gives two levels: 2x and 4x).
        mag_num = int(np.log2(self.scale))

        if by_patch_of_size:
            lr_img = process_array(input_image_array, expand=False)
            patches, p_shape = split_image_into_overlapping_patches(
                lr_img, patch_size=by_patch_of_size, padding_size=padding_size
            )
            collect = {}

            # Run the patches through the network in batches, collecting the
            # output of each magnification level separately.
            for i in range(0, len(patches), batch_size):
                batch = self.model.predict(patches[i: i + batch_size])
                if i == 0:
                    for j in range(mag_num):
                        collect[j] = batch[j]
                else:
                    for j in range(mag_num):
                        collect[j] = np.append(collect[j], batch[j], axis=0)
            sr_img = {}
            # Stitch each magnification level back together; the padded shape and
            # the padding both scale with that level's factor (2 ** (i + 1)).
            for i in range(mag_num):
                padded_size_scaled = tuple(np.multiply(p_shape[0:2], int(np.exp2(i+1)))) + (3,)
                scaled_image_shape = tuple(np.multiply(input_image_array.shape[0:2], int(np.exp2(i+1)))) + (3,)
                sr_img[i] = stich_together(
                    collect[i],
                    padded_image_shape=padded_size_scaled,
                    target_shape=scaled_image_shape,
                    padding_size=padding_size * int(np.exp2(i+1))
                )

        else:
            lr_img = process_array(input_image_array)
            # One network output per magnification level; drop the batch
            # dimension of each so the post-processing below works for both branches.
            outputs = self.model.predict(lr_img)
            sr_img = {i: outputs[i][0] for i in range(mag_num)}

        for i in range(mag_num):
            sr_img[i] = process_output(sr_img[i])
        return sr_img
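A possible call site for this multi-scale variant, sketched under the assumption that predict() is exposed on a trained model object with self.scale == 4 (so two magnification levels, 0 for 2x and 1 for 4x); the helper name and file paths below are hypothetical.

import imageio

def upscale_file(predictor, input_path, output_paths):
    # `predictor` is assumed to be an instance of the class defining the
    # predict() above; output_paths has one entry per magnification level.
    lr_img = imageio.imread(input_path)
    outputs = predictor.predict(
        lr_img,
        by_patch_of_size=128,  # split large inputs into 128x128 patches
        batch_size=10,         # patches pushed through the network per call
        padding_size=2,        # patch overlap; raise it if seam lines appear
    )
    for level, path in enumerate(output_paths):
        imageio.imwrite(path, outputs[level])

# e.g. upscale_file(predictor, 'input.png', ['sr_2x.png', 'sr_4x.png'])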
Example #3
    def predict(self, input_image_array):
        """
        Processes the image array into a suitable format
        and transforms the network output into a suitable image format.

        Args:
            input_image_array: input image array.
        Returns:
            sr_img: image output.
        """
        lr_img = process_array(input_image_array)
        sr_img = self.model.predict(lr_img)
        sr_img = process_output(sr_img)
        return sr_img
Example #4
    def _predict(model,
                 input_image_array,
                 by_patch_of_size=None,
                 batch_size=10,
                 padding_size=2):
        """
        Processes the image array into a suitable format
        and transforms the network output into a suitable image format.

        Args:
            input_image_array: input image array.
            by_patch_of_size: for large image inference. Splits the image into
                patches of the given size.
            padding_size: for large image inference. Padding between the patches.
                Increase the value if there are seam lines.
            batch_size: for large image inference. Number of patches processed at a time.
                Keep low and increase by_patch_of_size instead.
        Returns:
            sr_img: image output.
        """

        if by_patch_of_size:
            # Stdlib logging (assumed imported) is used here, since this helper
            # takes the model as an argument and has no self.
            logging.info("Patches of size {}".format(by_patch_of_size))
            lr_img = process_array(input_image_array, expand=False)
            patches, p_shape = split_image_into_overlapping_patches(
                lr_img, patch_size=by_patch_of_size, padding_size=padding_size)
            logging.info("No. patches: {}".format(len(patches)))
            # Predict the patches in batches and accumulate the outputs; the
            # stitching happens once, after the loop.
            for i in range(0, len(patches), batch_size):
                logging.info("Processing patch: {}/{}".format(i, len(patches)))
                batch = model.predict(patches[i:i + batch_size])
                if i == 0:
                    collect = batch
                else:
                    collect = np.append(collect, batch, axis=0)

            # Stitch the accumulated patches back into one image; the padded grid
            # and the padding both scale with the model's scale factor.
            scale = model.scale
            padded_size_scaled = tuple(np.multiply(p_shape[0:2],
                                                   scale)) + (3, )
            scaled_image_shape = tuple(
                np.multiply(input_image_array.shape[0:2], scale)) + (3, )
            sr_img = stich_together(
                collect,
                padded_image_shape=padded_size_scaled,
                target_shape=scaled_image_shape,
                padding_size=padding_size * scale,
            )

        else:
            lr_img = process_array(input_image_array)
            sr_img = model.predict(lr_img)[0]

        sr_img = process_output(sr_img)
        return sr_img
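To make the shape arithmetic in the patch branch concrete, here is a small sanity check with made-up numbers (the input size, padded size and scale below are illustrative assumptions, not values from the snippet): both the padded patch grid and the original input are multiplied by the model's scale factor, and the padding is scaled by the same factor so the overlaps are trimmed correctly after upscaling.

import numpy as np

# Illustrative, assumed values: a 100x150 RGB input padded by the splitter to
# 104x154 (padding_size=2 on each side), upscaled by a 2x model.
scale = 2
padding_size = 2
input_shape = (100, 150, 3)
p_shape = (104, 154, 3)

# Same arithmetic as the stich_together() call above.
padded_size_scaled = tuple(int(s) for s in np.multiply(p_shape[0:2], scale)) + (3,)
scaled_image_shape = tuple(int(s) for s in np.multiply(input_shape[0:2], scale)) + (3,)

print(padded_size_scaled)     # (208, 308, 3) -> upscaled padded patch grid
print(scaled_image_shape)     # (200, 300, 3) -> final super-resolved image
print(padding_size * scale)   # 4 -> overlap to trim after upscaling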