Example #1
    def complete_image(self):
        """
        Completes the selected region of the image and updates the visible image to reflect the changes
        """
        image_processor = ImageProcessor()

        # Convert the visible coordinates to actual pixel coordinates
        selection_coordinates = self.canvas.coords(self.selection)
        patch_start_x = int(selection_coordinates[0] // self.ratio)
        patch_start_y = int(selection_coordinates[1] // self.ratio)

        # Get the image components required to generate the patch and insert it back into the original
        g, masked_image, surrounding_region = image_processor.create_image_components(
            self.img, patch_start_x, patch_start_y)

        # Generate the patch
        generated_patch = self.sess.run(self.g_output_patch_only,
                                        feed_dict={
                                            self.g_input: g,
                                            self.surrounding_region: surrounding_region,
                                            self.training: False
                                        })

        # Store the last generated patch details to allow quick adjustments to the sharpness
        self.last_generated_patch = generated_patch[0]
        self.last_masked_image = masked_image
        self.last_patch_start_x = patch_start_x
        self.last_patch_start_y = patch_start_y

        # Sharpen generated patch and merge back into original
        generated_patch = image_processor.unsharp_mask(
            self.last_generated_patch)
        img = image_processor.merge_patch_with_image(generated_patch,
                                                     masked_image,
                                                     patch_start_x,
                                                     patch_start_y)

        # View the complete image and set state of relevant controls
        self.img = img.astype('uint8')
        img = Image.fromarray(self.img, 'RGB')
        img = self.resize_image(img)
        self.completed_image = ImageTk.PhotoImage(img)
        self.display_image(self.completed_image)

        self.unsharp_mask_slider.config(state=NORMAL)
        self.unsharp_mask_slider.set(50)
        self.selection_visible = False
        self.original_image_visible = False
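Both examples call ImageProcessor.unsharp_mask, whose implementation is not shown here. As a point of reference, below is a minimal sketch of the standard unsharp-mask operation it presumably performs, using SciPy's Gaussian filter; the function name, parameters, and default values are illustrative assumptions, not the project's actual signature.

import numpy as np
from scipy.ndimage import gaussian_filter

def unsharp_mask_sketch(patch, amount=1.0, sigma=2.0):
    """Classic unsharp mask: sharpened = original + amount * (original - blurred).

    `patch` is an H x W x 3 array; only the spatial axes are blurred.
    """
    patch = patch.astype('float32')
    blurred = gaussian_filter(patch, sigma=(sigma, sigma, 0))
    sharpened = patch + amount * (patch - blurred)
    return np.clip(sharpened, 0, 255).astype('uint8')

Increasing `amount` strengthens the sharpening, which is consistent with the slider-controlled adjustment in Example #1.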
Example #2
    def generatePatch(self):
        """
        Completes a batch of masked-out images from the test dataset and saves the results
        """

        image_processor = ImageProcessor()

        # Load the network. The tensors used below are:
        #     - g_input: Input to the generator
        #     - g_output_patch_only: The generated patch
        #     - surrounding_region: Region surrounding the masked image, merged with the generated patch
        #     - training: Whether the model is training; pass False when invoking the model for inference

        network = Network()
        d_input, g_input, g_output, g_output_patch_only, d_optimizer, g_optimizer, surrounding_region, \
            patch_ground_truth, d_cost_fake, d_cost_real, g_cost, training = network.network(batch_size)


        # Create a new TensorFlow session
        sess = tf.InteractiveSession()
        sess.run(tf.global_variables_initializer())


        # Get the paths of all the files within the test dataset location and shuffle the images
        file_paths = np.array(glob.glob(self.test_dataset_location))
        number_of_instances = len(file_paths)
        indexes = np.random.permutation(number_of_instances)
        file_paths = file_paths[indexes]


        # Load the learnt model from its checkpoint
        mi.load_checkpoint(sess)


        # Iterate through each batch of images
        for i in range(number_of_instances // batch_size):

            # Retrieve a batch of test images
            batch_file_paths = file_paths[i * batch_size: i * batch_size + batch_size]
            _, g_batch, image_full, surrounding_region_batch, _ = image_processor.create_batch(batch_file_paths)

            # Generate patches for the batch of images
            generated_patches = sess.run(g_output_patch_only,
                                         feed_dict={g_input: g_batch,
                                                    surrounding_region: surrounding_region_batch,
                                                    training: False})

            # Save the completed images: the ground truth (suffix 1) and the image with the
            # generated patch merged back in at two unsharp-mask intensities, the default
            # (suffix 2) and 0.5 (suffix 3)
            for k in range(0, batch_size):
                img_id = batch_size * i + k

                image_processor.save_image(image_full[k], img_id, 1)

                generated_patch = generated_patches[k]

                sharpened_patch = image_processor.unsharp_mask(generated_patch)
                sharpened_image = image_processor.merge_patch_with_image(sharpened_patch, image_full[k],
                                                                         patch_startX, patch_startY)
                image_processor.save_image(sharpened_image, img_id, 2)

                sharpened_patch = image_processor.unsharp_mask(generated_patch, 0.5)
                sharpened_image = image_processor.merge_patch_with_image(sharpened_patch, image_full[k],
                                                                         patch_startX, patch_startY)
                image_processor.save_image(sharpened_image, img_id, 3)

            print(i * batch_size)  # Progress indicator: index of the first image in the batch just processed
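ImageProcessor.merge_patch_with_image is likewise used but not defined in these examples. Below is a minimal sketch of what that merge plausibly does, pasting the generated patch over the masked region of the full image at the given offset; the function name and the row/column ordering are assumptions rather than the project's actual code.

import numpy as np

def merge_patch_with_image_sketch(patch, image, patch_start_x, patch_start_y):
    """Copy `patch` into a copy of `image` at (patch_start_x, patch_start_y),
    leaving the rest of the image untouched."""
    merged = np.array(image, copy=True)
    patch_height, patch_width = patch.shape[:2]
    merged[patch_start_y:patch_start_y + patch_height,
           patch_start_x:patch_start_x + patch_width] = patch
    return merged

With this convention, patch_start_x indexes columns and patch_start_y indexes rows, matching the canvas-to-pixel coordinate conversion at the top of Example #1.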