Example #1
def tello_main(args):
    """
    Main function used to control your drone using hand.

    :param args:
    :return:
    """
    image_processor = ImageProcessor(
        finish_drawing_sign=args.finish_drawing,
        hand_detector_confidence=args.hand_detection_confidence)
    drone_processor = DroneProcessor(
        max_area_cm=args.max_area,
        min_length_between_points_cm=args.min_length,
        starting_move_up_cm=args.takeoff_offset)

    # Start pinging the Tello to prevent it from landing
    drone_processor.start_pinging_tello()

    # Drawing loop
    drawing_points = []  # initialised so the rescaling below cannot hit an undefined name if the user quits early
    while True:
        key = cv2.waitKey(1)
        if key & 0xFF == ord("q"):
            # Exit if q pressed
            cv2.destroyAllWindows()
            break

        frame = drone_processor.get_last_frame()
        if frame is not None:
            image_resize_drawed, path_img, finish_drawing, drawing_points = image_processor.process_img(
                frame)
            frame_and_path = cv2.hconcat([image_resize_drawed, path_img])
            if finish_drawing:
                cv2.imshow("frame", frame_and_path)
                break
            cv2.imshow("frame", frame_and_path)

    # Stop pinging, so we can send move commands to the drone
    drone_processor.stop_pinging_tello()
    # Rescale points from the 0-1 range to the range defined by max_area.
    rescaled_points = drone_processor.rescale_points(drawing_points)
    # Reduce number of points to reproduce
    discrete_path = drone_processor.discrete_path(rescaled_points)
    # Convert the point list to a list of (dx, dy) differences between consecutive points
    discrete_path_distance = convert_to_distance_in_xy(discrete_path)
    # Reproduce path by the drone
    drone_processor.reproduce_discrete_path_by_drone(discrete_path_distance)
    # Finish drawing
    drone_processor.finish_drawing()

    cv2.destroyAllWindows()
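
The helper convert_to_distance_in_xy is imported from elsewhere in the project. Going by the comment above, it turns the absolute point list into per-step displacements; a minimal sketch, assuming each point is an (x, y) tuple:

def convert_to_distance_in_xy(points):
    # Each element is the (dx, dy) step from one point to the next
    return [(x2 - x1, y2 - y1)
            for (x1, y1), (x2, y2) in zip(points, points[1:])]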
Example #2
class Menu(QtWidgets.QMainWindow, design.Ui_MainWindow):
    def __init__(self):
        super().__init__()
        self.setupUi(self)
        self.OpenImageButton.clicked.connect(self.open_image)
        self.StartGameButton.clicked.connect(self.start_game)
        self.processor = None
        self.file = None
        
    def open_image(self):
        self.file = QtWidgets.QFileDialog.getOpenFileName(self, "Select image")

    def start_game(self):
        w, h = self.__get_wh()
        if self.file:
            if h > 1 and w > 1:
                self.processor = ImageProcessor(self.file[0], (w, h), self.__get_new_size())
                self.matrix = self.processor.get_image_matrix()
        if self.processor:
            self.game = Game(self.processor.size, self.matrix)
            self.game.show()

    def __get_wh(self):
        return self.spinBox_w.value(), self.spinBox_h.value()

    def __get_new_size(self):
        if self.newSize.text() and re.fullmatch("[0-9]+x[0-9]+", self.newSize.text()):
            return [int(value) for value in self.newSize.text().split("x")]
        return None
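
A typical entry point for a window like this (a sketch; the module's actual bootstrap code is not shown in the example):

import sys

if __name__ == "__main__":
    app = QtWidgets.QApplication(sys.argv)
    window = Menu()
    window.show()
    sys.exit(app.exec_())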
Example #3
def main_webcam_stream():
    """
    Apply a model on a stream of data coming from a connected cam.
    """
    image_processor = ImageProcessor(option='dlib_68landmarks')
    stream_processor = StreamProcessor(image_processor, index_cam=0)

    stream_processor.run()
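
StreamProcessor itself is not shown in this listing. A minimal sketch of what its run() loop plausibly does, reusing the process_image call seen in the Flir example below (the class internals here are assumptions, not the project's confirmed code):

import cv2

class StreamProcessor:
    def __init__(self, image_processor, index_cam=0):
        self.image_processor = image_processor
        self.index_cam = index_cam

    def run(self):
        cap = cv2.VideoCapture(self.index_cam)
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret or cv2.waitKey(1) & 0xFF == ord("q"):
                break
            # process_image detects the facial landmarks and draws the region contours in place
            self.image_processor.process_image(frame)
            cv2.imshow("frame", frame)
        cap.release()
        cv2.destroyAllWindows()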
Example #4
def development_main(image_source, args):
    """
    Main function used to development using built-in camera or file.

    :param image_source:
    :param args:
    :return:
    """
    if image_source == "built_camera":
        cap = cv2.VideoCapture(args.camera_index)
    else:
        cap = cv2.VideoCapture(args.filepath)

    image_processor = ImageProcessor(
        finish_drawing_sign=args.finish_drawing,
        hand_detector_confidence=args.hand_detection_confidence)

    while cap.isOpened():
        key = cv2.waitKey(1)
        if key & 0xFF == ord("q"):
            # Exit if q pressed
            cv2.destroyAllWindows()
            break

        ret, frame = cap.read()
        if ret:
            image_resize_drawed, path_img, finish_drawing, drawing_points = image_processor.process_img(
                frame)
            frame_and_path = cv2.hconcat([image_resize_drawed, path_img])
            if finish_drawing:
                cv2.imshow("frame", frame_and_path)
                cv2.waitKey(0)
                break

            cv2.imshow("frame", frame_and_path)
        else:
            break

    cap.release()
    cv2.destroyAllWindows()
Example #5
def main_flir_image_processor():
    """
    Process an image taken with a Flir One Pro thermal camera.
    The image can be taken either with the official app or a custom app using Flir's SDK.
    This does not work on videos split frame by frame, because the camera does not save
    thermal information when recording a video.

    I have left a picture to test out the script.
    """
    input_file = 'test_images/' + 'flir_20190617T163823.jpg'

    fie = FlirImageExtractor()
    fie.process_image(input_file, upsample_thermal=True, transform_rgb=True)

    # fie.plot()

    rgb_image = fie.get_rgb_np()
    rgb_image = cv2.cvtColor(rgb_image, cv2.COLOR_BGR2RGB)
    thermal_image_3d = fie.img_thermal_rgb
    thermal_image_3d = cv2.cvtColor(thermal_image_3d, cv2.COLOR_BGR2RGB)

    # Creating region contours
    image_processor = ImageProcessor(option='dlib_68landmarks')
    image_processor.process_image(rgb_image)

    cv2.imshow("RGB image with contours", rgb_image)
    # cv2.waitKey(0)

    image_processor.apply_saved_contours(thermal_image_3d)

    cv2.imshow("Thermal image with contours", thermal_image_3d)
    cv2.waitKey(0)

    # thermal_image_raw = fie.get_thermal_np()
    # regions = image_processor.all_regions
    # print('Region\tMean T\tStd T')
    #
    # for region in regions:
    #     mean, std = region.get_mean_std_temperature(thermal_image_raw)
    #     print('{}\t{}\t{}'.format(region.name, mean, std))

    # fie.export_thermal_to_csv('thermals_csv/'+file_name+'_thermal_csv.csv')

    fie.save_images()
Example #6
class Train:
    """
    Train

    Train is responsible for carrying out the training process. This includes:
        - Loading of the dataset
        - Calling methods to carry out any necessary pre-processing steps such as masking the training images
        - Saving and restoring the learnt models
        - Running the specified number of epochs to optimise both the generator and discriminator

    """

    image_processor = ImageProcessor()
    training_dataset_path = root + '/datasets/training/*'

    def train(self):
        """
        Trains the network on the requested dataset for a set number of epochs

        """

        # Retrieve the tensors from the network
        network = Network()
        d_input, g_input, g_output, g_output_patch_only, d_optimizer, g_optimizer, surrounding_region, \
            patch_ground_truth, d_cost_fake, d_cost_real, g_cost, training = network.network(batch_size)

        # Create a new TensorFlow session
        sess = tf.InteractiveSession()
        sess.run(tf.global_variables_initializer())
        saver = mi.load_checkpoint(sess)

        # Get the paths of all the files within the training dataset
        file_paths = np.array(glob.glob(self.training_dataset_path))
        number_of_instances = len(file_paths)

        for epoch in range(number_of_epochs):

            # Shuffle images
            file_paths = file_paths[np.random.permutation(number_of_instances)]

            # Iterate through each batch of images
            for i in range(number_of_instances // batch_size):

                # Retrieve batch of training images_____________________________________________________________________

                batch_file_paths = file_paths[i * batch_size:i * batch_size +
                                              batch_size]
                d_batch, g_batch, full_image_batch, surrounding_region_batch, patch_ground_truth_batch = \
                    self.image_processor.create_batch(batch_file_paths)

                # Optimise discriminator and generator__________________________________________________________________
                _ = sess.run(
                    [d_optimizer],
                    feed_dict={
                        g_input: g_batch,
                        surrounding_region: surrounding_region_batch,
                        d_input: d_batch,
                        training: True
                    })
                _ = sess.run(
                    [g_optimizer],
                    feed_dict={
                        g_input: g_batch,
                        surrounding_region: surrounding_region_batch,
                        d_input: d_batch,
                        patch_ground_truth: patch_ground_truth_batch,
                        training: True
                    })

                # Calculate and print error_____________________________________________________________________________
                if i % 10 == 0:
                    d_error_real = d_cost_real.eval({
                        d_input: d_batch,
                        training: True
                    })

                    d_error_fake = d_cost_fake.eval({
                        g_input: g_batch,
                        surrounding_region: surrounding_region_batch,
                        patch_ground_truth: patch_ground_truth_batch,
                        training: True
                    })

                    g_error = g_cost.eval({
                        g_input: g_batch,
                        surrounding_region: surrounding_region_batch,
                        patch_ground_truth: patch_ground_truth_batch,
                        training: True
                    })

                    print(epoch, i, d_error_real, d_error_fake, g_error)

                # Save model____________________________________________________________________________________________

                if i % 1000 == 0:
                    saver.save(sess, model_path + '/model.ckpt')
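
The training loop references module-level names that sit outside this snippet: root, batch_size, number_of_epochs, model_path, and the checkpoint helper mi. A hedged sketch of the assumed configuration (the values are illustrative, not the project's actual settings):

root = '.'                # assumption: project root
batch_size = 64           # assumption: must match the size the graph was built with
number_of_epochs = 100    # assumption
model_path = root + '/models'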
Example #7
import os
import cv2
from tqdm import tqdm

from image_processing import ImageProcessor, NoFaceFoundException

# TODO Get config by argparse?
folder_path = "/data/Pictures/Selfies_Everyday_Jun_Sep_2019/"
output_dim = 1000
padding = 1.2
video_name = 'video_{}_fps.avi'
fps_options = [1, 5, 10, 15, 25, 30, 59, 60]
file_type = ".jpg"

image_processor = ImageProcessor(output_dim, padding)

# Get all files in the folder sorted by time and filtered by file_type
file_names = sorted(os.listdir(folder_path),
                    key=lambda file_name: os.path.getmtime(
                        os.path.join(folder_path, file_name)))
image_names = [img for img in file_names if img.endswith(file_type)]

video_writters = []

# Prepare all video writers
for fps in fps_options:
    video_writters.append(
        cv2.VideoWriter(video_name.format(fps), 0, fps,
                        (output_dim, output_dim)))

for image_name in tqdm(image_names):
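    # NOTE: the original snippet is truncated here; the loop body below is a
    # plausible completion. It assumes ImageProcessor exposes a "process"
    # method that returns a face-cropped square frame and raises
    # NoFaceFoundException when no face is detected (the method name is an
    # assumption, not the project's confirmed API).
    try:
        frame = image_processor.process(os.path.join(folder_path, image_name))
    except NoFaceFoundException:
        continue
    for video_writter in video_writters:
        video_writter.write(frame)

# Release the writers so the video files are finalised on disk
for video_writter in video_writters:
    video_writter.release()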
Example #8
class CollageService():
    def __init__(self, logger: LogService):

        self.query_tracks = "https://ws.audioscrobbler.com/2.0/?method=user.gettoptracks&user={}&api_key={}&format=json&period={}&limit={}"
        self.query_albums = "https://ws.audioscrobbler.com/2.0/?method=user.gettopalbums&user={}&api_key={}&format=json&period={}&limit={}"
        self.query_artists = "https://ws.audioscrobbler.com/2.0/?method=user.gettopartists&user={}&api_key={}&format=json&period={}&limit={}"

        self.imageProcessor = ImageProcessor()
        self.log = logger

    async def top_list(self,
                       username: str,
                       period: str,
                       thing: str = "albums",
                       limit: int = 6) -> tuple[BotResponseCode, str]:

        if thing == "albums":
            rqs = [
                grequests.get(
                    self.query_albums.format(username, FM_API_KEY, period,
                                             limit))
            ]
        elif thing == "artists":
            rqs = [
                grequests.get(
                    self.query_artists.format(username, FM_API_KEY, period,
                                              limit))
            ]
        else:
            rqs = [
                grequests.get(
                    self.query_tracks.format(username, FM_API_KEY, period,
                                             limit))
            ]

        responses = grequests.map(rqs)
        res = responses[0].json()

        try:
            if thing == "albums":
                top_albums = [
                    "{} by {} ({} plays)".format(album["name"],
                                                 album["artist"]["name"],
                                                 album["playcount"])
                    for album in res["topalbums"]["album"]
                ][0:limit]
            elif thing == "artists":
                top_albums = [
                    "{} ({} plays)".format(album["name"], album["playcount"])
                    for album in res["topartists"]["artist"]
                ][0:limit]
            else:
                top_albums = [
                    "{} by {} {} ({} plays)".format(
                        album["name"],
                        album["artist"]["name"],
                        duration_helper(album["duration"]),
                        album["playcount"],
                    ) for album in res["toptracks"]["track"]
                ][0:limit]
        except Exception:
            response = "no {} found for user {} :pensive:".format(thing, username)
            return BotResponseCode.ERROR, response

        if len(top_albums) == 0:
            response = "no {} found for user {} :pensive:".format(thing, username)
            return BotResponseCode.ERROR, response

        if username[-1] == "s":
            username = username + "'"
        else:
            username = username + "'s"

        response = "{} top {} are:\n{}".format(username, thing,
                                               "\n".join(top_albums))
        return BotResponseCode.TEXT, response

    async def top_collage(
        self,
        username: str,
        period: str,
        dims: str = "3x3"
    ) -> tuple[BotResponseCode, str] | tuple[BotResponseCode, BytesIO]:

        by_x, by_y = [int(x) for x in dims.split("x")]

        rqs = [
            grequests.get(
                self.query_albums.format(username, FM_API_KEY, period,
                                         by_x * by_y))
        ]
        responses = grequests.map(rqs)
        res = responses[0].json()

        try:
            top_albums = [
                get_meta(album) for album in res["topalbums"]["album"]
            ]
            if len(top_albums) != len(res["topalbums"]["album"]):
                response = "huh i couldn't grab all the images i needed"
                return BotResponseCode.ERROR, response

        except Exception:
            response = "no albums found for user {} :pensive:".format(username)
            return BotResponseCode.ERROR, response

        if len(top_albums) == 0:
            response = "no albums found for user {} :pensive:".format(username)
            return BotResponseCode.ERROR, response

        if by_x * by_y > len(top_albums):
            response = "you don't have enough albums in that period for a {}x{} collage, bucko".format(
                by_x, by_y)
            return BotResponseCode.ERROR, response

        rqs = (grequests.get(album["cover_url"]) for album in top_albums)
        responses = grequests.map(rqs)

        full_data = list(zip(responses, map(lambda a: a["info"], top_albums)))

        image_binary = self.imageProcessor.generate_collage_binary(
            full_data, by_x, by_y)

        return BotResponseCode.IMAGE, image_binary
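
get_meta and duration_helper are imported from elsewhere in the project. Hedged sketches consistent with how they are used above (the Last.fm image field names and the m:ss formatting are assumptions):

def duration_helper(duration):
    # Assumption: Last.fm reports the track duration in seconds as a string
    seconds = int(duration)
    return "{}:{:02d}".format(seconds // 60, seconds % 60)

def get_meta(album):
    # Assumption: take the largest cover image and keep a caption for the collage
    return {
        "cover_url": album["image"][-1]["#text"],
        "info": "{} - {}".format(album["artist"]["name"], album["name"]),
    }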
Example #9
class ImageInpaint:
    """
    ImageInpaint is responsible for providing a front-end user interface, which provides access to the following tasks:
        - Browse and choose an image from their computer
        - Select a region to be complete
        - Invoke the generator to complete the image
        - Save the completed image to their computer
    """

    image_processor = ImageProcessor()
    start_x = 0
    start_y = 0
    ratio = 4
    canvas_max_size = 512
    selection_box_width = patch_width * ratio
    patch_width_absolute = patch_width
    padding = patch_width * ratio

    selection_coordinates = []

    selection_visible = False
    original_image_visible = False
    faces_model_enabled = False

    completed_image = None
    selection = None
    sess = None
    saver = None
    g_input = None
    g_output_patch_only = None
    surrounding_region = None
    training = None
    img = None
    original_image_resized = None
    original_image = None
    image_height = None
    image_width = None
    unsharp_mask_slider = None
    last_generated_patch = None
    last_masked_image = None
    last_patch_start_x = None
    last_patch_start_y = None

    def __init__(self):
        """
        Initialises the window and controls, loads the model, and starts the main loop

        """

        # Create the window and canvas__________________________________________________________________________________

        self.window = tk.Tk()
        self.window.title("Image Inpainter")
        self.window.geometry("800x600")
        self.window.configure(background='white')

        self.canvas = Canvas(self.window,
                             width=1,
                             height=1,
                             borderwidth=0,
                             bd=0,
                             highlightthickness=0,
                             relief='ridge')

        self.canvas.bind("<B1-Motion>", self.mouse_move)
        self.canvas.bind("<Button-1>", self.mouse_down)

        # Add containers to the window which hold the controls and image________________________________________________

        controls_row_1 = Frame(self.window)
        controls_row_2 = Frame(self.window)
        image_holder = Frame(self.window)

        controls_row_1.pack(side=TOP)
        controls_row_2.pack(side=TOP, pady=10)
        image_holder.pack(side=BOTTOM, pady=10)

        # Add controls__________________________________________________________________________________________________

        button_width = 10

        self.open_image_button = Button(self.window,
                                        text="Open",
                                        command=self.open_image,
                                        width=button_width)

        self.selection_button = Button(self.window,
                                       text="Select",
                                       command=self.toggle_selection,
                                       width=button_width,
                                       state=DISABLED)

        self.complete_button = Button(self.window,
                                      text="Complete",
                                      command=self.complete_image,
                                      width=button_width,
                                      state=DISABLED)

        self.save_button = Button(self.window,
                                  text="Save",
                                  command=self.save_image,
                                  width=button_width,
                                  state=DISABLED)

        self.toggle_original_button = Button(
            self.window,
            text="Ground Truth",
            command=self.toggle_original_image,
            width=button_width,
            state=DISABLED)

        self.switch_model_button = Button(self.window,
                                          text="Faces Model",
                                          command=self.switch_model,
                                          width=button_width)

        self.unsharp_mask_slider = Scale(self.window,
                                         from_=0,
                                         to_=100,
                                         orient=HORIZONTAL,
                                         bg='white',
                                         bd=1,
                                         troughcolor='white',
                                         activebackground='#e7e7e7',
                                         length=150,
                                         width=10,
                                         command=self.unsharp_mask,
                                         showvalue=0,
                                         state=DISABLED)

        self.unsharp_mask_label = Label(self.window, text="Unsharp Mask: -%")

        self.open_image_button.pack(in_=controls_row_1, side=LEFT)
        self.save_button.pack(in_=controls_row_1, side=LEFT)
        self.selection_button.pack(in_=controls_row_1, side=LEFT)
        self.complete_button.pack(in_=controls_row_1, side=LEFT)
        self.toggle_original_button.pack(in_=controls_row_1, side=LEFT)
        self.switch_model_button.pack(in_=controls_row_1, side=LEFT)

        self.unsharp_mask_label.pack(in_=controls_row_2, side=LEFT, padx=2)
        self.unsharp_mask_slider.pack(in_=controls_row_2, side=LEFT, padx=5)

        # Load model and start main loop________________________________________________________________________________
        self.setup_network()
        self.load_model(model_path)
        self.window.mainloop()

    def switch_model(self):
        """
        Switches between the generic model and the one trained on just faces
        """
        self.faces_model_enabled = not self.faces_model_enabled

        if self.faces_model_enabled:
            self.load_model(faces_model_path)
            self.switch_model_button.config(text="Generic Model")
        else:
            self.load_model(model_path)
            self.switch_model_button.config(text="Faces Model")

    def unsharp_mask(self, strength):
        """
        Sharpens the generated patch
        :param
            strength:   Controls the amount of sharpening applied; the greater the value, the sharper the image.
                        A value of 0 leaves the image unchanged

        """
        strength = int(strength)
        self.unsharp_mask_label.config(
            text="Unsharp Mask: {:d}%".format(strength))

        if self.last_generated_patch is None:
            return

        strength /= 20

        img = self.image_processor.unsharp_mask(self.last_generated_patch,
                                                strength)

        img = self.image_processor.merge_patch_with_image(
            img, self.last_masked_image, self.last_patch_start_x,
            self.last_patch_start_y)

        self.img = img.astype('uint8')
        img = Image.fromarray(self.img, 'RGB')
        img = self.resize_image(img)
        self.completed_image = ImageTk.PhotoImage(img)
        self.display_image(self.completed_image)

    def open_image(self):
        """
        Displays the browse file dialog and, upon opening an image, displays the image and enables the remaining buttons
        """

        # Present a file dialog window, allowing only jpg and png files to be selected
        file = filedialog.askopenfile(parent=self.window,
                                      mode='rb',
                                      title='Select Image',
                                      filetypes=[('Jpeg', '*.jpeg'),
                                                 ('Jpg', '*.jpg'),
                                                 ('png', '*.png')])

        self.unsharp_mask_slider.config(state=DISABLED)

        if file is not None:

            img = Image.open(file)
            if img.mode != 'RGB':
                img = img.convert('RGB')

            # This copy is used by the network to complete the image
            self.img = np.array(img)
            # self.img will be written over when the user makes a modification, allowing multiple
            # changes to a single image. self.original_image remains unchanged for as long as the
            # image is loaded into the program
            self.original_image = np.array(img)

            # Resize the image to visually fit the visible canvas
            img = self.resize_image(img)

            # Convert to a tkinter image and display it in the window
            self.completed_image = ImageTk.PhotoImage(img)
            self.original_image_resized = self.completed_image
            self.display_image(self.completed_image)

            self.original_image_visible = True
            self.selection_visible = False
            self.enable_buttons()

    def enable_buttons(self):
        """
        When the program is first launched, no image is loaded, therefore the following buttons are initially disabled.
        Upon importing an image, this function should be called to enable them
        """
        self.selection_button.config(state=NORMAL)
        self.complete_button.config(state=NORMAL)
        self.toggle_original_button.config(state=NORMAL)
        self.save_button.config(state=NORMAL)

    def save_image(self):
        """
        Presents the user with a save dialog allowing them to save their modified image
        """
        file = filedialog.asksaveasfile(mode='wb', defaultextension=".png")
        # Check a file has successfully been opened, and whether the user has the original image displayed or the
        # generated one and save the relevant one
        if file:
            if self.original_image_visible:
                Image.fromarray(self.original_image).save(file)
            else:
                Image.fromarray(self.img).save(file)

    def toggle_original_image(self):
        """
        Switch between displaying the original image or the one being modified
        """

        self.selection_visible = False
        self.canvas.itemconfig(self.selection, state='hidden')

        if self.original_image_visible:
            self.display_image(self.completed_image)
            self.original_image_visible = False
            self.toggle_original_button.config(text="Completed")
        else:
            self.display_image(self.original_image_resized)
            self.original_image_visible = True
            self.toggle_original_button.config(text="Ground Truth")

    def toggle_selection(self):
        """
        Toggle the visibility of the selection box
        """
        if self.selection_visible:
            self.canvas.itemconfig(self.selection, state='hidden')
            self.selection_visible = False
        else:
            self.canvas.itemconfig(self.selection, state='normal')
            self.selection_visible = True

    def display_image(self, img):
        """
        Displays the image in the window
        :argument
            img:    Image to be displayed in the window
        """

        self.canvas.config(width=img.width(), height=img.height())

        self.canvas.delete("all")
        self.canvas.create_image(0, 0, image=img, anchor="nw")

        self.canvas.pack()
        self.add_selection_box()

    def add_selection_box(self):
        """
        Draws a selection box which the user can move around the image to choose the region they wish to complete
        """
        self.selection = self.canvas.create_rectangle(
            self.image_width // 2 - self.selection_box_width // 2,
            self.image_height // 2 - self.selection_box_width // 2,
            self.image_width // 2 + self.selection_box_width // 2,
            self.image_height // 2 + self.selection_box_width // 2,
            fill='black',
            width=2,
            state='hidden')

    def setup_network(self):
        """
        Setup the network tensors
            - g_input: Input to the generator
            - g_output_patch_only: Patch generated
            - surrounding_region: Region surrounding the masked image to be merged with the generated patch
            - training: Whether the model is training or not. When invoking the model, False should be passed in
        """
        network = Network()
        d_input, g_input, g_output, g_output_patch_only, d_optimizer, g_optimizer, surrounding_region, \
            patch_ground_truth, d_cost_fake, d_cost_real, g_cost, training = network.network()

        # Create a new TensorFlow session
        self.sess = tf.InteractiveSession()
        self.sess.run(tf.global_variables_initializer())
        self.saver = tf.train.Saver(max_to_keep=1)

        self.g_input = g_input
        self.g_output_patch_only = g_output_patch_only
        self.surrounding_region = surrounding_region
        self.training = training

    def load_model(self, current_model_path):
        """
        Load the learnt model
        :param
            current_model_path:     Path to the learnt model

        """
        self.open_image_button.config(state=DISABLED)
        self.complete_button.config(state=DISABLED)

        # The buttons stay disabled unless the model is successfully restored, preventing attempts to invoke the network
        checkpoint = tf.train.get_checkpoint_state(
            os.path.dirname(current_model_path))
        if checkpoint and checkpoint.model_checkpoint_path:
            self.saver.restore(self.sess, checkpoint.model_checkpoint_path)
            self.open_image_button.config(state=NORMAL)
            self.complete_button.config(state=NORMAL)
            print("Model Restored")
        else:
            print("WARNING: Model not restored")

    def complete_image(self):
        """
        Completes the selected region of the image and updates the visible image to reflect the changes
        """
        image_processor = ImageProcessor()

        # Convert the visible coordinates to actual pixel coordinates
        selection_coordinates = self.canvas.coords(self.selection)
        patch_start_x = int(selection_coordinates[0] // self.ratio)
        patch_start_y = int(selection_coordinates[1] // self.ratio)

        # Get the image components required to generate the patch and insert it back into the original
        g, masked_image, surrounding_region = image_processor.create_image_components(
            self.img, patch_start_x, patch_start_y)

        # Generate the patch
        generated_patch = self.sess.run(self.g_output_patch_only,
                                        feed_dict={
                                            self.g_input: g,
                                            self.surrounding_region:
                                            surrounding_region,
                                            self.training: False
                                        })

        # Store the last generated patch details to allow quick adjustments to the sharpness
        self.last_generated_patch = generated_patch[0]
        self.last_masked_image = masked_image
        self.last_patch_start_x = patch_start_x
        self.last_patch_start_y = patch_start_y

        # Sharpen generated patch and merge back into original
        generated_patch = image_processor.unsharp_mask(
            self.last_generated_patch)
        img = image_processor.merge_patch_with_image(generated_patch,
                                                     masked_image,
                                                     patch_start_x,
                                                     patch_start_y)

        # View the complete image and set state of relevant controls
        self.img = img.astype('uint8')
        img = Image.fromarray(self.img, 'RGB')
        img = self.resize_image(img)
        self.completed_image = ImageTk.PhotoImage(img)
        self.display_image(self.completed_image)

        self.unsharp_mask_slider.config(state=NORMAL)
        self.unsharp_mask_slider.set(50)
        self.selection_visible = False
        self.original_image_visible = False

    def resize_image(self, img):
        """
        Resize the image for display. NOTE: this does not resize the image being completed or saved;
        it only scales the image to fill the visible window
        :argument
            img:    Image to be resized
        """

        # Check which dimension is the maximum and fill the window along that dimension. Need to calculate the ratio if
        # a non-square image is loaded and resize the smaller side appropriately
        image_height = float(img.height)
        image_width = float(img.width)
        max_dimen = max(image_height, image_width)

        if max_dimen == image_height:
            ratio = float(self.canvas_max_size) / image_height
            image_height = float(self.canvas_max_size)
            image_width *= ratio
        else:
            ratio = float(self.canvas_max_size) / image_width
            image_width = float(self.canvas_max_size)
            image_height *= ratio

        # Store the ratio of the original image with respect to the size of the visible canvas and adjust the size of
        # the selection box
        self.ratio = ratio
        self.selection_box_width = self.patch_width_absolute * ratio
        self.padding = patch_width * ratio

        self.image_height = int(image_height)
        self.image_width = int(image_width)

        return img.resize((self.image_width, self.image_height))

    def mouse_down(self, event):
        """
        On mouse down, store the position of the current selection and the point where the user has clicked.
        This is required to calculate the new position by mouse_move
        :argument
            event:  Contains information about the mouse event such as location

        """
        self.selection_coordinates = self.canvas.coords(self.selection)
        self.start_x = event.x
        self.start_y = event.y

    def mouse_move(self, event):
        """
        Move the selection box to the current mouse position on drag.
        The conditional checks are required to ensure the selection box does not go out of bounds, which is 16 pixels
        from any edge since this is a requirement of the neural network itself
        :argument
            event:  Contains information about the mouse event such as location

        """

        diff_x = self.start_x - event.x
        diff_y = self.start_y - event.y

        # Calculate the new locations for the four corners of the selection box
        start_x_new = self.selection_coordinates[0] - diff_x
        start_y_new = self.selection_coordinates[1] - diff_y
        end_x_new = self.selection_coordinates[2] - diff_x
        end_y_new = self.selection_coordinates[3] - diff_y

        # Ensure that the selection box does not leave the bounds of the image. This should leave a margin of pixels
        # surrounding the patch which the network uses to complete the masked out region
        if start_x_new < self.padding:
            start_x_new = self.padding
            end_x_new = start_x_new + self.selection_box_width

        if start_y_new < self.padding:
            start_y_new = self.padding
            end_y_new = start_y_new + self.selection_box_width

        if end_x_new > self.image_width - self.padding:
            start_x_new = self.image_width - self.selection_box_width - self.padding
            end_x_new = start_x_new + self.selection_box_width

        if end_y_new > self.image_height - self.padding:
            start_y_new = self.image_height - self.selection_box_width - self.padding
            end_y_new = start_y_new + self.selection_box_width

        self.canvas.coords(self.selection, start_x_new, start_y_new, end_x_new,
                           end_y_new)
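
Since __init__ builds the network, loads the checkpoint, and enters the Tk main loop itself, launching the tool is just a constructor call (a sketch; the project's actual entry point is not shown):

if __name__ == '__main__':
    ImageInpaint()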
Example #10
    def generatePatch(self):
        """
        Completes a batch of masked out images

        """

        image_processor = ImageProcessor()

        # Load the network______________________________________________________________________________________________
        #     - g_input: Input to the generator
        #     - g_output_patch_only: Patch generated
        #     - surrounding_region: Region surrounding the masked image to be merged with the generated patch
        #     - training: Whether the model is training or not. When invoking the model, False should be passed in

        network = Network()
        d_input, g_input, g_output, g_output_patch_only, d_optimizer, g_optimizer, surrounding_region, \
            patch_ground_truth, d_cost_fake, d_cost_real, g_cost, training = network.network(batch_size)

        # Create a new TensorFlow session
        sess = tf.InteractiveSession()
        sess.run(tf.global_variables_initializer())

        # Get the paths of all the files within the test dataset location and shuffle the images
        file_paths = np.array(glob.glob(self.test_dataset_location))
        number_of_instances = len(file_paths)
        indexes = np.random.permutation(number_of_instances)
        file_paths = file_paths[indexes]

        # Load learnt model
        mi.load_checkpoint(sess)

        # Iterate through each batch of images
        for i in range(number_of_instances // batch_size):

            # Retrieve batch of training images
            batch_file_paths = file_paths[i * batch_size: i * batch_size + batch_size]
            _, g_batch, image_full, surrounding_region_batch, _ = image_processor.create_batch(batch_file_paths)

            # Generate patches for the batch of images
            generated_patches = sess.run(g_output_patch_only, feed_dict={g_input: g_batch,
                                         surrounding_region: surrounding_region_batch, training: False})

            # Save the completed images. Both the ground truth (1) and versions with the generated patch, sharpened
            # with the default unsharp intensity (2.5) and with 0.5, are saved
            for k in range(0, batch_size):
                img_id = batch_size * i + k

                image_processor.save_image(image_full[k], img_id, 1)

                generated_patch = generated_patches[k]

                sharpened_patch = image_processor.unsharp_mask(generated_patch)
                sharpened_image = image_processor.merge_patch_with_image(sharpened_patch, image_full[k],
                                                                         patch_startX, patch_startY)
                image_processor.save_image(sharpened_image, img_id, 2)

                sharpened_patch = image_processor.unsharp_mask(generated_patch, 0.5)
                sharpened_image = image_processor.merge_patch_with_image(sharpened_patch, image_full[k],
                                                                         patch_startX, patch_startY)
                image_processor.save_image(sharpened_image, img_id, 3)

            print(i * batch_size)
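
The method also leans on module-level names defined elsewhere: batch_size, patch_startX, patch_startY, mi (the checkpoint helper), and self.test_dataset_location. A hedged sketch of the assumed constants (values are illustrative only):

batch_size = 64      # assumption: must match the value passed to network.network()
patch_startX = 16    # assumption: fixed patch origin used for the test images
patch_startY = 16    # assumption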