Example #1
    def insert(self, rectangle_list: list, frame_id: int, motion_estimate: np.ndarray = None):
        """Inserts all rectangles contained in the list  passed as first parameter into the global map representation.

        :param rectangle_list: List of detected rectangles to be inserted into the global module map.
        :param frame_id: Frame id of the current image frame associated to the detected rectangles.
        :param motion_estimate: Numpy array representing the motion estimate between the last frame (ID-1) and the frame containing the rectangles.
        """

        Logger.debug("Inserting a new rectangle list into the module map at frame {}".format(frame_id))
        # When no information about the motion estimate is given, assume no motion.
        if motion_estimate is None:
            motion_estimate = np.array([0.0, 0.0])

        # In case there are no rectangles in the global map (first step in the simulation) store all the ones passed to
        # the function as if they were new modules.
        if len(self.global_module_map) == 0:
            for rectangle in rectangle_list:
                # Give a new ID to each rectangle.
                next_ID = ID.next_id()
                self.global_module_map[next_ID] = self.__ModuleInMap(next_ID, rectangle, frame_id)
        else:
            # Correspondences between the rectangles passed to the function and the ones already stored in the global map.
            # If no correspondence is found (e.g. a rectangle is new), then set the correspondence to None.
            correspondences = []
            for rectangle in rectangle_list:
                rectangle_center = np.mean(rectangle, axis=0)
                # Shift the rectangle center using the motion estimate in order to align with the previous frame.
                rectangle_center -= motion_estimate

                # Compute the ID of the rectangle in the global map which is most similar to the current rectangle.
                most_similar_ID = self.__find_most_similar_module(rectangle, area_threshold_ratio=0.5)

                if most_similar_ID is None:
                    correspondences.append(None)
                else:
                    # If the query rectangle's center is inside the nearest rectangle, set it as a correspondence.
                    closest_rectangle = self.global_module_map[most_similar_ID].last_rectangle
                    if rectangle_contains(closest_rectangle, rectangle_center):
                        correspondences.append(most_similar_ID)
                    else:
                        correspondences.append(None)

            # Update the current module map representation by considering the correspondences determined above.
            for rectangle_index, correspondence in enumerate(correspondences):
                # If there was no correspondence, add the rectangle as a new module in the global map.
                if correspondence is None:
                    next_ID = ID.next_id()
                    self.global_module_map[next_ID] = self.__ModuleInMap(next_ID, rectangle_list[rectangle_index],
                                                                         frame_id)
                else:
                    # Update the corresponding module in the map with the newest coordinates.
                    self.global_module_map[correspondence].add(rectangle_list[rectangle_index], frame_id)

            # Update the rectangles in the global map with the motion estimate.
            for _, rectangle_in_map in self.global_module_map.items():
                rectangle_in_map.add_motion(frame_id, motion_estimate)

        # All modules which have not been detected for more than a fixed timespan are shifted to a database which
        # stores them but will not update their coordinates if an association is found.
        self.__store_old_modules(frame_id)
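
A minimal usage sketch for the insert() method above; the ModuleMap constructor matches Example #12, while the corner coordinates, frame ids and motion values are purely illustrative assumptions:

import numpy as np

# Hypothetical usage: assumes ModuleMap (see Example #12) is importable in the current scope.
module_map = ModuleMap()
rectangle = np.array([[10.0, 10.0], [60.0, 10.0], [60.0, 40.0], [10.0, 40.0]])  # four corner points

# First frame: the map is empty, so every rectangle is stored as a new module (no motion estimate needed).
module_map.insert([rectangle], frame_id=0)

# Subsequent frames: pass the estimated inter-frame motion so rectangle centers can be aligned before matching.
module_map.insert([rectangle + 5.0], frame_id=1, motion_estimate=np.array([5.0, 5.0]))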
Example #2
    def __load_video_from_file(self):
        open_directory = ""
        if self.last_folder_opened is not None:
            open_directory = self.last_folder_opened
        video_file_name, _ = QtWidgets.QFileDialog.getOpenFileName(
            caption="Select a video",
            filter="Videos (*.mov *.mp4 *.avi)",
            directory=open_directory)
        Logger.debug("Selected video path: <{}>".format(video_file_name))
        if video_file_name == "":
            return
        self.last_folder_opened = os.path.dirname(video_file_name)

        self.thermo_thread.input_file_name = video_file_name

        self.is_stoppable = True
        self.setWindowTitle("Thermography: {}".format(video_file_name))

        start_frame = self.video_from_index.value()
        end_frame = self.video_to_index.value()
        if end_frame == -1:
            end_frame = None

        Logger.debug("Start frame: {}, end frame: {}".format(
            start_frame, end_frame))
        self.thermo_thread.load_video(start_frame=start_frame,
                                      end_frame=end_frame)

        self.global_progress_bar.setMinimum(0)
        self.global_progress_bar.setMaximum(
            len(self.thermo_thread.app.frames) - 1)

        self.thermo_thread.iteration_signal.connect(
            self.__update_global_progress_bar)
Example #3
    def __init__(self):
        """Initializes the GUI and connects its widgets to the corresponding functions."""
        super(self.__class__, self).__init__()
        Logger.info("Creating dataset creation GUI")
        self.setupUi(self)
        self.__set_logo_icon()

        self.last_folder_opened = None
        self.frames = []
        self.last_frame_image = None
        self.current_frame_id = 0
        self.current_module_id_in_frame = 0
        self.current_frame_modules = []
        self.discarded_modules = {}
        self.accepted_modules = {}
        self.misdetected_modules = {}

        self.module_counter = {
            "automatic": {
                "accepted": 0,
                "discarded": 0,
                "misdetected": 0
            },
            "manual": {
                "accepted": 0,
                "discarded": 0,
                "misdetected": 0
            }
        }

        self.thermo_thread = None

        self.__connect_widgets()
Example #4
 def __set_webcam_port(self, port):
     Logger.debug("Setting webcam port {}".format(port))
     self.webcam_port = port
     self.thermo_thread.use_webcam(self.webcam_port)
     self.is_stoppable = False
     self.setWindowTitle("Thermography: Webcam")
     self.__play_all_frames()
Example #5
    def __load_video_from_file(self):
        """Initializes a :class:`.VideoLoaderThread` thread and runs it to load a video from file."""
        open_directory = ""
        if self.last_folder_opened is not None:
            open_directory = self.last_folder_opened
        video_file_name, _ = QtWidgets.QFileDialog.getOpenFileName(
            caption="Select a video",
            filter="Videos (*.mov *.mp4 *.avi)",
            directory=open_directory)
        Logger.debug("Selected video path: <{}>".format(video_file_name))

        if video_file_name == "":
            return

        self.last_folder_opened = os.path.dirname(video_file_name)
        self.setWindowTitle("Thermography: {}".format(video_file_name))

        start_frame = self.video_from_index.value()
        end_frame = self.video_to_index.value()
        if end_frame == -1:
            end_frame = None

        Logger.debug("Start frame: {}, end frame: {}".format(
            start_frame, end_frame))

        video_loader_thread = VideoLoaderThread(video_path=video_file_name,
                                                from_index=start_frame,
                                                to_index=end_frame,
                                                parent=self)
        video_loader_thread.start()
        video_loader_thread.finish_signal.connect(self.__video_loader_finished)
Example #6
    def __load_default_paths(self):
        # Load camera parameters.
        settings_dir = tg.settings.get_settings_dir()

        self.camera_param_file_name = os.path.join(settings_dir, "camera_parameters.json")
        Logger.debug("Using default camera param file: {}".format(self.camera_param_file_name))
        tg.settings.set_data_dir("/Users/paolo/thermography/")
Example #7
    def __init__(self, working_modules: dict, broken_modules: dict, misdetected_modules: dict, parent=None):
        """Initializes the dialog with the files to save to disk.

        :param working_modules: Dictionary of working modules.
        :param broken_modules: Dictionary of broken modules.
        :param misdetected_modules: Dictionary of misdetected modules.
        :param parent: Parent window of this dialog.
        """
        super(self.__class__, self).__init__(parent=parent)

        Logger.debug("Opened 'Save Images' dialog")

        self.setupUi(self)
        self.__set_logo_icon()

        self.working_modules = working_modules
        self.broken_modules = broken_modules
        self.misdetected_modules = misdetected_modules

        self.output_directory = " "

        self.choose_directory_button.clicked.connect(self.__open_directory_dialog)
        self.save_button.clicked.connect(self.__save_module_dataset)
        self.progress_bar_all_frames.setMinimum(0)
        self.progress_bar_all_frames.setMaximum(
            len(self.working_modules.keys()) + len(self.broken_modules.keys()) + len(
                self.misdetected_modules.keys()) - 1)
Example #8
 def __pause_all_frames(self):
     Logger.debug("Pausing all frames")
     self.thermo_thread.is_paused = True
     self.play_video_button.setEnabled(True)
     if self.is_stoppable:
         self.stop_video_button.setEnabled(True)
     self.pause_video_button.setEnabled(False)
Example #9
    def __init__(self, oasi_settings_file: str = None):
        if oasi_settings_file is None:
            oasi_settings_file = os.path.join(pm.Settings.resources_dir,
                                              "oasi_settings.json")
        self.settings_file = oasi_settings_file
        Logger.debug("Loading Oasi settings from {}".format(self.settings_file))

        self.__parse_settings_file()
Example #10
    def use_webcam(self, webcam_port: int):
        """If this image is called, then the :class:`~thermography.thermo_app.ThermoApp` encapsulated by this thread
        uses the webcam as input instead of loading a video from disk."""

        Logger.debug("Thermo thread uses webcam port {}".format(webcam_port))
        self.webcam_port = webcam_port
        self.cap = cv2.VideoCapture(self.webcam_port)
        self.should_use_webcam = True
Example #11
    def __load_default_paths(self):
        # Load camera parameters.
        settings_dir = tg.settings.get_settings_dir()

        self.camera_param_file_name = os.path.join(settings_dir, "camera_parameters.json")
        tg.settings.set_data_dir("Z:/SE/SEI/Servizi Civili/Del Don Carlo/termografia/")
        self.input_file_name = os.path.join(tg.settings.get_data_dir(), "Ispez Termografica Ghidoni 1.mov")
        Logger.debug("Using default camera param file: {}\n"
                     "Default input file name: {}".format(self.camera_param_file_name, self.input_file_name))
Example #12
    def __init__(self, input_video_path, camera_param_file):
        """Initializes the :class:`ThermoApp` instance by defining default parameters.

        :param input_video_path: Absolute path to the input video.
        :param camera_param_file: Parameter file of the camera.
        """
        Logger.debug("Starting thermo app")
        self.input_video_path = input_video_path
        self.camera_param_file = camera_param_file

        # Camera object containing the corresponding parameters.
        self.camera = None

        # Object responsible for loading the video passed as parameter.
        self.video_loader = None

        # Global module map with associated motion detector.
        self.module_map = ModuleMap()
        self.motion_detector = MotionDetector(scaling=0.15)

        # Parameters used for image classification.
        self.image_shape = np.array([96, 120, 1])
        self.num_classes = 3
        checkpoint_dir = os.path.join(get_resources_dir(), "weights")
        self.inference = Inference(checkpoint_dir=checkpoint_dir,
                                   model_class=ThermoNet3x3,
                                   image_shape=self.image_shape,
                                   num_classes=self.num_classes)

        # Objects referring to the items computed during the last frame.
        self.last_input_frame = None
        self.last_preprocessed_image = None
        self.last_attention_image = None
        self.last_scaled_frame_rgb = None
        self.last_scaled_frame = None
        self.last_edges_frame = None
        self.last_raw_intersections = None
        self.last_intersections = None
        self.last_segments = None
        self.last_cluster_list = None
        self.last_rectangles = None
        self.last_mean_motion = None
        self.last_frame_id = 0
        self.last_probabilities = {}

        # Runtime parameters for detection.
        self.should_undistort_image = True
        self.preprocessing_parameters = PreprocessingParams()
        self.edge_detection_parameters = EdgeDetectorParams()
        self.segment_detection_parameters = SegmentDetectorParams()
        self.segment_clustering_parameters = SegmentClustererParams()
        self.cluster_cleaning_parameters = ClusterCleaningParams()
        self.intersection_detection_parameters = IntersectionDetectorParams()
        self.rectangle_detection_parameters = RectangleDetectorParams()

        # Load the camera and module parameters.
        self.__load_params()
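
For reference, a hypothetical construction call mirroring how Example #21 instantiates the app through the tg alias; the import line and both paths are assumptions:

import thermography as tg

# Hypothetical paths; camera_parameters.json follows the default file name used in the examples above.
app = tg.App(input_video_path="/path/to/flight_video.mov",
             camera_param_file="/path/to/camera_parameters.json")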
Example #13
    def __load_default_paths(self):
        # Load camera parameters.
        settings_dir = tg.settings.get_settings_dir()

        self.camera_param_file_name = os.path.join(settings_dir, "camera_parameters.json")
        tg.settings.set_data_dir("/Users/paolo/thermography/")
        self.input_file_name = os.path.join(tg.settings.get_data_dir(), "Ispez Termografica Ghidoni 1.mov")
        Logger.debug("Using default camera param file: {}\n"
                     "Default input file name: {}".format(self.camera_param_file_name, self.input_file_name))
Example #14
    def __load_default_paths(self):
        # Load camera parameters.
        settings_dir = tg.settings.get_settings_dir()

        self.camera_param_file_name = os.path.join(settings_dir,
                                                   "camera_parameters.json")
        Logger.debug("Using default camera param file: {}".format(
            self.camera_param_file_name))
        tg.settings.set_data_dir(
            "Z:/SE/SEI/Servizi Civili/Del Don Carlo/termografia/")
Example #15
    def __init__(self, image_path: str, mode: Modality = Modality.DEFAULT):
        """Initializes and loads the image associated to the file indicated by the path passed as argument.

        :param image_path: Absolute path to the image file to be loaded.
        :param mode: Modality to be used when loading the image.
        """
        Logger.debug("Loading image at {}".format(image_path))
        self.image_path = image_path
        self.mode = mode
        self.image_raw = cv2.imread(self.image_path, self.mode)
Example #16
 def __set_logo_icon(self):
     """Sets the default logo icon."""
     gui_path = os.path.join(
         os.path.join(tg.settings.get_thermography_root_dir(), os.pardir),
         "gui")
     logo_path = os.path.join(gui_path, "img/logo.png")
     Logger.debug("Setting logo {}".format(logo_path))
     icon = QtGui.QIcon()
     icon.addPixmap(QtGui.QPixmap(logo_path), QtGui.QIcon.Normal,
                    QtGui.QIcon.Off)
     self.setWindowIcon(icon)
Example #17
 def __connect_thermo_thread(self):
     """Connects the signals emitted by the
     :class:`~gui.threads.thermo_thread_dataset_creation.ThermoDatasetCreationThread` to the functions which must be
     executed when receiving those signals.
     """
     Logger.debug("Connecting thermo thread")
     self.thermo_thread.last_frame_signal.connect(
         lambda x: self.__store_last_frame_image(x))
     self.thermo_thread.module_list_signal.connect(
         lambda x: self.__display_all_modules(x))
     Logger.debug("Thermo thread connected")
Example #18
    def __init__(self, camera_path: str):
        """Loads the camera parameters into the object.

        :param camera_path: Absolute path to the camera parameter file.
        """

        self.camera_path = camera_path

        with open(self.camera_path) as param_file:
            self.camera_params = json.load(param_file)

        Logger.debug("Camera parameter file is: \n{}".format(str(self)))
Example #19
 def camera_path(self, path: str):
     if not os.path.exists(path):
         Logger.fatal("Camera config file {} not found".format(
             self.camera_path))
         raise FileNotFoundError("Camera config file {} not found".format(
             self.camera_path))
     if not path.endswith("json"):
         Logger.fatal("Can only parse '.json' files")
         raise ValueError(
             "Can only parse '.json' files, passed camera file is {}".
             format(path))
     self.__camera_path = path
Example #20
    def __play_all_frames(self):
        Logger.debug("Playing all frames")
        self.thermo_thread.is_paused = False
        self.image_scaling_slider.setEnabled(False)
        self.__update_image_scaling()

        self.image_scaling_label.setText("Input image scaling: {:0.2f}".format(
            self.thermo_thread.app.preprocessing_parameters.image_scaling))
        self.play_video_button.setEnabled(False)
        self.pause_video_button.setEnabled(True)
        if self.is_stoppable:
            self.stop_video_button.setEnabled(True)
        self.thermo_thread.start()
Example #21
    def __init__(self):
        """Initializes the ThermoThread for dataset creation.
        """
        super(self.__class__, self).__init__()
        Logger.info("Created dataset creation ThermoThread")
        self.camera_param_file_name = None

        self.__load_default_paths()

        self.app = tg.App(input_video_path=None, camera_param_file=self.camera_param_file_name)

        self.processing_frame = None
        self.processing_frame_id = None
Example #22
 def detect(self) -> None:
     """Detects the rectangles from the input intersections.
     """
     Logger.debug("Detecting rectangles")
     # Iterate over each pair of clusters.
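     # Assuming self.intersections holds one entry per unordered cluster pair (i, j), its length is
     # n * (n - 1) / 2 for n clusters; the next line inverts that triangular number to recover
     # n = (sqrt(8 * len + 1) + 1) / 2.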
     num_clusters = int((np.sqrt(8 * len(self.intersections) + 1) + 1) / 2)
     for cluster_index_i in range(num_clusters):
         for cluster_index_j in range(cluster_index_i + 1, num_clusters):
             if (cluster_index_i, cluster_index_j) in self.intersections:
                 Logger.debug(
                     "Detecting rectangles between cluster {} and cluster {}"
                     .format(cluster_index_i, cluster_index_j))
                 self.__detect_rectangles_between_clusters(
                     cluster_index_i, cluster_index_j)
Example #23
    def run(self):
        """Function executed when this thread is launched.

        This function chooses whether to process the video loaded from disk or to use the images taken as input from
        the webcam.
        The sequence of images is processed by the encapsulated :class:`~thermography.thermo_app.ThermoApp`."""
        if self.should_use_webcam:
            frame_id = 0
            while True:
                while self.is_paused:
                    self.msleep(self.pause_time)

                ret, frame = self.cap.read()
                if ret:
                    Logger.debug("Using webcam frame {}".format(frame_id))
                    self.app.step(frame_id, frame)

                    self.last_frame_signal.emit(self.app.last_scaled_frame_rgb)
                    self.edge_frame_signal.emit(self.app.last_edges_frame)
                    self.segment_frame_signal.emit(self.app.create_segment_image())
                    self.rectangle_frame_signal.emit(self.app.create_rectangle_image())
                    self.module_map_frame_signal.emit(self.app.create_module_map_image())
                    frame_id += 1

                    self.app.reset()
        else:
            for frame_id, frame in enumerate(self.app.frames):
                while self.is_paused:
                    self.msleep(self.pause_time)

                Logger.debug("Using video frame {}".format(frame_id))
                # Perform one step in the input video (i.e. analyze one frame)
                self.app.step(frame_id, frame)
                # Perform inference (classification on the detected modules)
                self.app.classify_detected_modules()

                self.last_frame_signal.emit(self.app.last_scaled_frame_rgb)
                self.attention_frame_signal.emit(self.app.last_attention_image)
                self.edge_frame_signal.emit(self.app.last_edges_frame)
                self.segment_frame_signal.emit(self.app.create_segment_image())
                self.rectangle_frame_signal.emit(self.app.create_rectangle_image())
                self.module_map_frame_signal.emit(self.app.create_module_map_image())
                self.classes_frame_signal.emit(self.app.create_classes_image())
                self.iteration_signal.emit(frame_id)
                self.module_list_signal.emit(self.app.create_module_list())

                self.app.reset()

        self.finish_signal.emit(True)
Example #24
    def classify(self, image_list: list) -> np.ndarray:
        """Classifies the image list passed as argument using the model loaded in :attr:`self.model`.

        :param image_list: Python list of numpy arrays representing the images to be classified. All images are classified as a mini-batch.
        :return: A numpy array of shape `[len(image_list), self.num_classes]` containing the class probability for each image passed as argument.

        .. note:: If the images contained in the input parameter are not of the same shape as the one stored in :attr:`self.image_shape`, the input images are resized to fit the desired image shape.
        """
        if len(image_list) == 0:
            return np.empty(shape=[0])

        img_tensor = []
        for img in image_list:
            if (img.shape[0:2] != self.image_shape[0:2]).any():
                shape = img.shape
                img = img.astype(np.float32)
                Logger.warning(
                    "Image is of size {}, should be {}, resizing".format(
                        shape, self.image_shape))
                img = cv2.resize(img,
                                 (self.image_shape[1], self.image_shape[0]),
                                 interpolation=cv2.INTER_AREA)
            if img.shape[2] != self.image_shape[2]:
                if self.image_shape[2] == 1:
                    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                elif self.image_shape[2] == 3:
                    img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)

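            # Standardize each image individually (zero mean, unit variance per channel) before classification.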
            img_mean = np.mean(img, axis=(0, 1))
            img_std = np.std(img, axis=(0, 1))
            img = (img - img_mean) / img_std

            img_tensor.append(img)

        img_tensor = np.array(img_tensor)

        if len(img_tensor.shape) == 3:
            img_tensor = img_tensor[..., np.newaxis]

        Logger.debug("Classifying {} module image{}".format(
            img_tensor.shape[0], "" if img_tensor.shape[0] == 1 else "s"))

        class_probabilities = self.sess.run(self.probabilities,
                                            feed_dict={
                                                self.x: img_tensor,
                                                self.keep_probability: 1.0
                                            })
        return class_probabilities
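
A hypothetical call sketch for classify(); the inference variable and the image file names are illustrative assumptions, only the classify() signature comes from the listing above:

import cv2
import numpy as np

# Hypothetical usage: assumes `inference` is an already constructed instance of the class defining classify().
crops = [cv2.imread("module_crop_0.jpg"), cv2.imread("module_crop_1.jpg")]
probabilities = inference.classify(crops)              # shape: [len(crops), num_classes]
predicted_classes = np.argmax(probabilities, axis=1)   # most likely class per module crop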
Example #25
    def __video_loader_finished(self, frame_list: list):
        """Function called when the :class:`.VideoLoaderThread` thread finishes its execution.

        :param frame_list: Python list containing the video frames loaded by the video loader thread.
        """
        Logger.debug("Video loader finished")
        self.frames = frame_list.copy()
        self.global_progress_bar.setMinimum(0)
        self.global_progress_bar.setMaximum(len(self.frames) - 1)
        Logger.debug("Loaded {} frames".format(len(self.frames)))

        self.play_video_button.setEnabled(True)
        self.module_working_button.setEnabled(True)
        self.module_broken_button.setEnabled(True)
        self.misdetection_button.setEnabled(True)
Example #26
    def __init__(self, parent=None):
        super(self.__class__, self).__init__(parent=parent)
        Logger.debug("Opened About dialog")
        self.setWindowTitle("Thermography - About")
        self.setTextFormat(QtCore.Qt.RichText)
        gui_directory = os.path.join(
            os.path.join(tg.settings.get_thermography_root_dir(), os.pardir),
            "gui")
        rich_text_path = os.path.join(gui_directory,
                                      "about/about_rich_text.html")
        logo_path = os.path.join(gui_directory, "img/logo_small.png")
        with open(rich_text_path, 'r') as rich_text_file:
            text = rich_text_file.read()
            self.setText(text.format(logo_path=logo_path))

        self.__set_logo_icon()
Example #27
            def save_modules_into_directory(module_dict: dict, directory: str):
                global overall_iter

                os.mkdir(os.path.abspath(directory))
                for module_number, (module_id, registered_modules) in enumerate(module_dict.items()):
                    Logger.debug("Saving all views of module ID {}: view {}/{}".format(module_id, module_number,
                                                                                       len(module_dict.keys()) - 1))
                    self.progress_bar_all_frames.setValue(self.progress_bar_all_frames.value() + 1)
                    self.progress_bar_intra_frame.setValue(0)
                    self.progress_bar_intra_frame.setMaximum(len(registered_modules))
                    for m_index, m in enumerate(registered_modules):
                        name = "id_{0:05d}_frame_{1:05d}.jpg".format(module_id, m["frame_id"])
                        path = os.path.join(directory, name)
                        img = cv2.cvtColor(m["image"], cv2.COLOR_RGB2BGR)
                        cv2.imwrite(path, img)
                        self.progress_bar_intra_frame.setValue(m_index + 1)
Example #28
    def __init__(self):
        """Initializes the GUI and connects its widgets to the corresponding functions."""
        super(self.__class__, self).__init__()
        Logger.info("Creating themoGUI")
        self.setupUi(self)
        self.__set_logo_icon()
        self.thermo_thread = ThermoGuiThread()
        self.is_stoppable = True

        self.last_folder_opened = None

        self.__connect_widgets()
        self.__connect_thermo_thread()

        self.capture = None
        self.webcam_port = None
Example #29
    def detect(self):
        """Detects the intersections between the segments passed to the constructor using the parameters passed to the
        constructor.

        .. note:: The intersections are only computed between segments belonging to different clusters, and never between segments of the same cluster.
        """
        Logger.debug("Detecting intersection")
        self.cluster_cluster_intersections = {}
        self.raw_intersections = []
        num_clusters = len(self.segments)
        for cluster_index_i in range(num_clusters):
            for cluster_index_j in range(cluster_index_i + 1, num_clusters):
                print(
                    "Detecting intersections between cluster {} and cluster {}"
                    .format(cluster_index_i, cluster_index_j))
                self.__detect_intersections_between_clusters(
                    cluster_index_i, cluster_index_j)
Example #30
    def create_module_list(self):
        Logger.debug("Creating module list")
        module_list = []
        module_width = 90
        module_height = 66
        padding = 15
        image_width = module_width + 2 * padding
        image_height = module_height + 2 * padding
        module_image_size = (image_width, image_height)

        for rectangle_id, rectangle in self.module_map.global_module_map.items(
        ):
            # Only iterate over the last detected rectangles.
            if rectangle.frame_id_history[-1] != self.last_frame_id:
                continue

            module_coordinates = rectangle.last_rectangle
            module_aspect_ratio = aspect_ratio(module_coordinates)
            is_horizontal = module_aspect_ratio >= 1.0
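            # The two corner orderings below map the module into the same landscape patch: vertically oriented
            # modules are rotated by 90 degrees (assuming a consistent corner ordering in last_rectangle), so the
            # extracted image always has the module's long side horizontal.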
            if is_horizontal:
                projection_rectangle = np.float32(
                    [[0 + padding, 0 + padding],
                     [image_width - 1 - padding, 0 + padding],
                     [image_width - 1 - padding, image_height - 1 - padding],
                     [0 + padding, image_height - 1 - padding]])
            else:
                projection_rectangle = np.float32(
                    [[0 + padding, image_height - 1 - padding],
                     [0 + padding, 0 + padding],
                     [image_width - 1 - padding, 0 + padding],
                     [image_width - 1 - padding, image_height - 1 - padding]])

            transformation_matrix = cv2.getPerspectiveTransform(
                np.float32(module_coordinates), projection_rectangle)
            extracted = cv2.warpPerspective(self.last_scaled_frame_rgb,
                                            transformation_matrix,
                                            module_image_size)

            module_list.append({
                "coordinates": rectangle.last_rectangle,
                "image": extracted,
                "id": rectangle.ID
            })

        return module_list
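
A hypothetical consumer of the returned list; the app variable is an assumption, while the dictionary keys match the ones built above:

# Hypothetical usage: assumes `app` is a ThermoApp instance that has already processed at least one frame.
for module in app.create_module_list():
    module_image = module["image"]          # rectified RGB patch of size module_image_size
    module_corners = module["coordinates"]  # corners of the detected rectangle in the frame
    print("Module {}: patch shape {}".format(module["id"], module_image.shape))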